| 1 | /* |
| 2 | * Copyright (c) 2007 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. Please obtain a copy of the License at |
| 10 | * http://www.opensource.apple.com/apsl/ and read it before using this |
| 11 | * file. |
| 12 | * |
| 13 | * The Original Code and all software distributed under the License are |
| 14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 18 | * Please see the License for the specific language governing rights and |
| 19 | * limitations under the License. |
| 20 | * |
| 21 | * @APPLE_LICENSE_HEADER_END@ |
| 22 | */ |
| 23 | |
| 24 | /* |
| 25 | * Shared region (... and comm page) |
| 26 | * |
| 27 | * This file handles the VM shared region and comm page. |
| 28 | * |
| 29 | */ |
| 30 | /* |
| 31 | * SHARED REGIONS |
| 32 | * -------------- |
| 33 | * |
| 34 | * A shared region is a submap that contains the most common system shared |
| 35 | * libraries for a given environment. |
| 36 | * An environment is defined by (cpu-type, 64-bitness, root directory). |
| 37 | * |
| 38 | * The point of a shared region is to reduce the setup overhead when exec'ing |
| 39 | * a new process. |
| 40 | * A shared region uses a shared VM submap that gets mapped automatically |
| 41 | * at exec() time (see vm_map_exec()). The first process of a given |
| 42 | * environment sets up the shared region and all further processes in that |
| 43 | * environment can re-use that shared region without having to re-create |
| 44 | * the same mappings in their VM map. All they need is contained in the shared |
| 45 | * region. |
| 46 | * It can also share a pmap (mostly for read-only parts but also for the |
| 47 | * initial version of some writable parts), which gets "nested" into the |
| 48 | * process's pmap. This reduces the number of soft faults: once one process |
| 49 | * brings in a page in the shared region, all the other processes can access |
| 50 | * it without having to enter it in their own pmap. |
| 51 | * |
| 52 | * |
| 53 | * When a process is being exec'ed, vm_map_exec() calls vm_shared_region_enter() |
| 54 | * to map the appropriate shared region in the process's address space. |
| 55 | * We look up the appropriate shared region for the process's environment. |
| 56 | * If we can't find one, we create a new (empty) one and add it to the list. |
| 57 | * Otherwise, we just take an extra reference on the shared region we found. |
| 58 | * |
| 59 | * The "dyld" runtime (mapped into the process's address space at exec() time) |
| 60 | * will then use the shared_region_check_np() and shared_region_map_np() |
| 61 | * system calls to validate and/or populate the shared region with the |
| 62 | * appropriate dyld_shared_cache file. |
| 63 | * |
| 64 | * The shared region is inherited on fork() and the child simply takes an |
| 65 | * extra reference on its parent's shared region. |
| 66 | * |
| 67 | * When the task terminates, we release a reference on its shared region. |
| 68 | * When the last reference is released, we destroy the shared region. |
| 69 | * |
| 70 | * After a chroot(), the calling process keeps using its original shared region, |
| 71 | * since that's what was mapped when it was started. But its children |
| 72 | * will use a different shared region, because they need to use the shared |
| 73 | * cache that's relative to the new root directory. |
| 74 | */ |
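/*
 * Illustrative sketch (comment only, not compiled): the exec-time flow
 * described above, as seen from a caller of this module.  The real call
 * sequence lives in vm_map_exec()/vm_shared_region_enter(); the outline
 * below is a simplification, not the actual implementation.
 *
 *	vm_shared_region_t sr;
 *
 *	sr = vm_shared_region_lookup(root_dir, cpu, cpu_subtype, is_64bit);
 *	                                  (returns with an extra reference)
 *	... map sr's submap at vm_shared_region_base_address(sr) in the
 *	    new task's VM map ...
 *	vm_shared_region_set(task, sr);   (the task now owns that reference)
 */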
| 75 | /* |
| 76 | * COMM PAGE |
| 77 | * |
| 78 | * A "comm page" is an area of memory that is populated by the kernel with |
| 79 | * the appropriate platform-specific version of some commonly used code. |
| 80 | * There is one "comm page" per platform (cpu-type, 64-bitness) but only |
| 81 | * for the native cpu-type. No need to overly optimize translated code |
| 82 | * for hardware that is not really there ! |
| 83 | * |
| 84 | * The comm pages are created and populated at boot time. |
| 85 | * |
| 86 | * The appropriate comm page is mapped into a process's address space |
| 87 | * at exec() time, in vm_map_exec(). |
| 88 | * It is then inherited on fork(). |
| 89 | * |
| 90 | * The comm page is shared between the kernel and all applications of |
| 91 | * a given platform. Only the kernel can modify it. |
| 92 | * |
| 93 | * Applications just branch to fixed addresses in the comm page and find |
| 94 | * the right version of the code for the platform. There is also some |
| 95 | * data provided and updated by the kernel for processes to retrieve easily |
| 96 | * without having to do a system call. |
| 97 | */ |
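/*
 * Illustrative sketch (comment only, not compiled): how user-space code
 * typically consumes comm page data without a system call, by reading a
 * fixed address published in <machine/cpu_capabilities.h>.  The specific
 * field below is only an example; which fields exist depends on the
 * platform.
 *
 *	#include <machine/cpu_capabilities.h>
 *
 *	uint8_t ncpus = *(volatile uint8_t *)_COMM_PAGE_NCPUS;
 */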
| 98 | |
| 99 | #include <debug.h> |
| 100 | |
| 101 | #include <kern/ipc_tt.h> |
| 102 | #include <kern/kalloc.h> |
| 103 | #include <kern/thread_call.h> |
| 104 | |
| 105 | #include <mach/mach_vm.h> |
| 106 | |
| 107 | #include <vm/vm_map.h> |
| 108 | #include <vm/vm_shared_region.h> |
| 109 | |
| 110 | #include <vm/vm_protos.h> |
| 111 | |
| 112 | #include <machine/commpage.h> |
| 113 | #include <machine/cpu_capabilities.h> |
| 114 | |
| 115 | #if defined (__arm__) || defined(__arm64__) |
| 116 | #include <arm/cpu_data_internal.h> |
| 117 | #endif |
| 118 | |
| 119 | /* |
| 120 | * the following codes are used in the subclass |
| 121 | * of the DBG_MACH_SHAREDREGION class |
| 122 | */ |
| 123 | #define PROCESS_SHARED_CACHE_LAYOUT 0x00 |
| 124 | |
| 125 | |
| 126 | /* "dyld" uses this to figure out what the kernel supports */ |
| 127 | int shared_region_version = 3; |
| 128 | |
| 129 | /* trace level, output is sent to the system log file */ |
| 130 | int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL; |
| 131 | |
| 132 | /* should local (non-chroot) shared regions persist when no task uses them ? */ |
| 133 | int shared_region_persistence = 0; /* no by default */ |
| 134 | |
| 135 | /* delay before reclaiming an unused shared region */ |
| 136 | int shared_region_destroy_delay = 120; /* in seconds */ |
| 137 | |
| 138 | struct vm_shared_region *init_task_shared_region = NULL; |
| 139 | |
| 140 | #ifndef CONFIG_EMBEDDED |
| 141 | /* |
| 142 | * Only one cache gets to slide on Desktop, since we can't |
| 143 | * tear down slide info properly today and the desktop actually |
| 144 | * produces lots of shared caches. |
| 145 | */ |
| 146 | boolean_t shared_region_completed_slide = FALSE; |
| 147 | #endif |
| 148 | |
| 149 | /* this lock protects all the shared region data structures */ |
| 150 | lck_grp_t *vm_shared_region_lck_grp; |
| 151 | lck_mtx_t vm_shared_region_lock; |
| 152 | |
| 153 | #define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock) |
| 154 | #define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock) |
| 155 | #define vm_shared_region_sleep(event, interruptible) \ |
| 156 | lck_mtx_sleep(&vm_shared_region_lock, \ |
| 157 | LCK_SLEEP_DEFAULT, \ |
| 158 | (event_t) (event), \ |
| 159 | (interruptible)) |
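/*
 * The sleep macro above pairs with thread_wakeup() to serialize work on a
 * shared region.  Sketch of the pattern used further down with
 * "sr_mapping_in_progress" (see vm_shared_region_map_file()):
 *
 *	vm_shared_region_lock();
 *	while (shared_region->sr_mapping_in_progress) {
 *		vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
 *				       THREAD_UNINT);
 *	}
 *	shared_region->sr_mapping_in_progress = TRUE;
 *	vm_shared_region_unlock();
 *	... do the work without holding the lock ...
 *	vm_shared_region_lock();
 *	shared_region->sr_mapping_in_progress = FALSE;
 *	thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
 *	vm_shared_region_unlock();
 */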
| 160 | |
| 161 | /* the list of currently available shared regions (one per environment) */ |
| 162 | queue_head_t vm_shared_region_queue; |
| 163 | |
| 164 | static void vm_shared_region_reference_locked(vm_shared_region_t shared_region); |
| 165 | static vm_shared_region_t vm_shared_region_create( |
| 166 | void *root_dir, |
| 167 | cpu_type_t cputype, |
| 168 | cpu_subtype_t cpu_subtype, |
| 169 | boolean_t is_64bit); |
| 170 | static void vm_shared_region_destroy(vm_shared_region_t shared_region); |
| 171 | |
| 172 | static void vm_shared_region_timeout(thread_call_param_t param0, |
| 173 | thread_call_param_t param1); |
| 174 | kern_return_t vm_shared_region_slide_mapping( |
| 175 | vm_shared_region_t sr, |
| 176 | mach_vm_size_t slide_info_size, |
| 177 | mach_vm_offset_t start, |
| 178 | mach_vm_size_t size, |
| 179 | mach_vm_offset_t slid_mapping, |
| 180 | uint32_t slide, |
| 181 | memory_object_control_t); /* forward */ |
| 182 | |
| 183 | static int __commpage_setup = 0; |
| 184 | #if defined(__i386__) || defined(__x86_64__) |
| 185 | static int __system_power_source = 1; /* init to external power source */ |
| 186 | static void post_sys_powersource_internal(int i, int internal); |
| 187 | #endif /* __i386__ || __x86_64__ */ |
| 188 | |
| 189 | |
| 190 | /* |
| 191 | * Initialize the module... |
| 192 | */ |
| 193 | void |
| 194 | vm_shared_region_init(void) |
| 195 | { |
| 196 | SHARED_REGION_TRACE_DEBUG( |
| 197 | ("shared_region: -> init\n" )); |
| 198 | |
| 199 | vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region" , |
| 200 | LCK_GRP_ATTR_NULL); |
| 201 | lck_mtx_init(&vm_shared_region_lock, |
| 202 | vm_shared_region_lck_grp, |
| 203 | LCK_ATTR_NULL); |
| 204 | |
| 205 | queue_init(&vm_shared_region_queue); |
| 206 | |
| 207 | SHARED_REGION_TRACE_DEBUG( |
| 208 | ("shared_region: <- init\n" )); |
| 209 | } |
| 210 | |
| 211 | /* |
| 212 | * Retrieve a task's shared region and grab an extra reference to |
| 213 | * make sure it doesn't disappear while the caller is using it. |
| 214 | * The caller is responsible for consuming that extra reference if |
| 215 | * necessary. |
| 216 | */ |
| 217 | vm_shared_region_t |
| 218 | vm_shared_region_get( |
| 219 | task_t task) |
| 220 | { |
| 221 | vm_shared_region_t shared_region; |
| 222 | |
| 223 | SHARED_REGION_TRACE_DEBUG( |
| 224 | ("shared_region: -> get(%p)\n" , |
| 225 | (void *)VM_KERNEL_ADDRPERM(task))); |
| 226 | |
| 227 | task_lock(task); |
| 228 | vm_shared_region_lock(); |
| 229 | shared_region = task->shared_region; |
| 230 | if (shared_region) { |
| 231 | assert(shared_region->sr_ref_count > 0); |
| 232 | vm_shared_region_reference_locked(shared_region); |
| 233 | } |
| 234 | vm_shared_region_unlock(); |
| 235 | task_unlock(task); |
| 236 | |
| 237 | SHARED_REGION_TRACE_DEBUG( |
| 238 | ("shared_region: get(%p) <- %p\n" , |
| 239 | (void *)VM_KERNEL_ADDRPERM(task), |
| 240 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 241 | |
| 242 | return shared_region; |
| 243 | } |
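/*
 * Typical caller pattern (sketch): pair vm_shared_region_get() with
 * vm_shared_region_deallocate() to bracket any use of the region.
 *
 *	vm_shared_region_t sr;
 *
 *	sr = vm_shared_region_get(task);
 *	if (sr != NULL) {
 *		... use sr while holding the extra reference ...
 *		vm_shared_region_deallocate(sr);
 *	}
 */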
| 244 | |
| 245 | /* |
| 246 | * Get the base address of the shared region. |
| 247 | * That's the address at which it needs to be mapped in the process's address |
| 248 | * space. |
| 249 | * No need to lock since this data is set when the shared region is |
| 250 | * created and is never modified after that. The caller must hold an extra |
| 251 | * reference on the shared region to prevent it from being destroyed. |
| 252 | */ |
| 253 | mach_vm_offset_t |
| 254 | vm_shared_region_base_address( |
| 255 | vm_shared_region_t shared_region) |
| 256 | { |
| 257 | SHARED_REGION_TRACE_DEBUG( |
| 258 | ("shared_region: -> base_address(%p)\n" , |
| 259 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 260 | assert(shared_region->sr_ref_count > 1); |
| 261 | SHARED_REGION_TRACE_DEBUG( |
| 262 | ("shared_region: base_address(%p) <- 0x%llx\n" , |
| 263 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 264 | (long long)shared_region->sr_base_address)); |
| 265 | return shared_region->sr_base_address; |
| 266 | } |
| 267 | |
| 268 | /* |
| 269 | * Get the size of the shared region. |
| 270 | * That's the size that needs to be mapped in the process's address |
| 271 | * space. |
| 272 | * No need to lock since this data is set when the shared region is |
| 273 | * created and is never modified after that. The caller must hold an extra |
| 274 | * reference on the shared region to prevent it from being destroyed. |
| 275 | */ |
| 276 | mach_vm_size_t |
| 277 | vm_shared_region_size( |
| 278 | vm_shared_region_t shared_region) |
| 279 | { |
| 280 | SHARED_REGION_TRACE_DEBUG( |
| 281 | ("shared_region: -> size(%p)\n" , |
| 282 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 283 | assert(shared_region->sr_ref_count > 1); |
| 284 | SHARED_REGION_TRACE_DEBUG( |
| 285 | ("shared_region: size(%p) <- 0x%llx\n" , |
| 286 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 287 | (long long)shared_region->sr_size)); |
| 288 | return shared_region->sr_size; |
| 289 | } |
| 290 | |
| 291 | /* |
| 292 | * Get the memory entry of the shared region. |
| 293 | * That's the "memory object" that needs to be mapped in the process's address |
| 294 | * space. |
| 295 | * No need to lock since this data is set when the shared region is |
| 296 | * created and is never modified after that. The caller must hold an extra |
| 297 | * reference on the shared region to prevent it from being destroyed. |
| 298 | */ |
| 299 | ipc_port_t |
| 300 | vm_shared_region_mem_entry( |
| 301 | vm_shared_region_t shared_region) |
| 302 | { |
| 303 | SHARED_REGION_TRACE_DEBUG( |
| 304 | ("shared_region: -> mem_entry(%p)\n" , |
| 305 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 306 | assert(shared_region->sr_ref_count > 1); |
| 307 | SHARED_REGION_TRACE_DEBUG( |
| 308 | ("shared_region: mem_entry(%p) <- %p\n" , |
| 309 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 310 | (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry))); |
| 311 | return shared_region->sr_mem_entry; |
| 312 | } |
| 313 | |
| 314 | vm_map_t |
| 315 | vm_shared_region_vm_map( |
| 316 | vm_shared_region_t shared_region) |
| 317 | { |
| 318 | ipc_port_t sr_handle; |
| 319 | vm_named_entry_t sr_mem_entry; |
| 320 | vm_map_t sr_map; |
| 321 | |
| 322 | SHARED_REGION_TRACE_DEBUG( |
| 323 | ("shared_region: -> vm_map(%p)\n" , |
| 324 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 325 | assert(shared_region->sr_ref_count > 1); |
| 326 | |
| 327 | sr_handle = shared_region->sr_mem_entry; |
| 328 | sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject; |
| 329 | sr_map = sr_mem_entry->backing.map; |
| 330 | assert(sr_mem_entry->is_sub_map); |
| 331 | |
| 332 | SHARED_REGION_TRACE_DEBUG( |
| 333 | ("shared_region: vm_map(%p) <- %p\n" , |
| 334 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 335 | (void *)VM_KERNEL_ADDRPERM(sr_map))); |
| 336 | return sr_map; |
| 337 | } |
| 338 | uint32_t |
| 339 | vm_shared_region_get_slide( |
| 340 | vm_shared_region_t shared_region) |
| 341 | { |
| 342 | SHARED_REGION_TRACE_DEBUG( |
| 343 | ("shared_region: -> vm_shared_region_get_slide(%p)\n" , |
| 344 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 345 | assert(shared_region->sr_ref_count > 1); |
| 346 | SHARED_REGION_TRACE_DEBUG( |
| 347 | ("shared_region: vm_shared_region_get_slide(%p) <- %u\n" , |
| 348 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 349 | shared_region->sr_slide_info.slide)); |
| 350 | |
| 351 | /* 0 if we haven't slid */ |
| 352 | assert(shared_region->sr_slide_info.slide_object != NULL || |
| 353 | shared_region->sr_slide_info.slide == 0); |
| 354 | |
| 355 | return shared_region->sr_slide_info.slide; |
| 356 | } |
| 357 | |
| 358 | vm_shared_region_slide_info_t |
| 359 | vm_shared_region_get_slide_info( |
| 360 | vm_shared_region_t shared_region) |
| 361 | { |
| 362 | SHARED_REGION_TRACE_DEBUG( |
| 363 | ("shared_region: -> vm_shared_region_get_slide_info(%p)\n" , |
| 364 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 365 | assert(shared_region->sr_ref_count > 1); |
| 366 | SHARED_REGION_TRACE_DEBUG( |
| 367 | ("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n" , |
| 368 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 369 | (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info))); |
| 370 | return &shared_region->sr_slide_info; |
| 371 | } |
| 372 | |
| 373 | /* |
| 374 | * Set the shared region the process should use. |
| 375 | * A NULL new shared region means that we just want to release the old |
| 376 | * shared region. |
| 377 | * The caller should already have an extra reference on the new shared region |
| 378 | * (if any). We release a reference on the old shared region (if any). |
| 379 | */ |
| 380 | void |
| 381 | vm_shared_region_set( |
| 382 | task_t task, |
| 383 | vm_shared_region_t new_shared_region) |
| 384 | { |
| 385 | vm_shared_region_t old_shared_region; |
| 386 | |
| 387 | SHARED_REGION_TRACE_DEBUG( |
| 388 | ("shared_region: -> set(%p, %p)\n" , |
| 389 | (void *)VM_KERNEL_ADDRPERM(task), |
| 390 | (void *)VM_KERNEL_ADDRPERM(new_shared_region))); |
| 391 | |
| 392 | task_lock(task); |
| 393 | vm_shared_region_lock(); |
| 394 | |
| 395 | old_shared_region = task->shared_region; |
| 396 | if (new_shared_region) { |
| 397 | assert(new_shared_region->sr_ref_count > 0); |
| 398 | } |
| 399 | |
| 400 | task->shared_region = new_shared_region; |
| 401 | |
| 402 | vm_shared_region_unlock(); |
| 403 | task_unlock(task); |
| 404 | |
| 405 | if (old_shared_region) { |
| 406 | assert(old_shared_region->sr_ref_count > 0); |
| 407 | vm_shared_region_deallocate(old_shared_region); |
| 408 | } |
| 409 | |
| 410 | SHARED_REGION_TRACE_DEBUG( |
| 411 | ("shared_region: set(%p) <- old=%p new=%p\n" , |
| 412 | (void *)VM_KERNEL_ADDRPERM(task), |
| 413 | (void *)VM_KERNEL_ADDRPERM(old_shared_region), |
| 414 | (void *)VM_KERNEL_ADDRPERM(new_shared_region))); |
| 415 | } |
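/*
 * Sketch of the two common uses of vm_shared_region_set() (see the comment
 * above and the lifecycle description at the top of this file):
 *
 *	exec:        vm_shared_region_set(task, new_sr);
 *	             where new_sr came from vm_shared_region_lookup(), whose
 *	             extra reference is transferred to the task.
 *
 *	termination: vm_shared_region_set(task, NULL);
 *	             which just drops the task's reference on its old region.
 */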
| 416 | |
| 417 | /* |
| 418 | * Look up the shared region for the desired environment. |
| 419 | * If none is found, create a new (empty) one. |
| 420 | * Grab an extra reference on the returned shared region, to make sure |
| 421 | * it doesn't get destroyed before the caller is done with it. The caller |
| 422 | * is responsible for consuming that extra reference if necessary. |
| 423 | */ |
| 424 | vm_shared_region_t |
| 425 | vm_shared_region_lookup( |
| 426 | void *root_dir, |
| 427 | cpu_type_t cputype, |
| 428 | cpu_subtype_t cpu_subtype, |
| 429 | boolean_t is_64bit) |
| 430 | { |
| 431 | vm_shared_region_t shared_region; |
| 432 | vm_shared_region_t new_shared_region; |
| 433 | |
| 434 | SHARED_REGION_TRACE_DEBUG( |
| 435 | ("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d)\n" , |
| 437 | (void *)VM_KERNEL_ADDRPERM(root_dir), |
| 438 | cputype, cpu_subtype, is_64bit)); |
| 439 | |
| 440 | shared_region = NULL; |
| 441 | new_shared_region = NULL; |
| 442 | |
| 443 | vm_shared_region_lock(); |
| 444 | for (;;) { |
| 445 | queue_iterate(&vm_shared_region_queue, |
| 446 | shared_region, |
| 447 | vm_shared_region_t, |
| 448 | sr_q) { |
| 449 | assert(shared_region->sr_ref_count > 0); |
| 450 | if (shared_region->sr_cpu_type == cputype && |
| 451 | shared_region->sr_cpu_subtype == cpu_subtype && |
| 452 | shared_region->sr_root_dir == root_dir && |
| 453 | shared_region->sr_64bit == is_64bit) { |
| 454 | /* found a match ! */ |
| 455 | vm_shared_region_reference_locked(shared_region); |
| 456 | goto done; |
| 457 | } |
| 458 | } |
| 459 | if (new_shared_region == NULL) { |
| 460 | /* no match: create a new one */ |
| 461 | vm_shared_region_unlock(); |
| 462 | new_shared_region = vm_shared_region_create(root_dir, |
| 463 | cputype, |
| 464 | cpu_subtype, |
| 465 | is_64bit); |
| 466 | /* do the lookup again, in case we lost a race */ |
| 467 | vm_shared_region_lock(); |
| 468 | continue; |
| 469 | } |
| 470 | /* still no match: use our new one */ |
| 471 | shared_region = new_shared_region; |
| 472 | new_shared_region = NULL; |
| 473 | queue_enter(&vm_shared_region_queue, |
| 474 | shared_region, |
| 475 | vm_shared_region_t, |
| 476 | sr_q); |
| 477 | break; |
| 478 | } |
| 479 | |
| 480 | done: |
| 481 | vm_shared_region_unlock(); |
| 482 | |
| 483 | if (new_shared_region) { |
| 484 | /* |
| 485 | * We lost a race with someone else to create a new shared |
| 486 | * region for that environment. Get rid of our unused one. |
| 487 | */ |
| 488 | assert(new_shared_region->sr_ref_count == 1); |
| 489 | new_shared_region->sr_ref_count--; |
| 490 | vm_shared_region_destroy(new_shared_region); |
| 491 | new_shared_region = NULL; |
| 492 | } |
| 493 | |
| 494 | SHARED_REGION_TRACE_DEBUG( |
| 495 | ("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d) <- %p\n" , |
| 496 | (void *)VM_KERNEL_ADDRPERM(root_dir), |
| 497 | cputype, cpu_subtype, is_64bit, |
| 498 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 499 | |
| 500 | assert(shared_region->sr_ref_count > 0); |
| 501 | return shared_region; |
| 502 | } |
| 503 | |
| 504 | /* |
| 505 | * Take an extra reference on a shared region. |
| 506 | * The vm_shared_region_lock should already be held by the caller. |
| 507 | */ |
| 508 | static void |
| 509 | vm_shared_region_reference_locked( |
| 510 | vm_shared_region_t shared_region) |
| 511 | { |
| 512 | LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED); |
| 513 | |
| 514 | SHARED_REGION_TRACE_DEBUG( |
| 515 | ("shared_region: -> reference_locked(%p)\n" , |
| 516 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 517 | assert(shared_region->sr_ref_count > 0); |
| 518 | shared_region->sr_ref_count++; |
| 519 | |
| 520 | if (shared_region->sr_timer_call != NULL) { |
| 521 | boolean_t cancelled; |
| 522 | |
| 523 | /* cancel and free any pending timeout */ |
| 524 | cancelled = thread_call_cancel(shared_region->sr_timer_call); |
| 525 | if (cancelled) { |
| 526 | thread_call_free(shared_region->sr_timer_call); |
| 527 | shared_region->sr_timer_call = NULL; |
| 528 | /* release the reference held by the cancelled timer */ |
| 529 | shared_region->sr_ref_count--; |
| 530 | } else { |
| 531 | /* the timer will drop the reference and free itself */ |
| 532 | } |
| 533 | } |
| 534 | |
| 535 | SHARED_REGION_TRACE_DEBUG( |
| 536 | ("shared_region: reference_locked(%p) <- %d\n" , |
| 537 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 538 | shared_region->sr_ref_count)); |
| 539 | } |
| 540 | |
| 541 | /* |
| 542 | * Release a reference on the shared region. |
| 543 | * Destroy it if there are no references left. |
| 544 | */ |
| 545 | void |
| 546 | vm_shared_region_deallocate( |
| 547 | vm_shared_region_t shared_region) |
| 548 | { |
| 549 | SHARED_REGION_TRACE_DEBUG( |
| 550 | ("shared_region: -> deallocate(%p)\n" , |
| 551 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 552 | |
| 553 | vm_shared_region_lock(); |
| 554 | |
| 555 | assert(shared_region->sr_ref_count > 0); |
| 556 | |
| 557 | if (shared_region->sr_root_dir == NULL) { |
| 558 | /* |
| 559 | * Local (i.e. based on the boot volume) shared regions |
| 560 | * can persist or not based on the "shared_region_persistence" |
| 561 | * sysctl. |
| 562 | * Make sure that this one complies. |
| 563 | * |
| 564 | * See comments in vm_shared_region_slide() for notes about |
| 565 | * shared regions we have slid (which are not torn down currently). |
| 566 | */ |
| 567 | if (shared_region_persistence && |
| 568 | !shared_region->sr_persists) { |
| 569 | /* make this one persistent */ |
| 570 | shared_region->sr_ref_count++; |
| 571 | shared_region->sr_persists = TRUE; |
| 572 | } else if (!shared_region_persistence && |
| 573 | shared_region->sr_persists) { |
| 574 | /* make this one no longer persistent */ |
| 575 | assert(shared_region->sr_ref_count > 1); |
| 576 | shared_region->sr_ref_count--; |
| 577 | shared_region->sr_persists = FALSE; |
| 578 | } |
| 579 | } |
| 580 | |
| 581 | assert(shared_region->sr_ref_count > 0); |
| 582 | shared_region->sr_ref_count--; |
| 583 | SHARED_REGION_TRACE_DEBUG( |
| 584 | ("shared_region: deallocate(%p): ref now %d\n" , |
| 585 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 586 | shared_region->sr_ref_count)); |
| 587 | |
| 588 | if (shared_region->sr_ref_count == 0) { |
| 589 | uint64_t deadline; |
| 590 | |
| 591 | assert(!shared_region->sr_slid); |
| 592 | |
| 593 | if (shared_region->sr_timer_call == NULL) { |
| 594 | /* hold one reference for the timer */ |
| 595 | assert(! shared_region->sr_mapping_in_progress); |
| 596 | shared_region->sr_ref_count++; |
| 597 | |
| 598 | /* set up the timer */ |
| 599 | shared_region->sr_timer_call = thread_call_allocate( |
| 600 | (thread_call_func_t) vm_shared_region_timeout, |
| 601 | (thread_call_param_t) shared_region); |
| 602 | |
| 603 | /* schedule the timer */ |
| 604 | clock_interval_to_deadline(shared_region_destroy_delay, |
| 605 | 1000 * 1000 * 1000, |
| 606 | &deadline); |
| 607 | thread_call_enter_delayed(shared_region->sr_timer_call, |
| 608 | deadline); |
| 609 | |
| 610 | SHARED_REGION_TRACE_DEBUG( |
| 611 | ("shared_region: deallocate(%p): armed timer\n" , |
| 612 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 613 | |
| 614 | vm_shared_region_unlock(); |
| 615 | } else { |
| 616 | /* timer expired: let go of this shared region */ |
| 617 | |
| 618 | /* |
| 619 | * We can't properly handle teardown of a slid object today. |
| 620 | */ |
| 621 | assert(!shared_region->sr_slid); |
| 622 | |
| 623 | /* |
| 624 | * Remove it from the queue first, so no one can find |
| 625 | * it... |
| 626 | */ |
| 627 | queue_remove(&vm_shared_region_queue, |
| 628 | shared_region, |
| 629 | vm_shared_region_t, |
| 630 | sr_q); |
| 631 | vm_shared_region_unlock(); |
| 632 | |
| 633 | /* ... and destroy it */ |
| 634 | vm_shared_region_destroy(shared_region); |
| 635 | shared_region = NULL; |
| 636 | } |
| 637 | } else { |
| 638 | vm_shared_region_unlock(); |
| 639 | } |
| 640 | |
| 641 | SHARED_REGION_TRACE_DEBUG( |
| 642 | ("shared_region: deallocate(%p) <-\n" , |
| 643 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 644 | } |
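/*
 * Lifecycle of the delayed-destroy timer armed above (sketch):
 *
 *	vm_shared_region_deallocate()       ref count reaches 0
 *	    -> allocate and arm sr_timer_call for shared_region_destroy_delay
 *	       seconds; ref count back to 1, held by the timer
 *	vm_shared_region_reference_locked() region reused before the timeout
 *	    -> cancel the timer and drop its reference
 *	vm_shared_region_timeout()          timeout fired
 *	    -> vm_shared_region_deallocate() again; ref count reaches 0 with
 *	       sr_timer_call already set, so the region is destroyed
 */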
| 645 | |
| 646 | void |
| 647 | vm_shared_region_timeout( |
| 648 | thread_call_param_t param0, |
| 649 | __unused thread_call_param_t param1) |
| 650 | { |
| 651 | vm_shared_region_t shared_region; |
| 652 | |
| 653 | shared_region = (vm_shared_region_t) param0; |
| 654 | |
| 655 | vm_shared_region_deallocate(shared_region); |
| 656 | } |
| 657 | |
| 658 | /* |
| 659 | * Create a new (empty) shared region for a new environment. |
| 660 | */ |
| 661 | static vm_shared_region_t |
| 662 | vm_shared_region_create( |
| 663 | void *root_dir, |
| 664 | cpu_type_t cputype, |
| 665 | cpu_subtype_t cpu_subtype, |
| 666 | boolean_t is_64bit) |
| 667 | { |
| 668 | kern_return_t kr; |
| 669 | vm_named_entry_t mem_entry; |
| 670 | ipc_port_t mem_entry_port; |
| 671 | vm_shared_region_t shared_region; |
| 672 | vm_shared_region_slide_info_t si; |
| 673 | vm_map_t sub_map; |
| 674 | mach_vm_offset_t base_address, pmap_nesting_start; |
| 675 | mach_vm_size_t size, pmap_nesting_size; |
| 676 | |
| 677 | SHARED_REGION_TRACE_INFO( |
| 678 | ("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d)\n" , |
| 679 | (void *)VM_KERNEL_ADDRPERM(root_dir), |
| 680 | cputype, cpu_subtype, is_64bit)); |
| 681 | |
| 682 | base_address = 0; |
| 683 | size = 0; |
| 684 | mem_entry = NULL; |
| 685 | mem_entry_port = IPC_PORT_NULL; |
| 686 | sub_map = VM_MAP_NULL; |
| 687 | |
| 688 | /* create a new shared region structure... */ |
| 689 | shared_region = kalloc(sizeof (*shared_region)); |
| 690 | if (shared_region == NULL) { |
| 691 | SHARED_REGION_TRACE_ERROR( |
| 692 | ("shared_region: create: couldn't allocate\n" )); |
| 693 | goto done; |
| 694 | } |
| 695 | |
| 696 | /* figure out the correct settings for the desired environment */ |
| 697 | if (is_64bit) { |
| 698 | switch (cputype) { |
| 699 | #if defined(__arm64__) |
| 700 | case CPU_TYPE_ARM64: |
| 701 | base_address = SHARED_REGION_BASE_ARM64; |
| 702 | size = SHARED_REGION_SIZE_ARM64; |
| 703 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM64; |
| 704 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM64; |
| 705 | break; |
| 706 | #elif !defined(__arm__) |
| 707 | case CPU_TYPE_I386: |
| 708 | base_address = SHARED_REGION_BASE_X86_64; |
| 709 | size = SHARED_REGION_SIZE_X86_64; |
| 710 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_X86_64; |
| 711 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_X86_64; |
| 712 | break; |
| 713 | case CPU_TYPE_POWERPC: |
| 714 | base_address = SHARED_REGION_BASE_PPC64; |
| 715 | size = SHARED_REGION_SIZE_PPC64; |
| 716 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC64; |
| 717 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC64; |
| 718 | break; |
| 719 | #endif |
| 720 | default: |
| 721 | SHARED_REGION_TRACE_ERROR( |
| 722 | ("shared_region: create: unknown cpu type %d\n" , |
| 723 | cputype)); |
| 724 | kfree(shared_region, sizeof (*shared_region)); |
| 725 | shared_region = NULL; |
| 726 | goto done; |
| 727 | } |
| 728 | } else { |
| 729 | switch (cputype) { |
| 730 | #if defined(__arm__) || defined(__arm64__) |
| 731 | case CPU_TYPE_ARM: |
| 732 | case CPU_TYPE_ARM64: |
| 733 | base_address = SHARED_REGION_BASE_ARM; |
| 734 | size = SHARED_REGION_SIZE_ARM; |
| 735 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_ARM; |
| 736 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_ARM; |
| 737 | break; |
| 738 | #else |
| 739 | case CPU_TYPE_I386: |
| 740 | base_address = SHARED_REGION_BASE_I386; |
| 741 | size = SHARED_REGION_SIZE_I386; |
| 742 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_I386; |
| 743 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_I386; |
| 744 | break; |
| 745 | case CPU_TYPE_POWERPC: |
| 746 | base_address = SHARED_REGION_BASE_PPC; |
| 747 | size = SHARED_REGION_SIZE_PPC; |
| 748 | pmap_nesting_start = SHARED_REGION_NESTING_BASE_PPC; |
| 749 | pmap_nesting_size = SHARED_REGION_NESTING_SIZE_PPC; |
| 750 | break; |
| 751 | #endif |
| 752 | default: |
| 753 | SHARED_REGION_TRACE_ERROR( |
| 754 | ("shared_region: create: unknown cpu type %d\n" , |
| 755 | cputype)); |
| 756 | kfree(shared_region, sizeof (*shared_region)); |
| 757 | shared_region = NULL; |
| 758 | goto done; |
| 759 | } |
| 760 | } |
| 761 | |
| 762 | /* create a memory entry structure and a Mach port handle */ |
| 763 | kr = mach_memory_entry_allocate(&mem_entry, |
| 764 | &mem_entry_port); |
| 765 | if (kr != KERN_SUCCESS) { |
| 766 | kfree(shared_region, sizeof (*shared_region)); |
| 767 | shared_region = NULL; |
| 768 | SHARED_REGION_TRACE_ERROR( |
| 769 | ("shared_region: create: " |
| 770 | "couldn't allocate mem_entry\n" )); |
| 771 | goto done; |
| 772 | } |
| 773 | |
| 774 | #if defined(__arm__) || defined(__arm64__) |
| 775 | { |
| 776 | struct pmap *pmap_nested; |
| 777 | |
| 778 | pmap_nested = pmap_create(NULL, 0, is_64bit); |
| 779 | if (pmap_nested != PMAP_NULL) { |
| 780 | pmap_set_nested(pmap_nested); |
| 781 | sub_map = vm_map_create(pmap_nested, 0, size, TRUE); |
| 782 | #if defined(__arm64__) |
| 783 | if (is_64bit || |
| 784 | page_shift_user32 == SIXTEENK_PAGE_SHIFT) { |
| 785 | /* enforce 16KB alignment of VM map entries */ |
| 786 | vm_map_set_page_shift(sub_map, |
| 787 | SIXTEENK_PAGE_SHIFT); |
| 788 | } |
| 789 | #elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS) |
| 790 | /* enforce 16KB alignment for watch targets with new ABI */ |
| 791 | vm_map_set_page_shift(sub_map, SIXTEENK_PAGE_SHIFT); |
| 792 | #endif /* __arm64__ */ |
| 793 | } else { |
| 794 | sub_map = VM_MAP_NULL; |
| 795 | } |
| 796 | } |
| 797 | #else |
| 798 | /* create a VM sub map and its pmap */ |
| 799 | sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit), |
| 800 | 0, size, |
| 801 | TRUE); |
| 802 | #endif |
| 803 | if (sub_map == VM_MAP_NULL) { |
| 804 | ipc_port_release_send(mem_entry_port); |
| 805 | kfree(shared_region, sizeof (*shared_region)); |
| 806 | shared_region = NULL; |
| 807 | SHARED_REGION_TRACE_ERROR( |
| 808 | ("shared_region: create: " |
| 809 | "couldn't allocate map\n" )); |
| 810 | goto done; |
| 811 | } |
| 812 | |
| 813 | assert(!sub_map->disable_vmentry_reuse); |
| 814 | sub_map->is_nested_map = TRUE; |
| 815 | |
| 816 | /* make the memory entry point to the VM sub map */ |
| 817 | mem_entry->is_sub_map = TRUE; |
| 818 | mem_entry->backing.map = sub_map; |
| 819 | mem_entry->size = size; |
| 820 | mem_entry->protection = VM_PROT_ALL; |
| 821 | |
| 822 | /* make the shared region point at the memory entry */ |
| 823 | shared_region->sr_mem_entry = mem_entry_port; |
| 824 | |
| 825 | /* fill in the shared region's environment and settings */ |
| 826 | shared_region->sr_base_address = base_address; |
| 827 | shared_region->sr_size = size; |
| 828 | shared_region->sr_pmap_nesting_start = pmap_nesting_start; |
| 829 | shared_region->sr_pmap_nesting_size = pmap_nesting_size; |
| 830 | shared_region->sr_cpu_type = cputype; |
| 831 | shared_region->sr_cpu_subtype = cpu_subtype; |
| 832 | shared_region->sr_64bit = is_64bit; |
| 833 | shared_region->sr_root_dir = root_dir; |
| 834 | |
| 835 | queue_init(&shared_region->sr_q); |
| 836 | shared_region->sr_mapping_in_progress = FALSE; |
| 837 | shared_region->sr_slide_in_progress = FALSE; |
| 838 | shared_region->sr_persists = FALSE; |
| 839 | shared_region->sr_slid = FALSE; |
| 840 | shared_region->sr_timer_call = NULL; |
| 841 | shared_region->sr_first_mapping = (mach_vm_offset_t) -1; |
| 842 | |
| 843 | /* grab a reference for the caller */ |
| 844 | shared_region->sr_ref_count = 1; |
| 845 | |
| 846 | /* And set up slide info */ |
| 847 | si = &shared_region->sr_slide_info; |
| 848 | si->start = 0; |
| 849 | si->end = 0; |
| 850 | si->slide = 0; |
| 851 | si->slide_object = NULL; |
| 852 | si->slide_info_size = 0; |
| 853 | si->slide_info_entry = NULL; |
| 854 | |
| 855 | /* Initialize UUID and other metadata */ |
| 856 | memset(&shared_region->sr_uuid, '\0', sizeof(shared_region->sr_uuid)); |
| 857 | shared_region->sr_uuid_copied = FALSE; |
| 858 | shared_region->sr_images_count = 0; |
| 859 | shared_region->sr_images = NULL; |
| 860 | done: |
| 861 | if (shared_region) { |
| 862 | SHARED_REGION_TRACE_INFO( |
| 863 | ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d," |
| 864 | "base=0x%llx,size=0x%llx) <- " |
| 865 | "%p mem=(%p,%p) map=%p pmap=%p\n" , |
| 866 | (void *)VM_KERNEL_ADDRPERM(root_dir), |
| 867 | cputype, cpu_subtype, is_64bit, |
| 868 | (long long)base_address, |
| 869 | (long long)size, |
| 870 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 871 | (void *)VM_KERNEL_ADDRPERM(mem_entry_port), |
| 872 | (void *)VM_KERNEL_ADDRPERM(mem_entry), |
| 873 | (void *)VM_KERNEL_ADDRPERM(sub_map), |
| 874 | (void *)VM_KERNEL_ADDRPERM(sub_map->pmap))); |
| 875 | } else { |
| 876 | SHARED_REGION_TRACE_INFO( |
| 877 | ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d," |
| 878 | "base=0x%llx,size=0x%llx) <- NULL" , |
| 879 | (void *)VM_KERNEL_ADDRPERM(root_dir), |
| 880 | cputype, cpu_subtype, is_64bit, |
| 881 | (long long)base_address, |
| 882 | (long long)size)); |
| 883 | } |
| 884 | return shared_region; |
| 885 | } |
| 886 | |
| 887 | /* |
| 888 | * Destroy a now-unused shared region. |
| 889 | * The shared region is no longer in the queue and can not be looked up. |
| 890 | */ |
| 891 | static void |
| 892 | vm_shared_region_destroy( |
| 893 | vm_shared_region_t shared_region) |
| 894 | { |
| 895 | vm_named_entry_t mem_entry; |
| 896 | vm_map_t map; |
| 897 | |
| 898 | SHARED_REGION_TRACE_INFO( |
| 899 | ("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d)\n" , |
| 900 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 901 | (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir), |
| 902 | shared_region->sr_cpu_type, |
| 903 | shared_region->sr_cpu_subtype, |
| 904 | shared_region->sr_64bit)); |
| 905 | |
| 906 | assert(shared_region->sr_ref_count == 0); |
| 907 | assert(!shared_region->sr_persists); |
| 908 | assert(!shared_region->sr_slid); |
| 909 | |
| 910 | mem_entry = (vm_named_entry_t) shared_region->sr_mem_entry->ip_kobject; |
| 911 | assert(mem_entry->is_sub_map); |
| 912 | assert(!mem_entry->internal); |
| 913 | assert(!mem_entry->is_copy); |
| 914 | map = mem_entry->backing.map; |
| 915 | |
| 916 | /* |
| 917 | * Clean up the pmap first. The virtual addresses that were |
| 918 | * entered in this possibly "nested" pmap may have different values |
| 919 | * than the VM map's min and max offsets, if the VM sub map was |
| 920 | * mapped at a non-zero offset in the processes' main VM maps, which |
| 921 | * is usually the case, so the clean-up we do in vm_map_destroy() would |
| 922 | * not be enough. |
| 923 | */ |
| 924 | if (map->pmap) { |
| 925 | pmap_remove(map->pmap, |
| 926 | shared_region->sr_base_address, |
| 927 | (shared_region->sr_base_address + |
| 928 | shared_region->sr_size)); |
| 929 | } |
| 930 | |
| 931 | /* |
| 932 | * Release our (one and only) handle on the memory entry. |
| 933 | * This will generate a no-senders notification, which will be processed |
| 934 | * by ipc_kobject_notify(), which will release the one and only |
| 935 | * reference on the memory entry and cause it to be destroyed, along |
| 936 | * with the VM sub map and its pmap. |
| 937 | */ |
| 938 | mach_memory_entry_port_release(shared_region->sr_mem_entry); |
| 939 | mem_entry = NULL; |
| 940 | shared_region->sr_mem_entry = IPC_PORT_NULL; |
| 941 | |
| 942 | if (shared_region->sr_timer_call) { |
| 943 | thread_call_free(shared_region->sr_timer_call); |
| 944 | } |
| 945 | |
| 946 | #if 0 |
| 947 | /* |
| 948 | * If slid, free those resources. We'll want this eventually, |
| 949 | * but can't handle it properly today. |
| 950 | */ |
| 951 | si = &shared_region->sr_slide_info; |
| 952 | if (si->slide_info_entry) { |
| 953 | kmem_free(kernel_map, |
| 954 | (vm_offset_t) si->slide_info_entry, |
| 955 | (vm_size_t) si->slide_info_size); |
| 956 | vm_object_deallocate(si->slide_object); |
| 957 | } |
| 958 | #endif |
| 959 | |
| 960 | /* release the shared region structure... */ |
| 961 | kfree(shared_region, sizeof (*shared_region)); |
| 962 | |
| 963 | SHARED_REGION_TRACE_DEBUG( |
| 964 | ("shared_region: destroy(%p) <-\n" , |
| 965 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 966 | shared_region = NULL; |
| 967 | |
| 968 | } |
| 969 | |
| 970 | /* |
| 971 | * Gets the address of the first (in time) mapping in the shared region. |
| 972 | */ |
| 973 | kern_return_t |
| 974 | vm_shared_region_start_address( |
| 975 | vm_shared_region_t shared_region, |
| 976 | mach_vm_offset_t *start_address) |
| 977 | { |
| 978 | kern_return_t kr; |
| 979 | mach_vm_offset_t sr_base_address; |
| 980 | mach_vm_offset_t sr_first_mapping; |
| 981 | |
| 982 | SHARED_REGION_TRACE_DEBUG( |
| 983 | ("shared_region: -> start_address(%p)\n" , |
| 984 | (void *)VM_KERNEL_ADDRPERM(shared_region))); |
| 985 | assert(shared_region->sr_ref_count > 1); |
| 986 | |
| 987 | vm_shared_region_lock(); |
| 988 | |
| 989 | /* |
| 990 | * Wait if there's another thread establishing a mapping |
| 991 | * in this shared region right when we're looking at it. |
| 992 | * We want a consistent view of the map... |
| 993 | */ |
| 994 | while (shared_region->sr_mapping_in_progress) { |
| 995 | /* wait for our turn... */ |
| 996 | assert(shared_region->sr_ref_count > 1); |
| 997 | vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, |
| 998 | THREAD_UNINT); |
| 999 | } |
| 1000 | assert(! shared_region->sr_mapping_in_progress); |
| 1001 | assert(shared_region->sr_ref_count > 1); |
| 1002 | |
| 1003 | sr_base_address = shared_region->sr_base_address; |
| 1004 | sr_first_mapping = shared_region->sr_first_mapping; |
| 1005 | |
| 1006 | if (sr_first_mapping == (mach_vm_offset_t) -1) { |
| 1007 | /* shared region is empty */ |
| 1008 | kr = KERN_INVALID_ADDRESS; |
| 1009 | } else { |
| 1010 | kr = KERN_SUCCESS; |
| 1011 | *start_address = sr_base_address + sr_first_mapping; |
| 1012 | } |
| 1013 | |
| 1014 | vm_shared_region_unlock(); |
| 1015 | |
| 1016 | SHARED_REGION_TRACE_DEBUG( |
| 1017 | ("shared_region: start_address(%p) <- 0x%llx\n" , |
| 1018 | (void *)VM_KERNEL_ADDRPERM(shared_region), |
| 1019 | (long long)shared_region->sr_base_address)); |
| 1020 | |
| 1021 | return kr; |
| 1022 | } |
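/*
 * This is what backs the shared_region_check_np() system call mentioned at
 * the top of this file: dyld asks where the already-mapped cache starts so
 * it can validate it.  User-space sketch (the exact syscall signature is an
 * assumption here, shown only for illustration):
 *
 *	uint64_t start_address;
 *
 *	if (shared_region_check_np(&start_address) == 0) {
 *		... a cache is already mapped at start_address: validate it ...
 *	} else {
 *		... region is empty: populate it with shared_region_map_np() ...
 *	}
 */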
| 1023 | |
| 1024 | void |
| 1025 | vm_shared_region_undo_mappings( |
| 1026 | vm_map_t sr_map, |
| 1027 | mach_vm_offset_t sr_base_address, |
| 1028 | struct shared_file_mapping_np *mappings, |
| 1029 | unsigned int mappings_count) |
| 1030 | { |
| 1031 | unsigned int j = 0; |
| 1032 | vm_shared_region_t shared_region = NULL; |
| 1033 | boolean_t reset_shared_region_state = FALSE; |
| 1034 | |
| 1035 | shared_region = vm_shared_region_get(current_task()); |
| 1036 | if (shared_region == NULL) { |
| 1037 | printf("Failed to undo mappings because of NULL shared region.\n" ); |
| 1038 | return; |
| 1039 | } |
| 1040 | |
| 1041 | |
| 1042 | if (sr_map == NULL) { |
| 1043 | ipc_port_t sr_handle; |
| 1044 | vm_named_entry_t sr_mem_entry; |
| 1045 | |
| 1046 | vm_shared_region_lock(); |
| 1047 | assert(shared_region->sr_ref_count > 1); |
| 1048 | |
| 1049 | while (shared_region->sr_mapping_in_progress) { |
| 1050 | /* wait for our turn... */ |
| 1051 | vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, |
| 1052 | THREAD_UNINT); |
| 1053 | } |
| 1054 | assert(! shared_region->sr_mapping_in_progress); |
| 1055 | assert(shared_region->sr_ref_count > 1); |
| 1056 | /* let others know we're working in this shared region */ |
| 1057 | shared_region->sr_mapping_in_progress = TRUE; |
| 1058 | |
| 1059 | vm_shared_region_unlock(); |
| 1060 | |
| 1061 | reset_shared_region_state = TRUE; |
| 1062 | |
| 1063 | /* no need to lock because this data is never modified... */ |
| 1064 | sr_handle = shared_region->sr_mem_entry; |
| 1065 | sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject; |
| 1066 | sr_map = sr_mem_entry->backing.map; |
| 1067 | sr_base_address = shared_region->sr_base_address; |
| 1068 | } |
| 1069 | /* |
| 1070 | * Undo the mappings we've established so far. |
| 1071 | */ |
| 1072 | for (j = 0; j < mappings_count; j++) { |
| 1073 | kern_return_t kr2; |
| 1074 | |
| 1075 | if (mappings[j].sfm_size == 0) { |
| 1076 | /* |
| 1077 | * We didn't establish this |
| 1078 | * mapping, so nothing to undo. |
| 1079 | */ |
| 1080 | continue; |
| 1081 | } |
| 1082 | SHARED_REGION_TRACE_INFO( |
| 1083 | ("shared_region: mapping[%d]: " |
| 1084 | "address:0x%016llx " |
| 1085 | "size:0x%016llx " |
| 1086 | "offset:0x%016llx " |
| 1087 | "maxprot:0x%x prot:0x%x: " |
| 1088 | "undoing...\n" , |
| 1089 | j, |
| 1090 | (long long)mappings[j].sfm_address, |
| 1091 | (long long)mappings[j].sfm_size, |
| 1092 | (long long)mappings[j].sfm_file_offset, |
| 1093 | mappings[j].sfm_max_prot, |
| 1094 | mappings[j].sfm_init_prot)); |
| 1095 | kr2 = mach_vm_deallocate( |
| 1096 | sr_map, |
| 1097 | (mappings[j].sfm_address - |
| 1098 | sr_base_address), |
| 1099 | mappings[j].sfm_size); |
| 1100 | assert(kr2 == KERN_SUCCESS); |
| 1101 | } |
| 1102 | |
| 1103 | if (reset_shared_region_state) { |
| 1104 | vm_shared_region_lock(); |
| 1105 | assert(shared_region->sr_ref_count > 1); |
| 1106 | assert(shared_region->sr_mapping_in_progress); |
| 1107 | /* we're done working on that shared region */ |
| 1108 | shared_region->sr_mapping_in_progress = FALSE; |
| 1109 | thread_wakeup((event_t) &shared_region->sr_mapping_in_progress); |
| 1110 | vm_shared_region_unlock(); |
| 1111 | reset_shared_region_state = FALSE; |
| 1112 | } |
| 1113 | |
| 1114 | vm_shared_region_deallocate(shared_region); |
| 1115 | } |
| 1116 | |
| 1117 | /* |
| 1118 | * Establish some mappings of a file in the shared region. |
| 1119 | * This is used by "dyld" via the shared_region_map_np() system call |
| 1120 | * to populate the shared region with the appropriate shared cache. |
| 1121 | * |
| 1122 | * One could also call it several times to incrementally load several |
| 1123 | * libraries, as long as they do not overlap. |
| 1124 | * It will return KERN_SUCCESS if the mappings were successfully established |
| 1125 | * or if they were already established identically by another process. |
| 1126 | */ |
| 1127 | kern_return_t |
| 1128 | vm_shared_region_map_file( |
| 1129 | vm_shared_region_t shared_region, |
| 1130 | unsigned int mappings_count, |
| 1131 | struct shared_file_mapping_np *mappings, |
| 1132 | memory_object_control_t file_control, |
| 1133 | memory_object_size_t file_size, |
| 1134 | void *root_dir, |
| 1135 | uint32_t slide, |
| 1136 | user_addr_t slide_start, |
| 1137 | user_addr_t slide_size) |
| 1138 | { |
| 1139 | kern_return_t kr; |
| 1140 | vm_object_t file_object; |
| 1141 | ipc_port_t sr_handle; |
| 1142 | vm_named_entry_t sr_mem_entry; |
| 1143 | vm_map_t sr_map; |
| 1144 | mach_vm_offset_t sr_base_address; |
| 1145 | unsigned int i; |
| 1146 | mach_port_t map_port; |
| 1147 | vm_map_offset_t target_address; |
| 1148 | vm_object_t object; |
| 1149 | vm_object_size_t obj_size; |
| 1150 | struct shared_file_mapping_np *mapping_to_slide = NULL; |
| 1151 | mach_vm_offset_t first_mapping = (mach_vm_offset_t) -1; |
| 1152 | mach_vm_offset_t slid_mapping = (mach_vm_offset_t) -1; |
| 1153 | vm_map_offset_t lowest_unnestable_addr = 0; |
| 1154 | vm_map_kernel_flags_t vmk_flags; |
| 1155 | mach_vm_offset_t sfm_min_address = ~0; |
| 1156 | mach_vm_offset_t sfm_max_address = 0; |
| 1157 | struct _dyld_cache_header sr_cache_header; |
| 1158 | |
| 1159 | #if __arm64__ |
| 1160 | if ((shared_region->sr_64bit || |
| 1161 | page_shift_user32 == SIXTEENK_PAGE_SHIFT) && |
| 1162 | ((slide & SIXTEENK_PAGE_MASK) != 0)) { |
| 1163 | printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n" , |
| 1164 | __FUNCTION__, slide); |
| 1165 | kr = KERN_INVALID_ARGUMENT; |
| 1166 | goto done; |
| 1167 | } |
| 1168 | #endif /* __arm64__ */ |
| 1169 | |
| 1170 | kr = KERN_SUCCESS; |
| 1171 | |
| 1172 | vm_shared_region_lock(); |
| 1173 | assert(shared_region->sr_ref_count > 1); |
| 1174 | |
| 1175 | if (shared_region->sr_root_dir != root_dir) { |
| 1176 | /* |
| 1177 | * This shared region doesn't match the current root |
| 1178 | * directory of this process. Deny the mapping to |
| 1179 | * avoid tainting the shared region with something that |
| 1180 | * doesn't quite belong into it. |
| 1181 | */ |
| 1182 | vm_shared_region_unlock(); |
| 1183 | kr = KERN_PROTECTION_FAILURE; |
| 1184 | goto done; |
| 1185 | } |
| 1186 | |
| 1187 | /* |
| 1188 | * Make sure we handle only one mapping at a time in a given |
| 1189 | * shared region, to avoid race conditions. This should not |
| 1190 | * happen frequently... |
| 1191 | */ |
| 1192 | while (shared_region->sr_mapping_in_progress) { |
| 1193 | /* wait for our turn... */ |
| 1194 | vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, |
| 1195 | THREAD_UNINT); |
| 1196 | } |
| 1197 | assert(! shared_region->sr_mapping_in_progress); |
| 1198 | assert(shared_region->sr_ref_count > 1); |
| 1199 | /* let others know we're working in this shared region */ |
| 1200 | shared_region->sr_mapping_in_progress = TRUE; |
| 1201 | |
| 1202 | vm_shared_region_unlock(); |
| 1203 | |
| 1204 | /* no need to lock because this data is never modified... */ |
| 1205 | sr_handle = shared_region->sr_mem_entry; |
| 1206 | sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject; |
| 1207 | sr_map = sr_mem_entry->backing.map; |
| 1208 | sr_base_address = shared_region->sr_base_address; |
| 1209 | |
| 1210 | SHARED_REGION_TRACE_DEBUG( |
| 1211 | ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n" , |
| 1212 | (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count, |
| 1213 | (void *)VM_KERNEL_ADDRPERM(mappings), |
| 1214 | (void *)VM_KERNEL_ADDRPERM(file_control), file_size)); |
| 1215 | |
| 1216 | /* get the VM object associated with the file to be mapped */ |
| 1217 | file_object = memory_object_control_to_vm_object(file_control); |
| 1218 | |
| 1219 | assert(file_object); |
| 1220 | |
| 1221 | /* establish the mappings */ |
| 1222 | for (i = 0; i < mappings_count; i++) { |
| 1223 | SHARED_REGION_TRACE_INFO( |
| 1224 | ("shared_region: mapping[%d]: " |
| 1225 | "address:0x%016llx size:0x%016llx offset:0x%016llx " |
| 1226 | "maxprot:0x%x prot:0x%x\n" , |
| 1227 | i, |
| 1228 | (long long)mappings[i].sfm_address, |
| 1229 | (long long)mappings[i].sfm_size, |
| 1230 | (long long)mappings[i].sfm_file_offset, |
| 1231 | mappings[i].sfm_max_prot, |
| 1232 | mappings[i].sfm_init_prot)); |
| 1233 | |
| 1234 | if (mappings[i].sfm_address < sfm_min_address) { |
| 1235 | sfm_min_address = mappings[i].sfm_address; |
| 1236 | } |
| 1237 | |
| 1238 | if ((mappings[i].sfm_address + mappings[i].sfm_size) > sfm_max_address) { |
| 1239 | sfm_max_address = mappings[i].sfm_address + mappings[i].sfm_size; |
| 1240 | } |
| 1241 | |
| 1242 | if (mappings[i].sfm_init_prot & VM_PROT_ZF) { |
| 1243 | /* zero-filled memory */ |
| 1244 | map_port = MACH_PORT_NULL; |
| 1245 | } else { |
| 1246 | /* file-backed memory */ |
| 1247 | __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager); |
| 1248 | } |
| 1249 | |
| 1250 | if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) { |
| 1251 | /* |
| 1252 | * This is the mapping that needs to be slid. |
| 1253 | */ |
| 1254 | if (mapping_to_slide != NULL) { |
| 1255 | SHARED_REGION_TRACE_INFO( |
| 1256 | ("shared_region: mapping[%d]: " |
| 1257 | "address:0x%016llx size:0x%016llx " |
| 1258 | "offset:0x%016llx " |
| 1259 | "maxprot:0x%x prot:0x%x " |
| 1260 | "will not be slid as only one such mapping is allowed...\n" , |
| 1261 | i, |
| 1262 | (long long)mappings[i].sfm_address, |
| 1263 | (long long)mappings[i].sfm_size, |
| 1264 | (long long)mappings[i].sfm_file_offset, |
| 1265 | mappings[i].sfm_max_prot, |
| 1266 | mappings[i].sfm_init_prot)); |
| 1267 | } else { |
| 1268 | mapping_to_slide = &mappings[i]; |
| 1269 | } |
| 1270 | } |
| 1271 | |
| 1272 | /* mapping's address is relative to the shared region base */ |
| 1273 | target_address = |
| 1274 | mappings[i].sfm_address - sr_base_address; |
| 1275 | |
| 1276 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; |
| 1277 | vmk_flags.vmkf_already = TRUE; |
| 1278 | |
| 1279 | /* establish that mapping, OK if it's "already" there */ |
| 1280 | if (map_port == MACH_PORT_NULL) { |
| 1281 | /* |
| 1282 | * We want to map some anonymous memory in a |
| 1283 | * shared region. |
| 1284 | * We have to create the VM object now, so that it |
| 1285 | * can be mapped "copy-on-write". |
| 1286 | */ |
| 1287 | obj_size = vm_map_round_page(mappings[i].sfm_size, |
| 1288 | VM_MAP_PAGE_MASK(sr_map)); |
| 1289 | object = vm_object_allocate(obj_size); |
| 1290 | if (object == VM_OBJECT_NULL) { |
| 1291 | kr = KERN_RESOURCE_SHORTAGE; |
| 1292 | } else { |
| 1293 | kr = vm_map_enter( |
| 1294 | sr_map, |
| 1295 | &target_address, |
| 1296 | vm_map_round_page(mappings[i].sfm_size, |
| 1297 | VM_MAP_PAGE_MASK(sr_map)), |
| 1298 | 0, |
| 1299 | VM_FLAGS_FIXED, |
| 1300 | vmk_flags, |
| 1301 | VM_KERN_MEMORY_NONE, |
| 1302 | object, |
| 1303 | 0, |
| 1304 | TRUE, |
| 1305 | mappings[i].sfm_init_prot & VM_PROT_ALL, |
| 1306 | mappings[i].sfm_max_prot & VM_PROT_ALL, |
| 1307 | VM_INHERIT_DEFAULT); |
| 1308 | } |
| 1309 | } else { |
| 1310 | object = VM_OBJECT_NULL; /* no anonymous memory here */ |
| 1311 | kr = vm_map_enter_mem_object( |
| 1312 | sr_map, |
| 1313 | &target_address, |
| 1314 | vm_map_round_page(mappings[i].sfm_size, |
| 1315 | VM_MAP_PAGE_MASK(sr_map)), |
| 1316 | 0, |
| 1317 | VM_FLAGS_FIXED, |
| 1318 | vmk_flags, |
| 1319 | VM_KERN_MEMORY_NONE, |
| 1320 | map_port, |
| 1321 | mappings[i].sfm_file_offset, |
| 1322 | TRUE, |
| 1323 | mappings[i].sfm_init_prot & VM_PROT_ALL, |
| 1324 | mappings[i].sfm_max_prot & VM_PROT_ALL, |
| 1325 | VM_INHERIT_DEFAULT); |
| 1326 | |
| 1327 | } |
| 1328 | |
| 1329 | if (kr == KERN_SUCCESS) { |
| 1330 | /* |
| 1331 | * Record the first (chronologically) successful |
| 1332 | * mapping in this shared region. |
| 1333 | * We're protected by "sr_mapping_in_progress" here, |
| 1334 | * so no need to lock "shared_region". |
| 1335 | */ |
| 1336 | if (first_mapping == (mach_vm_offset_t) -1) { |
| 1337 | first_mapping = target_address; |
| 1338 | } |
| 1339 | |
| 1340 | if ((slid_mapping == (mach_vm_offset_t) -1) && |
| 1341 | (mapping_to_slide == &mappings[i])) { |
| 1342 | slid_mapping = target_address; |
| 1343 | } |
| 1344 | |
| 1345 | /* |
| 1346 | * Record the lowest writable address in this |
| 1347 | * sub map, to log any unexpected unnesting below |
| 1348 | * that address (see log_unnest_badness()). |
| 1349 | */ |
| 1350 | if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) && |
| 1351 | sr_map->is_nested_map && |
| 1352 | (lowest_unnestable_addr == 0 || |
| 1353 | (target_address < lowest_unnestable_addr))) { |
| 1354 | lowest_unnestable_addr = target_address; |
| 1355 | } |
| 1356 | } else { |
| 1357 | if (map_port == MACH_PORT_NULL) { |
| 1358 | /* |
| 1359 | * Get rid of the VM object we just created |
| 1360 | * but failed to map. |
| 1361 | */ |
| 1362 | vm_object_deallocate(object); |
| 1363 | object = VM_OBJECT_NULL; |
| 1364 | } |
| 1365 | if (kr == KERN_MEMORY_PRESENT) { |
| 1366 | /* |
| 1367 | * This exact mapping was already there: |
| 1368 | * that's fine. |
| 1369 | */ |
| 1370 | SHARED_REGION_TRACE_INFO( |
| 1371 | ("shared_region: mapping[%d]: " |
| 1372 | "address:0x%016llx size:0x%016llx " |
| 1373 | "offset:0x%016llx " |
| 1374 | "maxprot:0x%x prot:0x%x " |
| 1375 | "already mapped...\n" , |
| 1376 | i, |
| 1377 | (long long)mappings[i].sfm_address, |
| 1378 | (long long)mappings[i].sfm_size, |
| 1379 | (long long)mappings[i].sfm_file_offset, |
| 1380 | mappings[i].sfm_max_prot, |
| 1381 | mappings[i].sfm_init_prot)); |
| 1382 | /* |
| 1383 | * We didn't establish this mapping ourselves; |
| 1384 | * let's reset its size, so that we do not |
| 1385 | * attempt to undo it if an error occurs later. |
| 1386 | */ |
| 1387 | mappings[i].sfm_size = 0; |
| 1388 | kr = KERN_SUCCESS; |
| 1389 | } else { |
| 1390 | /* this mapping failed ! */ |
| 1391 | SHARED_REGION_TRACE_ERROR( |
| 1392 | ("shared_region: mapping[%d]: " |
| 1393 | "address:0x%016llx size:0x%016llx " |
| 1394 | "offset:0x%016llx " |
| 1395 | "maxprot:0x%x prot:0x%x failed 0x%x\n" , |
| 1396 | i, |
| 1397 | (long long)mappings[i].sfm_address, |
| 1398 | (long long)mappings[i].sfm_size, |
| 1399 | (long long)mappings[i].sfm_file_offset, |
| 1400 | mappings[i].sfm_max_prot, |
| 1401 | mappings[i].sfm_init_prot, |
| 1402 | kr)); |
| 1403 | |
| 1404 | vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i); |
| 1405 | break; |
| 1406 | } |
| 1407 | |
| 1408 | } |
| 1409 | |
| 1410 | } |
| 1411 | |
| 1412 | if (kr == KERN_SUCCESS && |
| 1413 | slide_size != 0 && |
| 1414 | mapping_to_slide != NULL) { |
| 1415 | kr = vm_shared_region_slide(slide, |
| 1416 | mapping_to_slide->sfm_file_offset, |
| 1417 | mapping_to_slide->sfm_size, |
| 1418 | slide_start, |
| 1419 | slide_size, |
| 1420 | slid_mapping, |
| 1421 | file_control); |
| 1422 | if (kr != KERN_SUCCESS) { |
| 1423 | SHARED_REGION_TRACE_ERROR( |
| 1424 | ("shared_region: region_slide(" |
| 1425 | "slide:0x%x start:0x%016llx " |
| 1426 | "size:0x%016llx) failed 0x%x\n" , |
| 1427 | slide, |
| 1428 | (long long)slide_start, |
| 1429 | (long long)slide_size, |
| 1430 | kr)); |
| 1431 | vm_shared_region_undo_mappings(sr_map, |
| 1432 | sr_base_address, |
| 1433 | mappings, |
| 1434 | mappings_count); |
| 1435 | } |
| 1436 | } |
| 1437 | |
| 1438 | if (kr == KERN_SUCCESS) { |
| 1439 | /* adjust the map's "lowest_unnestable_start" */ |
| 1440 | lowest_unnestable_addr &= ~(pmap_nesting_size_min-1); |
| 1441 | if (lowest_unnestable_addr != |
| 1442 | sr_map->lowest_unnestable_start) { |
| 1443 | vm_map_lock(sr_map); |
| 1444 | sr_map->lowest_unnestable_start = |
| 1445 | lowest_unnestable_addr; |
| 1446 | vm_map_unlock(sr_map); |
| 1447 | } |
| 1448 | } |
| 1449 | |
| 1450 | vm_shared_region_lock(); |
| 1451 | assert(shared_region->sr_ref_count > 1); |
| 1452 | assert(shared_region->sr_mapping_in_progress); |
| 1453 | |
| 1454 | /* set "sr_first_mapping"; dyld uses it to validate the shared cache */ |
| 1455 | if (kr == KERN_SUCCESS && |
| 1456 | shared_region->sr_first_mapping == (mach_vm_offset_t) -1) { |
| 1457 | shared_region->sr_first_mapping = first_mapping; |
| 1458 | } |
| 1459 | |
| 1460 | /* |
| 1461 | * copy in the shared region UUID to the shared region structure. |
| 1462 | * we do this indirectly by first copying in the shared cache header |
| 1463 | * and then copying the UUID from there because we'll need to look |
| 1464 | * at other content from the shared cache header. |
| 1465 | */ |
| 1466 | if (kr == KERN_SUCCESS && !shared_region->sr_uuid_copied) { |
| 1467 | int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping), |
| 1468 | (char *)&sr_cache_header, |
| 1469 | sizeof(sr_cache_header)); |
| 1470 | if (error == 0) { |
| 1471 | memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid)); |
| 1472 | shared_region->sr_uuid_copied = TRUE; |
| 1473 | } else { |
| 1474 | #if DEVELOPMENT || DEBUG |
| 1475 | panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx " |
| 1476 | "offset:0 size:0x%016llx) failed with %d\n" , |
| 1477 | (long long)shared_region->sr_base_address, |
| 1478 | (long long)shared_region->sr_first_mapping, |
| 1479 | (long long)sizeof(sr_cache_header), |
| 1480 | error); |
| 1481 | #endif /* DEVELOPMENT || DEBUG */ |
| 1482 | shared_region->sr_uuid_copied = FALSE; |
| 1483 | } |
| 1484 | } |
| 1485 | |
| 1486 | /* |
| 1487 | * If the shared cache is associated with the init task (and is therefore the system shared cache), |
| 1488 | * check whether it is a custom built shared cache and copy in the shared cache layout accordingly. |
| 1489 | */ |
| 1490 | boolean_t is_init_task = (task_pid(current_task()) == 1); |
| 1491 | if (shared_region->sr_uuid_copied && is_init_task) { |
| 1492 | /* Copy in the shared cache layout if we're running with a locally built shared cache */ |
| 1493 | if (sr_cache_header.locallyBuiltCache) { |
| 1494 | KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START); |
| 1495 | size_t image_array_length = (sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info)); |
| 1496 | struct _dyld_cache_image_text_info *sr_image_layout = kalloc(image_array_length); |
| 1497 | int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping + |
| 1498 | sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length); |
| 1499 | if (error == 0) { |
| 1500 | shared_region->sr_images = kalloc(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64)); |
| 1501 | for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) { |
| 1502 | memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid, |
| 1503 | sizeof(shared_region->sr_images[index].imageUUID)); |
| 1504 | shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress; |
| 1505 | } |
| 1506 | |
| 1507 | assert(sr_cache_header.imagesTextCount < UINT32_MAX); |
| 1508 | shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount; |
| 1509 | } else { |
| 1510 | #if DEVELOPMENT || DEBUG |
| 1511 | panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx " |
| 1512 | "offset:0x%016llx size:0x%016llx) failed with %d\n" , |
| 1513 | (long long)shared_region->sr_base_address, |
| 1514 | (long long)shared_region->sr_first_mapping, |
| 1515 | (long long)sr_cache_header.imagesTextOffset, |
| 1516 | (long long)image_array_length, |
| 1517 | error); |
| 1518 | #endif /* DEVELOPMENT || DEBUG */ |
| 1519 | } |
| 1520 | KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count); |
| 1521 | kfree(sr_image_layout, image_array_length); |
| 1522 | sr_image_layout = NULL; |
| 1523 | } |
| 1524 | init_task_shared_region = shared_region; |
| 1525 | } |
| 1526 | |
| 1527 | if (kr == KERN_SUCCESS) { |
| 1528 | /* |
| 1529 | * If we succeeded, we know the bounds of the shared region. |
| 1530 | * Trim our pmaps to only cover this range (if applicable to |
| 1531 | * this platform). |
| 1532 | */ |
| 1533 | pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_min_address, sfm_max_address - sfm_min_address); |
| 1534 | } |
| 1535 | |
| 1536 | /* we're done working on that shared region */ |
| 1537 | shared_region->sr_mapping_in_progress = FALSE; |
| 1538 | thread_wakeup((event_t) &shared_region->sr_mapping_in_progress); |
| 1539 | vm_shared_region_unlock(); |
| 1540 | |
| 1541 | done: |
| 1542 | SHARED_REGION_TRACE_DEBUG( |
| 1543 | ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n" , |
| 1544 | (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count, |
| 1545 | (void *)VM_KERNEL_ADDRPERM(mappings), |
| 1546 | (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr)); |
| 1547 | return kr; |
| 1548 | } |
| 1549 | |
| 1550 | /* |
| 1551 | * Retrieve a task's shared region and grab an extra reference to |
| 1552 | * make sure it doesn't disappear while the caller is using it. |
| 1553 | * The caller is responsible for consuming that extra reference if |
| 1554 | * necessary. |
| 1555 | * |
| 1556 | * This also tries to trim the pmap for the shared region. |
| 1557 | */ |
| 1558 | vm_shared_region_t |
| 1559 | vm_shared_region_trim_and_get(task_t task) |
| 1560 | { |
| 1561 | vm_shared_region_t shared_region; |
| 1562 | ipc_port_t sr_handle; |
| 1563 | vm_named_entry_t sr_mem_entry; |
| 1564 | vm_map_t sr_map; |
| 1565 | |
| 1566 | /* Get the shared region and the map. */ |
| 1567 | shared_region = vm_shared_region_get(task); |
| 1568 | if (shared_region == NULL) { |
| 1569 | return NULL; |
| 1570 | } |
| 1571 | |
| 1572 | sr_handle = shared_region->sr_mem_entry; |
| 1573 | sr_mem_entry = (vm_named_entry_t) sr_handle->ip_kobject; |
| 1574 | sr_map = sr_mem_entry->backing.map; |
| 1575 | |
| 1576 | /* Trim the pmap if possible. */ |
| 1577 | pmap_trim(task->map->pmap, sr_map->pmap, 0, 0, 0); |
| 1578 | |
| 1579 | return shared_region; |
| 1580 | } |
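
/*
 * Illustrative usage sketch (not part of the original source): the extra
 * reference returned by vm_shared_region_trim_and_get() must be consumed
 * by the caller when it is done with the region, e.g.:
 *
 *	vm_shared_region_t sr = vm_shared_region_trim_and_get(task);
 *	if (sr != NULL) {
 *		... use sr ...
 *		vm_shared_region_deallocate(sr); // consume the extra reference
 *	}
 */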
| 1581 | |
| 1582 | /* |
| 1583 | * Enter the appropriate shared region into "map" for "task". |
| 1584 | * This involves looking up the shared region (and possibly creating a new |
| 1585 | * one) for the desired environment, then mapping the VM sub map into the |
| 1586 | * task's VM "map", with the appropriate level of pmap-nesting. |
| 1587 | */ |
| 1588 | kern_return_t |
| 1589 | vm_shared_region_enter( |
| 1590 | struct _vm_map *map, |
| 1591 | struct task *task, |
| 1592 | boolean_t is_64bit, |
| 1593 | void *fsroot, |
| 1594 | cpu_type_t cpu, |
| 1595 | cpu_subtype_t cpu_subtype) |
| 1596 | { |
| 1597 | kern_return_t kr; |
| 1598 | vm_shared_region_t shared_region; |
| 1599 | vm_map_offset_t sr_address, sr_offset, target_address; |
| 1600 | vm_map_size_t sr_size, mapping_size; |
| 1601 | vm_map_offset_t sr_pmap_nesting_start; |
| 1602 | vm_map_size_t sr_pmap_nesting_size; |
| 1603 | ipc_port_t sr_handle; |
| 1604 | vm_prot_t cur_prot, max_prot; |
| 1605 | |
| 1606 | SHARED_REGION_TRACE_DEBUG( |
| 1607 | ("shared_region: -> " |
| 1608 | "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n" , |
| 1609 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1610 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1611 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1612 | cpu, cpu_subtype, is_64bit)); |
| 1613 | |
| 1614 | /* lookup (create if needed) the shared region for this environment */ |
| 1615 | shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit); |
| 1616 | if (shared_region == NULL) { |
/* this should not happen! */
| 1618 | SHARED_REGION_TRACE_ERROR( |
| 1619 | ("shared_region: -> " |
| 1620 | "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d): " |
| 1621 | "lookup failed !\n" , |
| 1622 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1623 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1624 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1625 | cpu, cpu_subtype, is_64bit)); |
| 1626 | //panic("shared_region_enter: lookup failed\n"); |
| 1627 | return KERN_FAILURE; |
| 1628 | } |
| 1629 | |
| 1630 | kr = KERN_SUCCESS; |
| 1631 | /* no need to lock since this data is never modified */ |
| 1632 | sr_address = shared_region->sr_base_address; |
| 1633 | sr_size = shared_region->sr_size; |
| 1634 | sr_handle = shared_region->sr_mem_entry; |
| 1635 | sr_pmap_nesting_start = shared_region->sr_pmap_nesting_start; |
| 1636 | sr_pmap_nesting_size = shared_region->sr_pmap_nesting_size; |
| 1637 | |
| 1638 | cur_prot = VM_PROT_READ; |
| 1639 | #if __x86_64__ |
| 1640 | /* |
| 1641 | * XXX BINARY COMPATIBILITY |
| 1642 | * java6 apparently needs to modify some code in the |
| 1643 | * dyld shared cache and needs to be allowed to add |
| 1644 | * write access... |
| 1645 | */ |
| 1646 | max_prot = VM_PROT_ALL; |
| 1647 | #else /* __x86_64__ */ |
| 1648 | max_prot = VM_PROT_READ; |
| 1649 | #endif /* __x86_64__ */ |
| 1650 | /* |
| 1651 | * Start mapping the shared region's VM sub map into the task's VM map. |
| 1652 | */ |
| 1653 | sr_offset = 0; |
| 1654 | |
| 1655 | if (sr_pmap_nesting_start > sr_address) { |
| 1656 | /* we need to map a range without pmap-nesting first */ |
| 1657 | target_address = sr_address; |
| 1658 | mapping_size = sr_pmap_nesting_start - sr_address; |
| 1659 | kr = vm_map_enter_mem_object( |
| 1660 | map, |
| 1661 | &target_address, |
| 1662 | mapping_size, |
| 1663 | 0, |
| 1664 | VM_FLAGS_FIXED, |
| 1665 | VM_MAP_KERNEL_FLAGS_NONE, |
| 1666 | VM_KERN_MEMORY_NONE, |
| 1667 | sr_handle, |
| 1668 | sr_offset, |
| 1669 | TRUE, |
| 1670 | cur_prot, |
| 1671 | max_prot, |
| 1672 | VM_INHERIT_SHARE); |
| 1673 | if (kr != KERN_SUCCESS) { |
| 1674 | SHARED_REGION_TRACE_ERROR( |
| 1675 | ("shared_region: enter(%p,%p,%p,%d,%d,%d): " |
| 1676 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n" , |
| 1677 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1678 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1679 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1680 | cpu, cpu_subtype, is_64bit, |
| 1681 | (long long)target_address, |
| 1682 | (long long)mapping_size, |
| 1683 | (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); |
| 1684 | goto done; |
| 1685 | } |
| 1686 | SHARED_REGION_TRACE_DEBUG( |
| 1687 | ("shared_region: enter(%p,%p,%p,%d,%d,%d): " |
| 1688 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n" , |
| 1689 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1690 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1691 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1692 | cpu, cpu_subtype, is_64bit, |
| 1693 | (long long)target_address, (long long)mapping_size, |
| 1694 | (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); |
| 1695 | sr_offset += mapping_size; |
| 1696 | sr_size -= mapping_size; |
| 1697 | } |
| 1698 | /* |
| 1699 | * We may need to map several pmap-nested portions, due to platform |
| 1700 | * specific restrictions on pmap nesting. |
| 1701 | * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias... |
| 1702 | */ |
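/*
 * Worked example (hypothetical numbers): if pmap_nesting_size_max were
 * 512MB and the region's nesting range were 1.5GB, the loop below would
 * issue three nested 512MB mappings, advancing sr_offset by mapping_size
 * on each pass until sr_pmap_nesting_size reaches 0.
 */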
| 1703 | for (; |
| 1704 | sr_pmap_nesting_size > 0; |
| 1705 | sr_offset += mapping_size, |
| 1706 | sr_size -= mapping_size, |
| 1707 | sr_pmap_nesting_size -= mapping_size) { |
| 1708 | target_address = sr_address + sr_offset; |
| 1709 | mapping_size = sr_pmap_nesting_size; |
| 1710 | if (mapping_size > pmap_nesting_size_max) { |
| 1711 | mapping_size = (vm_map_offset_t) pmap_nesting_size_max; |
| 1712 | } |
| 1713 | kr = vm_map_enter_mem_object( |
| 1714 | map, |
| 1715 | &target_address, |
| 1716 | mapping_size, |
| 1717 | 0, |
| 1718 | VM_FLAGS_FIXED, |
| 1719 | VM_MAP_KERNEL_FLAGS_NONE, |
| 1720 | VM_MEMORY_SHARED_PMAP, |
| 1721 | sr_handle, |
| 1722 | sr_offset, |
| 1723 | TRUE, |
| 1724 | cur_prot, |
| 1725 | max_prot, |
| 1726 | VM_INHERIT_SHARE); |
| 1727 | if (kr != KERN_SUCCESS) { |
| 1728 | SHARED_REGION_TRACE_ERROR( |
| 1729 | ("shared_region: enter(%p,%p,%p,%d,%d,%d): " |
| 1730 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n" , |
| 1731 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1732 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1733 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1734 | cpu, cpu_subtype, is_64bit, |
| 1735 | (long long)target_address, |
| 1736 | (long long)mapping_size, |
| 1737 | (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); |
| 1738 | goto done; |
| 1739 | } |
| 1740 | SHARED_REGION_TRACE_DEBUG( |
| 1741 | ("shared_region: enter(%p,%p,%p,%d,%d,%d): " |
| 1742 | "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n" , |
| 1743 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1744 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1745 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1746 | cpu, cpu_subtype, is_64bit, |
| 1747 | (long long)target_address, (long long)mapping_size, |
| 1748 | (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); |
| 1749 | } |
| 1750 | if (sr_size > 0) { |
| 1751 | /* and there's some left to be mapped without pmap-nesting */ |
| 1752 | target_address = sr_address + sr_offset; |
| 1753 | mapping_size = sr_size; |
| 1754 | kr = vm_map_enter_mem_object( |
| 1755 | map, |
| 1756 | &target_address, |
| 1757 | mapping_size, |
| 1758 | 0, |
| 1759 | VM_FLAGS_FIXED, |
| 1760 | VM_MAP_KERNEL_FLAGS_NONE, |
| 1761 | VM_KERN_MEMORY_NONE, |
| 1762 | sr_handle, |
| 1763 | sr_offset, |
| 1764 | TRUE, |
| 1765 | cur_prot, |
| 1766 | max_prot, |
| 1767 | VM_INHERIT_SHARE); |
| 1768 | if (kr != KERN_SUCCESS) { |
| 1769 | SHARED_REGION_TRACE_ERROR( |
| 1770 | ("shared_region: enter(%p,%p,%p,%d,%d,%d): " |
| 1771 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n" , |
| 1772 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1773 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1774 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1775 | cpu, cpu_subtype, is_64bit, |
| 1776 | (long long)target_address, |
| 1777 | (long long)mapping_size, |
| 1778 | (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); |
| 1779 | goto done; |
| 1780 | } |
| 1781 | SHARED_REGION_TRACE_DEBUG( |
| 1782 | ("shared_region: enter(%p,%p,%p,%d,%d,%d): " |
| 1783 | "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n" , |
| 1784 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1785 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1786 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1787 | cpu, cpu_subtype, is_64bit, |
| 1788 | (long long)target_address, (long long)mapping_size, |
| 1789 | (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); |
| 1790 | sr_offset += mapping_size; |
| 1791 | sr_size -= mapping_size; |
| 1792 | } |
| 1793 | assert(sr_size == 0); |
| 1794 | |
| 1795 | done: |
| 1796 | if (kr == KERN_SUCCESS) { |
| 1797 | /* let the task use that shared region */ |
| 1798 | vm_shared_region_set(task, shared_region); |
| 1799 | } else { |
| 1800 | /* drop our reference since we're not using it */ |
| 1801 | vm_shared_region_deallocate(shared_region); |
| 1802 | vm_shared_region_set(task, NULL); |
| 1803 | } |
| 1804 | |
| 1805 | SHARED_REGION_TRACE_DEBUG( |
| 1806 | ("shared_region: enter(%p,%p,%p,%d,%d,%d) <- 0x%x\n" , |
| 1807 | (void *)VM_KERNEL_ADDRPERM(map), |
| 1808 | (void *)VM_KERNEL_ADDRPERM(task), |
| 1809 | (void *)VM_KERNEL_ADDRPERM(fsroot), |
| 1810 | cpu, cpu_subtype, is_64bit, kr)); |
| 1811 | return kr; |
| 1812 | } |
| 1813 | |
#define SANE_SLIDE_INFO_SIZE (2560*1024) /* can be changed if needed */
| 1815 | struct vm_shared_region_slide_info slide_info; |
| 1816 | |
| 1817 | kern_return_t |
| 1818 | vm_shared_region_sliding_valid(uint32_t slide) |
| 1819 | { |
| 1820 | kern_return_t kr = KERN_SUCCESS; |
| 1821 | vm_shared_region_t sr = vm_shared_region_get(current_task()); |
| 1822 | |
| 1823 | /* No region yet? we're fine. */ |
| 1824 | if (sr == NULL) { |
| 1825 | return kr; |
| 1826 | } |
| 1827 | |
| 1828 | if ((sr->sr_slid == TRUE) && slide) { |
| 1829 | if (slide != vm_shared_region_get_slide_info(sr)->slide) { |
| 1830 | printf("Only one shared region can be slid\n" ); |
| 1831 | kr = KERN_FAILURE; |
| 1832 | } else { |
| 1833 | /* |
| 1834 | * Request for sliding when we've |
| 1835 | * already done it with exactly the |
| 1836 | * same slide value before. |
| 1837 | * This isn't wrong technically but |
| 1838 | * we don't want to slide again and |
| 1839 | * so we return this value. |
| 1840 | */ |
| 1841 | kr = KERN_INVALID_ARGUMENT; |
| 1842 | } |
| 1843 | } |
| 1844 | vm_shared_region_deallocate(sr); |
| 1845 | return kr; |
| 1846 | } |
| 1847 | |
| 1848 | kern_return_t |
| 1849 | vm_shared_region_slide_mapping( |
| 1850 | vm_shared_region_t sr, |
| 1851 | mach_vm_size_t slide_info_size, |
| 1852 | mach_vm_offset_t start, |
| 1853 | mach_vm_size_t size, |
| 1854 | mach_vm_offset_t slid_mapping, |
| 1855 | uint32_t slide, |
| 1856 | memory_object_control_t sr_file_control) |
| 1857 | { |
| 1858 | kern_return_t kr; |
| 1859 | vm_object_t object; |
| 1860 | vm_shared_region_slide_info_t si; |
| 1861 | vm_offset_t slide_info_entry; |
| 1862 | vm_map_entry_t slid_entry, tmp_entry; |
| 1863 | struct vm_map_entry tmp_entry_store; |
memory_object_t sr_pager;
| 1865 | vm_map_t sr_map; |
| 1866 | int vm_flags; |
| 1867 | vm_map_kernel_flags_t vmk_flags; |
| 1868 | vm_map_offset_t map_addr; |
| 1869 | |
| 1870 | tmp_entry = VM_MAP_ENTRY_NULL; |
| 1871 | sr_pager = MEMORY_OBJECT_NULL; |
| 1872 | object = VM_OBJECT_NULL; |
| 1873 | slide_info_entry = 0; |
| 1874 | |
| 1875 | assert(sr->sr_slide_in_progress); |
| 1876 | assert(!sr->sr_slid); |
| 1877 | |
| 1878 | si = vm_shared_region_get_slide_info(sr); |
| 1879 | assert(si->slide_object == VM_OBJECT_NULL); |
| 1880 | assert(si->slide_info_entry == NULL); |
| 1881 | |
| 1882 | if (sr_file_control == MEMORY_OBJECT_CONTROL_NULL) { |
| 1883 | return KERN_INVALID_ARGUMENT; |
| 1884 | } |
| 1885 | if (slide_info_size > SANE_SLIDE_INFO_SIZE) { |
| 1886 | printf("Slide_info_size too large: %lx\n" , (uintptr_t)slide_info_size); |
| 1887 | return KERN_FAILURE; |
| 1888 | } |
| 1889 | |
| 1890 | kr = kmem_alloc(kernel_map, |
| 1891 | (vm_offset_t *) &slide_info_entry, |
| 1892 | (vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK); |
| 1893 | if (kr != KERN_SUCCESS) { |
| 1894 | return kr; |
| 1895 | } |
| 1896 | |
| 1897 | object = memory_object_control_to_vm_object(sr_file_control); |
| 1898 | if (object == VM_OBJECT_NULL || object->internal) { |
| 1899 | object = VM_OBJECT_NULL; |
| 1900 | kr = KERN_INVALID_ADDRESS; |
| 1901 | goto done; |
| 1902 | } |
| 1903 | |
| 1904 | vm_object_lock(object); |
| 1905 | vm_object_reference_locked(object); /* for si->slide_object */ |
| 1906 | object->object_is_shared_cache = TRUE; |
| 1907 | vm_object_unlock(object); |
| 1908 | |
| 1909 | si->slide_info_entry = (vm_shared_region_slide_info_entry_t)slide_info_entry; |
| 1910 | si->slide_info_size = slide_info_size; |
| 1911 | |
| 1912 | assert(slid_mapping != (mach_vm_offset_t) -1); |
| 1913 | si->slid_address = slid_mapping + sr->sr_base_address; |
| 1914 | si->slide_object = object; |
| 1915 | si->start = start; |
| 1916 | si->end = si->start + size; |
| 1917 | si->slide = slide; |
| 1918 | |
| 1919 | /* find the shared region's map entry to slide */ |
| 1920 | sr_map = vm_shared_region_vm_map(sr); |
| 1921 | vm_map_lock_read(sr_map); |
| 1922 | if (!vm_map_lookup_entry(sr_map, |
| 1923 | slid_mapping, |
| 1924 | &slid_entry)) { |
| 1925 | /* no mapping there */ |
vm_map_unlock_read(sr_map);
| 1927 | kr = KERN_INVALID_ARGUMENT; |
| 1928 | goto done; |
| 1929 | } |
| 1930 | /* |
| 1931 | * We might want to clip the entry to cover only the portion that |
| 1932 | * needs sliding (offsets si->start to si->end in the shared cache |
| 1933 | * file at the bottom of the shadow chain). |
| 1934 | * In practice, it seems to cover the entire DATA segment... |
| 1935 | */ |
| 1936 | tmp_entry_store = *slid_entry; |
| 1937 | tmp_entry = &tmp_entry_store; |
| 1938 | slid_entry = VM_MAP_ENTRY_NULL; |
| 1939 | /* extra ref to keep object alive while map is unlocked */ |
| 1940 | vm_object_reference(VME_OBJECT(tmp_entry)); |
| 1941 | vm_map_unlock_read(sr_map); |
| 1942 | |
| 1943 | /* create a "shared_region" sliding pager */ |
| 1944 | sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry), |
| 1945 | VME_OFFSET(tmp_entry), |
| 1946 | si); |
| 1947 | if (sr_pager == NULL) { |
| 1948 | kr = KERN_RESOURCE_SHORTAGE; |
| 1949 | goto done; |
| 1950 | } |
| 1951 | |
| 1952 | /* map that pager over the portion of the mapping that needs sliding */ |
| 1953 | vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE; |
| 1954 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; |
| 1955 | vmk_flags.vmkf_overwrite_immutable = TRUE; |
| 1956 | map_addr = tmp_entry->vme_start; |
| 1957 | kr = vm_map_enter_mem_object(sr_map, |
| 1958 | &map_addr, |
| 1959 | (tmp_entry->vme_end - |
| 1960 | tmp_entry->vme_start), |
| 1961 | (mach_vm_offset_t) 0, |
| 1962 | vm_flags, |
| 1963 | vmk_flags, |
| 1964 | VM_KERN_MEMORY_NONE, |
| 1965 | (ipc_port_t)(uintptr_t) sr_pager, |
| 1966 | 0, |
| 1967 | TRUE, |
| 1968 | tmp_entry->protection, |
| 1969 | tmp_entry->max_protection, |
| 1970 | tmp_entry->inheritance); |
| 1971 | assertf(kr == KERN_SUCCESS, "kr = 0x%x\n" , kr); |
| 1972 | assertf(map_addr == tmp_entry->vme_start, |
| 1973 | "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n" , |
| 1974 | (uint64_t)map_addr, |
| 1975 | (uint64_t) tmp_entry->vme_start, |
| 1976 | tmp_entry); |
| 1977 | |
| 1978 | /* success! */ |
| 1979 | kr = KERN_SUCCESS; |
| 1980 | |
| 1981 | done: |
| 1982 | if (sr_pager) { |
| 1983 | /* |
| 1984 | * Release the sr_pager reference obtained by |
| 1985 | * shared_region_pager_setup(). |
| 1986 | * The mapping (if it succeeded) is now holding a reference on |
| 1987 | * the memory object. |
| 1988 | */ |
| 1989 | memory_object_deallocate(sr_pager); |
| 1990 | sr_pager = MEMORY_OBJECT_NULL; |
| 1991 | } |
| 1992 | if (tmp_entry) { |
| 1993 | /* release extra ref on tmp_entry's VM object */ |
| 1994 | vm_object_deallocate(VME_OBJECT(tmp_entry)); |
| 1995 | tmp_entry = VM_MAP_ENTRY_NULL; |
| 1996 | } |
| 1997 | |
| 1998 | if (kr != KERN_SUCCESS) { |
| 1999 | /* cleanup */ |
| 2000 | if (slide_info_entry) { |
| 2001 | kmem_free(kernel_map, slide_info_entry, slide_info_size); |
| 2002 | slide_info_entry = 0; |
| 2003 | } |
| 2004 | if (si->slide_object) { |
| 2005 | vm_object_deallocate(si->slide_object); |
| 2006 | si->slide_object = VM_OBJECT_NULL; |
| 2007 | } |
| 2008 | } |
| 2009 | return kr; |
| 2010 | } |
| 2011 | |
void *
vm_shared_region_get_slide_info_entry(vm_shared_region_t sr)
{
return (void *)sr->sr_slide_info.slide_info_entry;
| 2015 | } |
| 2016 | |
| 2017 | static kern_return_t |
| 2018 | vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info) |
| 2019 | { |
uint32_t pageIndex = 0;
uint16_t entryIndex = 0;
uint16_t *toc = NULL;

toc = (uint16_t *)((uintptr_t)s_info + s_info->toc_offset);
for (; pageIndex < s_info->toc_count; pageIndex++) {
entryIndex = (uint16_t)(toc[pageIndex]);
if (entryIndex >= s_info->entry_count) {
printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count);
return KERN_FAILURE;
}
}
| 2035 | return KERN_SUCCESS; |
| 2036 | } |
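
/*
 * Note on the v1 layout: the TOC maps each page index to an index into a
 * shared table of sliding bitmaps, so several pages with identical fixup
 * patterns can reference the same bitmap entry. The check above only
 * verifies that every TOC entry stays within entry_count; the bitmap
 * contents themselves are consumed later by vm_shared_region_slide_page_v1().
 */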
| 2037 | |
| 2038 | static kern_return_t |
| 2039 | vm_shared_region_slide_sanity_check_v2(vm_shared_region_slide_info_entry_v2_t s_info, mach_vm_size_t slide_info_size) |
| 2040 | { |
| 2041 | if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) { |
| 2042 | return KERN_FAILURE; |
| 2043 | } |
| 2044 | |
| 2045 | /* Ensure that the slide info doesn't reference any data outside of its bounds. */ |
| 2046 | |
| 2047 | uint32_t page_starts_count = s_info->page_starts_count; |
| 2048 | uint32_t = s_info->page_extras_count; |
| 2049 | mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count; |
| 2050 | if (num_trailing_entries < page_starts_count) { |
| 2051 | return KERN_FAILURE; |
| 2052 | } |
| 2053 | |
| 2054 | /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */ |
| 2055 | mach_vm_size_t trailing_size = num_trailing_entries << 1; |
| 2056 | if (trailing_size >> 1 != num_trailing_entries) { |
| 2057 | return KERN_FAILURE; |
| 2058 | } |
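
/*
 * Example of what the round-trip check above catches (illustrative):
 * if num_trailing_entries were 1ULL << 63, "num_trailing_entries << 1"
 * would wrap to 0, and shifting that back right by 1 would no longer
 * reproduce the original value, so the corrupt slide info is rejected.
 */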
| 2059 | |
| 2060 | mach_vm_size_t required_size = sizeof(*s_info) + trailing_size; |
| 2061 | if (required_size < sizeof(*s_info)) { |
| 2062 | return KERN_FAILURE; |
| 2063 | } |
| 2064 | |
| 2065 | if (required_size > slide_info_size) { |
| 2066 | return KERN_FAILURE; |
| 2067 | } |
| 2068 | |
| 2069 | return KERN_SUCCESS; |
| 2070 | } |
| 2071 | |
| 2072 | static kern_return_t |
| 2073 | vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_info, mach_vm_size_t slide_info_size) |
| 2074 | { |
| 2075 | if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) { |
| 2076 | printf("vm_shared_region_slide_sanity_check_v3: s_info->page_size != PAGE_SIZE_FOR_SR_SL 0x%llx != 0x%llx\n" , (uint64_t)s_info->page_size, (uint64_t)PAGE_SIZE_FOR_SR_SLIDE); |
| 2077 | return KERN_FAILURE; |
| 2078 | } |
| 2079 | |
| 2080 | uint32_t page_starts_count = s_info->page_starts_count; |
| 2081 | mach_vm_size_t num_trailing_entries = page_starts_count; |
| 2082 | mach_vm_size_t trailing_size = num_trailing_entries << 1; |
| 2083 | mach_vm_size_t required_size = sizeof(*s_info) + trailing_size; |
| 2084 | if (required_size < sizeof(*s_info)) { |
| 2085 | printf("vm_shared_region_slide_sanity_check_v3: required_size != sizeof(*s_info) 0x%llx != 0x%llx\n" , (uint64_t)required_size, (uint64_t)sizeof(*s_info)); |
| 2086 | return KERN_FAILURE; |
| 2087 | } |
| 2088 | |
| 2089 | if (required_size > slide_info_size) { |
| 2090 | printf("vm_shared_region_slide_sanity_check_v3: required_size != slide_info_size 0x%llx != 0x%llx\n" , (uint64_t)required_size, (uint64_t)slide_info_size); |
| 2091 | return KERN_FAILURE; |
| 2092 | } |
| 2093 | |
| 2094 | return KERN_SUCCESS; |
| 2095 | } |
| 2096 | |
| 2097 | static kern_return_t |
| 2098 | vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_info, mach_vm_size_t slide_info_size) |
| 2099 | { |
| 2100 | if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) { |
| 2101 | return KERN_FAILURE; |
| 2102 | } |
| 2103 | |
| 2104 | /* Ensure that the slide info doesn't reference any data outside of its bounds. */ |
| 2105 | |
| 2106 | uint32_t page_starts_count = s_info->page_starts_count; |
| 2107 | uint32_t = s_info->page_extras_count; |
| 2108 | mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count; |
| 2109 | if (num_trailing_entries < page_starts_count) { |
| 2110 | return KERN_FAILURE; |
| 2111 | } |
| 2112 | |
| 2113 | /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */ |
| 2114 | mach_vm_size_t trailing_size = num_trailing_entries << 1; |
| 2115 | if (trailing_size >> 1 != num_trailing_entries) { |
| 2116 | return KERN_FAILURE; |
| 2117 | } |
| 2118 | |
| 2119 | mach_vm_size_t required_size = sizeof(*s_info) + trailing_size; |
| 2120 | if (required_size < sizeof(*s_info)) { |
| 2121 | return KERN_FAILURE; |
| 2122 | } |
| 2123 | |
| 2124 | if (required_size > slide_info_size) { |
| 2125 | return KERN_FAILURE; |
| 2126 | } |
| 2127 | |
| 2128 | return KERN_SUCCESS; |
| 2129 | } |
| 2130 | |
| 2131 | |
| 2132 | kern_return_t |
| 2133 | vm_shared_region_slide_sanity_check(vm_shared_region_t sr) |
| 2134 | { |
| 2135 | vm_shared_region_slide_info_t si; |
| 2136 | vm_shared_region_slide_info_entry_t s_info; |
| 2137 | kern_return_t kr; |
| 2138 | |
| 2139 | si = vm_shared_region_get_slide_info(sr); |
| 2140 | s_info = si->slide_info_entry; |
| 2141 | |
| 2142 | kr = mach_vm_protect(kernel_map, |
| 2143 | (mach_vm_offset_t)(vm_offset_t)s_info, |
| 2144 | (mach_vm_size_t) si->slide_info_size, |
| 2145 | TRUE, VM_PROT_READ); |
| 2146 | if (kr != KERN_SUCCESS) { |
| 2147 | panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n" , kr); |
| 2148 | } |
| 2149 | |
| 2150 | if (s_info->version == 1) { |
| 2151 | kr = vm_shared_region_slide_sanity_check_v1(&s_info->v1); |
| 2152 | } else if (s_info->version == 2) { |
| 2153 | kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size); |
| 2154 | } else if (s_info->version == 3) { |
| 2155 | kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, si->slide_info_size); |
| 2156 | } else if (s_info->version == 4) { |
| 2157 | kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size); |
| 2158 | } else { |
| 2159 | goto fail; |
| 2160 | } |
| 2161 | if (kr != KERN_SUCCESS) { |
| 2162 | goto fail; |
| 2163 | } |
| 2164 | |
| 2165 | return KERN_SUCCESS; |
| 2166 | fail: |
| 2167 | if (si->slide_info_entry != NULL) { |
| 2168 | kmem_free(kernel_map, |
| 2169 | (vm_offset_t) si->slide_info_entry, |
| 2170 | (vm_size_t) si->slide_info_size); |
| 2171 | |
| 2172 | vm_object_deallocate(si->slide_object); |
| 2173 | si->slide_object = NULL; |
| 2174 | si->start = 0; |
| 2175 | si->end = 0; |
| 2176 | si->slide = 0; |
| 2177 | si->slide_info_entry = NULL; |
| 2178 | si->slide_info_size = 0; |
| 2179 | } |
| 2180 | return KERN_FAILURE; |
| 2181 | } |
| 2182 | |
| 2183 | static kern_return_t |
| 2184 | vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex) |
| 2185 | { |
| 2186 | uint16_t *toc = NULL; |
| 2187 | slide_info_entry_toc_t bitmap = NULL; |
uint32_t i = 0, j = 0;
| 2189 | uint8_t b = 0; |
| 2190 | uint32_t slide = si->slide; |
| 2191 | int is_64 = task_has_64Bit_addr(current_task()); |
| 2192 | |
| 2193 | vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1; |
| 2194 | toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset); |
| 2195 | |
| 2196 | if (pageIndex >= s_info->toc_count) { |
| 2197 | printf("No slide entry for this page in toc. PageIndex: %d Toc Count: %d\n" , pageIndex, s_info->toc_count); |
| 2198 | } else { |
| 2199 | uint16_t entryIndex = (uint16_t)(toc[pageIndex]); |
| 2200 | slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset); |
| 2201 | |
| 2202 | if (entryIndex >= s_info->entry_count) { |
| 2203 | printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n" , entryIndex, s_info->entry_count); |
| 2204 | } else { |
| 2205 | bitmap = &slide_info_entries[entryIndex]; |
| 2206 | |
| 2207 | for(i=0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) { |
| 2208 | b = bitmap->entry[i]; |
| 2209 | if (b!=0) { |
| 2210 | for (j=0; j <8; ++j) { |
| 2211 | if (b & (1 <<j)){ |
| 2212 | uint32_t *ptr_to_slide; |
| 2213 | uint32_t old_value; |
| 2214 | |
| 2215 | ptr_to_slide = (uint32_t*)((uintptr_t)(vaddr)+(sizeof(uint32_t)*(i*8 +j))); |
| 2216 | old_value = *ptr_to_slide; |
| 2217 | *ptr_to_slide += slide; |
| 2218 | if (is_64 && *ptr_to_slide < old_value) { |
| 2219 | /* |
| 2220 | * We just slid the low 32 bits of a 64-bit pointer |
| 2221 | * and it looks like there should have been a carry-over |
| 2222 | * to the upper 32 bits. |
| 2223 | * The sliding failed... |
| 2224 | */ |
| 2225 | printf("vm_shared_region_slide() carry over: i=%d j=%d b=0x%x slide=0x%x old=0x%x new=0x%x\n" , |
| 2226 | i, j, b, slide, old_value, *ptr_to_slide); |
| 2227 | return KERN_FAILURE; |
| 2228 | } |
| 2229 | } |
| 2230 | } |
| 2231 | } |
| 2232 | } |
| 2233 | } |
| 2234 | } |
| 2235 | |
| 2236 | return KERN_SUCCESS; |
| 2237 | } |
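
/*
 * Addressing sketch for the v1 bitmap walk above: bit j of bitmap byte i
 * marks the 32-bit word at byte offset sizeof(uint32_t) * (i * 8 + j)
 * within the page. For example, i = 1 and j = 3 select bit 11, i.e. the
 * word at byte offset 44 from vaddr.
 */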
| 2238 | |
| 2239 | static kern_return_t |
| 2240 | rebase_chain_32( |
| 2241 | uint8_t *page_content, |
| 2242 | uint16_t start_offset, |
| 2243 | uint32_t slide_amount, |
| 2244 | vm_shared_region_slide_info_entry_v2_t s_info) |
| 2245 | { |
| 2246 | const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t); |
| 2247 | |
| 2248 | const uint32_t delta_mask = (uint32_t)(s_info->delta_mask); |
| 2249 | const uint32_t value_mask = ~delta_mask; |
| 2250 | const uint32_t value_add = (uint32_t)(s_info->value_add); |
| 2251 | const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2; |
| 2252 | |
| 2253 | uint32_t page_offset = start_offset; |
| 2254 | uint32_t delta = 1; |
| 2255 | |
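/*
 * Delta decoding sketch: delta_shift is __builtin_ctzll(delta_mask) - 2,
 * so a raw delta of N stored in the delta bits decodes to a byte offset
 * of 4 * N. A decoded delta of 0 terminates the chain for this page.
 */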
| 2256 | while (delta != 0 && page_offset <= last_page_offset) { |
| 2257 | uint8_t *loc; |
| 2258 | uint32_t value; |
| 2259 | |
| 2260 | loc = page_content + page_offset; |
| 2261 | memcpy(&value, loc, sizeof(value)); |
| 2262 | delta = (value & delta_mask) >> delta_shift; |
| 2263 | value &= value_mask; |
| 2264 | |
| 2265 | if (value != 0) { |
| 2266 | value += value_add; |
| 2267 | value += slide_amount; |
| 2268 | } |
| 2269 | memcpy(loc, &value, sizeof(value)); |
| 2270 | page_offset += delta; |
| 2271 | } |
| 2272 | |
| 2273 | /* If the offset went past the end of the page, then the slide data is invalid. */ |
| 2274 | if (page_offset > last_page_offset) { |
| 2275 | return KERN_FAILURE; |
| 2276 | } |
| 2277 | return KERN_SUCCESS; |
| 2278 | } |
| 2279 | |
| 2280 | static kern_return_t |
| 2281 | rebase_chain_64( |
| 2282 | uint8_t *page_content, |
| 2283 | uint16_t start_offset, |
| 2284 | uint32_t slide_amount, |
| 2285 | vm_shared_region_slide_info_entry_v2_t s_info) |
| 2286 | { |
| 2287 | const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint64_t); |
| 2288 | |
| 2289 | const uint64_t delta_mask = s_info->delta_mask; |
| 2290 | const uint64_t value_mask = ~delta_mask; |
| 2291 | const uint64_t value_add = s_info->value_add; |
| 2292 | const uint64_t delta_shift = __builtin_ctzll(delta_mask) - 2; |
| 2293 | |
| 2294 | uint32_t page_offset = start_offset; |
| 2295 | uint32_t delta = 1; |
| 2296 | |
| 2297 | while (delta != 0 && page_offset <= last_page_offset) { |
| 2298 | uint8_t *loc; |
| 2299 | uint64_t value; |
| 2300 | |
| 2301 | loc = page_content + page_offset; |
| 2302 | memcpy(&value, loc, sizeof(value)); |
| 2303 | delta = (uint32_t)((value & delta_mask) >> delta_shift); |
| 2304 | value &= value_mask; |
| 2305 | |
| 2306 | if (value != 0) { |
| 2307 | value += value_add; |
| 2308 | value += slide_amount; |
| 2309 | } |
| 2310 | memcpy(loc, &value, sizeof(value)); |
| 2311 | page_offset += delta; |
| 2312 | } |
| 2313 | |
| 2314 | if (page_offset + sizeof(uint32_t) == PAGE_SIZE_FOR_SR_SLIDE) { |
| 2315 | /* If a pointer straddling the page boundary needs to be adjusted, then |
| 2316 | * add the slide to the lower half. The encoding guarantees that the upper |
| 2317 | * half on the next page will need no masking. |
| 2318 | * |
| 2319 | * This assumes a little-endian machine and that the region being slid |
| 2320 | * never crosses a 4 GB boundary. */ |
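
/*
 * Concrete case (illustrative): a 64-bit pointer starting at page
 * offset 4092 leaves its low 4 bytes on this page and its high 4
 * bytes at offset 0 of the next page; only the low half is adjusted
 * here, which is exactly the page_offset + 4 == 4096 case.
 */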
| 2321 | |
| 2322 | uint8_t *loc = page_content + page_offset; |
| 2323 | uint32_t value; |
| 2324 | |
| 2325 | memcpy(&value, loc, sizeof(value)); |
| 2326 | value += slide_amount; |
| 2327 | memcpy(loc, &value, sizeof(value)); |
| 2328 | } else if (page_offset > last_page_offset) { |
| 2329 | return KERN_FAILURE; |
| 2330 | } |
| 2331 | |
| 2332 | return KERN_SUCCESS; |
| 2333 | } |
| 2334 | |
| 2335 | static kern_return_t |
| 2336 | rebase_chain( |
| 2337 | boolean_t is_64, |
| 2338 | uint32_t pageIndex, |
| 2339 | uint8_t *page_content, |
| 2340 | uint16_t start_offset, |
| 2341 | uint32_t slide_amount, |
| 2342 | vm_shared_region_slide_info_entry_v2_t s_info) |
| 2343 | { |
| 2344 | kern_return_t kr; |
| 2345 | if (is_64) { |
| 2346 | kr = rebase_chain_64(page_content, start_offset, slide_amount, s_info); |
| 2347 | } else { |
| 2348 | kr = rebase_chain_32(page_content, start_offset, slide_amount, s_info); |
| 2349 | } |
| 2350 | |
| 2351 | if (kr != KERN_SUCCESS) { |
| 2352 | printf("vm_shared_region_slide_page() offset overflow: pageIndex=%u, start_offset=%u, slide_amount=%u\n" , |
| 2353 | pageIndex, start_offset, slide_amount); |
| 2354 | } |
| 2355 | return kr; |
| 2356 | } |
| 2357 | |
| 2358 | static kern_return_t |
| 2359 | vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex) |
| 2360 | { |
| 2361 | vm_shared_region_slide_info_entry_v2_t s_info = &si->slide_info_entry->v2; |
| 2362 | const uint32_t slide_amount = si->slide; |
| 2363 | |
| 2364 | /* The high bits of the delta_mask field are nonzero precisely when the shared |
| 2365 | * cache is 64-bit. */ |
| 2366 | const boolean_t is_64 = (s_info->delta_mask >> 32) != 0; |
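/*
 * For example (hypothetical mask value): a 64-bit cache might publish
 * delta_mask = 0x00FF000000000000ULL, whose upper 32 bits are nonzero,
 * while a 32-bit cache's mask fits entirely in the low 32 bits.
 */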
| 2367 | |
| 2368 | const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset); |
| 2369 | const uint16_t * = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset); |
| 2370 | |
| 2371 | uint8_t *page_content = (uint8_t *)vaddr; |
| 2372 | uint16_t page_entry; |
| 2373 | |
| 2374 | if (pageIndex >= s_info->page_starts_count) { |
| 2375 | printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n" , |
| 2376 | pageIndex, s_info->page_starts_count); |
| 2377 | return KERN_FAILURE; |
| 2378 | } |
| 2379 | page_entry = page_starts[pageIndex]; |
| 2380 | |
| 2381 | if (page_entry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE) { |
| 2382 | return KERN_SUCCESS; |
| 2383 | } |
| 2384 | |
| 2385 | if (page_entry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) { |
| 2386 | uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE_PAGE_VALUE; |
| 2387 | uint16_t info; |
| 2388 | |
| 2389 | do { |
| 2390 | uint16_t page_start_offset; |
| 2391 | kern_return_t kr; |
| 2392 | |
| 2393 | if (chain_index >= s_info->page_extras_count) { |
| 2394 | printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n" , |
| 2395 | chain_index, s_info->page_extras_count); |
| 2396 | return KERN_FAILURE; |
| 2397 | } |
| 2398 | info = page_extras[chain_index]; |
| 2399 | page_start_offset = (info & DYLD_CACHE_SLIDE_PAGE_VALUE) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; |
| 2400 | |
| 2401 | kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info); |
| 2402 | if (kr != KERN_SUCCESS) { |
| 2403 | return KERN_FAILURE; |
| 2404 | } |
| 2405 | |
| 2406 | chain_index++; |
| 2407 | } while (!(info & DYLD_CACHE_SLIDE_PAGE_ATTR_END)); |
| 2408 | } else { |
| 2409 | const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; |
| 2410 | kern_return_t kr; |
| 2411 | |
| 2412 | kr = rebase_chain(is_64, pageIndex, page_content, page_start_offset, slide_amount, s_info); |
| 2413 | if (kr != KERN_SUCCESS) { |
| 2414 | return KERN_FAILURE; |
| 2415 | } |
| 2416 | } |
| 2417 | |
| 2418 | return KERN_SUCCESS; |
| 2419 | } |
| 2420 | |
| 2421 | |
| 2422 | static kern_return_t |
| 2423 | vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vaddr, __unused mach_vm_offset_t uservaddr, uint32_t pageIndex) |
| 2424 | { |
| 2425 | vm_shared_region_slide_info_entry_v3_t s_info = &si->slide_info_entry->v3; |
| 2426 | const uint32_t slide_amount = si->slide; |
| 2427 | |
| 2428 | uint8_t *page_content = (uint8_t *)vaddr; |
| 2429 | uint16_t page_entry; |
| 2430 | |
| 2431 | if (pageIndex >= s_info->page_starts_count) { |
| 2432 | printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n" , |
| 2433 | pageIndex, s_info->page_starts_count); |
| 2434 | return KERN_FAILURE; |
| 2435 | } |
| 2436 | page_entry = s_info->page_starts[pageIndex]; |
| 2437 | |
| 2438 | if (page_entry == DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE) { |
| 2439 | return KERN_SUCCESS; |
| 2440 | } |
| 2441 | |
| 2442 | uint8_t* rebaseLocation = page_content; |
| 2443 | uint64_t delta = page_entry; |
| 2444 | do { |
| 2445 | rebaseLocation += delta; |
| 2446 | uint64_t value; |
| 2447 | memcpy(&value, rebaseLocation, sizeof(value)); |
| 2448 | delta = ( (value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t); |
| 2449 | |
| 2450 | // A pointer is one of : |
| 2451 | // { |
| 2452 | // uint64_t pointerValue : 51; |
| 2453 | // uint64_t offsetToNextPointer : 11; |
| 2454 | // uint64_t isBind : 1 = 0; |
| 2455 | // uint64_t authenticated : 1 = 0; |
| 2456 | // } |
| 2457 | // { |
| 2458 | // uint32_t offsetFromSharedCacheBase; |
| 2459 | // uint16_t diversityData; |
| 2460 | // uint16_t hasAddressDiversity : 1; |
| 2461 | // uint16_t hasDKey : 1; |
| 2462 | // uint16_t hasBKey : 1; |
| 2463 | // uint16_t offsetToNextPointer : 11; |
| 2464 | // uint16_t isBind : 1; |
| 2465 | // uint16_t authenticated : 1 = 1; |
| 2466 | // } |
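// Worked example for the delta extraction above (illustrative): bits
// 51..61 hold offsetToNextPointer in units of 8 bytes, so a raw field
// value of 2 places the next pointer 16 bytes further on, and a raw
// value of 0 ends the chain for this page.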
| 2467 | |
| 2468 | bool isBind = (value & (1ULL << 62)) == 1; |
| 2469 | if (isBind) { |
| 2470 | return KERN_FAILURE; |
| 2471 | } |
| 2472 | |
| 2473 | bool isAuthenticated = (value & (1ULL << 63)) != 0; |
| 2474 | |
| 2475 | if (isAuthenticated) { |
| 2476 | // The new value for a rebase is the low 32-bits of the threaded value plus the slide. |
| 2477 | value = (value & 0xFFFFFFFF) + slide_amount; |
| 2478 | // Add in the offset from the mach_header |
| 2479 | const uint64_t value_add = s_info->value_add; |
| 2480 | value += value_add; |
| 2481 | |
| 2482 | } else { |
| 2483 | // The new value for a rebase is the low 51-bits of the threaded value plus the slide. |
| 2484 | // Regular pointer which needs to fit in 51-bits of value. |
| 2485 | // C++ RTTI uses the top bit, so we'll allow the whole top-byte |
| 2486 | // and the bottom 43-bits to be fit in to 51-bits. |
| 2487 | uint64_t top8Bits = value & 0x0007F80000000000ULL; |
| 2488 | uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL; |
| 2489 | uint64_t targetValue = ( top8Bits << 13 ) | bottom43Bits; |
| 2490 | value = targetValue + slide_amount; |
| 2491 | } |
| 2492 | |
| 2493 | memcpy(rebaseLocation, &value, sizeof(value)); |
| 2494 | } while (delta != 0); |
| 2495 | |
| 2496 | return KERN_SUCCESS; |
| 2497 | } |
| 2498 | |
| 2499 | static kern_return_t |
| 2500 | rebase_chainv4( |
| 2501 | uint8_t *page_content, |
| 2502 | uint16_t start_offset, |
| 2503 | uint32_t slide_amount, |
| 2504 | vm_shared_region_slide_info_entry_v4_t s_info) |
| 2505 | { |
| 2506 | const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t); |
| 2507 | |
| 2508 | const uint32_t delta_mask = (uint32_t)(s_info->delta_mask); |
| 2509 | const uint32_t value_mask = ~delta_mask; |
| 2510 | const uint32_t value_add = (uint32_t)(s_info->value_add); |
| 2511 | const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2; |
| 2512 | |
| 2513 | uint32_t page_offset = start_offset; |
| 2514 | uint32_t delta = 1; |
| 2515 | |
| 2516 | while (delta != 0 && page_offset <= last_page_offset) { |
| 2517 | uint8_t *loc; |
| 2518 | uint32_t value; |
| 2519 | |
| 2520 | loc = page_content + page_offset; |
| 2521 | memcpy(&value, loc, sizeof(value)); |
| 2522 | delta = (value & delta_mask) >> delta_shift; |
| 2523 | value &= value_mask; |
| 2524 | |
| 2525 | if ( (value & 0xFFFF8000) == 0 ) { |
| 2526 | // small positive non-pointer, use as-is |
| 2527 | } else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) { |
| 2528 | // small negative non-pointer |
| 2529 | value |= 0xC0000000; |
| 2530 | } else { |
| 2531 | // pointer that needs rebasing |
| 2532 | value += value_add; |
| 2533 | value += slide_amount; |
| 2534 | } |
| 2535 | memcpy(loc, &value, sizeof(value)); |
| 2536 | page_offset += delta; |
| 2537 | } |
| 2538 | |
| 2539 | /* If the offset went past the end of the page, then the slide data is invalid. */ |
| 2540 | if (page_offset > last_page_offset) { |
| 2541 | return KERN_FAILURE; |
| 2542 | } |
| 2543 | return KERN_SUCCESS; |
| 2544 | } |
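
/*
 * Encoding examples for rebase_chainv4() (illustrative values): 0x00001234
 * has no bits set in 0xFFFF8000 and is kept as-is; 0x3FFF9000 matches the
 * small-negative pattern and is sign-extended to 0xFFFF9000 by OR-ing in
 * 0xC0000000; anything else is treated as a pointer and gets value_add
 * and the slide applied.
 */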
| 2545 | |
| 2546 | static kern_return_t |
| 2547 | vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex) |
| 2548 | { |
| 2549 | vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4; |
| 2550 | const uint32_t slide_amount = si->slide; |
| 2551 | |
| 2552 | const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset); |
| 2553 | const uint16_t * = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset); |
| 2554 | |
| 2555 | uint8_t *page_content = (uint8_t *)vaddr; |
| 2556 | uint16_t page_entry; |
| 2557 | |
| 2558 | if (pageIndex >= s_info->page_starts_count) { |
| 2559 | printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n" , |
| 2560 | pageIndex, s_info->page_starts_count); |
| 2561 | return KERN_FAILURE; |
| 2562 | } |
| 2563 | page_entry = page_starts[pageIndex]; |
| 2564 | |
| 2565 | if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) { |
| 2566 | return KERN_SUCCESS; |
| 2567 | } |
| 2568 | |
| 2569 | if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) { |
| 2570 | uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX; |
| 2571 | uint16_t info; |
| 2572 | |
| 2573 | do { |
| 2574 | uint16_t page_start_offset; |
| 2575 | kern_return_t kr; |
| 2576 | |
| 2577 | if (chain_index >= s_info->page_extras_count) { |
| 2578 | printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n" , |
| 2579 | chain_index, s_info->page_extras_count); |
| 2580 | return KERN_FAILURE; |
| 2581 | } |
| 2582 | info = page_extras[chain_index]; |
| 2583 | page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; |
| 2584 | |
| 2585 | kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info); |
| 2586 | if (kr != KERN_SUCCESS) { |
| 2587 | return KERN_FAILURE; |
| 2588 | } |
| 2589 | |
| 2590 | chain_index++; |
| 2591 | } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END)); |
| 2592 | } else { |
| 2593 | const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; |
| 2594 | kern_return_t kr; |
| 2595 | |
| 2596 | kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info); |
| 2597 | if (kr != KERN_SUCCESS) { |
| 2598 | return KERN_FAILURE; |
| 2599 | } |
| 2600 | } |
| 2601 | |
| 2602 | return KERN_SUCCESS; |
| 2603 | } |
| 2604 | |
| 2605 | |
| 2606 | |
| 2607 | kern_return_t |
| 2608 | vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, mach_vm_offset_t uservaddr, uint32_t pageIndex) |
| 2609 | { |
| 2610 | if (si->slide_info_entry->version == 1) { |
| 2611 | return vm_shared_region_slide_page_v1(si, vaddr, pageIndex); |
| 2612 | } else if (si->slide_info_entry->version == 2) { |
| 2613 | return vm_shared_region_slide_page_v2(si, vaddr, pageIndex); |
| 2614 | } else if (si->slide_info_entry->version == 3) { |
| 2615 | return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex); |
| 2616 | } else if (si->slide_info_entry->version == 4) { |
| 2617 | return vm_shared_region_slide_page_v4(si, vaddr, pageIndex); |
| 2618 | } else { |
| 2619 | return KERN_FAILURE; |
| 2620 | } |
| 2621 | } |
| 2622 | |
| 2623 | /******************************************************************************/ |
| 2624 | /* Comm page support */ |
| 2625 | /******************************************************************************/ |
| 2626 | |
| 2627 | ipc_port_t commpage32_handle = IPC_PORT_NULL; |
| 2628 | ipc_port_t commpage64_handle = IPC_PORT_NULL; |
| 2629 | vm_named_entry_t commpage32_entry = NULL; |
| 2630 | vm_named_entry_t commpage64_entry = NULL; |
| 2631 | vm_map_t commpage32_map = VM_MAP_NULL; |
| 2632 | vm_map_t commpage64_map = VM_MAP_NULL; |
| 2633 | |
| 2634 | ipc_port_t commpage_text32_handle = IPC_PORT_NULL; |
| 2635 | ipc_port_t commpage_text64_handle = IPC_PORT_NULL; |
| 2636 | vm_named_entry_t commpage_text32_entry = NULL; |
| 2637 | vm_named_entry_t commpage_text64_entry = NULL; |
| 2638 | vm_map_t commpage_text32_map = VM_MAP_NULL; |
| 2639 | vm_map_t commpage_text64_map = VM_MAP_NULL; |
| 2640 | |
| 2641 | user32_addr_t commpage_text32_location = (user32_addr_t) _COMM_PAGE32_TEXT_START; |
| 2642 | user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START; |
| 2643 | |
| 2644 | #if defined(__i386__) || defined(__x86_64__) |
| 2645 | /* |
| 2646 | * Create a memory entry, VM submap and pmap for one commpage. |
| 2647 | */ |
| 2648 | static void |
| 2649 | _vm_commpage_init( |
| 2650 | ipc_port_t *handlep, |
| 2651 | vm_map_size_t size) |
| 2652 | { |
| 2653 | kern_return_t kr; |
| 2654 | vm_named_entry_t mem_entry; |
| 2655 | vm_map_t new_map; |
| 2656 | |
| 2657 | SHARED_REGION_TRACE_DEBUG( |
| 2658 | ("commpage: -> _init(0x%llx)\n" , |
| 2659 | (long long)size)); |
| 2660 | |
| 2661 | kr = mach_memory_entry_allocate(&mem_entry, |
| 2662 | handlep); |
| 2663 | if (kr != KERN_SUCCESS) { |
| 2664 | panic("_vm_commpage_init: could not allocate mem_entry" ); |
| 2665 | } |
| 2666 | new_map = vm_map_create(pmap_create(NULL, 0, 0), 0, size, TRUE); |
| 2667 | if (new_map == VM_MAP_NULL) { |
| 2668 | panic("_vm_commpage_init: could not allocate VM map" ); |
| 2669 | } |
| 2670 | mem_entry->backing.map = new_map; |
| 2671 | mem_entry->internal = TRUE; |
| 2672 | mem_entry->is_sub_map = TRUE; |
| 2673 | mem_entry->offset = 0; |
| 2674 | mem_entry->protection = VM_PROT_ALL; |
| 2675 | mem_entry->size = size; |
| 2676 | |
| 2677 | SHARED_REGION_TRACE_DEBUG( |
| 2678 | ("commpage: _init(0x%llx) <- %p\n" , |
| 2679 | (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep))); |
| 2680 | } |
| 2681 | #endif |
| 2682 | |
| 2683 | |
/*
 * Initialize the comm text pages at boot time.
 */
| 2687 | extern u_int32_t random(void); |
| 2688 | void |
| 2689 | vm_commpage_text_init(void) |
| 2690 | { |
| 2691 | SHARED_REGION_TRACE_DEBUG( |
| 2692 | ("commpage text: ->init()\n" )); |
| 2693 | #if defined(__i386__) || defined(__x86_64__) |
/* create the 32-bit comm text page */
unsigned int offset = (random() % _PFZ32_SLIDE_RANGE) << PAGE_SHIFT; /* restrict to 32-bit max minus 2 pages */
| 2696 | _vm_commpage_init(&commpage_text32_handle, _COMM_PAGE_TEXT_AREA_LENGTH); |
| 2697 | commpage_text32_entry = (vm_named_entry_t) commpage_text32_handle->ip_kobject; |
| 2698 | commpage_text32_map = commpage_text32_entry->backing.map; |
| 2699 | commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset); |
| 2700 | /* XXX if (cpu_is_64bit_capable()) ? */ |
| 2701 | /* create the 64-bit comm page */ |
| 2702 | offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding upto 2Mb range */ |
| 2703 | _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH); |
| 2704 | commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject; |
| 2705 | commpage_text64_map = commpage_text64_entry->backing.map; |
| 2706 | commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset); |
| 2707 | |
| 2708 | commpage_text_populate(); |
| 2709 | #elif defined(__arm64__) || defined(__arm__) |
| 2710 | #else |
| 2711 | #error Unknown architecture. |
| 2712 | #endif /* __i386__ || __x86_64__ */ |
| 2713 | /* populate the routines in here */ |
| 2714 | SHARED_REGION_TRACE_DEBUG( |
| 2715 | ("commpage text: init() <-\n" )); |
| 2716 | |
| 2717 | } |
| 2718 | |
| 2719 | /* |
| 2720 | * Initialize the comm pages at boot time. |
| 2721 | */ |
| 2722 | void |
| 2723 | vm_commpage_init(void) |
| 2724 | { |
| 2725 | SHARED_REGION_TRACE_DEBUG( |
| 2726 | ("commpage: -> init()\n" )); |
| 2727 | |
| 2728 | #if defined(__i386__) || defined(__x86_64__) |
| 2729 | /* create the 32-bit comm page */ |
| 2730 | _vm_commpage_init(&commpage32_handle, _COMM_PAGE32_AREA_LENGTH); |
| 2731 | commpage32_entry = (vm_named_entry_t) commpage32_handle->ip_kobject; |
| 2732 | commpage32_map = commpage32_entry->backing.map; |
| 2733 | |
| 2734 | /* XXX if (cpu_is_64bit_capable()) ? */ |
| 2735 | /* create the 64-bit comm page */ |
| 2736 | _vm_commpage_init(&commpage64_handle, _COMM_PAGE64_AREA_LENGTH); |
| 2737 | commpage64_entry = (vm_named_entry_t) commpage64_handle->ip_kobject; |
| 2738 | commpage64_map = commpage64_entry->backing.map; |
| 2739 | |
| 2740 | #endif /* __i386__ || __x86_64__ */ |
| 2741 | |
| 2742 | /* populate them according to this specific platform */ |
| 2743 | commpage_populate(); |
| 2744 | __commpage_setup = 1; |
| 2745 | #if defined(__i386__) || defined(__x86_64__) |
| 2746 | if (__system_power_source == 0) { |
| 2747 | post_sys_powersource_internal(0, 1); |
| 2748 | } |
| 2749 | #endif /* __i386__ || __x86_64__ */ |
| 2750 | |
| 2751 | SHARED_REGION_TRACE_DEBUG( |
| 2752 | ("commpage: init() <-\n" )); |
| 2753 | } |
| 2754 | |
| 2755 | /* |
| 2756 | * Enter the appropriate comm page into the task's address space. |
| 2757 | * This is called at exec() time via vm_map_exec(). |
| 2758 | */ |
| 2759 | kern_return_t |
| 2760 | vm_commpage_enter( |
| 2761 | vm_map_t map, |
| 2762 | task_t task, |
| 2763 | boolean_t is64bit) |
| 2764 | { |
| 2765 | #if defined(__arm__) |
| 2766 | #pragma unused(is64bit) |
| 2767 | (void)task; |
| 2768 | (void)map; |
| 2769 | return KERN_SUCCESS; |
| 2770 | #elif defined(__arm64__) |
| 2771 | #pragma unused(is64bit) |
| 2772 | (void)task; |
| 2773 | (void)map; |
| 2774 | pmap_insert_sharedpage(vm_map_pmap(map)); |
| 2775 | return KERN_SUCCESS; |
| 2776 | #else |
| 2777 | ipc_port_t commpage_handle, commpage_text_handle; |
| 2778 | vm_map_offset_t commpage_address, objc_address, commpage_text_address; |
| 2779 | vm_map_size_t commpage_size, objc_size, commpage_text_size; |
| 2780 | int vm_flags; |
| 2781 | vm_map_kernel_flags_t vmk_flags; |
| 2782 | kern_return_t kr; |
| 2783 | |
| 2784 | SHARED_REGION_TRACE_DEBUG( |
| 2785 | ("commpage: -> enter(%p,%p)\n" , |
| 2786 | (void *)VM_KERNEL_ADDRPERM(map), |
| 2787 | (void *)VM_KERNEL_ADDRPERM(task))); |
| 2788 | |
| 2789 | commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH; |
| 2790 | /* the comm page is likely to be beyond the actual end of the VM map */ |
| 2791 | vm_flags = VM_FLAGS_FIXED; |
| 2792 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; |
| 2793 | vmk_flags.vmkf_beyond_max = TRUE; |
| 2794 | |
| 2795 | /* select the appropriate comm page for this task */ |
assert(!(is64bit ^ vm_map_is_64bit(map)));
| 2797 | if (is64bit) { |
| 2798 | commpage_handle = commpage64_handle; |
| 2799 | commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS; |
| 2800 | commpage_size = _COMM_PAGE64_AREA_LENGTH; |
| 2801 | objc_size = _COMM_PAGE64_OBJC_SIZE; |
| 2802 | objc_address = _COMM_PAGE64_OBJC_BASE; |
| 2803 | commpage_text_handle = commpage_text64_handle; |
| 2804 | commpage_text_address = (vm_map_offset_t) commpage_text64_location; |
| 2805 | } else { |
| 2806 | commpage_handle = commpage32_handle; |
| 2807 | commpage_address = |
| 2808 | (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS; |
| 2809 | commpage_size = _COMM_PAGE32_AREA_LENGTH; |
| 2810 | objc_size = _COMM_PAGE32_OBJC_SIZE; |
| 2811 | objc_address = _COMM_PAGE32_OBJC_BASE; |
| 2812 | commpage_text_handle = commpage_text32_handle; |
| 2813 | commpage_text_address = (vm_map_offset_t) commpage_text32_location; |
| 2814 | } |
| 2815 | |
| 2816 | vm_tag_t tag = VM_KERN_MEMORY_NONE; |
| 2817 | if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 && |
| 2818 | (commpage_size & (pmap_nesting_size_min - 1)) == 0) { |
| 2819 | /* the commpage is properly aligned or sized for pmap-nesting */ |
| 2820 | tag = VM_MEMORY_SHARED_PMAP; |
| 2821 | } |
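/*
 * Illustrative check (hypothetical value): with pmap_nesting_size_min at,
 * say, 0x8000, both the commpage base address and its size must be
 * multiples of 0x8000 for the mapping to be tagged VM_MEMORY_SHARED_PMAP
 * and thus become eligible for pmap-nesting.
 */
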
| 2822 | /* map the comm page in the task's address space */ |
| 2823 | assert(commpage_handle != IPC_PORT_NULL); |
| 2824 | kr = vm_map_enter_mem_object( |
| 2825 | map, |
| 2826 | &commpage_address, |
| 2827 | commpage_size, |
| 2828 | 0, |
| 2829 | vm_flags, |
| 2830 | vmk_flags, |
| 2831 | tag, |
| 2832 | commpage_handle, |
| 2833 | 0, |
| 2834 | FALSE, |
| 2835 | VM_PROT_READ, |
| 2836 | VM_PROT_READ, |
| 2837 | VM_INHERIT_SHARE); |
| 2838 | if (kr != KERN_SUCCESS) { |
| 2839 | SHARED_REGION_TRACE_ERROR( |
| 2840 | ("commpage: enter(%p,0x%llx,0x%llx) " |
| 2841 | "commpage %p mapping failed 0x%x\n" , |
| 2842 | (void *)VM_KERNEL_ADDRPERM(map), |
| 2843 | (long long)commpage_address, |
| 2844 | (long long)commpage_size, |
| 2845 | (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr)); |
| 2846 | } |
| 2847 | |
| 2848 | /* map the comm text page in the task's address space */ |
| 2849 | assert(commpage_text_handle != IPC_PORT_NULL); |
| 2850 | kr = vm_map_enter_mem_object( |
| 2851 | map, |
| 2852 | &commpage_text_address, |
| 2853 | commpage_text_size, |
| 2854 | 0, |
| 2855 | vm_flags, |
| 2856 | vmk_flags, |
| 2857 | tag, |
| 2858 | commpage_text_handle, |
| 2859 | 0, |
| 2860 | FALSE, |
VM_PROT_READ | VM_PROT_EXECUTE,
VM_PROT_READ | VM_PROT_EXECUTE,
| 2863 | VM_INHERIT_SHARE); |
| 2864 | if (kr != KERN_SUCCESS) { |
| 2865 | SHARED_REGION_TRACE_ERROR( |
| 2866 | ("commpage text: enter(%p,0x%llx,0x%llx) " |
| 2867 | "commpage text %p mapping failed 0x%x\n" , |
| 2868 | (void *)VM_KERNEL_ADDRPERM(map), |
| 2869 | (long long)commpage_text_address, |
| 2870 | (long long)commpage_text_size, |
| 2871 | (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr)); |
| 2872 | } |
| 2873 | |
| 2874 | /* |
| 2875 | * Since we're here, we also pre-allocate some virtual space for the |
| 2876 | * Objective-C run-time, if needed... |
| 2877 | */ |
| 2878 | if (objc_size != 0) { |
| 2879 | kr = vm_map_enter_mem_object( |
| 2880 | map, |
| 2881 | &objc_address, |
| 2882 | objc_size, |
| 2883 | 0, |
| 2884 | VM_FLAGS_FIXED, |
| 2885 | vmk_flags, |
| 2886 | tag, |
| 2887 | IPC_PORT_NULL, |
| 2888 | 0, |
| 2889 | FALSE, |
| 2890 | VM_PROT_ALL, |
| 2891 | VM_PROT_ALL, |
| 2892 | VM_INHERIT_DEFAULT); |
| 2893 | if (kr != KERN_SUCCESS) { |
| 2894 | SHARED_REGION_TRACE_ERROR( |
| 2895 | ("commpage: enter(%p,0x%llx,0x%llx) " |
| 2896 | "objc mapping failed 0x%x\n" , |
| 2897 | (void *)VM_KERNEL_ADDRPERM(map), |
| 2898 | (long long)objc_address, |
| 2899 | (long long)objc_size, kr)); |
| 2900 | } |
| 2901 | } |
| 2902 | |
| 2903 | SHARED_REGION_TRACE_DEBUG( |
| 2904 | ("commpage: enter(%p,%p) <- 0x%x\n" , |
| 2905 | (void *)VM_KERNEL_ADDRPERM(map), |
| 2906 | (void *)VM_KERNEL_ADDRPERM(task), kr)); |
| 2907 | return kr; |
| 2908 | #endif |
| 2909 | } |
| 2910 | |
| 2911 | int |
| 2912 | vm_shared_region_slide(uint32_t slide, |
| 2913 | mach_vm_offset_t entry_start_address, |
| 2914 | mach_vm_size_t entry_size, |
| 2915 | mach_vm_offset_t slide_start, |
| 2916 | mach_vm_size_t slide_size, |
| 2917 | mach_vm_offset_t slid_mapping, |
| 2918 | memory_object_control_t sr_file_control) |
| 2919 | { |
| 2920 | void *slide_info_entry = NULL; |
| 2921 | int error; |
| 2922 | vm_shared_region_t sr; |
| 2923 | |
| 2924 | SHARED_REGION_TRACE_DEBUG( |
| 2925 | ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n" , |
| 2926 | slide, entry_start_address, entry_size, slide_start, slide_size)); |
| 2927 | |
| 2928 | sr = vm_shared_region_get(current_task()); |
| 2929 | if (sr == NULL) { |
| 2930 | printf("%s: no shared region?\n" , __FUNCTION__); |
| 2931 | SHARED_REGION_TRACE_DEBUG( |
| 2932 | ("vm_shared_region_slide: <- %d (no shared region)\n" , |
| 2933 | KERN_FAILURE)); |
| 2934 | return KERN_FAILURE; |
| 2935 | } |
| 2936 | |
| 2937 | /* |
| 2938 | * Protect from concurrent access. |
| 2939 | */ |
| 2940 | vm_shared_region_lock(); |
while (sr->sr_slide_in_progress) {
| 2942 | vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT); |
| 2943 | } |
| 2944 | if (sr->sr_slid |
| 2945 | #ifndef CONFIG_EMBEDDED |
| 2946 | || shared_region_completed_slide |
| 2947 | #endif |
| 2948 | ) { |
| 2949 | vm_shared_region_unlock(); |
| 2950 | |
| 2951 | vm_shared_region_deallocate(sr); |
| 2952 | printf("%s: shared region already slid?\n" , __FUNCTION__); |
| 2953 | SHARED_REGION_TRACE_DEBUG( |
| 2954 | ("vm_shared_region_slide: <- %d (already slid)\n" , |
| 2955 | KERN_FAILURE)); |
| 2956 | return KERN_FAILURE; |
| 2957 | } |
| 2958 | |
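/* mark the slide as in progress and drop the lock while we do the work */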
| 2959 | sr->sr_slide_in_progress = TRUE; |
| 2960 | vm_shared_region_unlock(); |
| 2961 | |
| 2962 | error = vm_shared_region_slide_mapping(sr, |
| 2963 | slide_size, |
| 2964 | entry_start_address, |
| 2965 | entry_size, |
| 2966 | slid_mapping, |
| 2967 | slide, |
| 2968 | sr_file_control); |
| 2969 | if (error) { |
| 2970 | printf("slide_info initialization failed with kr=%d\n" , error); |
| 2971 | goto done; |
| 2972 | } |
| 2973 | |
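/*
 * Copy the slide info supplied by user space into the kernel's
 * slide info entry so it can be sanity-checked below.
 */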
| 2974 | slide_info_entry = vm_shared_region_get_slide_info_entry(sr); |
if (slide_info_entry == NULL) {
| 2976 | error = KERN_FAILURE; |
| 2977 | } else { |
| 2978 | error = copyin((user_addr_t)slide_start, |
| 2979 | slide_info_entry, |
| 2980 | (vm_size_t)slide_size); |
| 2981 | if (error) { |
| 2982 | error = KERN_INVALID_ADDRESS; |
| 2983 | } |
| 2984 | } |
| 2985 | if (error) { |
| 2986 | goto done; |
| 2987 | } |
| 2988 | |
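/* validate the copied-in slide info before it can be used to slide pages */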
| 2989 | if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) { |
| 2990 | error = KERN_INVALID_ARGUMENT; |
| 2991 | printf("Sanity Check failed for slide_info\n" ); |
| 2992 | } else { |
| 2993 | #if DEBUG |
| 2994 | printf("Succesfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n" , |
| 2995 | (void*)(uintptr_t)entry_start_address, |
| 2996 | (unsigned long)entry_size, |
| 2997 | (unsigned long)slide_size); |
| 2998 | #endif |
| 2999 | } |
| 3000 | done: |
| 3001 | vm_shared_region_lock(); |
| 3002 | |
| 3003 | assert(sr->sr_slide_in_progress); |
| 3004 | assert(sr->sr_slid == FALSE); |
| 3005 | sr->sr_slide_in_progress = FALSE; |
| 3006 | thread_wakeup(&sr->sr_slide_in_progress); |
| 3007 | |
| 3008 | if (error == KERN_SUCCESS) { |
| 3009 | sr->sr_slid = TRUE; |
| 3010 | |
| 3011 | /* |
| 3012 | * We don't know how to tear down a slid shared region today, because |
| 3013 | * we would have to invalidate all the pages that have been slid |
| 3014 | * atomically with respect to anyone mapping the shared region afresh. |
| 3015 | * Therefore, take a dangling reference to prevent teardown. |
| 3016 | */ |
| 3017 | sr->sr_ref_count++; |
| 3018 | #ifndef CONFIG_EMBEDDED |
| 3019 | shared_region_completed_slide = TRUE; |
| 3020 | #endif |
| 3021 | } |
| 3022 | vm_shared_region_unlock(); |
| 3023 | |
| 3024 | vm_shared_region_deallocate(sr); |
| 3025 | |
| 3026 | SHARED_REGION_TRACE_DEBUG( |
| 3027 | ("vm_shared_region_slide: <- %d\n" , |
| 3028 | error)); |
| 3029 | |
| 3030 | return error; |
| 3031 | } |
| 3032 | |
| 3033 | /* |
| 3034 | * This is called from powermanagement code to let kernel know the current source of power. |
| 3035 | * 0 if it is external source (connected to power ) |
| 3036 | * 1 if it is internal power source ie battery |
| 3037 | */ |
| 3038 | void |
| 3039 | #if defined(__i386__) || defined(__x86_64__) |
| 3040 | post_sys_powersource(int i) |
| 3041 | #else |
| 3042 | post_sys_powersource(__unused int i) |
| 3043 | #endif |
| 3044 | { |
| 3045 | #if defined(__i386__) || defined(__x86_64__) |
| 3046 | post_sys_powersource_internal(i, 0); |
| 3047 | #endif /* __i386__ || __x86_64__ */ |
| 3048 | } |
| 3049 | |
| 3050 | |
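/*
 * On Intel, keep the commpage spin count in sync with the power source:
 * adaptive spinning is disabled (count 0) on battery to save power and
 * restored to MP_SPIN_TRIES on external power.
 */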
| 3051 | #if defined(__i386__) || defined(__x86_64__) |
| 3052 | static void |
| 3053 | post_sys_powersource_internal(int i, int internal) |
| 3054 | { |
| 3055 | if (internal == 0) |
| 3056 | __system_power_source = i; |
| 3057 | |
| 3058 | if (__commpage_setup != 0) { |
| 3059 | if (__system_power_source != 0) |
| 3060 | commpage_set_spin_count(0); |
| 3061 | else |
| 3062 | commpage_set_spin_count(MP_SPIN_TRIES); |
| 3063 | } |
| 3064 | } |
| 3065 | #endif /* __i386__ || __x86_64__ */ |
| 3066 | |
| 3067 | |