| 1 | /* |
| 2 | * Copyright (c) 2000-2009 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. The rights granted to you under the License |
| 10 | * may not be used to create, or enable the creation or redistribution of, |
| 11 | * unlawful or unlicensed copies of an Apple operating system, or to |
| 12 | * circumvent, violate, or enable the circumvention or violation of, any |
| 13 | * terms of an Apple operating system software license agreement. |
| 14 | * |
| 15 | * Please obtain a copy of the License at |
| 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
| 17 | * |
| 18 | * The Original Code and all software distributed under the License are |
| 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 23 | * Please see the License for the specific language governing rights and |
| 24 | * limitations under the License. |
| 25 | * |
| 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
| 27 | */ |
| 28 | /* |
| 29 | * @OSF_COPYRIGHT@ |
| 30 | */ |
| 31 | /* |
| 32 | * Mach Operating System |
| 33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
| 34 | * All Rights Reserved. |
| 35 | * |
| 36 | * Permission to use, copy, modify and distribute this software and its |
| 37 | * documentation is hereby granted, provided that both the copyright |
| 38 | * notice and this permission notice appear in all copies of the |
| 39 | * software, derivative works or modified versions, and any portions |
| 40 | * thereof, and that both notices appear in supporting documentation. |
| 41 | * |
| 42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
| 43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
| 44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
| 45 | * |
| 46 | * Carnegie Mellon requests users of this software to return to |
| 47 | * |
| 48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
| 49 | * School of Computer Science |
| 50 | * Carnegie Mellon University |
| 51 | * Pittsburgh PA 15213-3890 |
| 52 | * |
| 53 | * any improvements or extensions that they make and grant Carnegie Mellon |
| 54 | * the rights to redistribute these changes. |
| 55 | */ |
| 56 | /* |
| 57 | */ |
| 58 | |
| 59 | /* |
| 60 | * File: vm/vm_map.h |
| 61 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
| 62 | * Date: 1985 |
| 63 | * |
| 64 | * Virtual memory map module definitions. |
| 65 | * |
| 66 | * Contributors: |
| 67 | * avie, dlb, mwyoung |
| 68 | */ |
| 69 | |
| 70 | #ifndef _VM_VM_MAP_H_ |
| 71 | #define _VM_VM_MAP_H_ |
| 72 | |
| 73 | #include <mach/mach_types.h> |
| 74 | #include <mach/kern_return.h> |
| 75 | #include <mach/boolean.h> |
| 76 | #include <mach/vm_types.h> |
| 77 | #include <mach/vm_prot.h> |
| 78 | #include <mach/vm_inherit.h> |
| 79 | #include <mach/vm_behavior.h> |
| 80 | #include <mach/vm_param.h> |
| 81 | #include <vm/pmap.h> |
| 82 | |
| 83 | #ifdef KERNEL_PRIVATE |
| 84 | |
| 85 | #include <sys/cdefs.h> |
| 86 | |
| 87 | __BEGIN_DECLS |
| 88 | |
| 89 | extern void vm_map_reference(vm_map_t map); |
| 90 | extern vm_map_t current_map(void); |
| 91 | |
/* Set up reserved areas in a new VM map */
| 93 | extern kern_return_t vm_map_exec( |
| 94 | vm_map_t new_map, |
| 95 | task_t task, |
| 96 | boolean_t is64bit, |
| 97 | void *fsroot, |
| 98 | cpu_type_t cpu, |
| 99 | cpu_subtype_t cpu_subtype); |
| 100 | |
| 101 | __END_DECLS |
| 102 | |
| 103 | #ifdef MACH_KERNEL_PRIVATE |
| 104 | |
| 105 | #include <task_swapper.h> |
| 106 | #include <mach_assert.h> |
| 107 | |
| 108 | #include <vm/vm_object.h> |
| 109 | #include <vm/vm_page.h> |
| 110 | #include <kern/locks.h> |
| 111 | #include <kern/zalloc.h> |
| 112 | #include <kern/macro_help.h> |
| 113 | |
| 114 | #include <kern/thread.h> |
| 115 | |
| 116 | #define current_map_fast() (current_thread()->map) |
| 117 | #define current_map() (current_map_fast()) |
| 118 | |
| 119 | #include <vm/vm_map_store.h> |
| 120 | |
| 121 | |
| 122 | /* |
| 123 | * Types defined: |
| 124 | * |
| 125 | * vm_map_t the high-level address map data structure. |
| 126 | * vm_map_entry_t an entry in an address map. |
| 127 | * vm_map_version_t a timestamp of a map, for use with vm_map_lookup |
| 128 | * vm_map_copy_t represents memory copied from an address map, |
| 129 | * used for inter-map copy operations |
| 130 | */ |
| 131 | typedef struct vm_map_entry *vm_map_entry_t; |
| 132 | #define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0) |
| 133 | |
| 134 | |
| 135 | /* |
| 136 | * Type: vm_map_object_t [internal use only] |
| 137 | * |
| 138 | * Description: |
| 139 | * The target of an address mapping, either a virtual |
| 140 | * memory object or a sub map (of the kernel map). |
| 141 | */ |
| 142 | typedef union vm_map_object { |
vm_object_t vmo_object; /* virtual memory object */
| 144 | vm_map_t vmo_submap; /* belongs to another map */ |
| 145 | } vm_map_object_t; |
| 146 | |
| 147 | #define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr) |
| 148 | #define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp) |
| 149 | #define named_entry_lock(object) lck_mtx_lock(&(object)->Lock) |
| 150 | #define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock) |
| 151 | #if VM_NAMED_ENTRY_LIST |
| 152 | extern queue_head_t vm_named_entry_list; |
| 153 | #endif /* VM_NAMED_ENTRY_LIST */ |
| 154 | |
| 155 | /* |
| 156 | * Type: vm_named_entry_t [internal use only] |
| 157 | * |
| 158 | * Description: |
| 159 | * Description of a mapping to a memory cache object. |
| 160 | * |
| 161 | * Implementation: |
| 162 | * While the handle to this object is used as a means to map |
| 163 | * and pass around the right to map regions backed by pagers |
| 164 | * of all sorts, the named_entry itself is only manipulated |
| 165 | * by the kernel. Named entries hold information on the |
| 166 | * right to map a region of a cached object. Namely, |
| 167 | * the target cache object, the beginning and ending of the |
* region to be mapped, and the permissions (read, write)
| 169 | * with which it can be mapped. |
| 170 | * |
| 171 | */ |
| 172 | |
| 173 | struct vm_named_entry { |
| 174 | decl_lck_mtx_data(, Lock) /* Synchronization */ |
| 175 | union { |
| 176 | vm_object_t object; /* object I point to */ |
| 177 | vm_map_t map; /* map backing submap */ |
| 178 | vm_map_copy_t copy; /* a VM map copy */ |
| 179 | } backing; |
| 180 | vm_object_offset_t offset; /* offset into object */ |
| 181 | vm_object_size_t size; /* size of region */ |
| 182 | vm_object_offset_t data_offset; /* offset to first byte of data */ |
| 183 | vm_prot_t protection; /* access permissions */ |
| 184 | int ref_count; /* Number of references */ |
| 185 | unsigned int /* Is backing.xxx : */ |
| 186 | /* boolean_t */ internal:1, /* ... an internal object */ |
| 187 | /* boolean_t */ is_sub_map:1, /* ... a submap? */ |
| 188 | /* boolean_t */ is_copy:1; /* ... a VM map copy */ |
| 189 | #if VM_NAMED_ENTRY_LIST |
| 190 | queue_chain_t named_entry_list; |
| 191 | int named_entry_alias; |
| 192 | mach_port_t named_entry_port; |
| 193 | #define NAMED_ENTRY_BT_DEPTH 16 |
| 194 | void *named_entry_bt[NAMED_ENTRY_BT_DEPTH]; |
| 195 | #endif /* VM_NAMED_ENTRY_LIST */ |
| 196 | }; |
| 197 | |
| 198 | /* |
| 199 | * Type: vm_map_entry_t [internal use only] |
| 200 | * |
| 201 | * Description: |
| 202 | * A single mapping within an address map. |
| 203 | * |
| 204 | * Implementation: |
| 205 | * Address map entries consist of start and end addresses, |
| 206 | * a VM object (or sub map) and offset into that object, |
| 207 | * and user-exported inheritance and protection information. |
| 208 | * Control information for virtual copy operations is also |
| 209 | * stored in the address map entry. |
| 210 | */ |
| 211 | |
| 212 | struct vm_map_links { |
| 213 | struct vm_map_entry *prev; /* previous entry */ |
| 214 | struct vm_map_entry *next; /* next entry */ |
| 215 | vm_map_offset_t start; /* start address */ |
| 216 | vm_map_offset_t end; /* end address */ |
| 217 | }; |
| 218 | |
| 219 | /* |
| 220 | * IMPORTANT: |
| 221 | * The "alias" field can be updated while holding the VM map lock |
| 222 | * "shared". It's OK as along as it's the only field that can be |
| 223 | * updated without the VM map "exclusive" lock. |
| 224 | */ |
| 225 | #define VME_OBJECT(entry) ((entry)->vme_object.vmo_object) |
| 226 | #define VME_OBJECT_SET(entry, object) \ |
| 227 | MACRO_BEGIN \ |
| 228 | (entry)->vme_object.vmo_object = (object); \ |
| 229 | MACRO_END |
| 230 | #define VME_SUBMAP(entry) ((entry)->vme_object.vmo_submap) |
| 231 | #define VME_SUBMAP_SET(entry, submap) \ |
| 232 | MACRO_BEGIN \ |
| 233 | (entry)->vme_object.vmo_submap = (submap); \ |
| 234 | MACRO_END |
| 235 | #define VME_OFFSET(entry) ((entry)->vme_offset & ~PAGE_MASK) |
| 236 | #define VME_OFFSET_SET(entry, offset) \ |
| 237 | MACRO_BEGIN \ |
| 238 | int __alias; \ |
| 239 | __alias = VME_ALIAS((entry)); \ |
| 240 | assert((offset & PAGE_MASK) == 0); \ |
| 241 | (entry)->vme_offset = offset | __alias; \ |
| 242 | MACRO_END |
| 243 | #define VME_OBJECT_SHADOW(entry, length) \ |
| 244 | MACRO_BEGIN \ |
| 245 | vm_object_t __object; \ |
| 246 | vm_object_offset_t __offset; \ |
| 247 | __object = VME_OBJECT((entry)); \ |
| 248 | __offset = VME_OFFSET((entry)); \ |
| 249 | vm_object_shadow(&__object, &__offset, (length)); \ |
| 250 | if (__object != VME_OBJECT((entry))) { \ |
| 251 | VME_OBJECT_SET((entry), __object); \ |
| 252 | (entry)->use_pmap = TRUE; \ |
| 253 | } \ |
| 254 | if (__offset != VME_OFFSET((entry))) { \ |
| 255 | VME_OFFSET_SET((entry), __offset); \ |
| 256 | } \ |
| 257 | MACRO_END |
| 258 | |
| 259 | #define VME_ALIAS_MASK (PAGE_MASK) |
| 260 | #define VME_ALIAS(entry) ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK)) |
| 261 | #define VME_ALIAS_SET(entry, alias) \ |
| 262 | MACRO_BEGIN \ |
| 263 | vm_map_offset_t __offset; \ |
| 264 | __offset = VME_OFFSET((entry)); \ |
| 265 | (entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK); \ |
| 266 | MACRO_END |
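/*
 * Because the alias rides in the low VME_ALIAS_MASK bits of "vme_offset"
 * and the page-aligned object offset rides in the high bits, setting one
 * preserves the other. An illustrative sketch (not part of the interface;
 * VM_MEMORY_MALLOC is one of the user tags from <mach/vm_statistics.h>):
 *
 * VME_OFFSET_SET(entry, 0x10000); // must be page-aligned (asserted)
 * VME_ALIAS_SET(entry, VM_MEMORY_MALLOC);
 * assert(VME_OFFSET(entry) == 0x10000);
 * assert(VME_ALIAS(entry) == VM_MEMORY_MALLOC);
 */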
| 267 | |
| 268 | /* |
| 269 | * FOOTPRINT ACCOUNTING: |
| 270 | * The "memory footprint" is better described in the pmap layer. |
| 271 | * |
| 272 | * At the VM level, these 2 vm_map_entry_t fields are relevant: |
| 273 | * iokit_mapped: |
| 274 | * For an "iokit_mapped" entry, we add the size of the entry to the |
| 275 | * footprint when the entry is entered into the map and we subtract that |
| 276 | * size when the entry is removed. No other accounting should take place. |
| 277 | * "use_pmap" should be FALSE but is not taken into account. |
| 278 | * use_pmap: (only when is_sub_map is FALSE) |
| 279 | * This indicates if we should ask the pmap layer to account for pages |
| 280 | * in this mapping. If FALSE, we expect that another form of accounting |
| 281 | * is being used (e.g. "iokit_mapped" or the explicit accounting of |
* non-volatile purgeable memory).
| 283 | * |
| 284 | * So the logic is mostly: |
| 285 | * if entry->is_sub_map == TRUE |
| 286 | * anything in a submap does not count for the footprint |
| 287 | * else if entry->iokit_mapped == TRUE |
| 288 | * footprint includes the entire virtual size of this entry |
| 289 | * else if entry->use_pmap == FALSE |
| 290 | * tell pmap NOT to account for pages being pmap_enter()'d from this |
| 291 | * mapping (i.e. use "alternate accounting") |
| 292 | * else |
| 293 | * pmap will account for pages being pmap_enter()'d from this mapping |
| 294 | * as it sees fit (only if anonymous, etc...) |
| 295 | */ |
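/*
 * A minimal sketch of that decision as code (illustrative only: the
 * helper below is hypothetical, the "iokit_mapped" state is carried by
 * the "iokit_acct" bit of the entry, and the real accounting lives in
 * vm_map.c and the pmap layer):
 *
 * static boolean_t
 * entry_uses_pmap_accounting(vm_map_entry_t entry)
 * {
 *     if (entry->is_sub_map)
 *         return FALSE; // submaps never count toward the footprint
 *     if (entry->iokit_acct)
 *         return FALSE; // entire virtual size counted at entry/removal
 *     return entry->use_pmap; // otherwise the pmap accounts per page
 * }
 */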
| 296 | |
| 297 | struct vm_map_entry { |
| 298 | struct vm_map_links links; /* links to other entries */ |
| 299 | #define vme_prev links.prev |
| 300 | #define vme_next links.next |
| 301 | #define vme_start links.start |
| 302 | #define vme_end links.end |
| 303 | |
| 304 | struct vm_map_store store; |
| 305 | union vm_map_object vme_object; /* object I point to */ |
| 306 | vm_object_offset_t vme_offset; /* offset into object */ |
| 307 | |
| 308 | unsigned int |
| 309 | /* boolean_t */ is_shared:1, /* region is shared */ |
| 310 | /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */ |
| 311 | /* boolean_t */ in_transition:1, /* Entry being changed */ |
| 312 | /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */ |
| 313 | /* vm_behavior_t */ behavior:2, /* user paging behavior hint */ |
| 314 | /* behavior is not defined for submap type */ |
| 315 | /* boolean_t */ needs_copy:1, /* object need to be copied? */ |
| 316 | |
| 317 | /* Only in task maps: */ |
| 318 | /* vm_prot_t */ protection:3, /* protection code */ |
| 319 | /* vm_prot_t */ max_protection:3, /* maximum protection */ |
| 320 | /* vm_inherit_t */ inheritance:2, /* inheritance */ |
| 321 | /* boolean_t */ use_pmap:1, /* |
| 322 | * use_pmap is overloaded: |
| 323 | * if "is_sub_map": |
| 324 | * use a nested pmap? |
| 325 | * else (i.e. if object): |
| 326 | * use pmap accounting |
| 327 | * for footprint? |
| 328 | */ |
| 329 | /* boolean_t */ no_cache:1, /* should new pages be cached? */ |
/* boolean_t */ permanent:1, /* mapping cannot be removed */
| 331 | /* boolean_t */ superpage_size:1, /* use superpages of a certain size */ |
| 332 | /* boolean_t */ map_aligned:1, /* align to map's page size */ |
/* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of
* this entry if it is being deleted
* without unwiring them */
| 336 | /* boolean_t */ used_for_jit:1, |
| 337 | /* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */ |
| 338 | /* boolean_t */ from_reserved_zone:1, /* Allocated from |
| 339 | * kernel reserved zone */ |
| 340 | |
| 341 | /* iokit accounting: use the virtual size rather than resident size: */ |
| 342 | /* boolean_t */ iokit_acct:1, |
| 343 | /* boolean_t */ vme_resilient_codesign:1, |
| 344 | /* boolean_t */ vme_resilient_media:1, |
| 345 | /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */ |
__unused:4;
| 348 | |
| 349 | unsigned short wired_count; /* can be paged if = 0 */ |
| 350 | unsigned short user_wired_count; /* for vm_wire */ |
| 351 | #if DEBUG |
| 352 | #define MAP_ENTRY_CREATION_DEBUG (1) |
| 353 | #define MAP_ENTRY_INSERTION_DEBUG (1) |
| 354 | #endif |
| 355 | #if MAP_ENTRY_CREATION_DEBUG |
| 356 | struct vm_map_header *vme_creation_maphdr; |
| 357 | uintptr_t vme_creation_bt[16]; |
| 358 | #endif |
| 359 | #if MAP_ENTRY_INSERTION_DEBUG |
| 360 | uintptr_t vme_insertion_bt[16]; |
| 361 | #endif |
| 362 | }; |
| 363 | |
| 364 | /* |
| 365 | * Convenience macros for dealing with superpages |
| 366 | * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h |
| 367 | */ |
| 368 | #define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES) |
| 369 | #define SUPERPAGE_MASK (-SUPERPAGE_SIZE) |
| 370 | #define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK) |
| 371 | #define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK) |
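/*
 * Example: with 4K base pages and SUPERPAGE_NBASEPAGES == 512 (the
 * x86_64 value, i.e. 2MB superpages), SUPERPAGE_ROUND_DOWN(0x200123)
 * yields 0x200000 and SUPERPAGE_ROUND_UP(0x200123) yields 0x400000.
 */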
| 372 | |
| 373 | /* |
| 374 | * wired_counts are unsigned short. This value is used to safeguard |
| 375 | * against any mishaps due to runaway user programs. |
| 376 | */ |
| 377 | #define MAX_WIRE_COUNT 65535 |
| 378 | |
| 379 | |
| 380 | |
| 381 | /* |
| 382 | * Type: struct vm_map_header |
| 383 | * |
| 384 | * Description: |
| 385 | * Header for a vm_map and a vm_map_copy. |
| 386 | */ |
| 387 | |
| 388 | |
| 389 | struct vm_map_header { |
| 390 | struct vm_map_links links; /* first, last, min, max */ |
| 391 | int nentries; /* Number of entries */ |
| 392 | boolean_t entries_pageable; |
| 393 | /* are map entries pageable? */ |
| 394 | #ifdef VM_MAP_STORE_USE_RB |
| 395 | struct rb_head rb_head_store; |
| 396 | #endif |
| 397 | int page_shift; /* page shift */ |
| 398 | }; |
| 399 | |
| 400 | #define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift) |
| 401 | #define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr))) |
| 402 | #define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1) |
| 403 | |
| 404 | /* |
| 405 | * Type: vm_map_t [exported; contents invisible] |
| 406 | * |
| 407 | * Description: |
| 408 | * An address map -- a directory relating valid |
| 409 | * regions of a task's address space to the corresponding |
| 410 | * virtual memory objects. |
| 411 | * |
| 412 | * Implementation: |
| 413 | * Maps are doubly-linked lists of map entries, sorted |
| 414 | * by address. One hint is used to start |
| 415 | * searches again from the last successful search, |
| 416 | * insertion, or removal. Another hint is used to |
| 417 | * quickly find free space. |
| 418 | */ |
| 419 | struct _vm_map { |
| 420 | lck_rw_t lock; /* map lock */ |
| 421 | struct vm_map_header hdr; /* Map entry header */ |
| 422 | #define min_offset hdr.links.start /* start of range */ |
| 423 | #define max_offset hdr.links.end /* end of range */ |
| 424 | pmap_t pmap; /* Physical map */ |
| 425 | vm_map_size_t size; /* virtual size */ |
| 426 | vm_map_size_t user_wire_limit;/* rlimit on user locked memory */ |
| 427 | vm_map_size_t user_wire_size; /* current size of user locked memory in this map */ |
| 428 | #if __x86_64__ |
| 429 | vm_map_offset_t vmmap_high_start; |
| 430 | #endif /* __x86_64__ */ |
| 431 | |
| 432 | union { |
| 433 | /* |
| 434 | * If map->disable_vmentry_reuse == TRUE: |
| 435 | * the end address of the highest allocated vm_map_entry_t. |
| 436 | */ |
| 437 | vm_map_offset_t vmu1_highest_entry_end; |
| 438 | /* |
| 439 | * For a nested VM map: |
| 440 | * the lowest address in this nested VM map that we would |
| 441 | * expect to be unnested under normal operation (i.e. for |
| 442 | * regular copy-on-write on DATA section). |
| 443 | */ |
| 444 | vm_map_offset_t vmu1_lowest_unnestable_start; |
| 445 | } vmu1; |
| 446 | #define highest_entry_end vmu1.vmu1_highest_entry_end |
| 447 | #define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start |
| 448 | decl_lck_mtx_data(, s_lock) /* Lock ref, res fields */ |
| 449 | lck_mtx_ext_t s_lock_ext; |
| 450 | vm_map_entry_t hint; /* hint for quick lookups */ |
| 451 | union { |
| 452 | struct vm_map_links* vmmap_hole_hint; /* hint for quick hole lookups */ |
| 453 | struct vm_map_corpse_footprint_header *vmmap_corpse_footprint; |
| 454 | } vmmap_u_1; |
| 455 | #define hole_hint vmmap_u_1.vmmap_hole_hint |
| 456 | #define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint |
| 457 | union{ |
| 458 | vm_map_entry_t _first_free; /* First free space hint */ |
| 459 | struct vm_map_links* _holes; /* links all holes between entries */ |
| 460 | } f_s; /* Union for free space data structures being used */ |
| 461 | |
| 462 | #define first_free f_s._first_free |
| 463 | #define holes_list f_s._holes |
| 464 | |
| 465 | int map_refcnt; /* Reference count */ |
| 466 | |
| 467 | #if TASK_SWAPPER |
| 468 | int res_count; /* Residence count (swap) */ |
| 469 | int sw_state; /* Swap state */ |
| 470 | #endif /* TASK_SWAPPER */ |
| 471 | |
| 472 | unsigned int |
| 473 | /* boolean_t */ wait_for_space:1, /* Should callers wait for space? */ |
| 474 | /* boolean_t */ wiring_required:1, /* All memory wired? */ |
/* boolean_t */ no_zero_fill:1, /* No zero fill absent pages */
/* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
| 477 | /* boolean_t */ switch_protect:1, /* Protect map from write faults while switched */ |
| 478 | /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */ |
| 479 | /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */ |
| 480 | /* boolean_t */ holelistenabled:1, |
| 481 | /* boolean_t */ is_nested_map:1, |
| 482 | /* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */ |
| 483 | /* boolean_t */ jit_entry_exists:1, |
| 484 | /* boolean_t */ has_corpse_footprint:1, |
| 485 | /* boolean_t */ warned_delete_gap:1, |
| 486 | /* reserved */ pad:19; |
| 487 | unsigned int timestamp; /* Version number */ |
| 488 | }; |
| 489 | |
| 490 | #define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x)) |
| 491 | #define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links) |
| 492 | #define vm_map_first_entry(map) ((map)->hdr.links.next) |
| 493 | #define vm_map_last_entry(map) ((map)->hdr.links.prev) |
| 494 | |
| 495 | #if TASK_SWAPPER |
| 496 | /* |
| 497 | * VM map swap states. There are no transition states. |
| 498 | */ |
| 499 | #define MAP_SW_IN 1 /* map is swapped in; residence count > 0 */ |
#define MAP_SW_OUT 2 /* map is out (res_count == 0) */
| 501 | #endif /* TASK_SWAPPER */ |
| 502 | |
| 503 | /* |
| 504 | * Type: vm_map_version_t [exported; contents invisible] |
| 505 | * |
| 506 | * Description: |
| 507 | * Map versions may be used to quickly validate a previous |
| 508 | * lookup operation. |
| 509 | * |
| 510 | * Usage note: |
| 511 | * Because they are bulky objects, map versions are usually |
| 512 | * passed by reference. |
| 513 | * |
| 514 | * Implementation: |
| 515 | * Just a timestamp for the main map. |
| 516 | */ |
| 517 | typedef struct vm_map_version { |
| 518 | unsigned int main_timestamp; |
| 519 | } vm_map_version_t; |
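/*
 * A typical pattern (sketch): capture the timestamp under the map lock,
 * drop the lock for blocking work, then re-validate with vm_map_verify()
 * before trusting the earlier lookup:
 *
 * vm_map_version_t version;
 * version.main_timestamp = map->timestamp;
 * vm_map_unlock_read(map);
 * // ... blocking work, e.g. waiting for a page ...
 * vm_map_lock_read(map);
 * if (!vm_map_verify(map, &version)) {
 *     // the map changed: redo the lookup
 * }
 */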
| 520 | |
| 521 | /* |
| 522 | * Type: vm_map_copy_t [exported; contents invisible] |
| 523 | * |
| 524 | * Description: |
| 525 | * A map copy object represents a region of virtual memory |
| 526 | * that has been copied from an address map but is still |
| 527 | * in transit. |
| 528 | * |
| 529 | * A map copy object may only be used by a single thread |
| 530 | * at a time. |
| 531 | * |
| 532 | * Implementation: |
| 533 | * There are three formats for map copy objects. |
| 534 | * The first is very similar to the main |
| 535 | * address map in structure, and as a result, some |
| 536 | * of the internal maintenance functions/macros can |
| 537 | * be used with either address maps or map copy objects. |
| 538 | * |
* The map copy object contains a header "links"
* entry onto which the other entries that represent
* the region are chained.
| 542 | * |
| 543 | * The second format is a single vm object. This was used |
| 544 | * primarily in the pageout path - but is not currently used |
| 545 | * except for placeholder copy objects (see vm_map_copy_copy()). |
| 546 | * |
| 547 | * The third format is a kernel buffer copy object - for data |
| 548 | * small enough that physical copies were the most efficient |
| 549 | * method. This method uses a zero-sized array unioned with |
| 550 | * other format-specific data in the 'c_u' member. This unsized |
| 551 | * array overlaps the other elements and allows us to use this |
| 552 | * extra structure space for physical memory copies. On 64-bit |
| 553 | * systems this saves ~64 bytes per vm_map_copy. |
| 554 | */ |
| 555 | |
| 556 | struct vm_map_copy { |
| 557 | int type; |
| 558 | #define VM_MAP_COPY_ENTRY_LIST 1 |
| 559 | #define VM_MAP_COPY_OBJECT 2 |
| 560 | #define VM_MAP_COPY_KERNEL_BUFFER 3 |
| 561 | vm_object_offset_t offset; |
| 562 | vm_map_size_t size; |
| 563 | union { |
| 564 | struct vm_map_header hdr; /* ENTRY_LIST */ |
| 565 | vm_object_t object; /* OBJECT */ |
| 566 | uint8_t kdata[0]; /* KERNEL_BUFFER */ |
| 567 | } c_u; |
| 568 | }; |
| 569 | |
| 570 | |
| 571 | #define cpy_hdr c_u.hdr |
| 572 | |
| 573 | #define cpy_object c_u.object |
| 574 | #define cpy_kdata c_u.kdata |
| 575 | #define cpy_kdata_hdr_sz (offsetof(struct vm_map_copy, c_u.kdata)) |
| 576 | |
| 577 | #define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift) |
| 578 | #define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy))) |
| 579 | #define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1) |
| 580 | |
| 581 | /* |
| 582 | * Useful macros for entry list copy objects |
| 583 | */ |
| 584 | |
| 585 | #define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links) |
| 586 | #define vm_map_copy_first_entry(copy) \ |
| 587 | ((copy)->cpy_hdr.links.next) |
| 588 | #define vm_map_copy_last_entry(copy) \ |
| 589 | ((copy)->cpy_hdr.links.prev) |
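/*
 * A consumer dispatches on "type" to reach the matching union member;
 * a hedged sketch:
 *
 * switch (copy->type) {
 * case VM_MAP_COPY_ENTRY_LIST:
 *     for (entry = vm_map_copy_first_entry(copy);
 *          entry != vm_map_copy_to_entry(copy);
 *          entry = entry->vme_next) {
 *         // process each chained entry
 *     }
 *     break;
 * case VM_MAP_COPY_OBJECT:
 *     object = copy->cpy_object; // single VM object
 *     break;
 * case VM_MAP_COPY_KERNEL_BUFFER:
 *     data = copy->cpy_kdata; // data is inline after the header
 *     break;
 * }
 */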
| 590 | |
| 591 | /* |
| 592 | * Macros: vm_map_lock, etc. [internal use only] |
| 593 | * Description: |
| 594 | * Perform locking on the data portion of a map. |
| 595 | * When multiple maps are to be locked, order by map address. |
| 596 | * (See vm_map.c::vm_remap()) |
| 597 | */ |
| 598 | |
| 599 | #define vm_map_lock_init(map) \ |
| 600 | ((map)->timestamp = 0 , \ |
| 601 | lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr)) |
| 602 | |
| 603 | #define vm_map_lock(map) lck_rw_lock_exclusive(&(map)->lock) |
| 604 | #define vm_map_unlock(map) \ |
| 605 | ((map)->timestamp++ , lck_rw_done(&(map)->lock)) |
| 606 | #define vm_map_lock_read(map) lck_rw_lock_shared(&(map)->lock) |
| 607 | #define vm_map_unlock_read(map) lck_rw_done(&(map)->lock) |
| 608 | #define vm_map_lock_write_to_read(map) \ |
| 609 | ((map)->timestamp++ , lck_rw_lock_exclusive_to_shared(&(map)->lock)) |
/* lck_rw_lock_shared_to_exclusive() returns FALSE on failure, so the
* macro evaluates to zero on success and non-zero on failure.
*/
| 613 | #define vm_map_lock_read_to_write(map) (lck_rw_lock_shared_to_exclusive(&(map)->lock) != TRUE) |
| 614 | |
| 615 | #define vm_map_try_lock(map) lck_rw_try_lock_exclusive(&(map)->lock) |
| 616 | #define vm_map_try_lock_read(map) lck_rw_try_lock_shared(&(map)->lock) |
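/*
 * Sketch of the usual upgrade pattern. Note the inverted sense of
 * vm_map_lock_read_to_write(): it evaluates non-zero on FAILURE, and on
 * failure the shared lock has already been dropped:
 *
 * vm_map_lock_read(map);
 * // ... decide that an exclusive lock is needed ...
 * if (vm_map_lock_read_to_write(map)) {
 *     // upgrade failed: re-acquire exclusively and re-validate state
 *     vm_map_lock(map);
 * }
 * // ... exclusive lock held ...
 * vm_map_unlock(map);
 */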
| 617 | |
| 618 | #if MACH_ASSERT || DEBUG |
| 619 | #define vm_map_lock_assert_held(map) \ |
| 620 | lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD) |
| 621 | #define vm_map_lock_assert_shared(map) \ |
| 622 | lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED) |
| 623 | #define vm_map_lock_assert_exclusive(map) \ |
| 624 | lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE) |
| 625 | #define vm_map_lock_assert_notheld(map) \ |
| 626 | lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD) |
| 627 | #else /* MACH_ASSERT || DEBUG */ |
| 628 | #define vm_map_lock_assert_held(map) |
| 629 | #define vm_map_lock_assert_shared(map) |
| 630 | #define vm_map_lock_assert_exclusive(map) |
| 631 | #define vm_map_lock_assert_notheld(map) |
| 632 | #endif /* MACH_ASSERT || DEBUG */ |
| 633 | |
| 634 | /* |
| 635 | * Exported procedures that operate on vm_map_t. |
| 636 | */ |
| 637 | |
| 638 | /* Initialize the module */ |
| 639 | extern void vm_map_init(void); |
| 640 | |
| 641 | extern void vm_kernel_reserved_entry_init(void); |
| 642 | |
| 643 | /* Allocate a range in the specified virtual address map and |
| 644 | * return the entry allocated for that range. */ |
| 645 | extern kern_return_t vm_map_find_space( |
| 646 | vm_map_t map, |
| 647 | vm_map_address_t *address, /* OUT */ |
| 648 | vm_map_size_t size, |
| 649 | vm_map_offset_t mask, |
| 650 | int flags, |
| 651 | vm_map_kernel_flags_t vmk_flags, |
| 652 | vm_tag_t tag, |
| 653 | vm_map_entry_t *o_entry); /* OUT */ |
| 654 | |
| 655 | extern void vm_map_clip_start( |
| 656 | vm_map_t map, |
| 657 | vm_map_entry_t entry, |
| 658 | vm_map_offset_t endaddr); |
| 659 | extern void vm_map_clip_end( |
| 660 | vm_map_t map, |
| 661 | vm_map_entry_t entry, |
| 662 | vm_map_offset_t endaddr); |
| 663 | extern boolean_t vm_map_entry_should_cow_for_true_share( |
| 664 | vm_map_entry_t entry); |
| 665 | |
/* Look up the map entry containing, or immediately preceding, the specified address in the given map */
| 667 | extern boolean_t vm_map_lookup_entry( |
| 668 | vm_map_t map, |
| 669 | vm_map_address_t address, |
| 670 | vm_map_entry_t *entry); /* OUT */ |
| 671 | |
| 672 | extern void vm_map_copy_remap( |
| 673 | vm_map_t map, |
| 674 | vm_map_entry_t where, |
| 675 | vm_map_copy_t copy, |
| 676 | vm_map_offset_t adjustment, |
| 677 | vm_prot_t cur_prot, |
| 678 | vm_prot_t max_prot, |
| 679 | vm_inherit_t inheritance); |
| 680 | |
| 681 | /* Find the VM object, offset, and protection for a given virtual address |
| 682 | * in the specified map, assuming a page fault of the type specified. */ |
| 683 | extern kern_return_t vm_map_lookup_locked( |
| 684 | vm_map_t *var_map, /* IN/OUT */ |
| 685 | vm_map_address_t vaddr, |
| 686 | vm_prot_t fault_type, |
| 687 | int object_lock_type, |
| 688 | vm_map_version_t *out_version, /* OUT */ |
| 689 | vm_object_t *object, /* OUT */ |
| 690 | vm_object_offset_t *offset, /* OUT */ |
| 691 | vm_prot_t *out_prot, /* OUT */ |
| 692 | boolean_t *wired, /* OUT */ |
| 693 | vm_object_fault_info_t fault_info, /* OUT */ |
| 694 | vm_map_t *real_map); /* OUT */ |
| 695 | |
| 696 | /* Verifies that the map has not changed since the given version. */ |
| 697 | extern boolean_t vm_map_verify( |
| 698 | vm_map_t map, |
| 699 | vm_map_version_t *version); /* REF */ |
| 700 | |
| 701 | extern vm_map_entry_t vm_map_entry_insert( |
| 702 | vm_map_t map, |
| 703 | vm_map_entry_t insp_entry, |
| 704 | vm_map_offset_t start, |
| 705 | vm_map_offset_t end, |
| 706 | vm_object_t object, |
| 707 | vm_object_offset_t offset, |
| 708 | boolean_t needs_copy, |
| 709 | boolean_t is_shared, |
| 710 | boolean_t in_transition, |
| 711 | vm_prot_t cur_protection, |
| 712 | vm_prot_t max_protection, |
| 713 | vm_behavior_t behavior, |
| 714 | vm_inherit_t inheritance, |
| 715 | unsigned wired_count, |
| 716 | boolean_t no_cache, |
| 717 | boolean_t permanent, |
| 718 | unsigned int superpage_size, |
| 719 | boolean_t clear_map_aligned, |
| 720 | boolean_t is_submap, |
| 721 | boolean_t used_for_jit, |
| 722 | int alias); |
| 723 | |
| 724 | |
| 725 | /* |
| 726 | * Functions implemented as macros |
| 727 | */ |
| 728 | #define vm_map_min(map) ((map)->min_offset) |
| 729 | /* Lowest valid address in |
| 730 | * a map */ |
| 731 | |
| 732 | #define vm_map_max(map) ((map)->max_offset) |
| 733 | /* Highest valid address */ |
| 734 | |
| 735 | #define vm_map_pmap(map) ((map)->pmap) |
| 736 | /* Physical map associated |
| 737 | * with this address map */ |
| 738 | |
| 739 | /* |
| 740 | * Macros/functions for map residence counts and swapin/out of vm maps |
| 741 | */ |
| 742 | #if TASK_SWAPPER |
| 743 | |
| 744 | #if MACH_ASSERT |
| 745 | /* Gain a reference to an existing map */ |
| 746 | extern void vm_map_reference( |
| 747 | vm_map_t map); |
| 748 | /* Lose a residence count */ |
| 749 | extern void vm_map_res_deallocate( |
| 750 | vm_map_t map); |
| 751 | /* Gain a residence count on a map */ |
| 752 | extern void vm_map_res_reference( |
| 753 | vm_map_t map); |
| 754 | /* Gain reference & residence counts to possibly swapped-out map */ |
| 755 | extern void vm_map_reference_swap( |
| 756 | vm_map_t map); |
| 757 | |
| 758 | #else /* MACH_ASSERT */ |
| 759 | |
| 760 | #define vm_map_reference(map) \ |
| 761 | MACRO_BEGIN \ |
| 762 | vm_map_t Map = (map); \ |
| 763 | if (Map) { \ |
| 764 | lck_mtx_lock(&Map->s_lock); \ |
| 765 | Map->res_count++; \ |
| 766 | Map->map_refcnt++; \ |
| 767 | lck_mtx_unlock(&Map->s_lock); \ |
| 768 | } \ |
| 769 | MACRO_END |
| 770 | |
| 771 | #define vm_map_res_reference(map) \ |
| 772 | MACRO_BEGIN \ |
| 773 | vm_map_t Lmap = (map); \ |
| 774 | if (Lmap->res_count == 0) { \ |
| 775 | lck_mtx_unlock(&Lmap->s_lock);\ |
| 776 | vm_map_lock(Lmap); \ |
| 777 | vm_map_swapin(Lmap); \ |
| 778 | lck_mtx_lock(&Lmap->s_lock); \ |
| 779 | ++Lmap->res_count; \ |
| 780 | vm_map_unlock(Lmap); \ |
| 781 | } else \ |
| 782 | ++Lmap->res_count; \ |
| 783 | MACRO_END |
| 784 | |
| 785 | #define vm_map_res_deallocate(map) \ |
| 786 | MACRO_BEGIN \ |
| 787 | vm_map_t Map = (map); \ |
| 788 | if (--Map->res_count == 0) { \ |
| 789 | lck_mtx_unlock(&Map->s_lock); \ |
| 790 | vm_map_lock(Map); \ |
| 791 | vm_map_swapout(Map); \ |
| 792 | vm_map_unlock(Map); \ |
| 793 | lck_mtx_lock(&Map->s_lock); \ |
| 794 | } \ |
| 795 | MACRO_END |
| 796 | |
| 797 | #define vm_map_reference_swap(map) \ |
| 798 | MACRO_BEGIN \ |
| 799 | vm_map_t Map = (map); \ |
| 800 | lck_mtx_lock(&Map->s_lock); \ |
| 801 | ++Map->map_refcnt; \ |
| 802 | vm_map_res_reference(Map); \ |
| 803 | lck_mtx_unlock(&Map->s_lock); \ |
| 804 | MACRO_END |
| 805 | #endif /* MACH_ASSERT */ |
| 806 | |
| 807 | extern void vm_map_swapin( |
| 808 | vm_map_t map); |
| 809 | |
| 810 | extern void vm_map_swapout( |
| 811 | vm_map_t map); |
| 812 | |
| 813 | #else /* TASK_SWAPPER */ |
| 814 | |
| 815 | #define vm_map_reference(map) \ |
| 816 | MACRO_BEGIN \ |
| 817 | vm_map_t Map = (map); \ |
| 818 | if (Map) { \ |
| 819 | lck_mtx_lock(&Map->s_lock); \ |
| 820 | Map->map_refcnt++; \ |
| 821 | lck_mtx_unlock(&Map->s_lock); \ |
| 822 | } \ |
| 823 | MACRO_END |
| 824 | |
| 825 | #define vm_map_reference_swap(map) vm_map_reference(map) |
| 826 | #define vm_map_res_reference(map) |
| 827 | #define vm_map_res_deallocate(map) |
| 828 | |
| 829 | #endif /* TASK_SWAPPER */ |
| 830 | |
| 831 | /* |
| 832 | * Submap object. Must be used to create memory to be put |
| 833 | * in a submap by vm_map_submap. |
| 834 | */ |
| 835 | extern vm_object_t vm_submap_object; |
| 836 | |
| 837 | /* |
| 838 | * Wait and wakeup macros for in_transition map entries. |
| 839 | */ |
| 840 | #define vm_map_entry_wait(map, interruptible) \ |
| 841 | ((map)->timestamp++ , \ |
| 842 | lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \ |
| 843 | (event_t)&(map)->hdr, interruptible)) |
| 844 | |
| 845 | |
| 846 | #define vm_map_entry_wakeup(map) \ |
| 847 | thread_wakeup((event_t)(&(map)->hdr)) |
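/*
 * Typical pattern (sketch, following vm_map.c): a thread that finds an
 * entry "in_transition" requests a wakeup and sleeps; the sleep drops
 * the map lock, so the entry must be looked up again afterwards:
 *
 * while (entry->in_transition) {
 *     entry->needs_wakeup = TRUE;
 *     vm_map_entry_wait(map, THREAD_UNINT);
 *     // re-lookup "entry" here before re-checking
 * }
 *
 * The thread that clears "in_transition" calls vm_map_entry_wakeup(map)
 * if "needs_wakeup" was set.
 */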
| 848 | |
| 849 | |
| 850 | #define vm_map_ref_fast(map) \ |
| 851 | MACRO_BEGIN \ |
| 852 | lck_mtx_lock(&map->s_lock); \ |
map->map_refcnt++; \
| 854 | vm_map_res_reference(map); \ |
| 855 | lck_mtx_unlock(&map->s_lock); \ |
| 856 | MACRO_END |
| 857 | |
| 858 | #define vm_map_dealloc_fast(map) \ |
| 859 | MACRO_BEGIN \ |
| 860 | int c; \ |
| 861 | \ |
| 862 | lck_mtx_lock(&map->s_lock); \ |
c = --map->map_refcnt; \
| 864 | if (c > 0) \ |
| 865 | vm_map_res_deallocate(map); \ |
| 866 | lck_mtx_unlock(&map->s_lock); \ |
| 867 | if (c == 0) \ |
vm_map_destroy(map, VM_MAP_REMOVE_NO_FLAGS); \
| 869 | MACRO_END |
| 870 | |
| 871 | |
| 872 | /* simplify map entries */ |
| 873 | extern void vm_map_simplify_entry( |
| 874 | vm_map_t map, |
| 875 | vm_map_entry_t this_entry); |
| 876 | extern void vm_map_simplify( |
| 877 | vm_map_t map, |
| 878 | vm_map_offset_t start); |
| 879 | |
| 880 | /* Move the information in a map copy object to a new map copy object */ |
| 881 | extern vm_map_copy_t vm_map_copy_copy( |
| 882 | vm_map_copy_t copy); |
| 883 | |
| 884 | /* Create a copy object from an object. */ |
| 885 | extern kern_return_t vm_map_copyin_object( |
| 886 | vm_object_t object, |
| 887 | vm_object_offset_t offset, |
| 888 | vm_object_size_t size, |
| 889 | vm_map_copy_t *copy_result); /* OUT */ |
| 890 | |
| 891 | extern kern_return_t vm_map_random_address_for_size( |
| 892 | vm_map_t map, |
| 893 | vm_map_offset_t *address, |
| 894 | vm_map_size_t size); |
| 895 | |
| 896 | /* Enter a mapping */ |
| 897 | extern kern_return_t vm_map_enter( |
| 898 | vm_map_t map, |
| 899 | vm_map_offset_t *address, |
| 900 | vm_map_size_t size, |
| 901 | vm_map_offset_t mask, |
| 902 | int flags, |
| 903 | vm_map_kernel_flags_t vmk_flags, |
| 904 | vm_tag_t tag, |
| 905 | vm_object_t object, |
| 906 | vm_object_offset_t offset, |
| 907 | boolean_t needs_copy, |
| 908 | vm_prot_t cur_protection, |
| 909 | vm_prot_t max_protection, |
| 910 | vm_inherit_t inheritance); |
| 911 | |
| 912 | #if __arm64__ |
| 913 | extern kern_return_t vm_map_enter_fourk( |
| 914 | vm_map_t map, |
| 915 | vm_map_offset_t *address, |
| 916 | vm_map_size_t size, |
| 917 | vm_map_offset_t mask, |
| 918 | int flags, |
| 919 | vm_map_kernel_flags_t vmk_flags, |
| 920 | vm_tag_t tag, |
| 921 | vm_object_t object, |
| 922 | vm_object_offset_t offset, |
| 923 | boolean_t needs_copy, |
| 924 | vm_prot_t cur_protection, |
| 925 | vm_prot_t max_protection, |
| 926 | vm_inherit_t inheritance); |
| 927 | #endif /* __arm64__ */ |
| 928 | |
| 929 | /* XXX should go away - replaced with regular enter of contig object */ |
| 930 | extern kern_return_t vm_map_enter_cpm( |
| 931 | vm_map_t map, |
| 932 | vm_map_address_t *addr, |
| 933 | vm_map_size_t size, |
| 934 | int flags); |
| 935 | |
| 936 | extern kern_return_t vm_map_remap( |
| 937 | vm_map_t target_map, |
| 938 | vm_map_offset_t *address, |
| 939 | vm_map_size_t size, |
| 940 | vm_map_offset_t mask, |
| 941 | int flags, |
| 942 | vm_map_kernel_flags_t vmk_flags, |
| 943 | vm_tag_t tag, |
| 944 | vm_map_t src_map, |
| 945 | vm_map_offset_t memory_address, |
| 946 | boolean_t copy, |
| 947 | vm_prot_t *cur_protection, |
| 948 | vm_prot_t *max_protection, |
| 949 | vm_inherit_t inheritance); |
| 950 | |
| 951 | |
/*
* Read and write between a kernel buffer and a user address range in a
* specified map.
*/
| 955 | extern kern_return_t vm_map_write_user( |
| 956 | vm_map_t map, |
| 957 | void *src_p, |
| 958 | vm_map_offset_t dst_addr, |
| 959 | vm_size_t size); |
| 960 | |
| 961 | extern kern_return_t vm_map_read_user( |
| 962 | vm_map_t map, |
| 963 | vm_map_offset_t src_addr, |
| 964 | void *dst_p, |
| 965 | vm_size_t size); |
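/*
 * A hedged usage sketch: both calls move "size" bytes between a kernel
 * buffer and user addresses in "map", failing if the user range is not
 * accessible:
 *
 * uint32_t value;
 * if (vm_map_read_user(map, user_addr, &value, sizeof(value)) !=
 *     KERN_SUCCESS) {
 *     // user address range was not readable
 * }
 */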
| 966 | |
| 967 | /* Create a new task map using an existing task map as a template. */ |
| 968 | extern vm_map_t vm_map_fork( |
| 969 | ledger_t ledger, |
| 970 | vm_map_t old_map, |
| 971 | int options); |
| 972 | #define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001 |
| 973 | #define VM_MAP_FORK_PRESERVE_PURGEABLE 0x00000002 |
| 974 | #define VM_MAP_FORK_CORPSE_FOOTPRINT 0x00000004 |
| 975 | |
| 976 | /* Change inheritance */ |
| 977 | extern kern_return_t vm_map_inherit( |
| 978 | vm_map_t map, |
| 979 | vm_map_offset_t start, |
| 980 | vm_map_offset_t end, |
| 981 | vm_inherit_t new_inheritance); |
| 982 | |
| 983 | /* Add or remove machine-dependent attributes from map regions */ |
| 984 | extern kern_return_t vm_map_machine_attribute( |
| 985 | vm_map_t map, |
| 986 | vm_map_offset_t start, |
| 987 | vm_map_offset_t end, |
| 988 | vm_machine_attribute_t attribute, |
| 989 | vm_machine_attribute_val_t* value); /* IN/OUT */ |
| 990 | |
| 991 | extern kern_return_t vm_map_msync( |
| 992 | vm_map_t map, |
| 993 | vm_map_address_t address, |
| 994 | vm_map_size_t size, |
| 995 | vm_sync_t sync_flags); |
| 996 | |
| 997 | /* Set paging behavior */ |
| 998 | extern kern_return_t vm_map_behavior_set( |
| 999 | vm_map_t map, |
| 1000 | vm_map_offset_t start, |
| 1001 | vm_map_offset_t end, |
| 1002 | vm_behavior_t new_behavior); |
| 1003 | |
| 1004 | extern kern_return_t vm_map_region( |
| 1005 | vm_map_t map, |
| 1006 | vm_map_offset_t *address, |
| 1007 | vm_map_size_t *size, |
| 1008 | vm_region_flavor_t flavor, |
| 1009 | vm_region_info_t info, |
| 1010 | mach_msg_type_number_t *count, |
| 1011 | mach_port_t *object_name); |
| 1012 | |
| 1013 | extern kern_return_t vm_map_region_recurse_64( |
| 1014 | vm_map_t map, |
| 1015 | vm_map_offset_t *address, |
| 1016 | vm_map_size_t *size, |
| 1017 | natural_t *nesting_depth, |
| 1018 | vm_region_submap_info_64_t info, |
| 1019 | mach_msg_type_number_t *count); |
| 1020 | |
| 1021 | extern kern_return_t vm_map_page_query_internal( |
| 1022 | vm_map_t map, |
| 1023 | vm_map_offset_t offset, |
| 1024 | int *disposition, |
| 1025 | int *ref_count); |
| 1026 | |
| 1027 | extern kern_return_t vm_map_query_volatile( |
| 1028 | vm_map_t map, |
| 1029 | mach_vm_size_t *volatile_virtual_size_p, |
| 1030 | mach_vm_size_t *volatile_resident_size_p, |
| 1031 | mach_vm_size_t *volatile_compressed_size_p, |
| 1032 | mach_vm_size_t *volatile_pmap_size_p, |
| 1033 | mach_vm_size_t *volatile_compressed_pmap_size_p); |
| 1034 | |
| 1035 | extern kern_return_t vm_map_submap( |
| 1036 | vm_map_t map, |
| 1037 | vm_map_offset_t start, |
| 1038 | vm_map_offset_t end, |
| 1039 | vm_map_t submap, |
| 1040 | vm_map_offset_t offset, |
| 1041 | boolean_t use_pmap); |
| 1042 | |
| 1043 | extern void vm_map_submap_pmap_clean( |
| 1044 | vm_map_t map, |
| 1045 | vm_map_offset_t start, |
| 1046 | vm_map_offset_t end, |
| 1047 | vm_map_t sub_map, |
| 1048 | vm_map_offset_t offset); |
| 1049 | |
| 1050 | /* Convert from a map entry port to a map */ |
| 1051 | extern vm_map_t convert_port_entry_to_map( |
| 1052 | ipc_port_t port); |
| 1053 | |
| 1054 | /* Convert from a port to a vm_object */ |
| 1055 | extern vm_object_t convert_port_entry_to_object( |
| 1056 | ipc_port_t port); |
| 1057 | |
| 1058 | |
| 1059 | extern kern_return_t vm_map_set_cache_attr( |
| 1060 | vm_map_t map, |
| 1061 | vm_map_offset_t va); |
| 1062 | |
| 1063 | |
| 1064 | /* definitions related to overriding the NX behavior */ |
| 1065 | |
| 1066 | #define VM_ABI_32 0x1 |
| 1067 | #define VM_ABI_64 0x2 |
| 1068 | |
| 1069 | extern int override_nx(vm_map_t map, uint32_t user_tag); |
| 1070 | |
| 1071 | #if PMAP_CS |
| 1072 | extern kern_return_t vm_map_entry_cs_associate( |
| 1073 | vm_map_t map, |
| 1074 | vm_map_entry_t entry, |
| 1075 | vm_map_kernel_flags_t vmk_flags); |
| 1076 | #endif /* PMAP_CS */ |
| 1077 | |
| 1078 | extern void vm_map_region_top_walk( |
| 1079 | vm_map_entry_t entry, |
| 1080 | vm_region_top_info_t top); |
| 1081 | extern void vm_map_region_walk( |
| 1082 | vm_map_t map, |
| 1083 | vm_map_offset_t va, |
| 1084 | vm_map_entry_t entry, |
| 1085 | vm_object_offset_t offset, |
| 1086 | vm_object_size_t range, |
| 1087 | vm_region_extended_info_t extended, |
| 1088 | boolean_t look_for_pages, |
| 1089 | mach_msg_type_number_t count); |
| 1090 | |
| 1091 | |
| 1092 | struct vm_map_corpse_footprint_header { |
| 1093 | vm_size_t cf_size; /* allocated buffer size */ |
| 1094 | uint32_t cf_last_region; /* offset of last region in buffer */ |
| 1095 | union { |
| 1096 | uint32_t cfu_last_zeroes; /* during creation: |
| 1097 | * number of "zero" dispositions at |
| 1098 | * end of last region */ |
| 1099 | uint32_t cfu_hint_region; /* during lookup: |
| 1100 | * offset of last looked up region */ |
| 1101 | #define cf_last_zeroes cfu.cfu_last_zeroes |
| 1102 | #define cf_hint_region cfu.cfu_hint_region |
| 1103 | } cfu; |
| 1104 | }; |
| 1105 | struct vm_map_corpse_footprint_region { |
| 1106 | vm_map_offset_t cfr_vaddr; /* region start virtual address */ |
| 1107 | uint32_t cfr_num_pages; /* number of pages in this "region" */ |
| 1108 | unsigned char cfr_disposition[0]; /* disposition of each page */ |
| 1109 | } __attribute__((packed)); |
| 1110 | |
| 1111 | extern kern_return_t vm_map_corpse_footprint_collect( |
| 1112 | vm_map_t old_map, |
| 1113 | vm_map_entry_t old_entry, |
| 1114 | vm_map_t new_map); |
| 1115 | extern void vm_map_corpse_footprint_collect_done( |
| 1116 | vm_map_t new_map); |
| 1117 | |
| 1118 | extern kern_return_t vm_map_corpse_footprint_query_page_info( |
| 1119 | vm_map_t map, |
| 1120 | vm_map_offset_t va, |
| 1121 | int *disp); |
| 1122 | |
| 1123 | extern void vm_map_copy_footprint_ledgers( |
| 1124 | task_t old_task, |
| 1125 | task_t new_task); |
| 1126 | extern void vm_map_copy_ledger( |
| 1127 | task_t old_task, |
| 1128 | task_t new_task, |
| 1129 | int ledger_entry); |
| 1130 | |
| 1131 | #endif /* MACH_KERNEL_PRIVATE */ |
| 1132 | |
| 1133 | __BEGIN_DECLS |
| 1134 | |
| 1135 | /* Create an empty map */ |
| 1136 | extern vm_map_t vm_map_create( |
| 1137 | pmap_t pmap, |
| 1138 | vm_map_offset_t min_off, |
| 1139 | vm_map_offset_t max_off, |
| 1140 | boolean_t pageable); |
| 1141 | extern vm_map_t vm_map_create_options( |
| 1142 | pmap_t pmap, |
| 1143 | vm_map_offset_t min_off, |
| 1144 | vm_map_offset_t max_off, |
| 1145 | int options); |
| 1146 | #define VM_MAP_CREATE_PAGEABLE 0x00000001 |
#define VM_MAP_CREATE_CORPSE_FOOTPRINT 0x00000002
| 1148 | #define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \ |
| 1149 | VM_MAP_CREATE_CORPSE_FOOTPRINT) |
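/*
 * For example, a pageable map that also reserves corpse-footprint space
 * might be created as (sketch):
 *
 * new_map = vm_map_create_options(pmap, min_off, max_off,
 *     VM_MAP_CREATE_PAGEABLE | VM_MAP_CREATE_CORPSE_FOOTPRINT);
 */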
| 1150 | |
| 1151 | extern void vm_map_disable_hole_optimization(vm_map_t map); |
| 1152 | |
| 1153 | /* Get rid of a map */ |
| 1154 | extern void vm_map_destroy( |
| 1155 | vm_map_t map, |
| 1156 | int flags); |
| 1157 | |
| 1158 | /* Lose a reference */ |
| 1159 | extern void vm_map_deallocate( |
| 1160 | vm_map_t map); |
| 1161 | |
| 1162 | extern vm_map_t vm_map_switch( |
| 1163 | vm_map_t map); |
| 1164 | |
| 1165 | /* Change protection */ |
| 1166 | extern kern_return_t vm_map_protect( |
| 1167 | vm_map_t map, |
| 1168 | vm_map_offset_t start, |
| 1169 | vm_map_offset_t end, |
| 1170 | vm_prot_t new_prot, |
| 1171 | boolean_t set_max); |
| 1172 | |
| 1173 | /* Check protection */ |
| 1174 | extern boolean_t vm_map_check_protection( |
| 1175 | vm_map_t map, |
| 1176 | vm_map_offset_t start, |
| 1177 | vm_map_offset_t end, |
| 1178 | vm_prot_t protection); |
| 1179 | |
| 1180 | /* wire down a region */ |
| 1181 | |
| 1182 | #ifdef XNU_KERNEL_PRIVATE |
| 1183 | |
| 1184 | extern kern_return_t vm_map_wire_kernel( |
| 1185 | vm_map_t map, |
| 1186 | vm_map_offset_t start, |
| 1187 | vm_map_offset_t end, |
| 1188 | vm_prot_t access_type, |
| 1189 | vm_tag_t tag, |
| 1190 | boolean_t user_wire); |
| 1191 | |
| 1192 | extern kern_return_t vm_map_wire_and_extract_kernel( |
| 1193 | vm_map_t map, |
| 1194 | vm_map_offset_t start, |
| 1195 | vm_prot_t access_type, |
| 1196 | vm_tag_t tag, |
| 1197 | boolean_t user_wire, |
| 1198 | ppnum_t *physpage_p); |
| 1199 | |
| 1200 | /* kext exported versions */ |
| 1201 | |
| 1202 | extern kern_return_t vm_map_wire_external( |
| 1203 | vm_map_t map, |
| 1204 | vm_map_offset_t start, |
| 1205 | vm_map_offset_t end, |
| 1206 | vm_prot_t access_type, |
| 1207 | boolean_t user_wire); |
| 1208 | |
| 1209 | extern kern_return_t vm_map_wire_and_extract_external( |
| 1210 | vm_map_t map, |
| 1211 | vm_map_offset_t start, |
| 1212 | vm_prot_t access_type, |
| 1213 | boolean_t user_wire, |
| 1214 | ppnum_t *physpage_p); |
| 1215 | |
| 1216 | #else /* XNU_KERNEL_PRIVATE */ |
| 1217 | |
| 1218 | extern kern_return_t vm_map_wire( |
| 1219 | vm_map_t map, |
| 1220 | vm_map_offset_t start, |
| 1221 | vm_map_offset_t end, |
| 1222 | vm_prot_t access_type, |
| 1223 | boolean_t user_wire); |
| 1224 | |
| 1225 | extern kern_return_t vm_map_wire_and_extract( |
| 1226 | vm_map_t map, |
| 1227 | vm_map_offset_t start, |
| 1228 | vm_prot_t access_type, |
| 1229 | boolean_t user_wire, |
| 1230 | ppnum_t *physpage_p); |
| 1231 | |
| 1232 | #endif /* !XNU_KERNEL_PRIVATE */ |
| 1233 | |
| 1234 | /* unwire a region */ |
| 1235 | extern kern_return_t vm_map_unwire( |
| 1236 | vm_map_t map, |
| 1237 | vm_map_offset_t start, |
| 1238 | vm_map_offset_t end, |
| 1239 | boolean_t user_wire); |
| 1240 | |
| 1241 | #ifdef XNU_KERNEL_PRIVATE |
| 1242 | |
| 1243 | /* Enter a mapping of a memory object */ |
| 1244 | extern kern_return_t vm_map_enter_mem_object( |
| 1245 | vm_map_t map, |
| 1246 | vm_map_offset_t *address, |
| 1247 | vm_map_size_t size, |
| 1248 | vm_map_offset_t mask, |
| 1249 | int flags, |
| 1250 | vm_map_kernel_flags_t vmk_flags, |
| 1251 | vm_tag_t tag, |
| 1252 | ipc_port_t port, |
| 1253 | vm_object_offset_t offset, |
| 1254 | boolean_t needs_copy, |
| 1255 | vm_prot_t cur_protection, |
| 1256 | vm_prot_t max_protection, |
| 1257 | vm_inherit_t inheritance); |
| 1258 | |
| 1259 | /* Enter a mapping of a memory object */ |
| 1260 | extern kern_return_t vm_map_enter_mem_object_prefault( |
| 1261 | vm_map_t map, |
| 1262 | vm_map_offset_t *address, |
| 1263 | vm_map_size_t size, |
| 1264 | vm_map_offset_t mask, |
| 1265 | int flags, |
| 1266 | vm_map_kernel_flags_t vmk_flags, |
| 1267 | vm_tag_t tag, |
| 1268 | ipc_port_t port, |
| 1269 | vm_object_offset_t offset, |
| 1270 | vm_prot_t cur_protection, |
| 1271 | vm_prot_t max_protection, |
| 1272 | upl_page_list_ptr_t page_list, |
| 1273 | unsigned int page_list_count); |
| 1274 | |
| 1275 | /* Enter a mapping of a memory object */ |
| 1276 | extern kern_return_t vm_map_enter_mem_object_control( |
| 1277 | vm_map_t map, |
| 1278 | vm_map_offset_t *address, |
| 1279 | vm_map_size_t size, |
| 1280 | vm_map_offset_t mask, |
| 1281 | int flags, |
| 1282 | vm_map_kernel_flags_t vmk_flags, |
| 1283 | vm_tag_t tag, |
| 1284 | memory_object_control_t control, |
| 1285 | vm_object_offset_t offset, |
| 1286 | boolean_t needs_copy, |
| 1287 | vm_prot_t cur_protection, |
| 1288 | vm_prot_t max_protection, |
| 1289 | vm_inherit_t inheritance); |
| 1290 | |
#endif /* XNU_KERNEL_PRIVATE */
| 1292 | |
| 1293 | /* Deallocate a region */ |
| 1294 | extern kern_return_t vm_map_remove( |
| 1295 | vm_map_t map, |
| 1296 | vm_map_offset_t start, |
| 1297 | vm_map_offset_t end, |
| 1298 | boolean_t flags); |
| 1299 | |
| 1300 | /* Deallocate a region when the map is already locked */ |
| 1301 | extern kern_return_t vm_map_remove_locked( |
| 1302 | vm_map_t map, |
| 1303 | vm_map_offset_t start, |
| 1304 | vm_map_offset_t end, |
| 1305 | boolean_t flags); |
| 1306 | |
| 1307 | /* Discard a copy without using it */ |
| 1308 | extern void vm_map_copy_discard( |
| 1309 | vm_map_copy_t copy); |
| 1310 | |
| 1311 | /* Overwrite existing memory with a copy */ |
| 1312 | extern kern_return_t vm_map_copy_overwrite( |
| 1313 | vm_map_t dst_map, |
| 1314 | vm_map_address_t dst_addr, |
| 1315 | vm_map_copy_t copy, |
| 1316 | boolean_t interruptible); |
| 1317 | |
/* Returns TRUE if the size of the vm_map_copy equals the "size" parameter, FALSE otherwise */
| 1319 | extern boolean_t vm_map_copy_validate_size( |
| 1320 | vm_map_t dst_map, |
| 1321 | vm_map_copy_t copy, |
| 1322 | vm_map_size_t *size); |
| 1323 | |
| 1324 | /* Place a copy into a map */ |
| 1325 | extern kern_return_t vm_map_copyout( |
| 1326 | vm_map_t dst_map, |
| 1327 | vm_map_address_t *dst_addr, /* OUT */ |
| 1328 | vm_map_copy_t copy); |
| 1329 | |
| 1330 | extern kern_return_t vm_map_copyout_size( |
| 1331 | vm_map_t dst_map, |
| 1332 | vm_map_address_t *dst_addr, /* OUT */ |
| 1333 | vm_map_copy_t copy, |
| 1334 | vm_map_size_t copy_size); |
| 1335 | |
| 1336 | extern kern_return_t vm_map_copyout_internal( |
| 1337 | vm_map_t dst_map, |
| 1338 | vm_map_address_t *dst_addr, /* OUT */ |
| 1339 | vm_map_copy_t copy, |
| 1340 | vm_map_size_t copy_size, |
| 1341 | boolean_t consume_on_success, |
| 1342 | vm_prot_t cur_protection, |
| 1343 | vm_prot_t max_protection, |
| 1344 | vm_inherit_t inheritance); |
| 1345 | |
| 1346 | extern kern_return_t vm_map_copyin( |
| 1347 | vm_map_t src_map, |
| 1348 | vm_map_address_t src_addr, |
| 1349 | vm_map_size_t len, |
| 1350 | boolean_t src_destroy, |
| 1351 | vm_map_copy_t *copy_result); /* OUT */ |
| 1352 | |
| 1353 | extern kern_return_t vm_map_copyin_common( |
| 1354 | vm_map_t src_map, |
| 1355 | vm_map_address_t src_addr, |
| 1356 | vm_map_size_t len, |
| 1357 | boolean_t src_destroy, |
| 1358 | boolean_t src_volatile, |
| 1359 | vm_map_copy_t *copy_result, /* OUT */ |
| 1360 | boolean_t use_maxprot); |
| 1361 | |
| 1362 | #define VM_MAP_COPYIN_SRC_DESTROY 0x00000001 |
| 1363 | #define VM_MAP_COPYIN_USE_MAXPROT 0x00000002 |
| 1364 | #define VM_MAP_COPYIN_ENTRY_LIST 0x00000004 |
| 1365 | #define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008 |
| 1366 | #define VM_MAP_COPYIN_ALL_FLAGS 0x0000000F |
| 1367 | extern kern_return_t vm_map_copyin_internal( |
| 1368 | vm_map_t src_map, |
| 1369 | vm_map_address_t src_addr, |
| 1370 | vm_map_size_t len, |
| 1371 | int flags, |
| 1372 | vm_map_copy_t *copy_result); /* OUT */ |
| 1373 | |
extern kern_return_t vm_map_copy_extract(
| 1375 | vm_map_t src_map, |
| 1376 | vm_map_address_t src_addr, |
| 1377 | vm_map_size_t len, |
| 1378 | vm_map_copy_t *copy_result, /* OUT */ |
| 1379 | vm_prot_t *cur_prot, /* OUT */ |
| 1380 | vm_prot_t *max_prot); |
| 1381 | |
| 1382 | |
| 1383 | extern void vm_map_disable_NX( |
| 1384 | vm_map_t map); |
| 1385 | |
| 1386 | extern void vm_map_disallow_data_exec( |
| 1387 | vm_map_t map); |
| 1388 | |
| 1389 | extern void vm_map_set_64bit( |
| 1390 | vm_map_t map); |
| 1391 | |
| 1392 | extern void vm_map_set_32bit( |
| 1393 | vm_map_t map); |
| 1394 | |
| 1395 | extern void vm_map_set_jumbo( |
| 1396 | vm_map_t map); |
| 1397 | |
| 1398 | extern void vm_map_set_max_addr( |
| 1399 | vm_map_t map, vm_map_offset_t new_max_offset); |
| 1400 | |
| 1401 | extern boolean_t vm_map_has_hard_pagezero( |
| 1402 | vm_map_t map, |
| 1403 | vm_map_offset_t pagezero_size); |
| 1404 | extern void vm_commit_pagezero_status(vm_map_t tmap); |
| 1405 | |
| 1406 | #ifdef __arm__ |
| 1407 | static inline boolean_t vm_map_is_64bit(__unused vm_map_t map) { return 0; } |
| 1408 | #else |
| 1409 | extern boolean_t vm_map_is_64bit( |
| 1410 | vm_map_t map); |
| 1411 | #endif |
| 1412 | |
| 1413 | |
| 1414 | extern kern_return_t vm_map_raise_max_offset( |
| 1415 | vm_map_t map, |
| 1416 | vm_map_offset_t new_max_offset); |
| 1417 | |
| 1418 | extern kern_return_t vm_map_raise_min_offset( |
| 1419 | vm_map_t map, |
| 1420 | vm_map_offset_t new_min_offset); |
| 1421 | #if __x86_64__ |
| 1422 | extern void vm_map_set_high_start( |
| 1423 | vm_map_t map, |
| 1424 | vm_map_offset_t high_start); |
| 1425 | #endif /* __x86_64__ */ |
| 1426 | |
| 1427 | extern vm_map_offset_t vm_compute_max_offset( |
| 1428 | boolean_t is64); |
| 1429 | |
| 1430 | extern void vm_map_get_max_aslr_slide_section( |
| 1431 | vm_map_t map, |
| 1432 | int64_t *max_sections, |
| 1433 | int64_t *section_size); |
| 1434 | |
| 1435 | extern uint64_t vm_map_get_max_aslr_slide_pages( |
| 1436 | vm_map_t map); |
| 1437 | |
| 1438 | extern uint64_t vm_map_get_max_loader_aslr_slide_pages( |
| 1439 | vm_map_t map); |
| 1440 | |
| 1441 | extern void vm_map_set_user_wire_limit( |
| 1442 | vm_map_t map, |
| 1443 | vm_size_t limit); |
| 1444 | |
| 1445 | extern void vm_map_switch_protect( |
| 1446 | vm_map_t map, |
| 1447 | boolean_t val); |
| 1448 | |
| 1449 | extern void vm_map_iokit_mapped_region( |
| 1450 | vm_map_t map, |
| 1451 | vm_size_t bytes); |
| 1452 | |
| 1453 | extern void vm_map_iokit_unmapped_region( |
| 1454 | vm_map_t map, |
| 1455 | vm_size_t bytes); |
| 1456 | |
| 1457 | |
| 1458 | extern boolean_t first_free_is_valid(vm_map_t); |
| 1459 | |
| 1460 | extern int vm_map_page_shift( |
| 1461 | vm_map_t map); |
| 1462 | |
| 1463 | extern vm_map_offset_t vm_map_page_mask( |
| 1464 | vm_map_t map); |
| 1465 | |
| 1466 | extern int vm_map_page_size( |
| 1467 | vm_map_t map); |
| 1468 | |
| 1469 | extern vm_map_offset_t vm_map_round_page_mask( |
| 1470 | vm_map_offset_t offset, |
| 1471 | vm_map_offset_t mask); |
| 1472 | |
| 1473 | extern vm_map_offset_t vm_map_trunc_page_mask( |
| 1474 | vm_map_offset_t offset, |
| 1475 | vm_map_offset_t mask); |
| 1476 | |
| 1477 | extern boolean_t vm_map_page_aligned( |
| 1478 | vm_map_offset_t offset, |
| 1479 | vm_map_offset_t mask); |
| 1480 | |
| 1481 | #ifdef XNU_KERNEL_PRIVATE |
| 1482 | extern kern_return_t vm_map_page_info( |
| 1483 | vm_map_t map, |
| 1484 | vm_map_offset_t offset, |
| 1485 | vm_page_info_flavor_t flavor, |
| 1486 | vm_page_info_t info, |
| 1487 | mach_msg_type_number_t *count); |
| 1488 | extern kern_return_t vm_map_page_range_info_internal( |
| 1489 | vm_map_t map, |
| 1490 | vm_map_offset_t start_offset, |
| 1491 | vm_map_offset_t end_offset, |
| 1492 | vm_page_info_flavor_t flavor, |
| 1493 | vm_page_info_t info, |
| 1494 | mach_msg_type_number_t *count); |
| 1495 | #endif /* XNU_KERNEL_PRIVATE */ |
| 1496 | |
| 1497 | |
| 1498 | #ifdef MACH_KERNEL_PRIVATE |
| 1499 | |
| 1500 | /* |
| 1501 | * Macros to invoke vm_map_copyin_common. vm_map_copyin is the |
| 1502 | * usual form; it handles a copyin based on the current protection |
* (a current protection of VM_PROT_NONE is a failure).
| 1504 | * vm_map_copyin_maxprot handles a copyin based on maximum possible |
| 1505 | * access. The difference is that a region with no current access |
| 1506 | * BUT possible maximum access is rejected by vm_map_copyin(), but |
| 1507 | * returned by vm_map_copyin_maxprot. |
| 1508 | */ |
| 1509 | #define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \ |
| 1510 | vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ |
| 1511 | FALSE, copy_result, FALSE) |
| 1512 | |
| 1513 | #define vm_map_copyin_maxprot(src_map, \ |
| 1514 | src_addr, len, src_destroy, copy_result) \ |
| 1515 | vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ |
| 1516 | FALSE, copy_result, TRUE) |
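/*
 * For instance, copying a region whose current protection is
 * VM_PROT_NONE but whose maximum protection includes VM_PROT_READ is
 * expected to fail with vm_map_copyin() yet succeed with
 * vm_map_copyin_maxprot() (sketch):
 *
 * kr = vm_map_copyin(map, addr, len, FALSE, &copy);
 * // kr != KERN_SUCCESS: no current access
 * kr = vm_map_copyin_maxprot(map, addr, len, FALSE, &copy);
 * // kr == KERN_SUCCESS: maximum access suffices
 */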
| 1517 | |
| 1518 | |
| 1519 | /* |
| 1520 | * Internal macros for rounding and truncation of vm_map offsets and sizes |
| 1521 | */ |
| 1522 | #define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) |
| 1523 | #define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) |
| 1524 | |
| 1525 | /* |
| 1526 | * Macros for rounding and truncation of vm_map offsets and sizes |
| 1527 | */ |
| 1528 | #define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT) |
| 1529 | #define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map))) |
| 1530 | #define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1) |
| 1531 | #define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0) |
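/*
 * Example: for a 16K map (page_shift 14), VM_MAP_PAGE_SIZE() is 0x4000
 * and VM_MAP_PAGE_MASK() is 0x3fff, so:
 *
 * VM_MAP_ROUND_PAGE(0x4001, 0x3fff) == 0x8000
 * VM_MAP_TRUNC_PAGE(0x4001, 0x3fff) == 0x4000
 * VM_MAP_PAGE_ALIGNED(0x8000, 0x3fff) == TRUE
 */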
| 1532 | |
| 1533 | static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) |
| 1534 | { |
| 1535 | switch (prot) { |
| 1536 | case MAP_MEM_NOOP: break; |
| 1537 | case MAP_MEM_IO: *wimg = VM_WIMG_IO; break; |
| 1538 | case MAP_MEM_COPYBACK: *wimg = VM_WIMG_USE_DEFAULT; break; |
| 1539 | case MAP_MEM_INNERWBACK: *wimg = VM_WIMG_INNERWBACK; break; |
| 1540 | case MAP_MEM_POSTED: *wimg = VM_WIMG_POSTED; break; |
| 1541 | case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break; |
| 1542 | case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break; |
| 1543 | default: |
| 1544 | panic("Unrecognized mapping type %u\n" , prot); |
| 1545 | } |
| 1546 | } |
| 1547 | |
| 1548 | #endif /* MACH_KERNEL_PRIVATE */ |
| 1549 | |
| 1550 | #ifdef XNU_KERNEL_PRIVATE |
| 1551 | extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift); |
| 1552 | #endif /* XNU_KERNEL_PRIVATE */ |
| 1553 | |
| 1554 | #define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) |
| 1555 | #define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) |
| 1556 | |
| 1557 | /* |
| 1558 | * Flags for vm_map_remove() and vm_map_delete() |
| 1559 | */ |
| 1560 | #define VM_MAP_REMOVE_NO_FLAGS 0x0 |
| 1561 | #define VM_MAP_REMOVE_KUNWIRE 0x1 |
| 1562 | #define VM_MAP_REMOVE_INTERRUPTIBLE 0x2 |
| 1563 | #define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4 |
| 1564 | #define VM_MAP_REMOVE_SAVE_ENTRIES 0x8 |
| 1565 | #define VM_MAP_REMOVE_NO_PMAP_CLEANUP 0x10 |
| 1566 | #define VM_MAP_REMOVE_NO_MAP_ALIGN 0x20 |
| 1567 | #define VM_MAP_REMOVE_NO_UNNESTING 0x40 |
| 1568 | #define VM_MAP_REMOVE_IMMUTABLE 0x80 |
| 1569 | #define VM_MAP_REMOVE_GAPS_OK 0x100 |
| 1570 | |
| 1571 | /* Support for UPLs from vm_maps */ |
| 1572 | |
| 1573 | #ifdef XNU_KERNEL_PRIVATE |
| 1574 | |
| 1575 | extern kern_return_t vm_map_get_upl( |
| 1576 | vm_map_t target_map, |
| 1577 | vm_map_offset_t map_offset, |
| 1578 | upl_size_t *size, |
| 1579 | upl_t *upl, |
| 1580 | upl_page_info_array_t page_info, |
| 1581 | unsigned int *page_infoCnt, |
| 1582 | upl_control_flags_t *flags, |
| 1583 | vm_tag_t tag, |
| 1584 | int force_data_sync); |
| 1585 | |
| 1586 | #endif /* XNU_KERNEL_PRIVATE */ |
| 1587 | |
| 1588 | extern void |
| 1589 | vm_map_sizes(vm_map_t map, |
| 1590 | vm_map_size_t * psize, |
| 1591 | vm_map_size_t * pfree, |
| 1592 | vm_map_size_t * plargest_free); |
| 1593 | |
| 1594 | #if CONFIG_DYNAMIC_CODE_SIGNING |
| 1595 | extern kern_return_t vm_map_sign(vm_map_t map, |
| 1596 | vm_map_offset_t start, |
| 1597 | vm_map_offset_t end); |
| 1598 | #endif |
| 1599 | |
| 1600 | extern kern_return_t vm_map_partial_reap( |
| 1601 | vm_map_t map, |
| 1602 | unsigned int *reclaimed_resident, |
| 1603 | unsigned int *reclaimed_compressed); |
| 1604 | |
| 1605 | |
| 1606 | #if DEVELOPMENT || DEBUG |
| 1607 | |
| 1608 | extern int vm_map_disconnect_page_mappings( |
| 1609 | vm_map_t map, |
| 1610 | boolean_t); |
| 1611 | #endif |
| 1612 | |
| 1613 | |
| 1614 | #if CONFIG_FREEZE |
| 1615 | |
| 1616 | extern kern_return_t vm_map_freeze( |
| 1617 | vm_map_t map, |
| 1618 | unsigned int *purgeable_count, |
| 1619 | unsigned int *wired_count, |
| 1620 | unsigned int *clean_count, |
| 1621 | unsigned int *dirty_count, |
| 1622 | unsigned int dirty_budget, |
| 1623 | unsigned int *shared_count, |
| 1624 | int *freezer_error_code, |
| 1625 | boolean_t eval_only); |
| 1626 | |
| 1627 | |
| 1628 | #define FREEZER_ERROR_GENERIC (-1) |
| 1629 | #define FREEZER_ERROR_EXCESS_SHARED_MEMORY (-2) |
| 1630 | #define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3) |
| 1631 | #define FREEZER_ERROR_NO_COMPRESSOR_SPACE (-4) |
| 1632 | #define FREEZER_ERROR_NO_SWAP_SPACE (-5) |
| 1633 | |
| 1634 | #endif |
| 1635 | |
| 1636 | __END_DECLS |
| 1637 | |
| 1638 | /* |
| 1639 | * In some cases, we don't have a real VM object but still want to return a |
| 1640 | * unique ID (to avoid a memory region looking like shared memory), so build |
| 1641 | * a fake pointer based on the map's ledger and the index of the ledger being |
| 1642 | * reported. |
| 1643 | */ |
| 1644 | #define INFO_MAKE_FAKE_OBJECT_ID(map,ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id))) |
| 1645 | |
| 1646 | #endif /* KERNEL_PRIVATE */ |
| 1647 | |
| 1648 | #endif /* _VM_VM_MAP_H_ */ |
| 1649 | |