| 1 | /* |
| 2 | * Copyright (c) 2000-2014 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. The rights granted to you under the License |
| 10 | * may not be used to create, or enable the creation or redistribution of, |
| 11 | * unlawful or unlicensed copies of an Apple operating system, or to |
| 12 | * circumvent, violate, or enable the circumvention or violation of, any |
| 13 | * terms of an Apple operating system software license agreement. |
| 14 | * |
| 15 | * Please obtain a copy of the License at |
| 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
| 17 | * |
| 18 | * The Original Code and all software distributed under the License are |
| 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 23 | * Please see the License for the specific language governing rights and |
| 24 | * limitations under the License. |
| 25 | * |
| 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
| 27 | */ |
| 28 | /* |
| 29 | * @OSF_COPYRIGHT@ |
| 30 | */ |
| 31 | /* |
| 32 | * Mach Operating System |
| 33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
| 34 | * All Rights Reserved. |
| 35 | * |
| 36 | * Permission to use, copy, modify and distribute this software and its |
| 37 | * documentation is hereby granted, provided that both the copyright |
| 38 | * notice and this permission notice appear in all copies of the |
| 39 | * software, derivative works or modified versions, and any portions |
| 40 | * thereof, and that both notices appear in supporting documentation. |
| 41 | * |
| 42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
| 43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
| 44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
| 45 | * |
| 46 | * Carnegie Mellon requests users of this software to return to |
| 47 | * |
| 48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
| 49 | * School of Computer Science |
| 50 | * Carnegie Mellon University |
| 51 | * Pittsburgh PA 15213-3890 |
| 52 | * |
| 53 | * any improvements or extensions that they make and grant Carnegie Mellon |
| 54 | * the rights to redistribute these changes. |
| 55 | */ |
| 56 | /* |
| 57 | */ |
| 58 | /* |
| 59 | * File: zalloc.h |
| 60 | * Author: Avadis Tevanian, Jr. |
| 61 | * Date: 1985 |
| 62 | * |
| 63 | */ |
| 64 | |
| 65 | #ifdef KERNEL_PRIVATE |
| 66 | |
| 67 | #ifndef _KERN_ZALLOC_H_ |
| 68 | #define _KERN_ZALLOC_H_ |
| 69 | |
| 70 | #include <mach/machine/vm_types.h> |
| 71 | #include <mach_debug/zone_info.h> |
| 72 | #include <kern/kern_types.h> |
| 73 | #include <sys/cdefs.h> |
| 74 | |
| 75 | #ifdef MACH_KERNEL_PRIVATE |
| 76 | |
| 77 | #include <zone_debug.h> |
| 78 | #include <kern/locks.h> |
| 79 | #include <kern/queue.h> |
| 80 | #include <kern/thread_call.h> |
| 81 | #include <kern/btlog.h> |
| 82 | |
| 83 | #if KASAN |
| 84 | #include <sys/queue.h> |
| 85 | #include <san/kasan.h> |
| 86 | #endif |
| 87 | |
| 88 | #ifdef CONFIG_ZCACHE |
| 89 | #include <kern/zcache.h> |
| 90 | #endif |
| 91 | |
| 92 | #if CONFIG_GZALLOC |
/*
 * Per-zone state for the guard-mode (gzalloc) allocator.
 * NOTE(review): field semantics inferred from names — gzfc appears to be a
 * per-zone free cache of element addresses held back on free, with
 * gzfc_index tracking the current slot; confirm against gzalloc.c.
 */
typedef struct gzalloc_data {
	uint32_t	gzfc_index;	/* current index into gzfc (see note above) */
	vm_offset_t	*gzfc;		/* guard-zone free cache array (see note above) */
} gzalloc_data_t;
| 97 | #endif |
| 98 | |
| 99 | /* |
| 100 | * A zone is a collection of fixed size blocks for which there |
| 101 | * is fast allocation/deallocation access. Kernel routines can |
| 102 | * use zones to manage data structures dynamically, creating a zone |
| 103 | * for each type of data structure to be managed. |
| 104 | * |
| 105 | */ |
| 106 | |
| 107 | struct zone_free_element; |
| 108 | struct zone_page_metadata; |
| 109 | |
struct zone {
#ifdef  CONFIG_ZCACHE
	struct zone_cache *zcache;		/* per-CPU element caches (see Z_CACHING_ENABLED) */
#endif /* CONFIG_ZCACHE */
	struct zone_free_element *free_elements;	/* free elements directly linked */
	struct {
		queue_head_t		any_free_foreign;	/* foreign pages crammed into zone */
		queue_head_t		all_free;	/* NOTE(review): names suggest fully-free, partially-used,
							 * and fully-used page lists — confirm in zalloc.c */
		queue_head_t		intermediate;
		queue_head_t		all_used;
	} pages;		/* list of zone_page_metadata structs, which maintain per-page free element lists */
	int		count;		/* Number of elements used now */
	int		countfree;	/* Number of free elements */
	int		count_all_free_pages;	/* Number of pages collectable by GC */
	lck_attr_t	lock_attr;	/* zone lock attribute */
	decl_lck_mtx_data(,lock)	/* zone lock */
	lck_mtx_ext_t	lock_ext;	/* placeholder for indirect mutex */
	vm_size_t	cur_size;	/* current memory utilization */
	vm_size_t	max_size;	/* how large can this zone grow */
	vm_size_t	elem_size;	/* size of an element */
	vm_size_t	alloc_size;	/* size used for more memory */
	uint64_t	page_count __attribute__((aligned(8)));	/* number of pages used by this zone */
	uint64_t	sum_count;	/* count of allocs (life of zone) */
	uint32_t
	/* boolean_t */	exhaustible        :1,	/* (F) merely return if empty? */
	/* boolean_t */	collectable        :1,	/* (F) garbage collect empty pages */
	/* boolean_t */	expandable         :1,	/* (T) expand zone (with message)? */
	/* boolean_t */	allows_foreign     :1,	/* (F) allow non-zalloc space */
	/* boolean_t */	doing_alloc_without_vm_priv:1,	/* is zone expanding now via a non-vm_privileged thread? */
	/* boolean_t */	doing_alloc_with_vm_priv:1,	/* is zone expanding now via a vm_privileged thread? */
	/* boolean_t */	waiting            :1,	/* is thread waiting for expansion? */
	/* boolean_t */	async_pending      :1,	/* asynchronous allocation pending? */
	/* boolean_t */	zleak_on           :1,	/* Are we collecting allocation information? */
	/* boolean_t */	caller_acct        :1,	/* do we account allocation/free to the caller? */
	/* boolean_t */	noencrypt          :1,	/* don't encrypt zone during hibernation (Z_NOENCRYPT) */
	/* boolean_t */	no_callout         :1,	/* don't asynchronously replenish via callouts (Z_NOCALLOUT) */
	/* boolean_t */	async_prio_refill  :1,	/* refilled asynchronously up to prio_refill_watermark */
	/* boolean_t */	gzalloc_exempt     :1,	/* not tracked in guard allocation mode (Z_GZALLOC_EXEMPT) */
	/* boolean_t */	alignment_required :1,	/* (Z_ALIGNMENT_REQUIRED) */
	/* boolean_t */	zone_logging       :1,	/* Enable zone logging for this zone. */
	/* boolean_t */	zone_replenishing  :1,	/* NOTE(review): presumably set while async refill is in
						 * progress — confirm in zalloc.c */
	/* boolean_t */	kasan_quarantine   :1,	/* elements may be quarantined on free (Z_KASAN_QUARANTINE) */
	/* boolean_t */	tags               :1,	/* store allocation tags (Z_TAGS_ENABLED) */
	/* boolean_t */	tags_inline        :1,
	/* future    */	tag_zone_index     :6,
	/* boolean_t */	zone_valid         :1,	/* NOTE(review): presumably cleared on zdestroy() — confirm */
	/* boolean_t */	cpu_cache_enable_when_ready :1,	/* defer enabling of per-cpu caching (Z_CACHING_ENABLED) */
	/* boolean_t */	cpu_cache_enabled  :1,
	/* future    */	_reserved          :3;

	int		index;		/* index into zone_info arrays for this zone */
	const char	*zone_name;	/* a name for the zone */

#if CONFIG_ZLEAKS
	uint32_t zleak_capture;		/* per-zone counter for capturing every N allocations */
#endif /* CONFIG_ZLEAKS */
	uint32_t	zp_count;	/* counter for poisoning every N frees */
	vm_size_t	prio_refill_watermark;	/* refill target for async_prio_refill zones
						 * (set via zone_prio_refill_configure) */
	thread_t	zone_replenish_thread;	/* see async_prio_refill */
#if CONFIG_GZALLOC
	gzalloc_data_t	gz;		/* guard-mode allocator state */
#endif /* CONFIG_GZALLOC */

#if KASAN_ZALLOC
	vm_size_t	kasan_redzone;	/* NOTE(review): presumably per-element redzone size under
					 * KASAN — confirm against zalloc.c */
#endif

	btlog_t		*zlog_btlog;	/* zone logging structure to hold stacks and element references to those stacks. */
};
| 179 | |
| 180 | /* |
| 181 | * structure for tracking zone usage |
| 182 | * Used either one per task/thread for all zones or <per-task,per-zone>. |
| 183 | */ |
typedef struct zinfo_usage_store_t {
	/* These fields may be updated atomically, and so must be 8 byte aligned
	 * (the aligned(8) attribute guarantees this even on 32-bit targets). */
	uint64_t	alloc __attribute__((aligned(8)));	/* allocation counter */
	uint64_t	free __attribute__((aligned(8)));	/* free counter */
} zinfo_usage_store_t;
| 189 | |
| 190 | /* |
| 191 | * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check if a |
| 192 | * userspace reboot is needed. The only other way to query for this information |
| 193 | * is via mach_memory_info() which is unavailable on release kernels. |
| 194 | */ |
| 195 | extern uint64_t get_zones_collectable_bytes(void); |
| 196 | |
| 197 | /* |
| 198 | * zone_gc also checks if the zone_map is getting close to full and triggers jetsams if needed, provided |
| 199 | * consider_jetsams is set to TRUE. To avoid deadlocks, we only pass a value of TRUE from within the |
| 200 | * vm_pageout_garbage_collect thread. |
| 201 | */ |
| 202 | extern void zone_gc(boolean_t consider_jetsams); |
| 203 | extern void consider_zone_gc(boolean_t consider_jetsams); |
| 204 | extern void drop_free_elements(zone_t z); |
| 205 | |
| 206 | /* Debug logging for zone-map-exhaustion jetsams. */ |
| 207 | extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity); |
| 208 | extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size); |
| 209 | |
| 210 | /* Bootstrap zone module (create zone zone) */ |
| 211 | extern void zone_bootstrap(void); |
| 212 | |
| 213 | /* Init zone module */ |
| 214 | extern void zone_init( |
| 215 | vm_size_t map_size); |
| 216 | |
| 217 | /* Stack use statistics */ |
| 218 | extern void stack_fake_zone_init(int zone_index); |
| 219 | extern void stack_fake_zone_info( |
| 220 | int *count, |
| 221 | vm_size_t *cur_size, |
| 222 | vm_size_t *max_size, |
| 223 | vm_size_t *elem_size, |
| 224 | vm_size_t *alloc_size, |
| 225 | uint64_t *sum_size, |
| 226 | int *collectable, |
| 227 | int *exhaustable, |
| 228 | int *caller_acct); |
| 229 | |
#if	ZONE_DEBUG

/* Enable zone-debug tracking for a single zone (debug builds only). */
extern void		zone_debug_enable(
				zone_t		z);

/* Disable zone-debug tracking previously enabled with zone_debug_enable(). */
extern void		zone_debug_disable(
				zone_t		z);

/*
 * NOTE(review): `active_zones` is not a member of the struct zone visible in
 * this header; this macro presumably relies on a ZONE_DEBUG-only field
 * declared elsewhere — confirm before relying on it.
 */
#define zone_debug_enabled(z) z->active_zones.next
/* Round x up to the next multiple of y.  Both arguments are evaluated
 * more than once — pass expressions without side effects. */
#define	ROUNDUP(x,y)		((((x)+(y)-1)/(y))*(y))
/* sizeof(queue_chain_t) rounded up to a 16-byte boundary. */
#define ZONE_DEBUG_OFFSET	ROUNDUP(sizeof(queue_chain_t),16)
#endif	/* ZONE_DEBUG */
| 242 | |
| 243 | extern unsigned int num_zones; |
| 244 | extern struct zone zone_array[]; |
| 245 | |
| 246 | /* zindex and page_count must pack into 16 bits |
| 247 | * update tools/lldbmacros/memory.py:GetRealMetadata |
| 248 | * when these values change */ |
| 249 | |
| 250 | #define ZINDEX_BITS (10U) |
| 251 | #define PAGECOUNT_BITS (16U - ZINDEX_BITS) |
| 252 | #define MULTIPAGE_METADATA_MAGIC ((1UL << ZINDEX_BITS) - 1) |
| 253 | #define ZONE_CHUNK_MAXPAGES ((1UL << PAGECOUNT_BITS) - 1) |
| 254 | |
| 255 | /* |
| 256 | * The max # of elements in a chunk should fit into zone_page_metadata.free_count (uint16_t). |
| 257 | * Update this if the type of free_count changes. |
| 258 | */ |
| 259 | #define ZONE_CHUNK_MAXELEMENTS (UINT16_MAX) |
| 260 | |
| 261 | #endif /* MACH_KERNEL_PRIVATE */ |
| 262 | |
| 263 | __BEGIN_DECLS |
| 264 | |
| 265 | |
| 266 | /* Item definitions for zalloc/zinit/zone_change */ |
| 267 | #define Z_EXHAUST 1 /* Make zone exhaustible */ |
| 268 | #define Z_COLLECT 2 /* Make zone collectable */ |
| 269 | #define Z_EXPAND 3 /* Make zone expandable */ |
| 270 | #define Z_FOREIGN 4 /* Allow collectable zone to contain foreign elements */ |
| 271 | #define Z_CALLERACCT 5 /* Account alloc/free against the caller */ |
| 272 | #define Z_NOENCRYPT 6 /* Don't encrypt zone during hibernation */ |
| 273 | #define Z_NOCALLOUT 7 /* Don't asynchronously replenish the zone via callouts */ |
| 274 | #define Z_ALIGNMENT_REQUIRED 8 |
| 275 | #define Z_GZALLOC_EXEMPT 9 /* Not tracked in guard allocation mode */ |
| 276 | #define Z_KASAN_QUARANTINE 10 /* Allow zone elements to be quarantined on free */ |
| 277 | #ifdef XNU_KERNEL_PRIVATE |
| 278 | #define Z_TAGS_ENABLED 11 /* Store tags */ |
| 279 | #endif /* XNU_KERNEL_PRIVATE */ |
#define Z_CACHING_ENABLED	12	/* Enable and initialize per-cpu caches for the zone */
| 281 | |
| 282 | #ifdef XNU_KERNEL_PRIVATE |
| 283 | |
| 284 | extern vm_offset_t zone_map_min_address; |
| 285 | extern vm_offset_t zone_map_max_address; |
| 286 | |
/* Free an element with no regard for gzalloc, zleaks, or kasan. */
| 288 | extern void zfree_direct( zone_t zone, |
| 289 | vm_offset_t elem); |
| 290 | |
/* Attempts to allocate an element with no regard for gzalloc, zleaks, or kasan. */
| 292 | extern void * zalloc_attempt( zone_t zone); |
| 293 | |
| 294 | /* Non-waiting for memory version of zalloc */ |
| 295 | extern void * zalloc_nopagewait( |
| 296 | zone_t zone); |
| 297 | |
| 298 | /* selective version of zalloc */ |
| 299 | extern void * zalloc_canblock( |
| 300 | zone_t zone, |
| 301 | boolean_t canblock); |
| 302 | |
| 303 | /* selective version of zalloc */ |
| 304 | extern void * zalloc_canblock_tag( |
| 305 | zone_t zone, |
| 306 | boolean_t canblock, |
| 307 | vm_size_t reqsize, |
| 308 | vm_tag_t tag); |
| 309 | |
| 310 | /* Get from zone free list */ |
| 311 | extern void * zget( |
| 312 | zone_t zone); |
| 313 | |
| 314 | /* Fill zone with memory */ |
| 315 | extern void zcram( |
| 316 | zone_t zone, |
| 317 | vm_offset_t newmem, |
| 318 | vm_size_t size); |
| 319 | |
| 320 | /* Initially fill zone with specified number of elements */ |
| 321 | extern int zfill( |
| 322 | zone_t zone, |
| 323 | int nelem); |
| 324 | |
| 325 | extern void zone_prio_refill_configure(zone_t, vm_size_t); |
| 326 | |
| 327 | /* See above/top of file. Z_* definitions moved so they would be usable by kexts */ |
| 328 | |
| 329 | /* Preallocate space for zone from zone map */ |
| 330 | extern void zprealloc( |
| 331 | zone_t zone, |
| 332 | vm_size_t size); |
| 333 | |
| 334 | extern integer_t zone_free_count( |
| 335 | zone_t zone); |
| 336 | |
| 337 | extern vm_size_t zone_element_size( |
| 338 | void *addr, |
| 339 | zone_t *z); |
| 340 | |
| 341 | /* |
| 342 | * Structure for keeping track of a backtrace, used for leak detection. |
| 343 | * This is in the .h file because it is used during panic, see kern/debug.c |
| 344 | * A non-zero size indicates that the trace is in use. |
| 345 | */ |
struct ztrace {
	vm_size_t		zt_size;			/* How much memory are all the allocations referring to this trace taking up? */
	uint32_t		zt_depth;			/* depth of stack (0 to MAX_ZTRACE_DEPTH) */
	void*			zt_stack[MAX_ZTRACE_DEPTH];	/* series of return addresses from OSBacktrace
								 * (MAX_ZTRACE_DEPTH is defined elsewhere) */
	uint32_t		zt_collisions;			/* How many times did a different stack land here while it was occupied? */
	uint32_t		zt_hit_count;			/* for determining effectiveness of hash function */
};
| 353 | |
| 354 | #if CONFIG_ZLEAKS |
| 355 | |
| 356 | /* support for the kern.zleak.* sysctls */ |
| 357 | |
| 358 | extern kern_return_t zleak_activate(void); |
| 359 | extern vm_size_t zleak_max_zonemap_size; |
| 360 | extern vm_size_t zleak_global_tracking_threshold; |
| 361 | extern vm_size_t zleak_per_zone_tracking_threshold; |
| 362 | |
| 363 | extern int get_zleak_state(void); |
| 364 | |
| 365 | #endif /* CONFIG_ZLEAKS */ |
| 366 | |
| 367 | #ifndef VM_MAX_TAG_ZONES |
| 368 | #error MAX_TAG_ZONES |
| 369 | #endif |
| 370 | |
| 371 | #if VM_MAX_TAG_ZONES |
| 372 | |
| 373 | extern boolean_t zone_tagging_on; |
| 374 | extern uint32_t zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size); |
| 375 | |
| 376 | #endif /* VM_MAX_TAG_ZONES */ |
| 377 | |
| 378 | /* These functions used for leak detection both in zalloc.c and mbuf.c */ |
| 379 | extern uintptr_t hash_mix(uintptr_t); |
| 380 | extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t); |
| 381 | extern uint32_t hashaddr(uintptr_t, uint32_t); |
| 382 | |
/*
 * Acquire a zone's mutex.  Uses the spin-mode acquire
 * (lck_mtx_lock_spin_always); the matching release below uses the plain
 * lck_mtx_unlock.  The `zone` argument is evaluated exactly once.
 */
#define lock_zone(zone)					\
MACRO_BEGIN						\
	lck_mtx_lock_spin_always(&(zone)->lock);	\
MACRO_END

/* Release a zone's mutex taken with lock_zone(). */
#define unlock_zone(zone)				\
MACRO_BEGIN						\
	lck_mtx_unlock(&(zone)->lock);			\
MACRO_END
| 392 | |
| 393 | #if CONFIG_GZALLOC |
| 394 | void gzalloc_init(vm_size_t); |
| 395 | void gzalloc_zone_init(zone_t); |
| 396 | void gzalloc_configure(void); |
| 397 | void gzalloc_reconfigure(zone_t); |
| 398 | void gzalloc_empty_free_cache(zone_t); |
| 399 | boolean_t gzalloc_enabled(void); |
| 400 | |
| 401 | vm_offset_t gzalloc_alloc(zone_t, boolean_t); |
| 402 | boolean_t gzalloc_free(zone_t, void *); |
| 403 | boolean_t gzalloc_element_size(void *, zone_t *, vm_size_t *); |
| 404 | #endif /* CONFIG_GZALLOC */ |
| 405 | |
| 406 | /* Callbacks for btlog lock/unlock */ |
| 407 | void zlog_btlog_lock(__unused void *); |
| 408 | void zlog_btlog_unlock(__unused void *); |
| 409 | |
| 410 | #ifdef MACH_KERNEL_PRIVATE |
| 411 | #define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */ |
| 412 | int track_this_zone(const char *zonename, const char *logname); |
| 413 | #endif |
| 414 | |
| 415 | #if DEBUG || DEVELOPMENT |
| 416 | extern boolean_t run_zone_test(void); |
| 417 | extern vm_size_t zone_element_info(void *addr, vm_tag_t * ptag); |
| 418 | #endif /* DEBUG || DEVELOPMENT */ |
| 419 | |
| 420 | #endif /* XNU_KERNEL_PRIVATE */ |
| 421 | |
| 422 | /* Allocate from zone */ |
| 423 | extern void * zalloc( |
| 424 | zone_t zone); |
| 425 | |
| 426 | /* Non-blocking version of zalloc */ |
| 427 | extern void * zalloc_noblock( |
| 428 | zone_t zone); |
| 429 | |
| 430 | /* Free zone element */ |
| 431 | extern void zfree( |
| 432 | zone_t zone, |
| 433 | void *elem); |
| 434 | |
| 435 | /* Create zone */ |
| 436 | extern zone_t zinit( |
| 437 | vm_size_t size, /* the size of an element */ |
| 438 | vm_size_t maxmem, /* maximum memory to use */ |
| 439 | vm_size_t alloc, /* allocation size */ |
| 440 | const char *name); /* a name for the zone */ |
| 441 | |
| 442 | /* Change zone parameters */ |
| 443 | extern void zone_change( |
| 444 | zone_t zone, |
| 445 | unsigned int item, |
| 446 | boolean_t value); |
| 447 | |
| 448 | /* Destroy the zone */ |
| 449 | extern void zdestroy( |
| 450 | zone_t zone); |
| 451 | |
| 452 | __END_DECLS |
| 453 | |
| 454 | #endif /* _KERN_ZALLOC_H_ */ |
| 455 | |
| 456 | #endif /* KERNEL_PRIVATE */ |
| 457 | |