/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Copyright (c) 1994 The University of Utah and
 * the Computer Systems Laboratory at the University of Utah (CSL).
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the
 * Computer Systems Laboratory at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

/*
 *	File:	vm_param.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	I386 machine-dependent virtual memory parameters.
 *	Most of the declarations are preceded by I386_ (or i386_),
 *	which is OK because only i386-specific code will be using
 *	them.
 */

#ifndef _MACH_I386_VM_PARAM_H_
#define _MACH_I386_VM_PARAM_H_

#define BYTE_SIZE		8	/* byte size in bits */

#define I386_PGBYTES		4096	/* bytes per 80386 page */
#define I386_PGSHIFT		12	/* bitshift for pages */

#define PAGE_SIZE		I386_PGBYTES
#define PAGE_SHIFT		I386_PGSHIFT
#define PAGE_MASK		(PAGE_SIZE - 1)

#define PAGE_MAX_SHIFT		PAGE_SHIFT
#define PAGE_MAX_SIZE		PAGE_SIZE
#define PAGE_MAX_MASK		PAGE_MASK

#define PAGE_MIN_SHIFT		PAGE_SHIFT
#define PAGE_MIN_SIZE		PAGE_SIZE
#define PAGE_MIN_MASK		PAGE_MASK

#define I386_LPGBYTES		(2*1024*1024)	/* bytes per large page */
#define I386_LPGSHIFT		21		/* bitshift for large pages */
#define I386_LPGMASK		(I386_LPGBYTES-1)
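
/*
 * Illustrative sketch (not part of this interface): I386_LPGMASK can be
 * used to align a physical address to a 2 MB large-page boundary, e.g.:
 *
 *	pmap_paddr_t pa   = 0x345678;
 *	pmap_paddr_t down = pa & ~((pmap_paddr_t)I386_LPGMASK);                  // 0x200000
 *	pmap_paddr_t up   = (pa + I386_LPGMASK) & ~((pmap_paddr_t)I386_LPGMASK); // 0x400000
 */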

/*
 *	Convert bytes to pages and convert pages to bytes.
 *	No rounding is used.
 */

#define i386_btop(x)		((ppnum_t)((x) >> I386_PGSHIFT))
#define machine_btop(x)		i386_btop(x)
#define i386_ptob(x)		(((pmap_paddr_t)(x)) << I386_PGSHIFT)
#define machine_ptob(x)		i386_ptob(x)
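
/*
 * Illustrative sketch (not part of this interface): since no rounding is
 * applied, any partial page is simply truncated by i386_btop():
 *
 *	ppnum_t      pn = i386_btop(0x12345);	// 0x12  (offset 0x345 dropped)
 *	pmap_paddr_t pa = i386_ptob(pn);	// 0x12000
 */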

/*
 *	Round off or truncate to the nearest page.  These will work
 *	for either addresses or counts (i.e. 1 byte rounds up to
 *	1 page of bytes).
 */

#define i386_round_page(x)	((((pmap_paddr_t)(x)) + I386_PGBYTES - 1) & \
				 ~(I386_PGBYTES-1))
#define i386_trunc_page(x)	(((pmap_paddr_t)(x)) & ~(I386_PGBYTES-1))
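
/*
 * Illustrative sketch (not part of this interface):
 *
 *	i386_round_page(1)      == 0x1000	// 1 byte rounds up to a full page
 *	i386_round_page(0x2000) == 0x2000	// already page-aligned
 *	i386_trunc_page(0x1FFF) == 0x1000
 */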


#define VM_MIN_ADDRESS64	((user_addr_t) 0x0000000000000000ULL)
/*
 * default top of user stack... it grows down from here
 */
#define VM_USRSTACK64		((user_addr_t) 0x00007FFEEFC00000ULL)

/*
 * XXX TODO: Obsolete?
 */
#define VM_DYLD64		((user_addr_t) 0x00007FFF5FC00000ULL)
#define VM_LIB64_SHR_DATA	((user_addr_t) 0x00007FFF60000000ULL)
#define VM_LIB64_SHR_TEXT	((user_addr_t) 0x00007FFF80000000ULL)
/*
 * The end of the usable user address space, currently about 47 bits.
 * The 64-bit commpage lies past the end of this.
 */
#define VM_MAX_PAGE_ADDRESS	((user_addr_t) 0x00007FFFFFE00000ULL)
/*
 * canonical end of user address space for limits checking
 */
#define VM_MAX_USER_PAGE_ADDRESS ((user_addr_t)0x00007FFFFFFFF000ULL)
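
/*
 * Illustrative sketch (not part of this interface): a caller might bound a
 * 64-bit user range against these limits roughly as follows (hypothetical
 * check, not the kernel's actual validation path):
 *
 *	if (uaddr >= VM_MIN_ADDRESS64 &&
 *	    uaddr + len <= VM_MAX_PAGE_ADDRESS) {
 *		// range lies within the mappable 64-bit user space
 *	}
 */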


/* system-wide values */
#define MACH_VM_MIN_ADDRESS	((mach_vm_offset_t) 0)
#define MACH_VM_MAX_ADDRESS	((mach_vm_offset_t) VM_MAX_PAGE_ADDRESS)

/* process-relative values (all 32-bit legacy only for now) */
#define VM_MIN_ADDRESS		((vm_offset_t) 0)
#define VM_USRSTACK32		((vm_offset_t) 0xC0000000)	/* ASLR slides stack down by up to 1 MB */
#define VM_MAX_ADDRESS		((vm_offset_t) 0xFFE00000)
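
/*
 * Illustrative sketch (not part of this interface): with a maximum ASLR
 * slide of 1 MB, the 32-bit user stack top falls somewhere in the range
 *
 *	[VM_USRSTACK32 - 0x100000, VM_USRSTACK32]  ==  [0xBFF00000, 0xC0000000]
 */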


#ifdef	KERNEL_PRIVATE

#define TEST_PAGE_SIZE_16K	FALSE
#define TEST_PAGE_SIZE_4K	TRUE

/* Kernel-wide values */

#define KB	(1024ULL)
#define MB	(1024*KB)
#define GB	(1024*MB)

/*
 * Maximum physical memory supported.
 */
#define K32_MAXMEM	(32*GB)
#define K64_MAXMEM	(252*GB)
#define KERNEL_MAXMEM	K64_MAXMEM

/*
 * XXX
 * The kernel max VM address is limited to 0xFF3FFFFF for now because
 * some data structures are explicitly allocated at 0xFF400000 without
 * VM's knowledge (see osfmk/i386/locore.s for the allocation of PTmap and co.).
 * We can't let VM allocate memory from there.
 */


#define KERNEL_IMAGE_TO_PHYS(x)	(x)
#define VM_KERNEL_POINTER_SIGNIFICANT_BITS	39
#define VM_MIN_KERNEL_ADDRESS		((vm_offset_t) 0xFFFFFF8000000000UL)
#define VM_MIN_KERNEL_PAGE		((ppnum_t)0)
#define VM_MIN_KERNEL_AND_KEXT_ADDRESS	(VM_MIN_KERNEL_ADDRESS - 0x80000000ULL)
#define VM_MAX_KERNEL_ADDRESS		((vm_offset_t) 0xFFFFFFFFFFFFEFFFUL)
#define VM_MAX_KERNEL_ADDRESS_EFI32	((vm_offset_t) 0xFFFFFF80FFFFEFFFUL)
#define KEXT_ALLOC_MAX_OFFSET		(2 * 1024 * 1024 * 1024UL)
#define KEXT_ALLOC_BASE(x)		((x) - KEXT_ALLOC_MAX_OFFSET)
#define KEXT_ALLOC_SIZE(x)		(KEXT_ALLOC_MAX_OFFSET - (x))
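
/*
 * Illustrative sketch (not part of this interface): kexts live in the 2 GB
 * region directly below the kernel base, so, for example,
 *
 *	KEXT_ALLOC_BASE(VM_MIN_KERNEL_ADDRESS)
 *	    == 0xFFFFFF8000000000 - 0x80000000
 *	    == 0xFFFFFF7F80000000
 *	    == VM_MIN_KERNEL_AND_KEXT_ADDRESS
 */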

#define VM_KERNEL_STRIP_PTR(_v)	(_v)

#define VM_KERNEL_ADDRESS(va)	((((vm_address_t)(va))>=VM_MIN_KERNEL_AND_KEXT_ADDRESS) && \
				 (((vm_address_t)(va))<=VM_MAX_KERNEL_ADDRESS))

#define VM_MAP_MIN_ADDRESS	MACH_VM_MIN_ADDRESS
#define VM_MAP_MAX_ADDRESS	MACH_VM_MAX_ADDRESS

/* FIXME - always leave like this? */
#if KASAN
/* Increase the stack sizes to account for the redzones that get added to every
 * stack object. */
# define INTSTACK_SIZE		(I386_PGBYTES*4*4)
# define KERNEL_STACK_SIZE	(I386_PGBYTES*4*4)
#elif DEBUG
# define INTSTACK_SIZE		(I386_PGBYTES*4)
# define KERNEL_STACK_SIZE	(I386_PGBYTES*6)
#else
# define INTSTACK_SIZE		(I386_PGBYTES*4)
# define KERNEL_STACK_SIZE	(I386_PGBYTES*4)
#endif
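
/*
 * For reference, with 4 KB pages the above work out to (illustrative
 * arithmetic, not part of this interface):
 *
 *	KASAN:    INTSTACK_SIZE == KERNEL_STACK_SIZE == 64 KB
 *	DEBUG:    INTSTACK_SIZE == 16 KB, KERNEL_STACK_SIZE == 24 KB
 *	default:  INTSTACK_SIZE == KERNEL_STACK_SIZE == 16 KB
 */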

#ifdef	MACH_KERNEL_PRIVATE

/* For implementing legacy 32-bit interfaces */
#define VM32_SUPPORT	1
#define VM32_MIN_ADDRESS	((vm32_offset_t) 0)
#define VM32_MAX_ADDRESS	((vm32_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF))

/*
 * kalloc() parameters:
 *
 * Historically kalloc's underlying zones were power-of-2 sizes, with a
 * KALLOC_MINSIZE of 16 bytes.  The allocator ensured that
 * (sizeof == alignof) >= 16 for all kalloc allocations.
 *
 * Today kalloc may use zones with intermediate sizes, constrained by
 * KALLOC_MINSIZE and a minimum alignment, expressed by KALLOC_LOG2_MINALIGN.
 *
 * The common alignment for LP64 is that of longs and pointers, i.e. 8 bytes.
 */


#define KALLOC_MINSIZE		16	/* minimum allocation size */
#define KALLOC_LOG2_MINALIGN	4	/* log2 minimum alignment */
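
/*
 * Illustrative check (not part of this interface): the two values agree,
 * since the smallest allocation size matches the minimum alignment,
 *
 *	(1 << KALLOC_LOG2_MINALIGN) == 16 == KALLOC_MINSIZE
 */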

#define LINEAR_KERNEL_ADDRESS	((vm_offset_t) 0x00000000)

#define VM_MIN_KERNEL_LOADED_ADDRESS	((vm_offset_t) 0xFFFFFF8000000000UL)
#define VM_MAX_KERNEL_LOADED_ADDRESS	((vm_offset_t) 0xFFFFFF801FFFFFFFUL)

#define NCOPY_WINDOWS	0


/*
 *	Conversion between 80386 pages and VM pages
 */

#define trunc_i386_to_vm(p)	(atop(trunc_page(i386_ptob(p))))
#define round_i386_to_vm(p)	(atop(round_page(i386_ptob(p))))
#define vm_to_i386(p)		(i386_btop(ptoa(p)))
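
/*
 * Illustrative sketch (not part of this interface): PAGE_SHIFT is defined
 * above as I386_PGSHIFT, so, assuming the generic atop()/ptoa() shift by
 * PAGE_SHIFT, these conversions reduce to identities, e.g.:
 *
 *	trunc_i386_to_vm(5) == atop(trunc_page(0x5000)) == 5
 */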


#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)		\
	MACRO_BEGIN								\
		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), (cache_attr)); \
		(object)->set_cache_attr = TRUE;				\
		(void) batch_pmap_op;						\
	MACRO_END

#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op)\
	MACRO_BEGIN								\
		(void) user_page_list;						\
		(void) num_pages;						\
		(void) batch_pmap_op;						\
	MACRO_END

#define IS_USERADDR64_CANONICAL(addr)			\
	((addr) < (VM_MAX_USER_PAGE_ADDRESS))

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* KERNEL_PRIVATE */

#endif	/* _MACH_I386_VM_PARAM_H_ */