1 | /* |
 * Copyright (c) 2013 Apple Inc. All rights reserved.
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | |
57 | /* |
58 | * Compressor Pager. |
59 | * Memory Object Management. |
60 | */ |
61 | |
62 | #include <kern/host_statistics.h> |
63 | #include <kern/kalloc.h> |
64 | #include <kern/ipc_kobject.h> |
65 | |
66 | #include <mach/memory_object_control.h> |
67 | #include <mach/memory_object_types.h> |
68 | #include <mach/upl.h> |
69 | |
70 | #include <vm/memory_object.h> |
71 | #include <vm/vm_compressor_pager.h> |
72 | #include <vm/vm_external.h> |
73 | #include <vm/vm_pageout.h> |
74 | #include <vm/vm_protos.h> |
75 | |
76 | /* memory_object interfaces */ |
77 | void compressor_memory_object_reference(memory_object_t mem_obj); |
78 | void compressor_memory_object_deallocate(memory_object_t mem_obj); |
79 | kern_return_t compressor_memory_object_init( |
80 | memory_object_t mem_obj, |
81 | memory_object_control_t control, |
memory_object_cluster_size_t pager_page_size);
83 | kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj); |
84 | kern_return_t compressor_memory_object_data_request( |
85 | memory_object_t mem_obj, |
86 | memory_object_offset_t offset, |
87 | memory_object_cluster_size_t length, |
88 | __unused vm_prot_t protection_required, |
89 | memory_object_fault_info_t fault_info); |
90 | kern_return_t compressor_memory_object_data_return( |
91 | memory_object_t mem_obj, |
92 | memory_object_offset_t offset, |
93 | memory_object_cluster_size_t size, |
94 | __unused memory_object_offset_t *resid_offset, |
95 | __unused int *io_error, |
96 | __unused boolean_t dirty, |
97 | __unused boolean_t kernel_copy, |
98 | __unused int upl_flags); |
99 | kern_return_t compressor_memory_object_data_initialize( |
100 | memory_object_t mem_obj, |
101 | memory_object_offset_t offset, |
102 | memory_object_cluster_size_t size); |
103 | kern_return_t compressor_memory_object_data_unlock( |
104 | __unused memory_object_t mem_obj, |
105 | __unused memory_object_offset_t offset, |
106 | __unused memory_object_size_t size, |
107 | __unused vm_prot_t desired_access); |
108 | kern_return_t compressor_memory_object_synchronize( |
109 | memory_object_t mem_obj, |
110 | memory_object_offset_t offset, |
111 | memory_object_size_t length, |
112 | __unused vm_sync_t flags); |
113 | kern_return_t compressor_memory_object_map( |
114 | __unused memory_object_t mem_obj, |
115 | __unused vm_prot_t prot); |
116 | kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj); |
117 | kern_return_t compressor_memory_object_data_reclaim( |
118 | __unused memory_object_t mem_obj, |
119 | __unused boolean_t reclaim_backing_store); |
120 | |
const struct memory_object_pager_ops compressor_pager_ops = {
122 | compressor_memory_object_reference, |
123 | compressor_memory_object_deallocate, |
124 | compressor_memory_object_init, |
125 | compressor_memory_object_terminate, |
126 | compressor_memory_object_data_request, |
127 | compressor_memory_object_data_return, |
128 | compressor_memory_object_data_initialize, |
129 | compressor_memory_object_data_unlock, |
130 | compressor_memory_object_synchronize, |
131 | compressor_memory_object_map, |
132 | compressor_memory_object_last_unmap, |
133 | compressor_memory_object_data_reclaim, |
134 | "compressor pager" |
135 | }; |
136 | |
137 | /* internal data structures */ |
138 | |
struct compressor_pager_stats {
	uint64_t data_returns;
	uint64_t data_requests;
	uint64_t put;
	uint64_t get;
	uint64_t state_clr;
	uint64_t state_get;
	uint64_t transfer;
} compressor_pager_stats;
148 | |
149 | typedef int compressor_slot_t; |
150 | |
typedef struct compressor_pager {
	/* mandatory generic header */
	struct memory_object cpgr_hdr;

	/* pager-specific data */
	lck_mtx_t cpgr_lock;
	unsigned int cpgr_references;
	unsigned int cpgr_num_slots;
	unsigned int cpgr_num_slots_occupied;
	union {
		compressor_slot_t cpgr_eslots[2]; /* embedded slots */
		compressor_slot_t *cpgr_dslots;   /* direct slots */
		compressor_slot_t **cpgr_islots;  /* indirect slots */
	} cpgr_slots;
} *compressor_pager_t;
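
/*
 * Slot storage scheme: the pager tracks one compressor_slot_t per page of
 * the memory object.  Objects of at most 2 pages keep their slots embedded
 * in the pager itself (cpgr_eslots); objects that fit in a single chunk use
 * one directly allocated slot array (cpgr_dslots); larger objects use an
 * array of pointers to fixed-size chunks (cpgr_islots), with each chunk
 * allocated lazily the first time one of its pages is compressed.
 */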
166 | |
#define compressor_pager_lookup(_mem_obj_, _cpgr_) \
168 | MACRO_BEGIN \ |
169 | if (_mem_obj_ == NULL || \ |
170 | _mem_obj_->mo_pager_ops != &compressor_pager_ops) { \ |
171 | _cpgr_ = NULL; \ |
172 | } else { \ |
173 | _cpgr_ = (compressor_pager_t) _mem_obj_; \ |
174 | } \ |
175 | MACRO_END |
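
/*
 * Illustrative use of the lookup macro (a sketch of the pattern followed by
 * the callers below, not an additional interface):
 *
 *	compressor_pager_t pager;
 *
 *	compressor_pager_lookup(mem_obj, pager);
 *	if (pager == NULL) {
 *		return;	 -- mem_obj is not backed by the compressor pager
 *	}
 */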
176 | |
zone_t compressor_pager_zone;

lck_grp_t compressor_pager_lck_grp;
lck_grp_attr_t compressor_pager_lck_grp_attr;
lck_attr_t compressor_pager_lck_attr;
182 | |
#define compressor_pager_lock(_cpgr_) \
	lck_mtx_lock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_unlock(_cpgr_) \
	lck_mtx_unlock(&(_cpgr_)->cpgr_lock)
#define compressor_pager_lock_init(_cpgr_) \
	lck_mtx_init(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp, &compressor_pager_lck_attr)
#define compressor_pager_lock_destroy(_cpgr_) \
	lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp)
191 | |
192 | #define COMPRESSOR_SLOTS_CHUNK_SIZE (512) |
193 | #define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t)) |
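
/*
 * With 512-byte chunks and 4-byte slots, COMPRESSOR_SLOTS_PER_CHUNK is 128,
 * so one chunk of slots covers 128 compressed pages (512 KB of the object
 * with 4 KB pages; the exact span scales with PAGE_SIZE).
 */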
194 | |
195 | /* forward declarations */ |
196 | unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk, |
197 | int num_slots, |
198 | int flags, |
199 | int *failures); |
200 | void compressor_pager_slot_lookup( |
compressor_pager_t pager,
202 | boolean_t do_alloc, |
203 | memory_object_offset_t offset, |
204 | compressor_slot_t **slot_pp); |
205 | |
206 | kern_return_t |
207 | compressor_memory_object_init( |
208 | memory_object_t mem_obj, |
209 | memory_object_control_t control, |
__unused memory_object_cluster_size_t pager_page_size)
211 | { |
compressor_pager_t pager;
213 | |
214 | assert(pager_page_size == PAGE_SIZE); |
215 | |
216 | memory_object_control_reference(control); |
217 | |
218 | compressor_pager_lookup(mem_obj, pager); |
219 | compressor_pager_lock(pager); |
220 | |
221 | if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) |
panic("compressor_memory_object_init: bad request");
223 | pager->cpgr_hdr.mo_control = control; |
224 | |
225 | compressor_pager_unlock(pager); |
226 | |
227 | return KERN_SUCCESS; |
228 | } |
229 | |
230 | kern_return_t |
231 | compressor_memory_object_synchronize( |
232 | __unused memory_object_t mem_obj, |
233 | __unused memory_object_offset_t offset, |
234 | __unused memory_object_size_t length, |
235 | __unused vm_sync_t flags) |
236 | { |
panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n");
238 | return KERN_FAILURE; |
239 | } |
240 | |
241 | kern_return_t |
242 | compressor_memory_object_map( |
243 | __unused memory_object_t mem_obj, |
244 | __unused vm_prot_t prot) |
245 | { |
panic("compressor_memory_object_map");
247 | return KERN_FAILURE; |
248 | } |
249 | |
250 | kern_return_t |
251 | compressor_memory_object_last_unmap( |
252 | __unused memory_object_t mem_obj) |
253 | { |
panic("compressor_memory_object_last_unmap");
255 | return KERN_FAILURE; |
256 | } |
257 | |
258 | kern_return_t |
259 | compressor_memory_object_data_reclaim( |
260 | __unused memory_object_t mem_obj, |
261 | __unused boolean_t reclaim_backing_store) |
262 | { |
panic("compressor_memory_object_data_reclaim");
264 | return KERN_FAILURE; |
265 | } |
266 | |
267 | kern_return_t |
268 | compressor_memory_object_terminate( |
269 | memory_object_t mem_obj) |
270 | { |
271 | memory_object_control_t control; |
compressor_pager_t pager;
273 | |
274 | /* |
275 | * control port is a receive right, not a send right. |
276 | */ |
277 | |
278 | compressor_pager_lookup(mem_obj, pager); |
279 | compressor_pager_lock(pager); |
280 | |
281 | /* |
282 | * After memory_object_terminate both memory_object_init |
283 | * and a no-senders notification are possible, so we need |
284 | * to clean up our reference to the memory_object_control |
285 | * to prepare for a new init. |
286 | */ |
287 | |
288 | control = pager->cpgr_hdr.mo_control; |
289 | pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
290 | |
291 | compressor_pager_unlock(pager); |
292 | |
293 | /* |
294 | * Now we deallocate our reference on the control. |
295 | */ |
296 | memory_object_control_deallocate(control); |
297 | return KERN_SUCCESS; |
298 | } |
299 | |
300 | void |
301 | compressor_memory_object_reference( |
302 | memory_object_t mem_obj) |
303 | { |
compressor_pager_t pager;
305 | |
306 | compressor_pager_lookup(mem_obj, pager); |
307 | if (pager == NULL) |
308 | return; |
309 | |
310 | compressor_pager_lock(pager); |
311 | assert(pager->cpgr_references > 0); |
312 | pager->cpgr_references++; |
313 | compressor_pager_unlock(pager); |
314 | } |
315 | |
316 | void |
317 | compressor_memory_object_deallocate( |
318 | memory_object_t mem_obj) |
319 | { |
compressor_pager_t pager;
321 | unsigned int num_slots_freed; |
322 | |
323 | /* |
324 | * Because we don't give out multiple first references |
325 | * for a memory object, there can't be a race |
326 | * between getting a deallocate call and creating |
327 | * a new reference for the object. |
328 | */ |
329 | |
330 | compressor_pager_lookup(mem_obj, pager); |
331 | if (pager == NULL) |
332 | return; |
333 | |
334 | compressor_pager_lock(pager); |
335 | if (--pager->cpgr_references > 0) { |
336 | compressor_pager_unlock(pager); |
337 | return; |
338 | } |
339 | |
340 | /* |
341 | * We shouldn't get a deallocation call |
342 | * when the kernel has the object cached. |
343 | */ |
344 | if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) |
panic("compressor_memory_object_deallocate(): bad request");
346 | |
347 | /* |
348 | * Unlock the pager (though there should be no one |
349 | * waiting for it). |
350 | */ |
351 | compressor_pager_unlock(pager); |
352 | |
353 | /* free the compressor slots */ |
354 | int num_chunks; |
355 | int i; |
356 | compressor_slot_t *chunk; |
357 | |
358 | num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK -1) / COMPRESSOR_SLOTS_PER_CHUNK; |
359 | if (num_chunks > 1) { |
360 | /* we have an array of chunks */ |
361 | for (i = 0; i < num_chunks; i++) { |
362 | chunk = pager->cpgr_slots.cpgr_islots[i]; |
363 | if (chunk != NULL) { |
364 | num_slots_freed = |
365 | compressor_pager_slots_chunk_free( |
366 | chunk, |
367 | COMPRESSOR_SLOTS_PER_CHUNK, |
368 | 0, |
369 | NULL); |
370 | pager->cpgr_slots.cpgr_islots[i] = NULL; |
371 | kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); |
372 | } |
373 | } |
374 | kfree(pager->cpgr_slots.cpgr_islots, |
375 | num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0])); |
376 | pager->cpgr_slots.cpgr_islots = NULL; |
377 | } else if (pager->cpgr_num_slots > 2) { |
378 | chunk = pager->cpgr_slots.cpgr_dslots; |
379 | num_slots_freed = |
380 | compressor_pager_slots_chunk_free( |
381 | chunk, |
382 | pager->cpgr_num_slots, |
383 | 0, |
384 | NULL); |
385 | pager->cpgr_slots.cpgr_dslots = NULL; |
386 | kfree(chunk, |
387 | (pager->cpgr_num_slots * |
388 | sizeof (pager->cpgr_slots.cpgr_dslots[0]))); |
389 | } else { |
390 | chunk = &pager->cpgr_slots.cpgr_eslots[0]; |
391 | num_slots_freed = |
392 | compressor_pager_slots_chunk_free( |
393 | chunk, |
394 | pager->cpgr_num_slots, |
395 | 0, |
396 | NULL); |
397 | } |
398 | |
399 | compressor_pager_lock_destroy(pager); |
400 | zfree(compressor_pager_zone, pager); |
401 | } |
402 | |
403 | kern_return_t |
404 | compressor_memory_object_data_request( |
405 | memory_object_t mem_obj, |
406 | memory_object_offset_t offset, |
407 | memory_object_cluster_size_t length, |
408 | __unused vm_prot_t protection_required, |
409 | __unused memory_object_fault_info_t fault_info) |
410 | { |
compressor_pager_t pager;
412 | kern_return_t kr; |
413 | compressor_slot_t *slot_p; |
414 | |
415 | compressor_pager_stats.data_requests++; |
416 | |
417 | /* |
418 | * Request must be on a page boundary and a multiple of pages. |
419 | */ |
420 | if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) |
panic("compressor_memory_object_data_request(): bad alignment");
422 | |
423 | if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { |
424 | panic("%s: offset 0x%llx overflow\n" , |
425 | __FUNCTION__, (uint64_t) offset); |
426 | return KERN_FAILURE; |
427 | } |
428 | |
429 | compressor_pager_lookup(mem_obj, pager); |
430 | |
431 | if (length == 0) { |
432 | /* we're only querying the pager for this page */ |
433 | } else { |
panic("compressor: data_request");
435 | } |
436 | |
437 | /* find the compressor slot for that page */ |
438 | compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p); |
439 | |
440 | if (offset / PAGE_SIZE >= pager->cpgr_num_slots) { |
441 | /* out of range */ |
442 | kr = KERN_FAILURE; |
443 | } else if (slot_p == NULL || *slot_p == 0) { |
444 | /* compressor does not have this page */ |
445 | kr = KERN_FAILURE; |
446 | } else { |
447 | /* compressor does have this page */ |
448 | kr = KERN_SUCCESS; |
449 | } |
450 | return kr; |
451 | } |
452 | |
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface. */
464 | |
465 | kern_return_t |
466 | compressor_memory_object_data_initialize( |
467 | memory_object_t mem_obj, |
468 | memory_object_offset_t offset, |
469 | memory_object_cluster_size_t size) |
470 | { |
compressor_pager_t pager;
472 | memory_object_offset_t cur_offset; |
473 | |
474 | compressor_pager_lookup(mem_obj, pager); |
475 | compressor_pager_lock(pager); |
476 | |
477 | for (cur_offset = offset; |
478 | cur_offset < offset + size; |
479 | cur_offset += PAGE_SIZE) { |
panic("do a data_return() if slot for this page is empty");
481 | } |
482 | |
483 | compressor_pager_unlock(pager); |
484 | |
485 | return KERN_SUCCESS; |
486 | } |
487 | |
488 | kern_return_t |
489 | compressor_memory_object_data_unlock( |
490 | __unused memory_object_t mem_obj, |
491 | __unused memory_object_offset_t offset, |
492 | __unused memory_object_size_t size, |
493 | __unused vm_prot_t desired_access) |
494 | { |
panic("compressor_memory_object_data_unlock()");
496 | return KERN_FAILURE; |
497 | } |
498 | |
499 | |
500 | /*ARGSUSED*/ |
501 | kern_return_t |
502 | compressor_memory_object_data_return( |
503 | __unused memory_object_t mem_obj, |
504 | __unused memory_object_offset_t offset, |
505 | __unused memory_object_cluster_size_t size, |
506 | __unused memory_object_offset_t *resid_offset, |
507 | __unused int *io_error, |
508 | __unused boolean_t dirty, |
509 | __unused boolean_t kernel_copy, |
510 | __unused int upl_flags) |
511 | { |
panic("compressor: data_return");
513 | return KERN_FAILURE; |
514 | } |
515 | |
516 | /* |
 * Routine:	compressor_memory_object_create
518 | * Purpose: |
519 | * Handle requests for memory objects from the |
520 | * kernel. |
521 | * Notes: |
522 | * Because we only give out the default memory |
523 | * manager port to the kernel, we don't have to |
524 | * be so paranoid about the contents. |
525 | */ |
526 | kern_return_t |
527 | compressor_memory_object_create( |
528 | memory_object_size_t new_size, |
529 | memory_object_t *new_mem_obj) |
530 | { |
compressor_pager_t pager;
532 | int num_chunks; |
533 | |
534 | if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) { |
535 | /* 32-bit overflow for number of pages */ |
panic("%s: size 0x%llx overflow\n",
537 | __FUNCTION__, (uint64_t) new_size); |
538 | return KERN_INVALID_ARGUMENT; |
539 | } |
540 | |
541 | pager = (compressor_pager_t) zalloc(compressor_pager_zone); |
542 | if (pager == NULL) { |
543 | return KERN_RESOURCE_SHORTAGE; |
544 | } |
545 | |
546 | compressor_pager_lock_init(pager); |
547 | pager->cpgr_references = 1; |
548 | pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE); |
549 | pager->cpgr_num_slots_occupied = 0; |
550 | |
551 | num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK; |
552 | if (num_chunks > 1) { |
553 | pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0])); |
554 | bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0])); |
555 | } else if (pager->cpgr_num_slots > 2) { |
556 | pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0])); |
557 | bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0])); |
558 | } else { |
559 | pager->cpgr_slots.cpgr_eslots[0] = 0; |
560 | pager->cpgr_slots.cpgr_eslots[1] = 0; |
561 | } |
562 | |
563 | /* |
564 | * Set up associations between this memory object |
565 | * and this compressor_pager structure |
566 | */ |
567 | pager->cpgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT; |
568 | pager->cpgr_hdr.mo_pager_ops = &compressor_pager_ops; |
569 | pager->cpgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; |
570 | |
571 | *new_mem_obj = (memory_object_t) pager; |
572 | return KERN_SUCCESS; |
573 | } |
574 | |
575 | |
576 | unsigned int |
compressor_pager_slots_chunk_free(
578 | compressor_slot_t *chunk, |
579 | int num_slots, |
580 | int flags, |
581 | int *failures) |
582 | { |
583 | int i; |
584 | int retval; |
585 | unsigned int num_slots_freed; |
586 | |
587 | if (failures) |
588 | *failures = 0; |
589 | num_slots_freed = 0; |
590 | for (i = 0; i < num_slots; i++) { |
591 | if (chunk[i] != 0) { |
592 | retval = vm_compressor_free(&chunk[i], flags); |
593 | |
594 | if (retval == 0) |
595 | num_slots_freed++; |
596 | else { |
597 | if (retval == -2) |
598 | assert(flags & C_DONT_BLOCK); |
599 | |
600 | if (failures) |
601 | *failures += 1; |
602 | } |
603 | } |
604 | } |
605 | return num_slots_freed; |
606 | } |
607 | |
608 | void |
compressor_pager_slot_lookup(
compressor_pager_t pager,
611 | boolean_t do_alloc, |
612 | memory_object_offset_t offset, |
613 | compressor_slot_t **slot_pp) |
614 | { |
615 | int num_chunks; |
616 | uint32_t page_num; |
617 | int chunk_idx; |
618 | int slot_idx; |
619 | compressor_slot_t *chunk; |
620 | compressor_slot_t *t_chunk; |
621 | |
622 | page_num = (uint32_t)(offset/PAGE_SIZE); |
623 | if (page_num != (offset/PAGE_SIZE)) { |
624 | /* overflow */ |
625 | panic("%s: offset 0x%llx overflow\n" , |
626 | __FUNCTION__, (uint64_t) offset); |
627 | *slot_pp = NULL; |
628 | return; |
629 | } |
630 | if (page_num >= pager->cpgr_num_slots) { |
631 | /* out of range */ |
632 | *slot_pp = NULL; |
633 | return; |
634 | } |
635 | num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK; |
636 | if (num_chunks > 1) { |
637 | /* we have an array of chunks */ |
638 | chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK; |
639 | chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]; |
640 | |
641 | if (chunk == NULL && do_alloc) { |
642 | t_chunk = kalloc(COMPRESSOR_SLOTS_CHUNK_SIZE); |
643 | bzero(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); |
644 | |
645 | compressor_pager_lock(pager); |
646 | |
647 | if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) { |
648 | |
649 | /* |
650 | * On some platforms, the memory stores from |
651 | * the bzero(t_chunk) above might not have been |
652 | * made visible and another thread might see |
653 | * the contents of this new chunk before it's |
654 | * been fully zero-filled. |
655 | * This memory barrier should take care of this |
656 | * according to the platform requirements. |
657 | */ |
658 | __c11_atomic_thread_fence(memory_order_release); |
659 | |
660 | chunk = pager->cpgr_slots.cpgr_islots[chunk_idx] = t_chunk; |
661 | t_chunk = NULL; |
662 | } |
663 | compressor_pager_unlock(pager); |
664 | |
665 | if (t_chunk) |
666 | kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); |
667 | } |
668 | if (chunk == NULL) { |
669 | *slot_pp = NULL; |
670 | } else { |
671 | slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK; |
672 | *slot_pp = &chunk[slot_idx]; |
673 | } |
674 | } else if (pager->cpgr_num_slots > 2) { |
675 | slot_idx = page_num; |
676 | *slot_pp = &pager->cpgr_slots.cpgr_dslots[slot_idx]; |
677 | } else { |
678 | slot_idx = page_num; |
679 | *slot_pp = &pager->cpgr_slots.cpgr_eslots[slot_idx]; |
680 | } |
681 | } |
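
/*
 * Worked example of the indirect mapping above (assuming 4 KB pages):
 * offset 0x201000 -> page_num 513; with 128 slots per chunk this is
 * chunk_idx 4, slot_idx 1, i.e. the second slot of the fifth chunk.
 */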
682 | |
683 | void |
vm_compressor_pager_init(void)
685 | { |
686 | lck_grp_attr_setdefault(&compressor_pager_lck_grp_attr); |
lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr);
688 | lck_attr_setdefault(&compressor_pager_lck_attr); |
689 | |
690 | compressor_pager_zone = zinit(sizeof (struct compressor_pager), |
691 | 10000 * sizeof (struct compressor_pager), |
8192, "compressor_pager");
693 | zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE); |
694 | zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE); |
695 | |
696 | vm_compressor_init(); |
697 | } |
698 | |
699 | kern_return_t |
vm_compressor_pager_put(
701 | memory_object_t mem_obj, |
702 | memory_object_offset_t offset, |
703 | ppnum_t ppnum, |
704 | void **current_chead, |
705 | char *scratch_buf, |
706 | int *compressed_count_delta_p) |
707 | { |
compressor_pager_t pager;
709 | compressor_slot_t *slot_p; |
710 | unsigned int prev_wimg = VM_WIMG_DEFAULT; |
711 | boolean_t set_cache_attr = FALSE; |
712 | |
713 | compressor_pager_stats.put++; |
714 | |
715 | *compressed_count_delta_p = 0; |
716 | |
/* This routine is called by the pageout thread.  The pageout thread */
/* cannot be blocked by read activities, so the grant of the vs lock */
/* must be done on a try rather than a blocking basis.  The code below */
/* relies on the fact that the interface is synchronous.  Should this */
/* interface again become asynchronous for some type of pager in the */
/* future, the pages will have to be returned through a separate, */
/* asynchronous path. */
724 | |
725 | compressor_pager_lookup(mem_obj, pager); |
726 | |
727 | if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { |
728 | /* overflow */ |
729 | panic("%s: offset 0x%llx overflow\n" , |
730 | __FUNCTION__, (uint64_t) offset); |
731 | return KERN_RESOURCE_SHORTAGE; |
732 | } |
733 | |
734 | compressor_pager_slot_lookup(pager, TRUE, offset, &slot_p); |
735 | |
736 | if (slot_p == NULL) { |
737 | /* out of range ? */ |
panic("vm_compressor_pager_put: out of range");
739 | } |
740 | if (*slot_p != 0) { |
741 | /* |
742 | * Already compressed: forget about the old one. |
743 | * |
744 | * This can happen after a vm_object_do_collapse() when |
745 | * the "backing_object" had some pages paged out and the |
746 | * "object" had an equivalent page resident. |
747 | */ |
748 | vm_compressor_free(slot_p, 0); |
749 | *compressed_count_delta_p -= 1; |
750 | } |
751 | |
752 | /* |
753 | * cacheability should be set to the system default (usually writeback) |
754 | * during compressor operations, both for performance and correctness, |
755 | * e.g. to avoid compressor codec faults generated by an unexpected |
756 | * memory type. |
757 | */ |
758 | prev_wimg = pmap_cache_attributes(ppnum) & VM_WIMG_MASK; |
759 | |
760 | if ((prev_wimg != VM_WIMG_DEFAULT) && (prev_wimg != VM_WIMG_USE_DEFAULT)) { |
761 | set_cache_attr = TRUE; |
762 | pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT); |
763 | } |
764 | /* |
765 | * If the compressor operation succeeds, we presumably don't need to |
766 | * undo any previous WIMG update, as all live mappings should be |
767 | * disconnected. |
768 | */ |
769 | |
770 | if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) { |
771 | if (set_cache_attr) |
772 | pmap_set_cache_attributes(ppnum, prev_wimg); |
773 | return KERN_RESOURCE_SHORTAGE; |
774 | } |
775 | *compressed_count_delta_p += 1; |
776 | |
777 | return KERN_SUCCESS; |
778 | } |
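
/*
 * Sketch of the expected calling pattern for vm_compressor_pager_put
 * (illustrative only; assumes the caller holds the VM object lock
 * exclusively, as the pageout compression path does):
 *
 *	int delta = 0;
 *
 *	kr = vm_compressor_pager_put(object->pager, offset, ppnum,
 *				     &chead, scratch_buf, &delta);
 *	vm_compressor_pager_count(object->pager, delta, FALSE, object);
 */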
779 | |
780 | |
781 | kern_return_t |
vm_compressor_pager_get(
783 | memory_object_t mem_obj, |
784 | memory_object_offset_t offset, |
785 | ppnum_t ppnum, |
786 | int *my_fault_type, |
787 | int flags, |
788 | int *compressed_count_delta_p) |
789 | { |
compressor_pager_t pager;
791 | kern_return_t kr; |
792 | compressor_slot_t *slot_p; |
793 | |
794 | compressor_pager_stats.get++; |
795 | |
796 | *compressed_count_delta_p = 0; |
797 | |
798 | if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { |
799 | panic("%s: offset 0x%llx overflow\n" , |
800 | __FUNCTION__, (uint64_t) offset); |
801 | return KERN_MEMORY_ERROR; |
802 | } |
803 | |
804 | compressor_pager_lookup(mem_obj, pager); |
805 | |
806 | /* find the compressor slot for that page */ |
807 | compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p); |
808 | |
809 | if (offset / PAGE_SIZE >= pager->cpgr_num_slots) { |
810 | /* out of range */ |
811 | kr = KERN_MEMORY_FAILURE; |
812 | } else if (slot_p == NULL || *slot_p == 0) { |
813 | /* compressor does not have this page */ |
814 | kr = KERN_MEMORY_ERROR; |
815 | } else { |
816 | /* compressor does have this page */ |
817 | kr = KERN_SUCCESS; |
818 | } |
819 | *my_fault_type = DBG_COMPRESSOR_FAULT; |
820 | |
821 | if (kr == KERN_SUCCESS) { |
822 | int retval; |
823 | unsigned int prev_wimg = VM_WIMG_DEFAULT; |
824 | boolean_t set_cache_attr = FALSE; |
825 | |
826 | /* |
827 | * cacheability should be set to the system default (usually writeback) |
828 | * during compressor operations, both for performance and correctness, |
829 | * e.g. to avoid compressor codec faults generated by an unexpected |
830 | * memory type. |
831 | */ |
832 | prev_wimg = pmap_cache_attributes(ppnum) & VM_WIMG_MASK; |
833 | |
834 | if ((prev_wimg != VM_WIMG_DEFAULT) && (prev_wimg != VM_WIMG_USE_DEFAULT)) { |
835 | set_cache_attr = TRUE; |
836 | pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT); |
837 | } |
838 | |
839 | /* get the page from the compressor */ |
840 | retval = vm_compressor_get(ppnum, slot_p, flags); |
841 | if (retval == -1) |
842 | kr = KERN_MEMORY_FAILURE; |
843 | else if (retval == 1) |
844 | *my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT; |
845 | else if (retval == -2) { |
846 | assert((flags & C_DONT_BLOCK)); |
847 | kr = KERN_FAILURE; |
848 | } |
849 | if (set_cache_attr) |
850 | pmap_set_cache_attributes(ppnum, prev_wimg); |
851 | } |
852 | |
853 | if (kr == KERN_SUCCESS) { |
854 | assert(slot_p != NULL); |
855 | if (*slot_p != 0) { |
856 | /* |
857 | * We got the page for a copy-on-write fault |
858 | * and we kept the original in place. Slot |
859 | * is still occupied. |
860 | */ |
861 | } else { |
862 | *compressed_count_delta_p -= 1; |
863 | } |
864 | } |
865 | |
866 | return kr; |
867 | } |
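
/*
 * Return-value summary for vm_compressor_pager_get (derived from the code
 * above): KERN_SUCCESS when the page was decompressed into ppnum,
 * KERN_MEMORY_ERROR when the compressor holds no data for that offset,
 * KERN_MEMORY_FAILURE for an out-of-range offset or a decompression error,
 * and KERN_FAILURE when C_DONT_BLOCK was set and the get would have blocked.
 */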
868 | |
869 | unsigned int |
vm_compressor_pager_state_clr(
871 | memory_object_t mem_obj, |
872 | memory_object_offset_t offset) |
873 | { |
compressor_pager_t pager;
875 | compressor_slot_t *slot_p; |
876 | unsigned int num_slots_freed; |
877 | |
878 | assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); |
879 | |
880 | compressor_pager_stats.state_clr++; |
881 | |
882 | if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { |
883 | /* overflow */ |
884 | panic("%s: offset 0x%llx overflow\n" , |
885 | __FUNCTION__, (uint64_t) offset); |
886 | return 0; |
887 | } |
888 | |
889 | compressor_pager_lookup(mem_obj, pager); |
890 | |
891 | /* find the compressor slot for that page */ |
892 | compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p); |
893 | |
894 | num_slots_freed = 0; |
895 | if (slot_p && *slot_p != 0) { |
896 | vm_compressor_free(slot_p, 0); |
897 | num_slots_freed++; |
898 | assert(*slot_p == 0); |
899 | } |
900 | |
901 | return num_slots_freed; |
902 | } |
903 | |
904 | vm_external_state_t |
vm_compressor_pager_state_get(
906 | memory_object_t mem_obj, |
907 | memory_object_offset_t offset) |
908 | { |
compressor_pager_t pager;
910 | compressor_slot_t *slot_p; |
911 | |
912 | assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); |
913 | |
914 | compressor_pager_stats.state_get++; |
915 | |
916 | if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { |
917 | /* overflow */ |
918 | panic("%s: offset 0x%llx overflow\n" , |
919 | __FUNCTION__, (uint64_t) offset); |
920 | return VM_EXTERNAL_STATE_ABSENT; |
921 | } |
922 | |
923 | compressor_pager_lookup(mem_obj, pager); |
924 | |
925 | /* find the compressor slot for that page */ |
926 | compressor_pager_slot_lookup(pager, FALSE, offset, &slot_p); |
927 | |
928 | if (offset / PAGE_SIZE >= pager->cpgr_num_slots) { |
929 | /* out of range */ |
930 | return VM_EXTERNAL_STATE_ABSENT; |
931 | } else if (slot_p == NULL || *slot_p == 0) { |
932 | /* compressor does not have this page */ |
933 | return VM_EXTERNAL_STATE_ABSENT; |
934 | } else { |
935 | /* compressor does have this page */ |
936 | return VM_EXTERNAL_STATE_EXISTS; |
937 | } |
938 | } |
939 | |
940 | unsigned int |
vm_compressor_pager_reap_pages(
942 | memory_object_t mem_obj, |
943 | int flags) |
944 | { |
compressor_pager_t pager;
946 | int num_chunks; |
947 | int failures; |
948 | int i; |
949 | compressor_slot_t *chunk; |
950 | unsigned int num_slots_freed; |
951 | |
952 | compressor_pager_lookup(mem_obj, pager); |
953 | if (pager == NULL) |
954 | return 0; |
955 | |
956 | compressor_pager_lock(pager); |
957 | |
958 | /* reap the compressor slots */ |
959 | num_slots_freed = 0; |
960 | |
961 | num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK -1) / COMPRESSOR_SLOTS_PER_CHUNK; |
962 | if (num_chunks > 1) { |
963 | /* we have an array of chunks */ |
964 | for (i = 0; i < num_chunks; i++) { |
965 | chunk = pager->cpgr_slots.cpgr_islots[i]; |
966 | if (chunk != NULL) { |
967 | num_slots_freed += |
968 | compressor_pager_slots_chunk_free( |
969 | chunk, |
970 | COMPRESSOR_SLOTS_PER_CHUNK, |
971 | flags, |
972 | &failures); |
973 | if (failures == 0) { |
974 | pager->cpgr_slots.cpgr_islots[i] = NULL; |
975 | kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); |
976 | } |
977 | } |
978 | } |
979 | } else if (pager->cpgr_num_slots > 2) { |
980 | chunk = pager->cpgr_slots.cpgr_dslots; |
981 | num_slots_freed += |
982 | compressor_pager_slots_chunk_free( |
983 | chunk, |
984 | pager->cpgr_num_slots, |
985 | flags, |
986 | NULL); |
987 | } else { |
988 | chunk = &pager->cpgr_slots.cpgr_eslots[0]; |
989 | num_slots_freed += |
990 | compressor_pager_slots_chunk_free( |
991 | chunk, |
992 | pager->cpgr_num_slots, |
993 | flags, |
994 | NULL); |
995 | } |
996 | |
997 | compressor_pager_unlock(pager); |
998 | |
999 | return num_slots_freed; |
1000 | } |
1001 | |
1002 | void |
vm_compressor_pager_transfer(
1004 | memory_object_t dst_mem_obj, |
1005 | memory_object_offset_t dst_offset, |
1006 | memory_object_t src_mem_obj, |
1007 | memory_object_offset_t src_offset) |
1008 | { |
compressor_pager_t dst_pager, src_pager;
1010 | compressor_slot_t *src_slot_p, *dst_slot_p; |
1011 | |
1012 | compressor_pager_stats.transfer++; |
1013 | |
1014 | /* find the compressor slot for the destination */ |
1015 | assert((uint32_t) dst_offset == dst_offset); |
1016 | compressor_pager_lookup(dst_mem_obj, dst_pager); |
1017 | assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots); |
1018 | compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset, |
1019 | &dst_slot_p); |
1020 | assert(dst_slot_p != NULL); |
1021 | assert(*dst_slot_p == 0); |
1022 | |
1023 | /* find the compressor slot for the source */ |
1024 | assert((uint32_t) src_offset == src_offset); |
1025 | compressor_pager_lookup(src_mem_obj, src_pager); |
1026 | assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots); |
1027 | compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset, |
1028 | &src_slot_p); |
1029 | assert(src_slot_p != NULL); |
1030 | assert(*src_slot_p != 0); |
1031 | |
1032 | /* transfer the slot from source to destination */ |
1033 | vm_compressor_transfer(dst_slot_p, src_slot_p); |
1034 | OSAddAtomic(-1, &src_pager->cpgr_num_slots_occupied); |
1035 | OSAddAtomic(+1, &dst_pager->cpgr_num_slots_occupied); |
1036 | } |
1037 | |
1038 | memory_object_offset_t |
vm_compressor_pager_next_compressed(
1040 | memory_object_t mem_obj, |
1041 | memory_object_offset_t offset) |
1042 | { |
compressor_pager_t pager;
1044 | uint32_t num_chunks; |
1045 | uint32_t page_num; |
1046 | uint32_t chunk_idx; |
1047 | uint32_t slot_idx; |
1048 | compressor_slot_t *chunk; |
1049 | |
1050 | compressor_pager_lookup(mem_obj, pager); |
1051 | |
1052 | page_num = (uint32_t)(offset / PAGE_SIZE); |
1053 | if (page_num != (offset/PAGE_SIZE)) { |
1054 | /* overflow */ |
1055 | return (memory_object_offset_t) -1; |
1056 | } |
1057 | if (page_num >= pager->cpgr_num_slots) { |
1058 | /* out of range */ |
1059 | return (memory_object_offset_t) -1; |
1060 | } |
1061 | |
1062 | num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / |
1063 | COMPRESSOR_SLOTS_PER_CHUNK); |
1064 | |
1065 | if (num_chunks == 1) { |
1066 | if (pager->cpgr_num_slots > 2) { |
1067 | chunk = pager->cpgr_slots.cpgr_dslots; |
1068 | } else { |
1069 | chunk = &pager->cpgr_slots.cpgr_eslots[0]; |
1070 | } |
1071 | for (slot_idx = page_num; |
1072 | slot_idx < pager->cpgr_num_slots; |
1073 | slot_idx++) { |
1074 | if (chunk[slot_idx] != 0) { |
1075 | /* found a non-NULL slot in this chunk */ |
1076 | return (memory_object_offset_t) (slot_idx * |
1077 | PAGE_SIZE); |
1078 | } |
1079 | } |
1080 | return (memory_object_offset_t) -1; |
1081 | } |
1082 | |
1083 | /* we have an array of chunks; find the next non-NULL chunk */ |
1084 | chunk = NULL; |
1085 | for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK, |
1086 | slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK; |
1087 | chunk_idx < num_chunks; |
1088 | chunk_idx++, |
1089 | slot_idx = 0) { |
1090 | chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]; |
1091 | if (chunk == NULL) { |
1092 | /* no chunk here: try the next one */ |
1093 | continue; |
1094 | } |
1095 | /* search for an occupied slot in this chunk */ |
1096 | for (; |
1097 | slot_idx < COMPRESSOR_SLOTS_PER_CHUNK; |
1098 | slot_idx++) { |
1099 | if (chunk[slot_idx] != 0) { |
1100 | /* found an occupied slot in this chunk */ |
1101 | uint32_t next_slot; |
1102 | |
1103 | next_slot = ((chunk_idx * |
1104 | COMPRESSOR_SLOTS_PER_CHUNK) + |
1105 | slot_idx); |
1106 | if (next_slot >= pager->cpgr_num_slots) { |
1107 | /* went beyond end of object */ |
1108 | return (memory_object_offset_t) -1; |
1109 | } |
1110 | return (memory_object_offset_t) (next_slot * |
1111 | PAGE_SIZE); |
1112 | } |
1113 | } |
1114 | } |
1115 | return (memory_object_offset_t) -1; |
1116 | } |
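
/*
 * Note: a return value of (memory_object_offset_t) -1 above means there is
 * no compressed page at or beyond the given offset (or the offset was out
 * of range / overflowed).
 */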
1117 | |
1118 | unsigned int |
vm_compressor_pager_get_count(
1120 | memory_object_t mem_obj) |
1121 | { |
compressor_pager_t pager;
1123 | |
1124 | compressor_pager_lookup(mem_obj, pager); |
1125 | if (pager == NULL) |
1126 | return 0; |
1127 | |
1128 | /* |
1129 | * The caller should have the VM object locked and one |
1130 | * needs that lock to do a page-in or page-out, so no |
1131 | * need to lock the pager here. |
1132 | */ |
1133 | assert(pager->cpgr_num_slots_occupied >= 0); |
1134 | |
1135 | return pager->cpgr_num_slots_occupied; |
1136 | } |
1137 | |
1138 | void |
vm_compressor_pager_count(
1140 | memory_object_t mem_obj, |
1141 | int compressed_count_delta, |
1142 | boolean_t shared_lock, |
1143 | vm_object_t object __unused) |
1144 | { |
compressor_pager_t pager;
1146 | |
1147 | if (compressed_count_delta == 0) { |
1148 | return; |
1149 | } |
1150 | |
1151 | compressor_pager_lookup(mem_obj, pager); |
1152 | if (pager == NULL) |
1153 | return; |
1154 | |
1155 | if (compressed_count_delta < 0) { |
1156 | assert(pager->cpgr_num_slots_occupied >= |
1157 | (unsigned int) -compressed_count_delta); |
1158 | } |
1159 | |
1160 | /* |
1161 | * The caller should have the VM object locked, |
1162 | * shared or exclusive. |
1163 | */ |
1164 | if (shared_lock) { |
1165 | vm_object_lock_assert_shared(object); |
1166 | OSAddAtomic(compressed_count_delta, |
1167 | &pager->cpgr_num_slots_occupied); |
1168 | } else { |
1169 | vm_object_lock_assert_exclusive(object); |
1170 | pager->cpgr_num_slots_occupied += compressed_count_delta; |
1171 | } |
1172 | } |
1173 | |
1174 | #if CONFIG_FREEZE |
1175 | kern_return_t |
1176 | vm_compressor_pager_relocate( |
1177 | memory_object_t mem_obj, |
1178 | memory_object_offset_t offset, |
1179 | void **current_chead) |
1180 | { |
1181 | /* |
1182 | * Has the page at this offset been compressed? |
1183 | */ |
1184 | |
1185 | compressor_slot_t *slot_p; |
1186 | compressor_pager_t dst_pager; |
1187 | |
1188 | assert(mem_obj); |
1189 | |
1190 | compressor_pager_lookup(mem_obj, dst_pager); |
1191 | if (dst_pager == NULL) |
1192 | return KERN_FAILURE; |
1193 | |
1194 | compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p); |
1195 | return (vm_compressor_relocate(current_chead, slot_p)); |
1196 | } |
1197 | #endif /* CONFIG_FREEZE */ |
1198 | |
1199 | |