/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/host.h>
#include <kern/thread.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <mach/sdt.h>


/* Device VM COMPONENT INTERFACES */


/*
 * Device PAGER
 */

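/*
 * The device pager backs VM objects with device memory rather than
 * pageable anonymous memory.  Pages entered through this pager are
 * "private": they are supplied by the device layer (via the
 * device_data_action() upcall) and are never paged out to a backing
 * store.  This is the mechanism used, for example, to map device
 * apertures such as framebuffers into user address spaces.
 */
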
/* until component support available */
const struct memory_object_pager_ops device_pager_ops = {
	device_pager_reference,
	device_pager_deallocate,
	device_pager_init,
	device_pager_terminate,
	device_pager_data_request,
	device_pager_data_return,
	device_pager_data_initialize,
	device_pager_data_unlock,
	device_pager_synchronize,
	device_pager_map,
	device_pager_last_unmap,
	NULL,			/* data_reclaim */
	"device pager"
};
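
/*
 * The initializers above are positional: they must stay in the same
 * order as the function pointers declared in
 * "struct memory_object_pager_ops" (reference, deallocate, init,
 * terminate, data_request, data_return, ...), followed by the
 * human-readable pager name.
 */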

typedef uintptr_t device_port_t;

/*
 * The start of "struct device_pager" MUST match a "struct memory_object".
 */
typedef struct device_pager {
	/* mandatory generic header */
	struct memory_object	dev_pgr_hdr;

	/* pager-specific data */
	lck_mtx_t		lock;
	unsigned int		ref_count;	/* reference count */
	device_port_t		device_handle;	/* device_handle */
	vm_size_t		size;
	int			flags;
	boolean_t		is_mapped;
} *device_pager_t;

lck_grp_t	device_pager_lck_grp;
lck_grp_attr_t	device_pager_lck_grp_attr;
lck_attr_t	device_pager_lck_attr;

#define device_pager_lock_init(pager)				\
	lck_mtx_init(&(pager)->lock,				\
		     &device_pager_lck_grp,			\
		     &device_pager_lck_attr)
#define device_pager_lock_destroy(pager)			\
	lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp)
#define device_pager_lock(pager)	lck_mtx_lock(&(pager)->lock)
#define device_pager_unlock(pager)	lck_mtx_unlock(&(pager)->lock)
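
/*
 * The pager lock protects "is_mapped": device_pager_map() and
 * device_pager_last_unmap() take it so that the extra "mapped"
 * reference is taken and dropped exactly once per map/unmap cycle.
 */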

device_pager_t
device_pager_lookup(		/* forward */
	memory_object_t);

device_pager_t
device_object_create(void);	/* forward */

zone_t		device_pager_zone;


#define	DEVICE_PAGER_NULL	((device_pager_t) 0)


#define	MAX_DNODE	10000	/* upper bound on pager structures in the zone */



/*
 * device_pager_bootstrap:
 *
 * Set up the zone and locks used by the device pager.  Called once
 * during VM subsystem bootstrap.
 */
void
device_pager_bootstrap(void)
{
	vm_size_t	size;

	size = (vm_size_t) sizeof(struct device_pager);
	device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE * size,
				  PAGE_SIZE, "device node pager structures");
	zone_change(device_pager_zone, Z_CALLERACCT, FALSE);

	lck_grp_attr_setdefault(&device_pager_lck_grp_attr);
	lck_grp_init(&device_pager_lck_grp, "device_pager", &device_pager_lck_grp_attr);
	lck_attr_setdefault(&device_pager_lck_attr);

	return;
}

/*
 * device_pager_setup:
 *
 * Create a device pager for "device_handle" and wire it to a new
 * VM object of the given size.  Returns the pager, which also serves
 * as the memory object.
 */
memory_object_t
device_pager_setup(
	__unused memory_object_t device,
	uintptr_t	device_handle,
	vm_size_t	size,
	int		flags)
{
	device_pager_t	device_object;
	memory_object_control_t control;
	vm_object_t	object;

	device_object = device_object_create();
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_setup: device_object_create() failed");

	device_object->device_handle = device_handle;
	device_object->size = size;
	device_object->flags = flags;

	memory_object_create_named((memory_object_t) device_object,
				   size,
				   &control);
	object = memory_object_control_to_vm_object(control);

	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	object->true_share = TRUE;
	if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
		object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	vm_object_unlock(object);

	return (memory_object_t)device_object;
}
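
/*
 * The reference returned by device_pager_setup() is the pager's
 * "named" reference, taken in device_object_create(); the caller
 * owns it and must eventually drop it with device_pager_deallocate()
 * to allow the pager to be torn down.
 */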

/*
 * device_pager_populate_object:
 *
 * Enter "size" bytes worth of private (device) pages, starting at
 * physical page "page_num", into the pager's VM object at "offset".
 */
kern_return_t
device_pager_populate_object(
	memory_object_t		device,
	memory_object_offset_t	offset,
	ppnum_t			page_num,
	vm_size_t		size)
{
	device_pager_t	device_object;
	vm_object_t	vm_object;
	kern_return_t	kr;
	upl_t		upl;

	device_object = device_pager_lookup(device);
	if (device_object == DEVICE_PAGER_NULL)
		return KERN_FAILURE;

	vm_object = (vm_object_t)memory_object_control_to_vm_object(
		device_object->dev_pgr_hdr.mo_control);
	if (vm_object == NULL)
		return KERN_FAILURE;

	kr = vm_object_populate_with_private(
		vm_object, offset, page_num, size);
	if (kr != KERN_SUCCESS)
		return kr;

	if (!vm_object->phys_contiguous) {
		unsigned int null_size = 0;
		assert((upl_size_t) size == size);
		kr = vm_object_upl_request(vm_object,
					   (vm_object_offset_t)offset,
					   (upl_size_t) size, &upl, NULL,
					   &null_size,
					   (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE),
					   VM_KERN_MEMORY_NONE);
		if (kr != KERN_SUCCESS)
			panic("device_pager_populate_object: list_req failed");

		upl_commit(upl, NULL, 0);
		upl_deallocate(upl);
	}

	return kr;
}
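
/*
 * device_pager_populate_object() is typically driven from the
 * device_data_action() upcall path: when device_pager_data_request()
 * below asks the device layer for pages, the driver resolves the
 * fault by handing the pager the physical pages that back the
 * faulting range.
 */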

/*
 * device_pager_lookup:
 *
 * Convert a memory_object_t to the device pager it embeds,
 * asserting that the object really is a device pager.
 */
device_pager_t
device_pager_lookup(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;

	assert(mem_obj->mo_pager_ops == &device_pager_ops);
	device_object = (device_pager_t)mem_obj;
	assert(device_object->ref_count > 0);
	return device_object;
}

/*
 * device_pager_init:
 *
 * Attach the pager to its memory object control port and set the
 * VM object attributes appropriate for device memory.
 */
kern_return_t
device_pager_init(
	memory_object_t mem_obj,
	memory_object_control_t control,
	__unused memory_object_cluster_size_t pg_size)
{
	device_pager_t	device_object;
	kern_return_t	kr;
	memory_object_attr_info_data_t attributes;

	vm_object_t	vm_object;


	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return KERN_INVALID_ARGUMENT;

	device_object = device_pager_lookup(mem_obj);

	memory_object_control_reference(control);
	device_object->dev_pgr_hdr.mo_control = control;


	/* The following settings should be done through an expanded change */
	/* attributes call */

	vm_object = (vm_object_t)memory_object_control_to_vm_object(control);
	vm_object_lock(vm_object);
	vm_object->private = TRUE;
	if (device_object->flags & DEVICE_PAGER_CONTIGUOUS)
		vm_object->phys_contiguous = TRUE;
	if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE)
		vm_object->nophyscache = TRUE;

	vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK;
	vm_object_unlock(vm_object);


	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
					control,
					MEMORY_OBJECT_ATTRIBUTE_INFO,
					(memory_object_info_t) &attributes,
					MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS)
		panic("device_pager_init: memory_object_change_attributes() failed");

	return(KERN_SUCCESS);
}

/*
 * device_pager_data_return:
 *
 * Pageout path: hand dirty data back to the device via
 * device_data_action() with read/write access.
 */
/*ARGSUSED6*/
kern_return_t
device_pager_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int		*io_error,
	__unused boolean_t	dirty,
	__unused boolean_t	kernel_copy,
	__unused int		upl_flags)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);
	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_return: lookup failed");

	__IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle,
						      (ipc_port_t) device_object,
						      VM_PROT_READ | VM_PROT_WRITE,
						      offset, data_cnt));
}

/*
 * device_pager_data_request:
 *
 * Page-fault path: ask the device layer to supply the pages for
 * the faulting range via device_data_action().
 */
kern_return_t
device_pager_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t	protection_required,
	__unused memory_object_fault_info_t fault_info)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	if (device_object == DEVICE_PAGER_NULL)
		panic("device_pager_data_request: lookup failed");

	__IGNORE_WCASTALIGN(device_data_action(device_object->device_handle,
					       (ipc_port_t) device_object,
					       VM_PROT_READ, offset, length));
	return KERN_SUCCESS;
}

/*
 * device_pager_reference:
 *
 * Take an additional reference on the pager.
 */
void
device_pager_reference(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;
	unsigned int	new_ref_count;

	device_object = device_pager_lookup(mem_obj);
	new_ref_count = hw_atomic_add(&device_object->ref_count, 1);
	assert(new_ref_count > 1);
	DTRACE_VM2(device_pager_reference,
		   device_pager_t, device_object,
		   unsigned int, device_object->ref_count);
}

/*
 * device_pager_deallocate:
 *
 * Release a reference.  When only the "named" reference remains,
 * close the device and destroy the VM object; when the last
 * reference goes away, free the pager itself.
 */
void
device_pager_deallocate(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;
	memory_object_control_t	device_control;
	unsigned int	ref_count;

	device_object = device_pager_lookup(mem_obj);
	assert(device_object->ref_count > 0);

	DTRACE_VM2(device_pager_deallocate,
		   device_pager_t, device_object,
		   unsigned int, device_object->ref_count);

	ref_count = hw_atomic_sub(&device_object->ref_count, 1);

	if (ref_count == 1) {
		/*
		 * The last reference is our "named" reference.
		 * Close the device and "destroy" the VM object.
		 */

		DTRACE_VM2(device_pager_destroy,
			   device_pager_t, device_object,
			   unsigned int, device_object->ref_count);

		assert(device_object->is_mapped == FALSE);
		if (device_object->device_handle != (device_port_t) NULL) {
			device_close(device_object->device_handle);
			device_object->device_handle = (device_port_t) NULL;
		}
		device_control = device_object->dev_pgr_hdr.mo_control;
		memory_object_destroy(device_control, 0);
	} else if (ref_count == 0) {
		/*
		 * No more references: free the pager.
		 */
		DTRACE_VM2(device_pager_free,
			   device_pager_t, device_object,
			   unsigned int, device_object->ref_count);

		device_pager_lock_destroy(device_object);

		zfree(device_pager_zone, device_object);
	}
	return;
}

/*
 * device_pager_data_initialize: not supported for device pagers.
 */
kern_return_t
device_pager_data_initialize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("device_pager_data_initialize");
	return KERN_FAILURE;
}

/*
 * device_pager_data_unlock: not supported for device pagers.
 */
kern_return_t
device_pager_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	return KERN_FAILURE;
}

kern_return_t
device_pager_terminate(
	__unused memory_object_t mem_obj)
{
	return KERN_SUCCESS;
}


/*
 * device_pager_synchronize: memory_object_synchronize() is no longer
 * supported, so this should never be called.
 */
kern_return_t
device_pager_synchronize(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	length,
	__unused vm_sync_t		sync_flags)
{
	panic("device_pager_synchronize: memory_object_synchronize no longer supported\n");
	return KERN_FAILURE;
}

/*
 * device_pager_map:
 *
 * Called on the first mapping of the memory object.
 */
kern_return_t
device_pager_map(
	memory_object_t	mem_obj,
	__unused vm_prot_t	prot)
{
	device_pager_t	device_object;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_object->ref_count > 0);
	if (device_object->is_mapped == FALSE) {
		/*
		 * First mapping of this pager: take an extra reference
		 * that will remain until all the mappings of this pager
		 * are removed.
		 */
		device_object->is_mapped = TRUE;
		device_pager_reference(mem_obj);
	}
	device_pager_unlock(device_object);

	return KERN_SUCCESS;
}

/*
 * device_pager_last_unmap:
 *
 * Called when the last mapping of the memory object goes away:
 * drop the extra reference taken in device_pager_map().
 */
kern_return_t
device_pager_last_unmap(
	memory_object_t	mem_obj)
{
	device_pager_t	device_object;
	boolean_t	drop_ref;

	device_object = device_pager_lookup(mem_obj);

	device_pager_lock(device_object);
	assert(device_object->ref_count > 0);
	if (device_object->is_mapped) {
		device_object->is_mapped = FALSE;
		drop_ref = TRUE;
	} else {
		drop_ref = FALSE;
	}
	device_pager_unlock(device_object);

	if (drop_ref) {
		device_pager_deallocate(mem_obj);
	}

	return KERN_SUCCESS;
}
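
/*
 * Note the pairing: device_pager_map() takes the "mapped" reference
 * under the pager lock, and device_pager_last_unmap() clears
 * is_mapped before dropping it, so the mapping reference can never
 * be dropped twice even if map/unmap calls race.
 */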



/*
 * device_object_create:
 *
 * Allocate and initialize a new device pager structure with a
 * single reference.
 */
device_pager_t
device_object_create(void)
{
	device_pager_t	device_object;

	device_object = (struct device_pager *) zalloc(device_pager_zone);
	if (device_object == DEVICE_PAGER_NULL)
		return(DEVICE_PAGER_NULL);

	bzero(device_object, sizeof (*device_object));

	device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops;
	device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	device_pager_lock_init(device_object);
	device_object->ref_count = 1;
	device_object->is_mapped = FALSE;

	DTRACE_VM2(device_pager_create,
		   device_pager_t, device_object,
		   unsigned int, device_object->ref_count);

	return(device_object);
}

boolean_t
is_device_pager_ops(const struct memory_object_pager_ops *pager_ops)
{
	if (pager_ops == &device_pager_ops) {
		return TRUE;
	}
	return FALSE;
}