/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define _IOMEMORYDESCRIPTOR_INTERNAL_

#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
#include <mach/mach_vm.h>

#include "IOKitKernelInternal.h"

#ifdef IOALLOCDEBUG
#include <libkern/c++/OSCPPDebug.h>
#endif
#include <IOKit/IOStatisticsPrivate.h>

#if IOKITSTATS
#define IOStatisticsAlloc(type, size) \
do { \
    IOStatistics::countAlloc(type, size); \
} while (0)
#else
#define IOStatisticsAlloc(type, size)
#endif /* IOKITSTATS */


__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

enum
{
    kInternalFlagPhysical      = 0x00000001, // backed by IOKernelAllocateWithPhysicalRestrict
    kInternalFlagPageSized     = 0x00000002, // capacity was rounded up to a whole page
    kInternalFlagPageAllocated = 0x00000004, // backed by the gIOBMDPageAllocator pool
    kInternalFlagInit          = 0x00000008  // initialization completed
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                              IOGeneralMemoryDescriptor);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

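/*
 * Page-supply callback for the IOKit page allocator (iopa): grabs one
 * zero-filled page from kernel_map, or returns 0 on failure.
 */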
static uintptr_t IOBMDPageProc(iopa_t * a)
{
    kern_return_t kr;
    vm_address_t  vmaddr  = 0;
    int           options = 0; // KMA_LOMEM;

    kr = kernel_memory_allocate(kernel_map, &vmaddr,
                                page_size, 0, options, VM_KERN_MEMORY_IOKIT);

    if (KERN_SUCCESS != kr) vmaddr = 0;
    else                    bzero((void *) vmaddr, page_size);

    return ((uintptr_t) vmaddr);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t       inTask)
{
    mach_vm_address_t physicalMask = 0;
    return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask));
}
#endif /* !__LP64__ */

bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t                mapTask = NULL;
    vm_map_t              vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits          iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
        return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
        vmmap = kernel_map;

        // Buffers shouldn't auto-prepare; they should be prepared explicitly.
        // That was never enforced, though, so keep the historical behavior.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                   capacity, highestMask, alignment, contig);
        }
        else if (needZero
                 && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero = false;
            _buffer  = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
            if (_buffer)
            {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
            }
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask))) {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        iomdOptions |= kIOMemoryPersistent;

        if (options & kIOMemoryPageable) {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    _internalFlags |= kInternalFlagInit;
#if IOTRACKING
    if (!(options & kIOMemoryPageable)) trackingAccumSize(capacity);
#endif /* IOTRACKING */

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved) {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
            kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
    task_t       inTask,
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}
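
/*
 * Usage sketch (illustrative, not part of this file): a kernel client
 * allocating a page-aligned, kernel/user-shareable buffer. Error handling
 * and the surrounding driver context are assumed.
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::inTaskWithOptions(
 *           kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared,
 *           4096, page_size);
 *   if (bmd)
 *   {
 *       void * p = bmd->getBytesNoCopy();   // wired, zero-filled storage
 *       // ... use p ...
 *       bmd->release();
 *   }
 */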

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t physicalMask)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask))
    {
        me->release();
        me = 0;
    }
    return me;
}
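
/*
 * Usage sketch (illustrative): constraining the buffer for a device that
 * can only address 32 bits of physical memory, per the mask convention
 * documented in IOBufferMemoryDescriptor.h.
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
 *           kernel_task, kIODirectionOutIn | kIOMemoryPhysicallyContiguous,
 *           65536, 0x00000000FFFFFFFFULL);
 */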

#ifndef __LP64__
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0));
}
#endif /* !__LP64__ */

IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) {
        me->release();
        me = 0;
    }
    return me;
}


/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes. The descriptor's length is initially set to the capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return (IOBufferMemoryDescriptor::withOptions(
                inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                inCapacity, inContiguous ? inCapacity : 1));
}
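
/*
 * Usage sketch (illustrative): a physically contiguous 16KB scratch buffer,
 * emptied so it can then be filled incrementally with appendBytes().
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::withCapacity(16384, kIODirectionOutIn, true);
 *   if (bmd) bmd->setLength(0);
 */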

#ifndef __LP64__
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared
                              | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                              inLength, inLength, (mach_vm_address_t)0))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
#endif /* !__LP64__ */

/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithPhysicalMask(
                  kernel_task, inDirection | kIOMemoryUnshared
                  | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
                  inLength, inLength, 0))
    {
        me->release();
        me = 0;
    }

    if (me)
    {
        // start out with no data
        me->setLength(0);

        if (!me->appendBytes(inBytes, inLength))
        {
            me->release();
            me = 0;
        }
    }
    return me;
}
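
/*
 * Usage sketch (illustrative): wrapping a small command template; the bytes
 * are copied, so the source may live on the caller's stack.
 *
 *   static const UInt8 cmd[4] = { 0x12, 0x00, 0x00, 0x24 };
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::withBytes(cmd, sizeof(cmd),
 *                                           kIODirectionOut, false);
 */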

/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options       = _options;
    vm_size_t        size          = _capacity;
    void *           buffer        = _buffer;
    IOMemoryMap *    map           = 0;
    IOAddressRange * range         = _ranges.v64;
    vm_offset_t      alignment     = _alignment;

    if (alignment >= page_size)
        size = round_page(size);

    if (reserved)
    {
        map = reserved->map;
        IODelete(reserved, ExpansionData, 1);
        if (map)
            map->release();
    }

    if ((options & kIOMemoryPageable)
        || (kInternalFlagPageSized & internalFlags)) size = round_page(size);

#if IOTRACKING
    if (!(options & kIOMemoryPageable)
        && buffer
        && (kInternalFlagInit & _internalFlags)) trackingAccumSize(-size);
#endif /* IOTRACKING */

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
        OSAddAtomicLong(-size, &debug_iomallocpageable_size);
#endif
    }
    else if (buffer)
    {
        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
        }
        else if (kInternalFlagPageAllocated & internalFlags)
        {
            uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
            if (page)
            {
                kmem_free(kernel_map, page, page_size);
            }
#if IOALLOCDEBUG
            OSAddAtomic(-size, &debug_iomalloc_size);
#endif
            IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
        }
        else if (alignment > 1)
        {
            IOFreeAligned(buffer, size);
        }
        else
        {
            IOFree(buffer, size);
        }
    }
    if (range && (kIOMemoryAsReference & flags))
        IODelete(range, IOAddressRange, 1);
}

/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}

/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor. When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity. The length can be adjusted via setLength for a shorter
 * transfer (there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes). Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);
    if (length > _capacity) return;

    _length = length;
    _ranges.v64->length = length;
}
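
/*
 * Usage sketch (illustrative): reusing one descriptor for transfers of
 * varying size instead of allocating a new one per I/O.
 *
 *   bmd->setLength(512);    // first transfer moves 512 bytes
 *   // ... perform I/O ...
 *   bmd->setLength(4096);   // larger transfer, still within capacity
 */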

/*
 * setDirection:
 *
 * Change the direction of the transfer. This method allows one to redirect
 * the descriptor's transfer direction. This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _flags = (_flags & ~kIOMemoryDirectionMask) | direction;
#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
}
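
/*
 * Usage sketch (illustrative): flipping the same buffer from a device write
 * to a device read between I/Os.
 *
 *   bmd->setDirection(kIODirectionIn);
 */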

/*
 * appendBytes:
 *
 * Add some data to the end of the buffer. This method automatically
 * maintains the memory descriptor buffer length. Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t   actualBytesToCopy = min(withLength, _capacity - _length);
    IOByteCount offset;

    assert(_length <= _capacity);

    offset   = _length;
    _length += actualBytesToCopy;
    _ranges.v64->length += actualBytesToCopy;

    if (_task == kernel_task)
        bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset),
              actualBytesToCopy);
    else
        writeBytes(offset, bytes, actualBytesToCopy);

    return true;
}
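
/*
 * Usage sketch (illustrative; header/payload are placeholder variables):
 * building up a buffer incrementally. The length tracks the appended bytes,
 * and copies are clipped at the capacity.
 *
 *   bmd->setLength(0);
 *   bmd->appendBytes(&header, sizeof(header));
 *   bmd->appendBytes(payload, payloadLen);
 */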

/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        return _buffer;
    else
        return (void *)_ranges.v64->address;
}


/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    IOVirtualAddress address;

    if ((start + withLength) < start) return 0;

    if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask))
        address = (IOVirtualAddress) _buffer;
    else
        address = _ranges.v64->address;

    if (start < _length && (start + withLength) <= _length)
        return (void *)(address + start);
    return 0;
}
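
/*
 * Usage sketch (illustrative; localBuf is a placeholder): bounds-checked
 * access to a sub-range; returns 0 if the range exceeds the current length.
 *
 *   if (void * p = bmd->getBytesNoCopy(64, 128))
 *       bcopy(p, localBuf, 128);
 */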

#ifndef __LP64__
void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount   offset,
                                                   IOByteCount * lengthOfSegment)
{
    void * bytes = getBytesNoCopy(offset, 0);

    if (bytes && lengthOfSegment)
        *lengthOfSegment = _length - offset;

    return bytes;
}
#endif /* !__LP64__ */

#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 1);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);