/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)        ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)

enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};


#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                    \
    ((kIODirectionNone == (direction))                                    \
        || (kWalkSyncAlways & (op))                                       \
        || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)     \
            & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif

/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand, 0);
OSMetaClassDefineReservedUsed(IODMACommand, 1);
OSMetaClassDefineReservedUsed(IODMACommand, 2);
OSMetaClassDefineReservedUsed(IODMACommand, 3);
OSMetaClassDefineReservedUsed(IODMACommand, 4);
OSMetaClassDefineReservedUsed(IODMACommand, 5);
OSMetaClassDefineReservedUsed(IODMACommand, 6);
OSMetaClassDefineReservedUnused(IODMACommand, 7);
OSMetaClassDefineReservedUnused(IODMACommand, 8);
OSMetaClassDefineReservedUnused(IODMACommand, 9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

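/*
 * Typical call sequence: create a command with withSpecification(), attach
 * memory with setMemoryDescriptor(), prepare(), emit hardware segments via
 * genIOVMSegments(), then complete() and clearMemoryDescriptor().
 *
 * withRefCon() allocates a command carrying only a refCon; the caller must
 * supply an I/O specification later, via setSpecification() or
 * prepareWithSpecification(). Returns NULL on failure.
 */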
IODMACommand *
IODMACommand::withRefCon(void * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithRefCon(refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction        outSegFunc,
                                const SegmentOptions * segmentOptions,
                                uint32_t               mappingOptions,
                                IOMapper             * mapper,
                                void                 * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
                                         mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment, mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

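/*
 * cloneCommand() snapshots this command's current specification (output
 * function, address bits, size limits, alignments, mapping options and
 * mapper) into a newly allocated IODMACommand.
 */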
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = (uint8_t)fNumAddressBits,
        .fMaxSegmentSize            = fMaxSegmentSize,
        .fMaxTransferSize           = fMaxTransferSize,
        .fAlignment                 = fAlignMask + 1,
        // Each alignment must be rebuilt from its own mask, mirroring the
        // assignments made in setSpecification().
        .fAlignmentLength           = fAlignMaskLength + 1,
        .fAlignmentInternalSegments = fAlignMaskInternalSegments + 1
    };

    return (IODMACommand::withSpecification(fOutSeg, &segmentOptions,
                                            fMappingOptions, fMapper, refCon));
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

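/*
 * initWithRefCon() runs the base-class init and allocates and zeroes the
 * IODMACommandInternal state block; an already-allocated block is reused,
 * so re-initializing a recycled command is safe.
 */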
bool
IODMACommand::initWithRefCon(void * refCon)
{
    if (!super::init()) return (false);

    if (!reserved)
    {
        reserved = IONew(IODMACommandInternal, 1);
        if (!reserved) return false;
    }
    bzero(reserved, sizeof(IODMACommandInternal));
    fRefCon = refCon;

    return (true);
}

bool
IODMACommand::initWithSpecification(SegmentFunction        outSegFunc,
                                    const SegmentOptions * segmentOptions,
                                    uint32_t               mappingOptions,
                                    IOMapper             * mapper,
                                    void                 * refCon)
{
    if (!initWithRefCon(refCon)) return false;

    if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
                                             mappingOptions, mapper)) return false;

    return (true);
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return (initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon));
}

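/*
 * setSpecification() validates and records the DMA constraints. A 32-bit
 * output function caps fNumAddressBits at 32; zero size limits mean
 * unlimited; a non-IOMapper object passed as the mapper is treated as the
 * device, and the system mapper is substituted for mapped types when none
 * is supplied.
 */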
IOReturn
IODMACommand::setSpecification(SegmentFunction        outSegFunc,
                               const SegmentOptions * segmentOptions,
                               uint32_t               mappingOptions,
                               IOMapper             * mapper)
{
    IOService * device = 0;
    UInt8       numAddressBits;
    UInt64      maxSegmentSize;
    UInt64      maxTransferSize;
    UInt32      alignment;

    bool        is32Bit;

    if (!outSegFunc || !segmentOptions) return (kIOReturnBadArgument);

    is32Bit = ((OutputHost32 == outSegFunc)
            || (OutputBig32 == outSegFunc)
            || (OutputLittle32 == outSegFunc));

    numAddressBits  = segmentOptions->fNumAddressBits;
    maxSegmentSize  = segmentOptions->fMaxSegmentSize;
    maxTransferSize = segmentOptions->fMaxTransferSize;
    alignment       = segmentOptions->fAlignment;
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return (kIOReturnBadArgument);      // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT)) return (kIOReturnBadArgument);

    if (!maxSegmentSize)  maxSegmentSize--;     // Set Max segment to -1
    if (!maxTransferSize) maxTransferSize--;    // Set Max transfer to -1

    if (mapper && !OSDynamicCast(IOMapper, mapper))
    {
        device = mapper;
        mapper = 0;
    }
    if (!mapper && (kUnmapped != MAPTYPE(mappingOptions)))
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment) alignment = 1;
    fAlignMask = alignment - 1;

    alignment = segmentOptions->fAlignmentLength;
    if (!alignment) alignment = 1;
    fAlignMaskLength = alignment - 1;

    alignment = segmentOptions->fAlignmentInternalSegments;
    if (!alignment) alignment = (fAlignMask + 1);
    fAlignMaskInternalSegments = alignment - 1;

    switch (MAPTYPE(mappingOptions))
    {
        case kMapped:      break;
        case kUnmapped:    break;
        case kNonCoherent: break;

        case kBypassed:
            if (!mapper) break;
            return (kIOReturnBadArgument);

        default:
            return (kIOReturnBadArgument);
    }

    if (mapper != fMapper)
    {
        if (mapper)  mapper->retain();
        if (fMapper) fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
    fInternalState->fDevice = device;

    return (kIOReturnSuccess);
}

void
IODMACommand::free()
{
    if (reserved) IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper) fMapper->release();

    super::free();
}

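/*
 * setMemoryDescriptor() retargets the command at a memory descriptor.
 * Passing the current descriptor with autoPrepare false drains outstanding
 * prepares; a different descriptor replaces the old one (kIOReturnBusy if
 * still active) and is optionally auto-prepared.
 */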
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // We are almost certainly being called from a work-loop thread, so
        // if fActive is set this is not a good time to potentially block;
        // just test for it and return an error.
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
                                       &fMDSummary, sizeof(fMDSummary));
        if (err)
            return err;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper)
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;
        fInternalState->fSetActiveNoMapper = (!fMapper);
        if (fInternalState->fSetActiveNoMapper) mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete) return (kIOReturnNotReady);

    if (fMemory)
    {
        while (fActive) complete();
        if (fInternalState->fSetActiveNoMapper) fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}

IOMemoryDescriptor *
IODMACommand::getIOMemoryDescriptor() const
{
    IOMemoryDescriptor * mem;

    mem = reserved->fCopyMD;
    if (!mem) mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);

    return (mem);
}

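/*
 * segmentOp() is the walker callback used by walkAll(). Depending on the op
 * bits it counts the bounce pages a segment needs (kWalkPreflight), assigns
 * offsets to the allocated copy pages (kWalkPrepare), and moves data between
 * the original pages and the bounce pages with copypv() (kWalkSyncIn /
 * kWalkSyncOut).
 */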
IOReturn
IODMACommand::segmentOp(
    void         *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;
    uint32_t     mask;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper))
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(length);

    if (!state->fMisaligned)
    {
        mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
        state->fMisaligned |= (0 != (mask & address));
        if (state->fMisaligned) DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
    }
    if (!state->fMisaligned)
    {
        mask = target->fAlignMaskLength;
        state->fMisaligned |= (0 != (mask & length));
        if (state->fMisaligned) DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        vm_page_t lastPage;
        lastPage = NULL;
        if (kWalkPrepare & op)
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
                    addr64_t cpuAddr = address;
                    addr64_t remapAddr;
                    uint64_t chunk;

                    if ((kMapped == MAPTYPE(target->fMappingOptions))
                        && target->fMapper)
                    {
                        cpuAddr = target->fMapper->mapToPhysicalAddress(address);
                    }

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length)
                        chunk = length;

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                         (kWalkSyncIn & op) ? "->" : "<-",
                         address, chunk, op);

                    if (kWalkSyncIn & op)
                    { // cppvNoModSnk
                        copypv(remapAddr, cpuAddr, chunk,
                               cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    else
                    {
                        copypv(cpuAddr, remapAddr, chunk,
                               cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}

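/*
 * createCopyBuffer() allocates the double-buffer ("bounce") memory used when
 * pages cannot be remapped individually; the physical mask constrains the
 * allocation to page-aligned addresses below 4GB.
 */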
IOBufferMemoryDescriptor *
IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
{
    mach_vm_address_t mask = 0xFFFFF000;    // state->fSourceAlignMask
    return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                                             direction, length, mask));
}

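/*
 * walkAll() drives segmentOp() over the prepared range. Preflight counts the
 * pages that must be bounced, then either allocates a low-memory page list
 * or falls back to a contiguous copy buffer; sync ops move data between the
 * client memory and the bounce pages; complete frees them.
 */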
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op)
    {
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
        state->fForceDoubleBuffer = false;
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!fMapper && !state->fDoubleBuffer)
            {
                kern_return_t kr;

                if (fMapper) panic("fMapper copying");

                kr = vm_page_alloc_list(state->fCopyPageCount,
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
                {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !alloc IOBMD");
                    return (kIOReturnNoResources);
                }
            }
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}

uint32_t
IODMACommand::getAlignmentLength(void)
{
    return (fAlignMaskLength + 1);
}

uint32_t
IODMACommand::getAlignmentInternalSegments(void)
{
    return (fAlignMaskInternalSegments + 1);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction        outSegFunc,
                                       const SegmentOptions * segmentOptions,
                                       uint32_t               mappingOptions,
                                       IOMapper             * mapper,
                                       UInt64                 offset,
                                       UInt64                 length,
                                       bool                   flushCache,
                                       bool                   synchronize)
{
    IOReturn ret;

    if (fActive) return kIOReturnNotPermitted;

    ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
    if (kIOReturnSuccess != ret) return (ret);

    ret = prepare(offset, length, flushCache, synchronize);

    return (ret);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt32          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return (prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
                                     offset, length, flushCache, synchronize));
}

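/*
 * prepare() readies (offset, length) of the descriptor for DMA; nested
 * prepares must repeat the first range. The first prepare decides whether
 * segments can be generated directly (fCursor) or need a preflight walk,
 * flushes caches for noncoherent mappings, and reserves IOVM space from the
 * mapper when one is set.
 */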
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    uint32_t               mappingOptions = fMappingOptions;

    // check specification has been set
    if (!fOutSeg) return (kIOReturnNotReady);

    if (!length) length = fMDSummary.fLength;

    if (length > fMaxTransferSize) return kIOReturnNoSpace;

    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
         || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        if (fAlignMaskLength & length) return (kIOReturnNotAligned);

        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig     = false;
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = 0;
        state->fLocalMapperAlloc       = 0;
        state->fLocalMapperAllocValid  = false;
        state->fLocalMapperAllocLength = 0;

        state->fSourceAlignMask = fAlignMask;
        if (fMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                      || (!state->fCheckAddressing
                          && (!state->fSourceAlignMask
                              || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }

        if (IS_NONCOHERENT(mappingOptions) && flushCache)
        {
            if (state->fCopyMD)
            {
                state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
            }
            else
            {
                IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
                md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
            }
        }

        if (fMapper)
        {
            IOMDDMAMapArgs mapArgs;
            bzero(&mapArgs, sizeof(mapArgs));
            mapArgs.fMapper  = fMapper;
            mapArgs.fCommand = this;
            mapArgs.fMapSpec.device         = state->fDevice;
            mapArgs.fMapSpec.alignment      = fAlignMask + 1;
            mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
            mapArgs.fLength = state->fPreparedLength;
            const IOMemoryDescriptor * md = state->fCopyMD;
            if (md) { mapArgs.fOffset = 0; } else
            {
                md = fMemory;
                mapArgs.fOffset = state->fPreparedOffset;
            }
            ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
            //IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);

            if (kIOReturnSuccess == ret)
            {
                state->fLocalMapperAlloc       = mapArgs.fAlloc;
                state->fLocalMapperAllocValid  = true;
                state->fLocalMapperAllocLength = mapArgs.fAllocLength;
                state->fMapContig = mapArgs.fMapContig;
            }
            if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess;
        }
        if (kIOReturnSuccess == ret) state->fPrepared = true;
    }
    return ret;
}

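/*
 * complete() undoes the matching prepare(). The final completion optionally
 * invalidates caches for noncoherent mappings, syncs any bounce buffer back,
 * releases the mapper's IOVM allocation, and drops the copy descriptor.
 */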
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOMemoryDescriptor   * copyMD;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        copyMD = state->fCopyMD;
        if (copyMD) copyMD->retain();

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            if (copyMD)
            {
                copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
            }
            else
            {
                IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
                md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
            }
        }

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }

        if (state->fLocalMapperAllocValid)
        {
            IOMDDMAMapArgs mapArgs;
            bzero(&mapArgs, sizeof(mapArgs));
            mapArgs.fMapper  = fMapper;
            mapArgs.fCommand = this;
            mapArgs.fAlloc       = state->fLocalMapperAlloc;
            mapArgs.fAllocLength = state->fLocalMapperAllocLength;
            const IOMemoryDescriptor * md = copyMD;
            if (md) { mapArgs.fOffset = 0; }
            else
            {
                md = fMemory;
                mapArgs.fOffset = state->fPreparedOffset;
            }

            ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));

            state->fLocalMapperAlloc       = 0;
            state->fLocalMapperAllocValid  = false;
            state->fLocalMapperAllocLength = 0;
        }
        if (copyMD) copyMD->release();
        state->fPrepared = false;
    }

    return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}

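/*
 * synchronize() copies data between client memory and any bounce buffer
 * without ending the transfer. kForceDoubleBuffer completes and re-prepares
 * the command with double buffering forced on; otherwise the direction bits
 * select a sync-in or sync-out walk.
 */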
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer) return kIOReturnSuccess;
        ret = complete(false /* invalidateCache */, true /* synchronize */);
        state->fCursor = false;
        state->fForceDoubleBuffer = true;
        ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);

        return (ret);
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}

struct IODMACommandTransferContext
{
    void * buffer;
    UInt64 bufferOffset;
    UInt64 remaining;
    UInt32 op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

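/*
 * transferSegment() backs readBytes()/writeBytes(): it resolves each IOVM
 * address to a physical page (through the mapper when mapped) and copies to
 * or from the caller's buffer with copypv().
 */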
IOReturn
IODMACommand::transferSegment(void         *reference,
                              IODMACommand *target,
                              Segment64     segment,
                              void         *segments,
                              UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                       cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                       cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length -= copyLen;
        context->bufferOffset += copyLen;
    }

    // kIOReturnOverrun deliberately stops the segment walk once the
    // requested byte count has been transferred.
    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}

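/*
 * transfer() clips the request to the prepared range, then walks the
 * command's segments with transferSegment(); it returns the number of bytes
 * actually moved.
 */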
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}

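/*
 * The internal genIOVMSegments() walks the memory descriptor (or its copy
 * descriptor), coalescing physically contiguous ranges, substituting bounce
 * pages for addresses above the device's limit, and trimming segments to
 * honor fMaxSegmentSize and the alignment masks, before passing each
 * finished Segment64 to outSegFunc.
 */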
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *)(void *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset = 0;
        internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD = false;
        mdOp = kIOMDFirstSegment;
        if (fMapper)
        {
            if (internalState->fLocalMapperAllocValid)
            {
                state->fMapped     = kIOMDDMAWalkMappedLocal;
                state->fMappedBase = internalState->fLocalMapperAlloc;
            }
            else state->fMapped = true;
        }
    }

    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    bool      curSegValid = false;
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while (internalState->fIOVMAddrValid || (state->fOffset < memLength))
    {
        // state = next seg
        if (!internalState->fIOVMAddrValid) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && internalState->fLocalMapperAllocValid)
            {
                state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
#if 0
                {
                    uint64_t checkOffset;
                    IOPhysicalLength segLen;
                    for (checkOffset = 0; checkOffset < state->fLength; )
                    {
                        addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
                        if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
                        {
                            panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
                                  state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
                                  phys, checkOffset);
                        }
                        checkOffset += page_size - (phys & page_mask);
                    }
                }
#endif
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess)
            {
                internalState->fIOVMAddrValid = true;
                assert(state->fLength);
                if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
                    UInt64 length = state->fLength;
                    offset         += length;
                    curSeg.fLength += length;
                    internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
                }
            }
            else if (rtn == kIOReturnOverrun)
                internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
        }

        // seg = state, offset = end of seg
        if (!curSegValid)
        {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr;
            curSeg.fLength   = length;
            curSegValid      = true;
            internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
        }

        if (!internalState->fIOVMAddrValid)
        {
            // maxPhys
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
                if (internalState->fCursor)
                {
                    curSegValid = curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                }
                else if (curSeg.fIOVMAddr <= maxPhys)
                {
                    UInt64 remain, newLength;

                    newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    internalState->fIOVMAddrValid = true;
                    curSeg.fLength = newLength;
                    state->fLength = remain;
                    offset        -= remain;
                }
                else
                {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
                    }
                    else for (remap = internalState->fCopyPageAlloc;
                              remap && (addrPage != vm_page_get_offset(remap));
                              remap = vm_page_get_next(remap))
                    {
                    }

                    if (!remap) panic("no remap page found");

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                                     + (addr & PAGE_MASK);
                    curSegValid = true;
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength)
                    {
                        remain = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        internalState->fIOVMAddrValid = true;
                        curSeg.fLength = newLength;
                        state->fLength = remain;
                        offset        -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            // reduce size of output segment
            uint64_t reduce, leftover = 0;

            // fMaxSegmentSize
            if (curSeg.fLength > fMaxSegmentSize)
            {
                leftover        += curSeg.fLength - fMaxSegmentSize;
                curSeg.fLength   = fMaxSegmentSize;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            // alignment current length

            reduce = (curSeg.fLength & fAlignMaskLength);
            if (reduce && (curSeg.fLength > reduce))
            {
                leftover        += reduce;
                curSeg.fLength  -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            // alignment next address

            reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
            if (reduce && (curSeg.fLength > reduce))
            {
                leftover        += reduce;
                curSeg.fLength  -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
                internalState->fIOVMAddrValid = true;
            }

            if (leftover)
            {
                DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
                     leftover, offset,
                     curSeg.fIOVMAddr, curSeg.fLength);
                state->fLength = leftover;
                offset        -= leftover;
            }

            //

            if (internalState->fCursor)
            {
                bool     misaligned;
                uint32_t mask;

                mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
                misaligned = (0 != (mask & curSeg.fIOVMAddr));
                if (!misaligned)
                {
                    mask = fAlignMaskLength;
                    misaligned |= (0 != (mask & curSeg.fLength));
                }
                if (misaligned)
                {
                    if (misaligned) DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    curSegValid = curSeg.fIOVMAddr = 0;
                    ret = kIOReturnNotAligned;
                    break;
                }
            }

            if (offset >= memLength)
            {
                curSeg.fLength -= (offset - memLength);
                offset = memLength;
                internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }

        if (internalState->fIOVMAddrValid) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSegValid = curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSegValid) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
    void *reference, IODMACommand *target,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperAllocValid || !target->fMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}

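/*
 * The canned output functions below store each generated segment as a 32- or
 * 64-bit address/length pair in host, big-endian, or little-endian order;
 * drivers pass one of these (or their own SegmentFunction) to
 * withSpecification().
 */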
bool
IODMACommand::OutputHost32(IODMACommand *,
                           Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
                          Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
                             Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
                           Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
                          Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
                             Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}