bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool              mapped = false;
    bool              needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64) return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt))
        && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
        vmmap = kernel_map;

        // Buffer shouldn't auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                    capacity, highestMask, alignment, contig);
        }
        else if (needZero
                  && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc,
                                                  capacity, alignment);
            if (_buffer)
            {
                IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
                OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
            }
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask)))
    {
        vm_size_t size = round_page(capacity);

        // initWithOptions will create memory entry
        iomdOptions |= kIOMemoryPersistent;

        if (options & kIOMemoryPageable)
        {
#if IOALLOCDEBUG
            OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
            mapTask = inTask;
            if (NULL == inTask) inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                UInt8 dummyVar = *startAddr;
                (void) dummyVar;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved)
        {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere
                              | (options & kIOMapPrefault)
                              | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
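For context, a minimal caller sketch (not part of the listing above): drivers normally do not call initWithPhysicalMask() directly, but reach it through the public factory IOBufferMemoryDescriptor::inTaskWithPhysicalMask(), which passes alignment of 1 so the mask-derived alignment path above applies. The option bits, size, and mask below are illustrative values chosen for the example.

    // Hedged usage sketch: 64 KB of wired, physically contiguous memory whose
    // pages must lie below 4 GB and start 4 KB aligned (physicalMask 0xFFFFF000).
    IOBufferMemoryDescriptor * bmd =
        IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
            kernel_task,                                        // inTask
            kIODirectionInOut | kIOMemoryPhysicallyContiguous,  // options
            64 * 1024,                                          // capacity
            0x00000000FFFFF000ULL);                             // physicalMask
    if (bmd)
    {
        void * cpuPtr = bmd->getBytesNoCopy();  // kernel virtual address of _buffer
        // ... program the device, e.g. via an IODMACommand, using this descriptor ...
        bmd->release();                         // drops the wired allocation
    }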
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
    task_t            inTask,
    IOOptionBits      options,
    mach_vm_size_t    capacity,
    mach_vm_address_t alignment,
    mach_vm_address_t physicalMask)
{
    kern_return_t     kr;
    task_t            mapTask = NULL;
    vm_map_t          vmmap = NULL;
    mach_vm_address_t highestMask = 0;
    IOOptionBits      iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool              mapped = false;
    bool              needZero;

    if (!capacity) return false;

    _options          = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer           = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64) return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    // make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
        IOMapper::checkForSystemMapper();
        mapped = (0 != IOMapper::gSystem);
    }
    needZero = mapped;

    if (physicalMask && (alignment <= 1))
    {
        alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
        highestMask = (physicalMask | alignment);
        alignment++;
        if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt))
        && (alignment < page_size))
        alignment = page_size;

    if (alignment >= page_size)
        capacity = round_page(capacity);

    if (alignment > page_size)
        options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
        if (highestMask <= 0xFFFFFFFF)
            mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
        else
            mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
        highestMask = 0;
    }

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
        case kIOMapInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
            break;

        case kIOMapWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
            break;

        case kIOMapWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
            break;

        case kIOMapCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
            break;

        case kIOMapCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
            break;

        case kIOMapDefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
            break;
    }

    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;

        // must create the entry before any pages are allocated

        // set flags for entry + object create
        memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

        if (options & kIOMemoryPurgeable)
            memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
        memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
        vmmap = kernel_map;

        // Buffer shouldn't auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */

        bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

        if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
        {
            contig |= (!mapped);
            contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
            // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
            contig |= true;
#endif
        }

        if (contig || highestMask || (alignment > page_size))
        {
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
                                    capacity, highestMask, alignment, contig);
        }
        else if (needZero
                  && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
        {
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(capacity, alignment);
        }
        else if (alignment > 1)
        {
            _buffer = IOMallocAligned(capacity, alignment);
        }
        else
        {
            _buffer = IOMalloc(capacity);
        }
        if (!_buffer)
        {
            return false;
        }
        if (needZero) bzero(_buffer, capacity);
    }

    if ((options & (kIOMemoryPageable | kIOMapCacheMask)))
    {
        ipc_port_t sharedMem;
        vm_size_t  size = round_page(capacity);

        kr = mach_make_memory_entry(vmmap,
                                    &size, (vm_offset_t)_buffer,
                                    memEntryCacheMode, &sharedMem,
                                    NULL);

        if ((KERN_SUCCESS == kr) && (size != round_page(capacity)))
        {
            ipc_port_release_send(sharedMem);
            kr = kIOReturnVMError;
        }
        if (KERN_SUCCESS != kr)
            return (false);

        _memEntry = (void *) sharedMem;

        if (options & kIOMemoryPageable)
        {
#if IOALLOCDEBUG
            debug_iomallocpageable_size += size;
#endif
            mapTask = inTask;
            if (NULL == inTask)
                inTask = kernel_task;
        }
        else if (options & kIOMapCacheMask)
        {
            // Prefetch each page to put entries into the pmap
            volatile UInt8 * startAddr = (UInt8 *)_buffer;
            volatile UInt8 * endAddr   = (UInt8 *)_buffer + capacity;

            while (startAddr < endAddr)
            {
                *startAddr;
                startAddr += page_size;
            }
        }
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
                                                &mapSpec, sizeof(mapSpec)))
        return false;

    if (mapTask)
    {
        if (!reserved)
        {
            reserved = IONew(ExpansionData, 1);
            if (!reserved)
                return (false);
        }
        reserved->map = createMappingInTask(mapTask, 0,
                            kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
        if (!reserved->map)
        {
            _buffer = 0;
            return (false);
        }
        release();          // map took a retain on this
        reserved->map->retain();
        removeMapping(reserved->map);
        mach_vm_address_t buffer = reserved->map->getAddress();
        _buffer = (void *) buffer;
        if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
            _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
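Both versions derive an allocation alignment and an upper physical-address bound from physicalMask with the same bit arithmetic whenever the caller passes alignment <= 1. The short walk-through below is illustrative only; the mask value is an example, not taken from the listings.

    // Example: physicalMask = 0x00000000FFFFF000 (device accepts addresses
    // below 4 GB whose low 12 bits are zero), caller passed alignment <= 1.
    mach_vm_address_t physicalMask = 0x00000000FFFFF000ULL;
    mach_vm_address_t alignment    = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
                                     // = 0x0000000000000FFF  (the mask's trailing zero bits)
    mach_vm_address_t highestMask  = (physicalMask | alignment);
                                     // = 0x00000000FFFFFFFF  (highest acceptable address)
    alignment++;                     // = 0x1000: the buffer must be 4 KB aligned
    // Since highestMask <= 0xFFFFFFFF, mapSpec.numAddressBits becomes
    // 32 - __builtin_clz(0xFFFFFFFF) = 32 when a system mapper is present.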