Example #1
void
IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size)
{
    mach_vm_address_t allocationAddress;
    mach_vm_size_t    adjustedSize;

    if (!address)
	return;

    assert(size);

    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
    if (adjustedSize >= page_size) {

	kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
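	/* Small blocks were carved out of a kalloc allocation: the original
	 * kalloc size and base address are stored in the two words immediately
	 * preceding the returned address (written by
	 * IOKernelAllocateWithPhysicalRestrict, Example #8). */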

	adjustedSize = *((mach_vm_size_t *)
			(address - sizeof(mach_vm_address_t) - sizeof(mach_vm_size_t)));
	allocationAddress = *((mach_vm_address_t *)
			(address - sizeof(mach_vm_address_t) ));
	kfree((void *)allocationAddress, adjustedSize);
    }

    IOStatisticsAlloc(kIOStatisticsFreeContiguous, size);
#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif
}
Example #2
void IOFreeAligned(void * address, vm_size_t size)
{
    vm_address_t	allocationAddress;
    vm_size_t	adjustedSize;

    if( !address)
	return;

    assert(size);

    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);
    if (adjustedSize >= page_size) {

        kmem_free( kernel_map, (vm_offset_t) address, size);

    } else {
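        /* Sub-page requests were satisfied from a padded allocation whose real
         * size and base address sit in the two words just before the returned
         * pointer (written by IOMallocAligned, Example #9); that padded block
         * may itself have come from kernel_memory_allocate or kalloc, hence
         * the second size check below. */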
      	adjustedSize = *((vm_size_t *)( (vm_address_t) address
                                - sizeof(vm_address_t) - sizeof(vm_size_t)));
        allocationAddress = *((vm_address_t *)( (vm_address_t) address
				- sizeof(vm_address_t) ));

	if (adjustedSize >= page_size)
	    kmem_free( kernel_map, allocationAddress, adjustedSize);
	else
	  kfree((void *)allocationAddress, adjustedSize);
    }

#if IOALLOCDEBUG
    debug_iomalloc_size -= size;
#endif

    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
}
Example #3
static uintptr_t 
iopa_alloc(vm_size_t bytes, uint32_t balign)
{
    static const uint64_t align_masks[] = {
	0xFFFFFFFFFFFFFFFF,
	0xAAAAAAAAAAAAAAAA,
	0x8888888888888888,
	0x8080808080808080,
	0x8000800080008000,
	0x8000000080000000,
	0x8000000000000000,
    };
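    /* Each mask marks the chunk indices at which an allocation of the
     * corresponding power-of-two alignment may start within a page's 64-bit
     * availability bitmap (index 0 = any chunk, last index = a single
     * whole-page slot).  This assumes kIOPageAllocChunkBytes splits a page
     * into exactly 64 chunks. */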
    io_pagealloc_t * pa;
    uintptr_t        addr = 0;
    uint32_t         count;
    uint64_t         align;

    if (!bytes) bytes = 1;
    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];

    IOSimpleLockLock(gIOPageAllocLock);
    pa = (typeof(pa)) queue_first(&gIOPageAllocList);
    while (!queue_end(&gIOPageAllocList, &pa->link))
    {
	addr = iopa_allocinpage(pa, count, align);
	if (addr)
	{
	    gIOPageAllocBytes += bytes;
	    break;
	}
	pa = (typeof(pa)) queue_next(&pa->link);
    }
    IOSimpleLockUnlock(gIOPageAllocLock);
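    /* No existing page had a suitable run of free chunks: allocate a fresh
     * page outside the lock, carve the request from it, and queue the page
     * for future allocations if space remains. */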
    if (!addr)
    {
        pa = iopa_allocpage();
	if (pa)
	{
	    addr = iopa_allocinpage(pa, count, align);
	    IOSimpleLockLock(gIOPageAllocLock);
	    if (pa->avail) enqueue_head(&gIOPageAllocList, &pa->link);
	    gIOPageAllocCount++;
	    if (addr) gIOPageAllocBytes += bytes;
	    IOSimpleLockUnlock(gIOPageAllocLock);
	}
    }

    if (addr)
    {
        assert((addr & ((1 << log2up(balign)) - 1)) == 0);
    	IOStatisticsAlloc(kIOStatisticsMallocAligned, bytes);
#if IOALLOCDEBUG
	debug_iomalloc_size += bytes;
#endif
    }

    return (addr);
}
Example #4
void IOFree(void * address, vm_size_t size)
{
    if (address) {
		kfree(address, size);
#if IOALLOCDEBUG
		debug_iomalloc_size -= size;
#endif
		IOStatisticsAlloc(kIOStatisticsFree, size);
    }
}
Example #5
File: IOLib.cpp  Project: Prajna/xnu
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
			   IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t	address = 0;

    if (size == 0)
	return 0;
    if (alignment == 0) 
	alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
	address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
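	/* Single-pass do { } while (false): error paths below simply break out. */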
	IOBufferMemoryDescriptor * bmd;
	mach_vm_address_t          physicalMask;
	vm_offset_t		   alignMask;

	alignMask = alignment - 1;
	physicalMask = (0xFFFFFFFF ^ alignMask);
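	/* Restrict the buffer to 32-bit physical addresses whose low bits
	 * satisfy the requested alignment. */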

	bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
		kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
	if (!bmd)
	    break;
	
	_IOMallocContiguousEntry *
	entry = IONew(_IOMallocContiguousEntry, 1);
	if (!entry)
	{
	    bmd->release();
	    break;
	}
	entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
	entry->md          = bmd;
	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_enter( &gIOMallocContiguousEntries, entry, 
		    _IOMallocContiguousEntry *, link );
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	address          = (mach_vm_address_t) entry->virtualAddr;
	*physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    if (address) {
	IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
    }

    return (void *) address;
}
Example #6
void * IOMalloc(vm_size_t size)
{
    void * address;

    address = (void *)kalloc(size);
    if ( address ) {
#if IOALLOCDEBUG
		debug_iomalloc_size += size;
#endif
		IOStatisticsAlloc(kIOStatisticsMalloc, size);
    }

    return address;
}
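A minimal usage sketch for the IOMalloc/IOFree pair shown above (hypothetical kext code, assuming <IOKit/IOLib.h> is available; MyRecord is an invented payload type). IOMalloc does not record the allocation size, so the caller must pass the same value back to IOFree:

#include <IOKit/IOLib.h>

struct MyRecord { int id; int value; };         /* assumed payload type */

static void my_alloc_example(void)
{
    struct MyRecord * rec = (struct MyRecord *) IOMalloc(sizeof(struct MyRecord));
    if (!rec)
        return;
    bzero(rec, sizeof(struct MyRecord));
    /* ... use rec ... */
    IOFree(rec, sizeof(struct MyRecord));       /* size must match the allocation */
}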
Example #7
static void 
iopa_free(uintptr_t addr, vm_size_t bytes)
{
    io_pagealloc_t * pa;
    uint32_t         count;
    uintptr_t        chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
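    /* The io_pagealloc_t bookkeeping header lives in the page's final chunk,
     * so ORing the address up to that chunk boundary recovers it. */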
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    chunk /= kIOPageAllocChunkBytes;
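    /* Set the bits for the freed chunks in the page's availability bitmap
     * (chunk i maps to bit 63 - i).  Once everything except the chunk holding
     * the io_pagealloc_t header is free (avail == ~1ULL, i.e. -2ULL), unlink
     * the page and hand it back to the kernel below. */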

    IOSimpleLockLock(gIOPageAllocLock);
    if (!pa->avail)
    {
	assert(!pa->link.next);
	enqueue_tail(&gIOPageAllocList, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
	gIOPageAllocCount--;
    }
    gIOPageAllocBytes -= bytes;
    IOSimpleLockUnlock(gIOPageAllocLock);
    if (pa) iopa_freepage(pa);

#if IOALLOCDEBUG
    debug_iomalloc_size -= bytes;
#endif
    IOStatisticsAlloc(kIOStatisticsFreeAligned, bytes);
}
Example #8
mach_vm_address_t
IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, 
			                mach_vm_size_t alignment, bool contiguous)
{
    kern_return_t	kr;
    mach_vm_address_t	address;
    mach_vm_address_t	allocationAddress;
    mach_vm_size_t	adjustedSize;
    mach_vm_address_t	alignMask;

    if (size == 0)
	return (0);
    if (alignment == 0) 
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = (2 * size) + sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t);
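    /* Reserve twice the request plus two header words: the doubling lets the
     * small-allocation path below shift the block off a page boundary, and the
     * header words record the kalloc size and base address that
     * IOKernelFreePhysical (Example #1) reads back. */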

    contiguous = (contiguous && (adjustedSize > page_size))
                   || (alignment > page_size);

    if (contiguous || maxPhys)
    {
        int options = 0;
	vm_offset_t virt;

	adjustedSize = size;
        contiguous = (contiguous && (adjustedSize > page_size))
                           || (alignment > page_size);

	if (!contiguous)
	{
	    if (maxPhys <= 0xFFFFFFFF)
	    {
		maxPhys = 0;
		options |= KMA_LOMEM;
	    }
	    else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage))
	    {
		maxPhys = 0;
	    }
	}
	if (contiguous || maxPhys)
	{
	    kr = kmem_alloc_contig(kernel_map, &virt, size,
				   alignMask, atop(maxPhys), atop(alignMask), 0);
	}
	else
	{
	    kr = kernel_memory_allocate(kernel_map, &virt,
					size, alignMask, options);
	}
	if (KERN_SUCCESS == kr)
	    address = virt;
	else
	    address = 0;
    }
    else
    {
	adjustedSize += alignMask;
        allocationAddress = (mach_vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {

            address = (allocationAddress + alignMask
                    + (sizeof(mach_vm_size_t) + sizeof(mach_vm_address_t)))
                    & (~alignMask);
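            /* If the aligned block would straddle a page boundary, push it to
             * the next page; the doubled adjustedSize guarantees it still fits. */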

            if (atop_32(address) != atop_32(address + size - 1))
                address = round_page(address);

            *((mach_vm_size_t *)(address - sizeof(mach_vm_size_t)
                            - sizeof(mach_vm_address_t))) = adjustedSize;
            *((mach_vm_address_t *)(address - sizeof(mach_vm_address_t)))
                            = allocationAddress;
	} else
	    address = 0;
    }

    if (address) {
	IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
#if IOALLOCDEBUG
	debug_iomalloc_size += size;
#endif
    }

    return (address);
}
Example #9
void * IOMallocAligned(vm_size_t size, vm_size_t alignment)
{
    kern_return_t	kr;
    vm_offset_t		address;
    vm_offset_t		allocationAddress;
    vm_size_t		adjustedSize;
    uintptr_t		alignMask;

    if (size == 0)
        return 0;
    if (alignment == 0) 
        alignment = 1;

    alignMask = alignment - 1;
    adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t);

    if (size > adjustedSize) {
	    address = 0;    /* overflow detected */
    }
    else if (adjustedSize >= page_size) {

        kr = kernel_memory_allocate(kernel_map, &address,
					size, alignMask, 0);
	if (KERN_SUCCESS != kr)
	    address = 0;

    } else {

	adjustedSize += alignMask;
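	/* Pad by the alignment so the block can be slid up to an aligned
	 * address; the real size and base address are stashed in the two words
	 * preceding the returned pointer and read back by IOFreeAligned. */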

	if (adjustedSize >= page_size) {

	    kr = kernel_memory_allocate(kernel_map, &allocationAddress,
					    adjustedSize, 0, 0);
	    if (KERN_SUCCESS != kr)
		allocationAddress = 0;

	} else
	    allocationAddress = (vm_address_t) kalloc(adjustedSize);

        if (allocationAddress) {
            address = (allocationAddress + alignMask
                    + (sizeof(vm_size_t) + sizeof(vm_address_t)))
                    & (~alignMask);

            *((vm_size_t *)(address - sizeof(vm_size_t) - sizeof(vm_address_t))) 
			    = adjustedSize;
            *((vm_address_t *)(address - sizeof(vm_address_t)))
                            = allocationAddress;
	} else
	    address = 0;
    }

    assert(0 == (address & alignMask));

    if (address) {
#if IOALLOCDEBUG
	debug_iomalloc_size += size;
#endif
	IOStatisticsAlloc(kIOStatisticsMallocAligned, size);
    }

    return (void *) address;
}
Example #10
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOMemoryMap *    map       = 0;
    IOAddressRange * range     = _ranges.v64;
    vm_offset_t      alignment = _alignment;

    if (alignment >= page_size)
	size = round_page(size);

    if (reserved)
    {
	map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
	if (map)
	    map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
	OSAddAtomicLong(-(round_page(size)), &debug_iomallocpageable_size);
#endif
    }
    else if (buffer)
    {
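	/* Release the backing store with whichever allocator matches the
	 * internal flags recorded when the buffer was created. */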
	if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
	}
	else if (kInternalFlagPageAllocated & internalFlags)
	{
	    uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
	    if (page)
	    {
		kmem_free(kernel_map, page, page_size);
	    }
#if IOALLOCDEBUG
		OSAddAtomic(-size, &debug_iomalloc_size);
#endif
	    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
	}
        else if (alignment > 1)
	{
            IOFreeAligned(buffer, size);
	}
        else
	{
            IOFree(buffer, size);
	}
    }
    if (range && (kIOMemoryAsReference & flags))
	IODelete(range, IOAddressRange, 1);
}
Example #11
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
				task_t		  inTask,
				IOOptionBits      options,
				mach_vm_size_t    capacity,
				mach_vm_address_t alignment,
				mach_vm_address_t physicalMask)
{
    task_t		  mapTask = NULL;
    vm_map_t 		  vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits	  iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity) return false;

    _options   	      = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer	      = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
	return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    //  make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
	IOMapper::checkForSystemMapper();
	mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1))
    {
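	/* Derive the implied alignment from the trailing zero bits of the
	 * physical mask, e.g. a mask of 0x...FFFFF000 implies 4096-byte alignment. */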
	alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
	highestMask = (physicalMask | alignment);
	alignment++;
	if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
	alignment = page_size;

    if (alignment >= page_size)
	capacity = round_page(capacity);

    if (alignment > page_size)
	options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
	return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
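    /* With a system mapper present, express the physical restriction as an
     * address-bit limit for the DMA mapper and drop the explicit mask. */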
    if (highestMask && mapped)
    {
	if (highestMask <= 0xFFFFFFFF)
	    mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
	else
	    mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
	highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
	iomdOptions |= kIOMemoryBufferPageable;
	if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
	vmmap = kernel_map;

	// Buffer shouldn't auto prepare they should be prepared explicitly
	// But it never was enforced so what are you going to do?
	iomdOptions |= kIOMemoryAutoPrepare;

	/* Allocate a wired-down buffer inside kernel space. */

	bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

	if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
	{
	    contig |= (!mapped);
	    contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
	    // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
	    contig |= true;
#endif
	}
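	/* Contiguous, physically restricted, or super-page-aligned buffers go
	 * through IOKernelAllocateWithPhysicalRestrict (Example #8); small
	 * zero-filled buffers come from the per-page chunk allocator; the rest
	 * fall back to IOMallocAligned or IOMalloc. */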

	if (contig || highestMask || (alignment > page_size))
	{
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
            				capacity, highestMask, alignment, contig);
	}
	else if (needZero
		  && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
	{
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
	    if (_buffer)
	    {
		IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
		OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
	    }
	}
	else if (alignment > 1)
	{
            _buffer = IOMallocAligned(capacity, alignment);
	}
	else
	{
            _buffer = IOMalloc(capacity);
	}
	if (!_buffer)
	{
            return false;
	}
	if (needZero) bzero(_buffer, capacity);
    }

    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
	vm_size_t	size = round_page(capacity);

	// initWithOptions will create memory entry
	iomdOptions |= kIOMemoryPersistent;

	if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
	    OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
	    mapTask = inTask;
	    if (NULL == inTask)
		inTask = kernel_task;
	}
	else if (options & kIOMapCacheMask)
	{
	    // Prefetch each page to put entries into the pmap
	    volatile UInt8 *	startAddr = (UInt8 *)_buffer;
	    volatile UInt8 *	endAddr   = (UInt8 *)_buffer + capacity;

	    while (startAddr < endAddr)
	    {
		UInt8 dummyVar = *startAddr;
		(void) dummyVar;
		startAddr += page_size;
 	    }
	}
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
				inTask, iomdOptions, /* System mapper */ 0))
	return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, 
    						&mapSpec, sizeof(mapSpec)))
	return false;

    if (mapTask)
    {
	if (!reserved) {
	    reserved = IONew( ExpansionData, 1 );
	    if( !reserved)
		return( false );
	}
	reserved->map = createMappingInTask(mapTask, 0, 
			    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
	if (!reserved->map)
	{
	    _buffer = 0;
	    return( false );
	}
	release();	    // map took a retain on this
	reserved->map->retain();
	removeMapping(reserved->map);
	mach_vm_address_t buffer = reserved->map->getAddress();
	_buffer = (void *) buffer;
	if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
	    _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
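A short usage sketch mirroring the inTaskWithPhysicalMask call from Example #5 (hypothetical driver code with error handling trimmed). The physical mask both bounds the physical address range and implies the alignment derived in initWithPhysicalMask above:

IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
        kernel_task, kIOMemoryPhysicallyContiguous, 4096,
        0x00000000FFFFF000ULL);                 /* 32-bit, 4 KB-aligned physical range */
if (bmd) {
    void *            cpuPtr  = bmd->getBytesNoCopy();
    IOPhysicalAddress devAddr = bmd->getPhysicalAddress();
    /* ... program the device with devAddr, access the buffer through cpuPtr ... */
    bmd->release();
}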