Example #1
bool AppleLVMGroup::resizeSet(UInt32 newMemberCount)
{
    UInt32 oldMemberCount = arMemberCount;

    UInt64 * oldBlockCounts = arMemberBlockCounts;
    arMemberBlockCounts = IONew(UInt64, newMemberCount);
    if (!arMemberBlockCounts) return false;		// IONew can fail; don't bzero a NULL pointer
    bzero(arMemberBlockCounts, sizeof(UInt64) * newMemberCount);
    if (oldBlockCounts) {
	// assumes newMemberCount >= oldMemberCount when copying the old contents forward
	bcopy(oldBlockCounts, arMemberBlockCounts, sizeof(UInt64) * oldMemberCount);
	IODelete(oldBlockCounts, UInt64, oldMemberCount);	// IODelete takes the element type, not sizeof()
    }

    UInt64 * oldStartingOffset = arMemberStartingOffset;
    arMemberStartingOffset = IONew(UInt64, newMemberCount);
    if (!arMemberStartingOffset) return false;
    bzero(arMemberStartingOffset, sizeof(UInt64) * newMemberCount);
    if (oldStartingOffset) {
	bcopy(oldStartingOffset, arMemberStartingOffset, sizeof(UInt64) * oldMemberCount);
	IODelete(oldStartingOffset, UInt64, oldMemberCount);
    }

    AppleLVMVolume ** oldMetaDataVolumes = arMetaDataVolumes;
    arMetaDataVolumes = IONew(AppleLVMVolume *, newMemberCount);
    if (!arMetaDataVolumes) return false;
    bzero(arMetaDataVolumes, sizeof(AppleLVMVolume *) * newMemberCount);
    if (oldMetaDataVolumes) {
	bcopy(oldMetaDataVolumes, arMetaDataVolumes, sizeof(AppleLVMVolume *) * oldMemberCount);
	IODelete(oldMetaDataVolumes, AppleLVMVolume *, oldMemberCount);
    }

    if (super::resizeSet(newMemberCount) == false) return false;

    if (oldMemberCount && arMemberCount > oldMemberCount) arExpectingLiveAdd += arMemberCount - oldMemberCount;

    return true;
}
Example #2
bool IOEventSource::init(OSObject *inOwner,
                         Action inAction)
{
    if (!inOwner)
        return false;

    owner = inOwner;

    if ( !super::init() )
        return false;

    (void) setAction(inAction);
    enabled = true;

    if(!reserved) {
        reserved = IONew(ExpansionData, 1);
        if (!reserved) {
            return false;
        }
    }

    IOStatisticsRegisterCounter();
		
    return true;
}
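
The lazily allocated ExpansionData above needs a matching IODelete on teardown. A minimal sketch of that counterpart, assuming the same reserved/ExpansionData names and the usual "super" typedef from the OSDefineMetaClassAndStructors boilerplate (the shipping free() may do more, e.g. statistics teardown):

void IOEventSource::free()
{
    if (reserved) {
        IODelete(reserved, ExpansionData, 1);   // must match the IONew(ExpansionData, 1) in init()
        reserved = NULL;
    }

    super::free();
}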
Example #3
File: IOLib.cpp Project: Prajna/xnu
void * IOMallocContiguous(vm_size_t size, vm_size_t alignment,
			   IOPhysicalAddress * physicalAddress)
{
    mach_vm_address_t	address = 0;

    if (size == 0)
	return 0;
    if (alignment == 0) 
	alignment = 1;

    /* Do we want a physical address? */
    if (!physicalAddress)
    {
	address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true);
    }
    else do
    {
	IOBufferMemoryDescriptor * bmd;
	mach_vm_address_t          physicalMask;
	vm_offset_t		   alignMask;

	alignMask = alignment - 1;
	physicalMask = (0xFFFFFFFF ^ alignMask);

	bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
		kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask);
	if (!bmd)
	    break;
	
	_IOMallocContiguousEntry *
	entry = IONew(_IOMallocContiguousEntry, 1);
	if (!entry)
	{
	    bmd->release();
	    break;
	}
	entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy();
	entry->md          = bmd;
	lck_mtx_lock(gIOMallocContiguousEntriesLock);
	queue_enter( &gIOMallocContiguousEntries, entry, 
		    _IOMallocContiguousEntry *, link );
	lck_mtx_unlock(gIOMallocContiguousEntriesLock);

	address          = (mach_vm_address_t) entry->virtualAddr;
	*physicalAddress = bmd->getPhysicalAddress();
    }
    while (false);

    if (address) {
	IOStatisticsAlloc(kIOStatisticsMallocContiguous, size);
    }

    return (void *) address;
}
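
IOMallocContiguous records each descriptor-backed allocation in gIOMallocContiguousEntries so the matching free path can find it again. A sketch of how that free path could look, under the assumption that IOKernelFreePhysical is the counterpart of IOKernelAllocateWithPhysicalRestrict for allocations that never got a list entry (anything not shown in the excerpt above is an assumption):

void IOFreeContiguous(void * address, vm_size_t size)
{
    _IOMallocContiguousEntry * entry;
    IOMemoryDescriptor *       md = NULL;

    if (!address) return;

    // Look for a bookkeeping entry created by the physicalAddress path above.
    lck_mtx_lock(gIOMallocContiguousEntriesLock);
    queue_iterate(&gIOMallocContiguousEntries, entry,
		  _IOMallocContiguousEntry *, link)
    {
	if (entry->virtualAddr == (mach_vm_address_t) address) {
	    md = entry->md;
	    queue_remove(&gIOMallocContiguousEntries, entry,
			 _IOMallocContiguousEntry *, link);
	    break;
	}
    }
    lck_mtx_unlock(gIOMallocContiguousEntriesLock);

    if (md) {
	// Allocation was backed by an IOBufferMemoryDescriptor.
	md->release();
	IODelete(entry, _IOMallocContiguousEntry, 1);
    } else {
	// No entry: the memory came from IOKernelAllocateWithPhysicalRestrict.
	IOKernelFreePhysical((mach_vm_address_t) address, size);	// assumed counterpart
    }
}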
Example #4
bool AppleRAIDMirrorSet::resizeSet(UInt32 newMemberCount)
{
    UInt32 oldMemberCount = arMemberCount;

    // if downsizing, just hold on to the extra space
    if (arLastAllocCount < newMemberCount) {
        if (arLastSeek) IODelete(arLastSeek, UInt64, arLastAllocCount);
        arLastSeek = IONew(UInt64, newMemberCount);
        if (!arLastSeek) return false;

        if (arSkippedIOCount) IODelete(arSkippedIOCount, UInt64, arLastAllocCount);
        arSkippedIOCount = IONew(UInt64, newMemberCount);
        if (!arSkippedIOCount) return false;
    }
    bzero(arLastSeek, sizeof(UInt64) * newMemberCount);
    bzero(arSkippedIOCount, sizeof(UInt64) * newMemberCount);

    if (super::resizeSet(newMemberCount) == false) return false;

    if (oldMemberCount && arMemberCount > oldMemberCount) arExpectingLiveAdd += arMemberCount - oldMemberCount;

    return true;
}
Example #5
bool IOHIDInterface::init( OSDictionary * dictionary )
{
    if ( !super::init(dictionary) )
        return false;

    _reserved = IONew( ExpansionData, 1 );

    if (!_reserved)
        return false;

    bzero(_reserved, sizeof(ExpansionData));

    bzero(_maxReportSize, sizeof(IOByteCount) * kIOHIDReportTypeCount);
        
    return true;
}
Example #6
IORecursiveLock * IORecursiveLockAlloc( void )
{
    _IORecursiveLock * lock;

    lock = IONew( _IORecursiveLock, 1);
    if( !lock)
        return( 0 );

    lock->mutex = mutex_alloc(ETAP_IO_AHA);
    if( lock->mutex) {
        lock->thread = 0;
        lock->count  = 0;
    } else {
        IODelete( lock, _IORecursiveLock, 1);
        lock = 0;
    }

    return( (IORecursiveLock *) lock );
}
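
For symmetry with the allocator above, a sketch of the matching teardown for this older, mutex-based variant; mutex_free is assumed to be the counterpart of the mutex_alloc used here (the shipping IORecursiveLockFree may differ):

void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *) _lock;

    mutex_free( lock->mutex );              // undoes mutex_alloc(ETAP_IO_AHA)
    IODelete( lock, _IORecursiveLock, 1 );  // undoes IONew(_IORecursiveLock, 1)
}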
Example #7
IOWorkLoop *
IOWorkLoop::workLoopWithOptions(IOOptionBits options)
{
    IOWorkLoop *me = new IOWorkLoop;

    if (me && options) {
	me->reserved = IONew(ExpansionData, 1);
	if (!me->reserved) {
	    me->release();
	    return 0;
	}
	me->reserved->options = options;
    }

    if (me && !me->init()) {
        me->release();
        return 0;
    }

    return me;
}
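
A usage sketch for the factory above (assumed caller code, not from the source); kPreciousStack merely stands in for whatever option bits the caller needs:

IOWorkLoop * wl = IOWorkLoop::workLoopWithOptions(IOWorkLoop::kPreciousStack);
if (wl) {
    // ... addEventSource(...), do work ...
    wl->release();      // the factory returns a retained object
}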
Example #8
// allocate element at index
bool IORangeAllocator::allocElement( UInt32 index )
{
    UInt32			newCapacity;
    IORangeAllocatorElement *	newElements;

    if( ((numElements == capacity) && capacityIncrement)
     || (!elements)) {

	newCapacity = capacity + capacityIncrement;
	newElements = IONew( IORangeAllocatorElement, newCapacity );
	if( !newElements)
	    return( false );

	if( elements) {
	    bcopy( elements,
		   newElements,
		   index * sizeof( IORangeAllocatorElement));
	    bcopy( elements + index,
		   newElements + index + 1,
		   (numElements - index) * sizeof( IORangeAllocatorElement));

	    IODelete( elements, IORangeAllocatorElement, capacity );
	}

	elements = newElements;
	capacity = newCapacity;

    } else {

	bcopy( elements + index,
	       elements + index + 1,
	       (numElements - index) * sizeof( IORangeAllocatorElement));
    }
    numElements++;

    return( true );
}
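
For contrast, the inverse operation sketched from the same fields (the real class has a matching deallocElement; only the elements and numElements members from the excerpt above are assumed):

// remove element at index, shifting the tail down
void IORangeAllocator::deallocElement( UInt32 index )
{
    numElements--;
    bcopy( elements + index + 1,
	   elements + index,
	   (numElements - index) * sizeof( IORangeAllocatorElement ));
}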
Example #9
bool IOWorkLoop::init()
{
    // The super init and gateLock allocation MUST be done first.
    if ( !super::init() )
        return false;
	
	// Allocate our ExpansionData if it hasn't been allocated already.
	if ( !reserved )
	{
		reserved = IONew(ExpansionData,1);
		if ( !reserved )
			return false;
		
		bzero(reserved,sizeof(ExpansionData));
	}
	
#if DEBUG
	OSBacktrace ( reserved->allocationBacktrace, sizeof ( reserved->allocationBacktrace ) / sizeof ( reserved->allocationBacktrace[0] ) );
#endif
	
    if ( gateLock == NULL ) {
        if ( !( gateLock = IORecursiveLockAlloc()) )
            return false;
    }
	
    if ( workToDoLock == NULL ) {
        if ( !(workToDoLock = IOSimpleLockAlloc()) )
            return false;
        IOSimpleLockInit(workToDoLock);
        workToDo = false;
    }

    if (!reserved) {
        reserved = IONew(ExpansionData, 1);
        if (!reserved)
            return false;
        reserved->options = 0;
    }
	
    IOStatisticsRegisterCounter();

    if ( controlG == NULL ) {
        controlG = IOCommandGate::commandGate(
            this,
            OSMemberFunctionCast(
                IOCommandGate::Action,
                this,
                &IOWorkLoop::_maintRequest));

        if ( !controlG )
            return false;
        // Point the controlGate at the workLoop.  Usually addEventSource
        // does this automatically.  The problem is in this case addEventSource
        // uses the control gate and it has to be bootstrapped.
        controlG->setWorkLoop(this);
        if (addEventSource(controlG) != kIOReturnSuccess)
            return false;
    }

    if ( workThread == NULL ) {
        thread_continue_t cptr = OSMemberFunctionCast(
            thread_continue_t,
            this,
            &IOWorkLoop::threadMain);
        if (KERN_SUCCESS != kernel_thread_start(cptr, this, &workThread))
            return false;
    }

    (void) thread_set_tag(workThread, THREAD_TAG_IOWORKLOOP);
    return true;
}
Example #10
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
				task_t		  inTask,
				IOOptionBits      options,
				mach_vm_size_t    capacity,
				mach_vm_address_t alignment,
				mach_vm_address_t physicalMask)
{
    task_t		  mapTask = NULL;
    vm_map_t 		  vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits	  iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity) return false;

    _options   	      = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer	      = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
	return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    //  make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
	IOMapper::checkForSystemMapper();
	mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1))
    {
	alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
	highestMask = (physicalMask | alignment);
	alignment++;
	if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
	alignment = page_size;

    if (alignment >= page_size)
	capacity = round_page(capacity);

    if (alignment > page_size)
	options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
	return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
	if (highestMask <= 0xFFFFFFFF)
	    mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
	else
	    mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
	highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
	iomdOptions |= kIOMemoryBufferPageable;
	if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
	vmmap = kernel_map;

	// Buffer shouldn't auto prepare they should be prepared explicitly
	// But it never was enforced so what are you going to do?
	iomdOptions |= kIOMemoryAutoPrepare;

	/* Allocate a wired-down buffer inside kernel space. */

	bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

	if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
	{
	    contig |= (!mapped);
	    contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
	    // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
	    contig |= true;
#endif
	}

	if (contig || highestMask || (alignment > page_size))
	{
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
            				capacity, highestMask, alignment, contig);
	}
	else if (needZero
		  && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
	{
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
	    if (_buffer)
	    {
		IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
		OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
	    }
	}
	else if (alignment > 1)
	{
            _buffer = IOMallocAligned(capacity, alignment);
	}
	else
	{
            _buffer = IOMalloc(capacity);
	}
	if (!_buffer)
	{
            return false;
	}
	if (needZero) bzero(_buffer, capacity);
    }

    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
	vm_size_t	size = round_page(capacity);

	// initWithOptions will create memory entry
	iomdOptions |= kIOMemoryPersistent;

	if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
	    OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
	    mapTask = inTask;
	    if (NULL == inTask)
		inTask = kernel_task;
	}
	else if (options & kIOMapCacheMask)
	{
	    // Prefetch each page to put entries into the pmap
	    volatile UInt8 *	startAddr = (UInt8 *)_buffer;
	    volatile UInt8 *	endAddr   = (UInt8 *)_buffer + capacity;

	    while (startAddr < endAddr)
	    {
		UInt8 dummyVar = *startAddr;
		(void) dummyVar;
		startAddr += page_size;
 	    }
	}
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
				inTask, iomdOptions, /* System mapper */ 0))
	return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, 
    						&mapSpec, sizeof(mapSpec)))
	return false;

    if (mapTask)
    {
	if (!reserved) {
	    reserved = IONew( ExpansionData, 1 );
	    if( !reserved)
		return( false );
	}
	reserved->map = createMappingInTask(mapTask, 0, 
			    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
	if (!reserved->map)
	{
	    _buffer = 0;
	    return( false );
	}
	release();	    // map took a retain on this
	reserved->map->retain();
	removeMapping(reserved->map);
	mach_vm_address_t buffer = reserved->map->getAddress();
	_buffer = (void *) buffer;
	if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
	    _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
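
A usage sketch for the path above (assumed caller code, not from the source): the inTaskWithPhysicalMask factory seen in Example #3 lands in this init; here it asks for a physically contiguous buffer below 4 GB:

IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
	kernel_task,
	kIODirectionInOut | kIOMemoryPhysicallyContiguous,
	4096,				// capacity
	0x00000000FFFFF000ULL);		// physical mask: below 4 GB, 4 KB aligned
if (bmd) {
    void * cpuPtr = bmd->getBytesNoCopy();	// kernel virtual address of the buffer
    (void) cpuPtr;
    // ... hand the physical range to the hardware, then ...
    bmd->release();
}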
Example #11
/*
 * Common KeyMap initialization
 */
bool IOHIKeyboardMapper::init(	IOHIKeyboard *delegate,
								const UInt8 *map,
								UInt32 mappingLen,
								bool mappingShouldBeFreed )
{
	_mappingShouldBeFreed		= mappingShouldBeFreed;
	_parsedMapping.mapping		= map;
	_parsedMapping.mappingLen	= mappingLen;

	if (!super::init())	 return false;

	_delegate				  = delegate;

	if (!parseKeyMapping(map, mappingLen, &_parsedMapping))	return false;

//	_hidSystem					= NULL;
//	_stateDirty					= false;

	_reserved = IONew(ExpansionData, 1);
	if (!_reserved) return false;
	bzero(_reserved, sizeof(ExpansionData));

//	_ejectTimerEventSource		= 0;
//
//	_f12Eject_State			= 0;
//
//	_eject_Delay_MS			= kEjectF12DelayMS;
//
//	_slowKeys_State			= 0;
//
//	_slowKeys_Delay_MS		= 0;
//
//	_slowKeysTimerEventSource	= 0;

	_specialKeyModifierFlags	= 0;

//	_supportsF12Eject		= 0;

//	_cached_KeyBits			= 0;

	_cachedAlphaLockModDefs = 0;

	// If there are right hand modifiers defined, set a property
	if (_delegate && (_parsedMapping.maxMod > 0))
	{

		if ( _delegate->doesKeyLock(NX_KEYTYPE_CAPS_LOCK) )
		{
			_delegate->setProperty( kIOHIDKeyboardCapsLockDoesLockKey, kOSBooleanTrue);
			_cachedAlphaLockModDefs = _parsedMapping.modDefs[NX_MODIFIERKEY_ALPHALOCK];
		}
		else
		{
			_delegate->setProperty( kIOHIDKeyboardCapsLockDoesLockKey, kOSBooleanFalse);
		}

		UInt32 supportedModifiers = 0;
		OSNumber * number = 0;

		number = (OSNumber *)_delegate->copyProperty(kIOHIDKeyboardSupportedModifiersKey);

		if (number) supportedModifiers = number->unsigned32BitValue();
		OSSafeReleaseNULL(number);

		_delegate->setProperty( kIOHIDKeyboardSupportedModifiersKey, supportedModifiers, 32 );

		if ( (supportedModifiers & NX_DEVICERSHIFTKEYMASK) ||
			 (supportedModifiers & NX_DEVICERCTLKEYMASK) ||
			 (supportedModifiers & NX_DEVICERALTKEYMASK) ||
			 (supportedModifiers & NX_DEVICERCMDKEYMASK) )
		{
			_delegate->setProperty("HIDKeyboardRightModifierSupport", kOSBooleanTrue);
		}
	}

	if (_parsedMapping.numDefs && _delegate)
	{
		_delegate->setProperty("HIDKeyboardKeysDefined", kOSBooleanTrue);

	}

	if ( !_delegate->doesKeyLock(NX_KEYTYPE_CAPS_LOCK) )
	{
		UInt32 myFlags = _delegate->deviceFlags();

		if ( _delegate->alphaLock() )
		{
			_specialKeyModifierFlags	|= NX_ALPHASHIFTMASK;
			myFlags						|= NX_ALPHASHIFTMASK;

			_delegate->IOHIKeyboard::setDeviceFlags(myFlags);
		}
		else
		{
			_specialKeyModifierFlags	&= ~NX_ALPHASHIFTMASK;
			myFlags						&= ~NX_ALPHASHIFTMASK;

			_delegate->IOHIKeyboard::setDeviceFlags(myFlags);
		}
	}

    return true;
}
Example #12
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t	    inTask)
{
    vm_map_t map = 0;
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options   	  = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer	  = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffer should never auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
            }

            kr = mach_make_memory_entry( map,
                                         &size, _ranges.v[0].address,
                                         memEntryCacheMode, &sharedMem,
                                         NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
Example #13
bool IOEthernetInterface::init(IONetworkController * controller)
{
    OSObject *  obj;

    _reserved = IONew( ExpansionData, 1 );
    if ( _reserved == 0 )
        return false;
    memset(_reserved, 0, sizeof(ExpansionData));

    if ( super::init(controller) == false )
        return false;

    // initialize enet specific fields.
    setInterfaceType(IFT_ETHER);
    setMaxTransferUnit( ETHERMTU );
    setMediaAddressLength( ETHER_ADDR_LEN );
    setMediaHeaderLength( ETHER_HDR_LEN );
    setFlags( IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS,
              IFF_RUNNING   | IFF_MULTICAST );
	
    // Add an IONetworkData with room to hold an IOEthernetStats structure.
    // This class does not reference the data object created, and no harm
    // is done if the data object is released or replaced.

    IONetworkData * data = IONetworkData::withInternalBuffer(
                                              kIOEthernetStatsKey,
                                              sizeof(IOEthernetStats));
    if (data)
    {
        addNetworkData(data);
        data->release();
    }

    _inputEventThreadCall = thread_call_allocate(
                            handleEthernetInputEvent, this );
    if (!_inputEventThreadCall)
        return false;

    // Create and initialize the filter dictionaries.

    _requiredFilters = OSDictionary::withCapacity(2);
    _activeFilters   = OSDictionary::withCapacity(2);

    if ( (_requiredFilters == 0) || (_activeFilters == 0) )
        return false;

    obj = controller->copyProperty(kIOPacketFilters);
    if (obj && ((_supportedFilters = OSDynamicCast(OSDictionary, obj)) == 0))
        obj->release();
    if (!_supportedFilters)
        return false;

    // Cache the bit mask of wake filters supported by the driver.
    // This value will not change.

    _supportedWakeFilters = GET_SUPPORTED_FILTERS(
                            gIOEthernetWakeOnLANFilterGroup );

    // Retain the Disabled WOL filters OSNumber.
    // Its value will be updated live for link and WOL changed events.

    obj = _supportedFilters->getObject(
            gIOEthernetDisabledWakeOnLANFilterGroup );
    _disabledWakeFilters = OSDynamicCast(OSNumber, obj);
    if (_disabledWakeFilters)
        _disabledWakeFilters->retain();

    // Controller's Unicast (directed) and Broadcast filters should always
    // be enabled. Those bits should never be cleared.

    if ( !SET_REQUIRED_FILTERS( gIONetworkFilterGroup,
                                kIOPacketFilterUnicast |
                                kIOPacketFilterBroadcast )
      || !SET_REQUIRED_FILTERS( gIOEthernetWakeOnLANFilterGroup, 0 )
      || !SET_ACTIVE_FILTERS(   gIONetworkFilterGroup, 0 )
      || !SET_ACTIVE_FILTERS(   gIOEthernetWakeOnLANFilterGroup, 0 ) )
    {
         return false;
    }

    _publishedFeatureID = 0;

    // Publish filter dictionaries to property table.

    setProperty( kIORequiredPacketFilters, _requiredFilters );
    setProperty( kIOActivePacketFilters,   _activeFilters );

    return true;
}
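
A hypothetical teardown sketch covering only the members allocated or retained in the init() above (the shipping free() does more, e.g. cancelling the thread call before freeing it):

void IOEthernetInterface::free()
{
    if (_requiredFilters)     { _requiredFilters->release();     _requiredFilters = 0; }
    if (_activeFilters)       { _activeFilters->release();       _activeFilters = 0; }
    if (_supportedFilters)    { _supportedFilters->release();    _supportedFilters = 0; }
    if (_disabledWakeFilters) { _disabledWakeFilters->release(); _disabledWakeFilters = 0; }

    if (_inputEventThreadCall) {
        thread_call_free(_inputEventThreadCall);    // balances thread_call_allocate()
        _inputEventThreadCall = 0;
    }

    if (_reserved) {
        IODelete(_reserved, ExpansionData, 1);      // balances IONew(ExpansionData, 1)
        _reserved = 0;
    }

    super::free();
}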
Example #14
static
IOReturn _CreateID(queue_head_t * taskList, IOOptionBits options,
                    IOAccelID requestedID, IOAccelID * idOut)
{
    IOReturn          err;
    Boolean           found;
    IOAccelIDRecord * record;
    IOAccelIDRecord * dup;

    record = IONew(IOAccelIDRecord, 1);
    if (!record)
        return kIOReturnNoMemory;
    record->retain = 1;

    IOLockLock(gLock);

    gTotalCount++;

    do
    {
        if (kIOAccelSpecificID & options)
        {
            if ((requestedID > 4095) || (requestedID < -4096))
            {
                err = kIOReturnExclusiveAccess;
                break;
            }
    
            found = false;
            queue_iterate(&gGlobalList,
                            dup,
                            IOAccelIDRecord *,
                            glob_link)
            {
                found = (dup->id == requestedID);
                if (found)
                    break;
            }
    
            if (found)
            {
                err = kIOReturnExclusiveAccess;
                break;
            }
    
            record->id = requestedID;
        }
        else
        {
            record->id = ((IOAccelID) (intptr_t) record) ^ (kTweakBits & gTweak++);
        }

        if (taskList)
        {
            queue_enter(taskList, record,
                            IOAccelIDRecord *, task_link);
        }
        else
            record->task_link.next = 0;

        queue_enter(&gGlobalList, record,
                        IOAccelIDRecord *, glob_link);

        *idOut = record->id;
        err = kIOReturnSuccess;
    }
Example #15
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
				task_t		  inTask,
				IOOptionBits      options,
				mach_vm_size_t    capacity,
				mach_vm_address_t alignment,
				mach_vm_address_t physicalMask)
{
    kern_return_t 	  kr;
    task_t		  mapTask = NULL;
    vm_map_t 		  vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits	  iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity)
        return false;

    _options   	      = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer	      = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
	return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    //  make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
	IOMapper::checkForSystemMapper();
	mapped = (0 != IOMapper::gSystem);
    }
    needZero = mapped;

    if (physicalMask && (alignment <= 1))
    {
	alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
	highestMask = (physicalMask | alignment);
	alignment++;
	if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
	alignment = page_size;

    if (alignment >= page_size)
	capacity = round_page(capacity);

    if (alignment > page_size)
	options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
	return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
	if (highestMask <= 0xFFFFFFFF)
	    mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
	else
	    mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
	highestMask = 0;
    }

    // set flags for entry + object create
    vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;

    // set memory entry cache mode
    switch (options & kIOMapCacheMask)
    {
	case kIOMapInhibitCache:
	    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
	    break;

	case kIOMapWriteThruCache:
	    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
	    break;

	case kIOMapWriteCombineCache:
	    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
	    break;

	case kIOMapCopybackCache:
	    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
	    break;

	case kIOMapCopybackInnerCache:
	    SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
	    break;

	case kIOMapDefaultCache:
	default:
	    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
	    break;
    }

    if (options & kIOMemoryPageable)
    {
	iomdOptions |= kIOMemoryBufferPageable;

	// must create the entry before any pages are allocated

	// set flags for entry + object create
	memEntryCacheMode |= MAP_MEM_NAMED_CREATE;

	if (options & kIOMemoryPurgeable)
	    memEntryCacheMode |= MAP_MEM_PURGABLE;
    }
    else
    {
	memEntryCacheMode |= MAP_MEM_NAMED_REUSE;
	vmmap = kernel_map;

	// Buffer shouldn't auto prepare they should be prepared explicitly
	// But it never was enforced so what are you going to do?
	iomdOptions |= kIOMemoryAutoPrepare;

	/* Allocate a wired-down buffer inside kernel space. */

	bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

	if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
	{
	    contig |= (!mapped);
	    contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
	    // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
	    contig |= true;
#endif
	}

	if (contig || highestMask || (alignment > page_size))
	{
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
            				capacity, highestMask, alignment, contig);
	}
	else if (needZero
		  && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
	{
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(capacity, alignment);
	}
	else if (alignment > 1)
	{
            _buffer = IOMallocAligned(capacity, alignment);
	}
	else
	{
            _buffer = IOMalloc(capacity);
	}
	if (!_buffer)
	{
            return false;
	}
	if (needZero) bzero(_buffer, capacity);
    }

    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
	ipc_port_t	sharedMem;
	vm_size_t	size = round_page(capacity);

	kr = mach_make_memory_entry(vmmap,
				    &size, (vm_offset_t)_buffer,
				    memEntryCacheMode, &sharedMem,
				    NULL );

	if( (KERN_SUCCESS == kr) && (size != round_page(capacity))) {
	    ipc_port_release_send( sharedMem );
	    kr = kIOReturnVMError;
	}
	if( KERN_SUCCESS != kr)
	    return( false );

	_memEntry = (void *) sharedMem;

	if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
	    debug_iomallocpageable_size += size;
#endif
	    mapTask = inTask;
	    if (NULL == inTask)
		inTask = kernel_task;
	}
	else if (options & kIOMapCacheMask)
	{
	    // Prefetch each page to put entries into the pmap
	    volatile UInt8 *	startAddr = (UInt8 *)_buffer;
	    volatile UInt8 *	endAddr   = (UInt8 *)_buffer + capacity;

	    while (startAddr < endAddr)
	    {
		*startAddr;
		startAddr += page_size;
 	    }
	}
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
				inTask, iomdOptions, /* System mapper */ 0))
	return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, 
    						&mapSpec, sizeof(mapSpec)))
	return false;

    if (mapTask)
    {
	if (!reserved) {
	    reserved = IONew( ExpansionData, 1 );
	    if( !reserved)
		return( false );
	}
	reserved->map = createMappingInTask(mapTask, 0, 
			    kIOMapAnywhere | (options & kIOMapCacheMask), 0, 0);
	if (!reserved->map)
	{
	    _buffer = 0;
	    return( false );
	}
	release();	    // map took a retain on this
	reserved->map->retain();
	removeMapping(reserved->map);
	mach_vm_address_t buffer = reserved->map->getAddress();
	_buffer = (void *) buffer;
	if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
	    _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
Example #16
bool RadeonController::start( IOService * provider )
{
	if (!super::start(provider)) return false;
	
	device = OSDynamicCast(IOPCIDevice, provider);
	if (device == NULL) return false;
	
	//get user options
	OSBoolean *prop;
	
	OSDictionary *dict = OSDynamicCast(OSDictionary, getProperty("UserOptions"));
	
	bzero(&options, sizeof(UserOptions));
	options.HWCursorSupport = FALSE;
	options.enableGammaTable = FALSE;
	options.enableOSXI2C = FALSE;
	
	options.lowPowerMode = FALSE;
	if (dict) {
		prop = OSDynamicCast(OSBoolean, dict->getObject("enableHWCursor"));
		if (prop) options.HWCursorSupport = prop->getValue();
		prop = OSDynamicCast(OSBoolean, dict->getObject("debugMode"));
		if (prop) options.debugMode = prop->getValue();
		if (options.debugMode) options.HWCursorSupport = FALSE;
		prop = OSDynamicCast(OSBoolean, dict->getObject("enableGammaTable"));
		if (prop) options.enableGammaTable = prop->getValue();
		prop = OSDynamicCast(OSBoolean, dict->getObject("lowPowerMode"));
		if (prop) options.lowPowerMode = prop->getValue();
	}
	options.verbosity = 1;
#ifdef DEBUG
	if (0 == getRegistryRoot()->getProperty("RadeonDumpReady")) {
		getRegistryRoot()->setProperty("RadeonDumpReady", kOSBooleanTrue);
		DumpMsg.mVerbose = 1;
		DumpMsg.client = 1;
		DumpMsg.mMsgBufferSize = 65535;
		if (dict) {
			OSNumber *optionNum;
			optionNum = OSDynamicCast(OSNumber, dict->getObject("verboseLevel"));
			if (optionNum) DumpMsg.mVerbose = optionNum->unsigned32BitValue();
			optionNum = OSDynamicCast(OSNumber, dict->getObject("MsgBufferSize"));
			if (optionNum) DumpMsg.mMsgBufferSize = max(65535, optionNum->unsigned32BitValue());
		}	
		DumpMsg.mMsgBufferEnabled = false;
		DumpMsg.mMsgBufferPos = 0;
		DumpMsg.mMessageLock = IOLockAlloc();
		DumpMsg.mMsgBuffer = (char *) IOMalloc(DumpMsg.mMsgBufferSize);
		if (!DumpMsg.mMsgBuffer) {
			IOLog("error: couldn't allocate message buffer (%ld bytes)\n", DumpMsg.mMsgBufferSize);
			return false;
		}
		enableMsgBuffer(true);
	} else DumpMsg.client += 1;
	options.verbosity = DumpMsg.mVerbose;
#endif
	
	device->setMemoryEnable(true);
	IOMap = device->mapDeviceMemoryWithRegister( kIOPCIConfigBaseAddress2 );
	if (IOMap == NULL) return false;
	FBMap = device->mapDeviceMemoryWithRegister( kIOPCIConfigBaseAddress0 );
	if (FBMap == NULL) return false;
	memoryMap.MMIOBase = (pointer) IOMap->getVirtualAddress();
	memoryMap.MMIOMapSize = IOMap->getLength();
	memoryMap.FbBase = (pointer) FBMap->getVirtualAddress();
	memoryMap.FbMapSize = FBMap->getLength();
	memoryMap.FbPhysBase = (unsigned long)FBMap->getPhysicalAddress();
	memoryMap.bitsPerPixel = 32;
	memoryMap.bitsPerComponent = 8;
	memoryMap.colorFormat = 0;	//0 for non-64 bit
	
	memoryMap.BIOSCopy = NULL;
	memoryMap.BIOSLength = 0;
	
	IOMemoryDescriptor * mem;
	mem = IOMemoryDescriptor::withPhysicalAddress((IOPhysicalAddress) RHD_VBIOS_BASE, RHD_VBIOS_SIZE, kIODirectionOut);
	if (mem) {
		memoryMap.BIOSCopy = (unsigned char *)IOMalloc(RHD_VBIOS_SIZE);
		if (memoryMap.BIOSCopy) {
			mem->prepare(kIODirectionOut);
			if (!(memoryMap.BIOSLength = mem->readBytes(0, memoryMap.BIOSCopy, RHD_VBIOS_SIZE))) {
				LOG("Cannot read BIOS image\n");
				memoryMap.BIOSLength = 0;
			}
			if ((unsigned int)memoryMap.BIOSLength != RHD_VBIOS_SIZE)
				LOG("Read only %d of %d bytes of BIOS image\n", memoryMap.BIOSLength, RHD_VBIOS_SIZE);
			mem->complete(kIODirectionOut);
		}
	}

	if (dict) {
		const char typeKey[2][8] = {"@0,TYPE", "@1,TYPE"};
		const char EDIDKey[2][8] = {"@0,EDID", "@1,EDID"};
		const char fixedModesKey[2][17] = {"@0,UseFixedModes", "@1,UseFixedModes"};
		OSString *type;
		OSData *edidData;
		OSBoolean *boolData;
		int i;
		for (i = 0;i < 2;i++) {
			type = OSDynamicCast(OSString, dict->getObject(typeKey[i]));
			if (!type) continue;
			edidData = OSDynamicCast(OSData, dict->getObject(EDIDKey[i]));
			if (edidData == NULL) continue;
			options.EDID_Block[i] = (unsigned char *)IOMalloc(edidData->getLength());
			if (options.EDID_Block[i] == NULL) continue;
			strncpy(options.outputTypes[i], type->getCStringNoCopy(), outputTypeLength);
			bcopy(edidData->getBytesNoCopy(), options.EDID_Block[i], edidData->getLength());
			options.EDID_Length[i] = edidData->getLength();
			
			boolData = OSDynamicCast(OSBoolean, dict->getObject(fixedModesKey[i]));
			if (boolData) options.UseFixedModes[i] = boolData->getValue();
		}
	}
		
	xf86Screens[0] = IONew(ScrnInfoRec, 1);	//using global variable, will change it later
	ScrnInfoPtr pScrn = xf86Screens[0];
	if (pScrn == NULL) return false;
	bzero(pScrn, sizeof(ScrnInfoRec));
	MAKE_REG_ENTRY(&nub, device);
	pciRec.chipType = device->configRead16(kIOPCIConfigDeviceID);
	pciRec.subsysVendor = device->configRead16(kIOPCIConfigSubSystemVendorID);
	pciRec.subsysCard = device->configRead16(kIOPCIConfigSubSystemID);
	pciRec.biosSize = 16;	//RHD_VBIOS_SIZE = 1 << 16
	pScrn->PciTag = &nub;
	pScrn->PciInfo = &pciRec;
	pScrn->options = &options;
	pScrn->memPhysBase = (unsigned long)FBMap->getPhysicalAddress();
	pScrn->fbOffset = 0;	//scanout offset
	pScrn->bitsPerPixel = 32;
	pScrn->bitsPerComponent = 8;
	pScrn->colorFormat = 0;
	pScrn->depth = pScrn->bitsPerPixel;
	pScrn->memoryMap = &memoryMap;
	
	
	createNubs(provider);
	
	setModel(device);
	
	return true;
}
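
start() leaves several raw allocations behind (the ScrnInfoRec, the BIOS copy, the EDID blocks). A hypothetical counterpart, assuming they are released in stop() and that nothing else needs tearing down (all names are the ones used above):

void RadeonController::stop( IOService * provider )
{
	for (int i = 0; i < 2; i++) {
		if (options.EDID_Block[i]) {
			IOFree(options.EDID_Block[i], options.EDID_Length[i]);
			options.EDID_Block[i] = NULL;
		}
	}

	if (memoryMap.BIOSCopy) {
		IOFree(memoryMap.BIOSCopy, RHD_VBIOS_SIZE);	// balances IOMalloc(RHD_VBIOS_SIZE)
		memoryMap.BIOSCopy = NULL;
	}

	if (xf86Screens[0]) {
		IODelete(xf86Screens[0], ScrnInfoRec, 1);	// balances IONew(ScrnInfoRec, 1)
		xf86Screens[0] = NULL;
	}

	super::stop(provider);
}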
Example #17
IOReturn
IOPolledFileOpen(const char * filename,
                 uint64_t setFileSize, uint64_t fsFreeSize,
                 void * write_file_addr, size_t write_file_len,
                 IOPolledFileIOVars ** fileVars,
                 OSData ** imagePath,
                 uint8_t * volumeCryptKey, size_t keySize)
{
    IOReturn             err = kIOReturnSuccess;
    IOPolledFileIOVars * vars;
    _OpenFileContext     ctx;
    OSData *             extentsData;
    OSNumber *           num;
    IOService *          part = 0;
    dev_t                block_dev;
    dev_t                image_dev;
    AbsoluteTime         startTime, endTime;
    uint64_t             nsec;

    vars = IONew(IOPolledFileIOVars, 1);
    if (!vars) return (kIOReturnNoMemory);
    bzero(vars, sizeof(*vars));
    vars->allocated = true;

    do
    {
        extentsData = OSData::withCapacity(32);
        ctx.extents = extentsData;
        ctx.size    = 0;
        clock_get_uptime(&startTime);

        vars->fileRef = kern_open_file_for_direct_io(filename,
                        (write_file_addr != NULL) || (0 != setFileSize),
                        &file_extent_callback, &ctx,
                        setFileSize,
                        fsFreeSize,
                        // write file:
                        0, write_file_addr, write_file_len,
                        // results
                        &block_dev,
                        &image_dev,
                        &vars->block0,
                        &vars->maxiobytes,
                        &vars->flags);
#if 0
        uint32_t msDelay = (131071 & random());
        HIBLOG("sleep %d\n", msDelay);
        IOSleep(msDelay);
#endif
        clock_get_uptime(&endTime);
        SUB_ABSOLUTETIME(&endTime, &startTime);
        absolutetime_to_nanoseconds(endTime, &nsec);

        if (!vars->fileRef) err = kIOReturnNoSpace;

        HIBLOG("kern_open_file_for_direct_io took %qd ms\n", nsec / 1000000ULL);
        if (kIOReturnSuccess != err) break;

        HIBLOG("Opened file %s, size %qd, extents %ld, maxio %qx ssd %d\n", filename, ctx.size,
               (extentsData->getLength() / sizeof(IOPolledFileExtent)) - 1,
               vars->maxiobytes, kIOPolledFileSSD & vars->flags);
        assert(!vars->block0);
        if (extentsData->getLength() < sizeof(IOPolledFileExtent))
        {
            err = kIOReturnNoSpace;
            break;
        }

        vars->fileSize = ctx.size;
        vars->extentMap = (IOPolledFileExtent *) extentsData->getBytesNoCopy();

        part = IOCopyMediaForDev(image_dev);
        if (!part)
        {
            err = kIOReturnNotFound;
            break;
        }

        if (!(vars->pollers = IOPolledFilePollers::copyPollers(part))) break;

        if ((num = OSDynamicCast(OSNumber, part->getProperty(kIOMediaPreferredBlockSizeKey))))
            vars->blockSize = num->unsigned32BitValue();
        if (vars->blockSize < 4096) vars->blockSize = 4096;

        HIBLOG("polled file major %d, minor %d, blocksize %ld, pollers %d\n",
               major(image_dev), minor(image_dev), (long)vars->blockSize,
               vars->pollers->pollers->getCount());

        OSString * keyUUID = NULL;
        if (volumeCryptKey)
        {
            err = IOGetVolumeCryptKey(block_dev, &keyUUID, volumeCryptKey, keySize);
        }

        *fileVars    = vars;
        vars->fileExtents = extentsData;

        // make imagePath
        OSData * data;
        if (imagePath)
        {
#if defined(__i386__) || defined(__x86_64__)
            char str2[24 + sizeof(uuid_string_t) + 2];

            if (keyUUID)
                snprintf(str2, sizeof(str2), "%qx:%s",
                         vars->extentMap[0].start, keyUUID->getCStringNoCopy());
            else
                snprintf(str2, sizeof(str2), "%qx", vars->extentMap[0].start);

            err = IOService::getPlatform()->callPlatformFunction(
                      gIOCreateEFIDevicePathSymbol, false,
                      (void *) part, (void *) str2,
                      (void *) (uintptr_t) true, (void *) &data);
#else
            data = 0;
            err = kIOReturnSuccess;
#endif
            if (kIOReturnSuccess != err)
            {
                HIBLOG("error 0x%x getting path\n", err);
                break;
            }
            *imagePath = data;
        }
    }
    while (false);

    if (kIOReturnSuccess != err)
    {
        HIBLOG("error 0x%x opening polled file\n", err);
        IOPolledFileClose(&vars, 0, 0, 0, 0, 0);
    }

    if (part) part->release();

    return (err);
}
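
The allocated flag set right after the IONew above tells the cleanup path whether vars itself must be returned with IODelete. A hypothetical helper sketch (the name _IOPolledFileFree is invented for illustration; the shipping cleanup lives in IOPolledFileClose and also closes the file and releases the pollers):

static void _IOPolledFileFree(IOPolledFileIOVars * vars)
{
    if (vars->fileExtents) {
        vars->fileExtents->release();           // OSData holding the extent list
        vars->fileExtents = NULL;
    }
    if (vars->allocated)
        IODelete(vars, IOPolledFileIOVars, 1);  // balances the IONew above
    else
        bzero(vars, sizeof(*vars));             // caller-provided vars are just cleared
}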