Example #1
kernel_vars_t *
alloc_kvar_pages( void )
{
	IOPhysicalAddress phys;
	kernel_vars_t *kv = IOMallocContiguous( NUM_KVARS_PAGES * 0x1000, 0x1000, &phys );
	return kv;
}
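Nothing here records the physical address or frees the allocation; a matching release path would simply hand the same size back to IOFreeContiguous(). A minimal sketch, assuming the same NUM_KVARS_PAGES constant (the free_kvar_pages() name is hypothetical):

void
free_kvar_pages( kernel_vars_t *kv )
{
	/* IOFreeContiguous() must receive the same size that was passed to IOMallocContiguous() */
	if( kv )
		IOFreeContiguous( kv, NUM_KVARS_PAGES * 0x1000 );
}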
Example #2
/*-----------------------------------------------------------------------------*
 * This routine allocates/initializes shared memory for communication between
 * the script and the driver. In addition, other driver resources (semaphores,
 * queues) are initialized here.
 *-----------------------------------------------------------------------------*/
bool Sym8xxSCSIController::Sym8xxInitVars()
{
    UInt32			i;

    adapter = (AdapterInterface *)IOMallocContiguous( page_size, page_size, (IOPhysicalAddress *)&adapterPhys );
    if ( adapter == 0 )
    {
        return false;
    }
    bzero( adapter, page_size );

    // create a pool of IOMemoryDescriptors, the memory of which is size of an SRB
    for( i = 0; i < MAX_SCSI_IOMDS; i++ )
    {
        SCSI_IOMDs[i] = IOMemoryDescriptor::withAddress( this, sizeof( SRB ), kIODirectionInOut );
        // We set the address to 'this' for no reason other than to provide a non-zero
        // address that IOMemoryDescriptor will use to create itself. The ACTUAL address
        // used by these IOMemoryDescriptors will be dynamically reinitialized with
        // ::initWithAddress() as necessary.
    }

    /*
     * We keep two copies of the Nexus pointer array. One contains physical addresses and
     * is located in the script/driver shared storage. The other copy holds the corresponding
 * virtual addresses to the active Nexus structures and is located in the driver's instance
     * data.
     * Both tables can be accessed through indirect pointers in the script/driver communication
     * area. This is the preferred method to access these arrays.
     */ 
    adapter->nexusPtrsVirt = (Nexus **)nexusArrayVirt;
    adapter->nexusPtrsPhys = (Nexus **)adapter->nexusArrayPhys;

    for (i=0; i < MAX_SCSI_TAG; i ++ )
    {
        adapter->nexusPtrsVirt[i] = (Nexus *) -1;
        adapter->nexusPtrsPhys[i] = (Nexus *) -1;
    }
 
    /*
     * The script/driver communication area also contains a 16-entry table of
     * clock settings, one entry per target.
     */
    for (i=0; i < MAX_SCSI_TARGETS; i++ )
    {
        adapter->targetClocks[i].scntl3Reg = SCNTL3_INIT_875;
    }


    return true;
}
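The core pattern in this routine is a single IOMallocContiguous() call yielding both a kernel-virtual pointer (which the driver dereferences) and a physical address (which the SCRIPTS engine is programmed with). A stripped-down sketch of that pairing, with hypothetical names:

IOPhysicalAddress  sharedPhys;
UInt8             *sharedVirt;

sharedVirt = (UInt8 *)IOMallocContiguous( page_size, page_size, &sharedPhys );
if ( sharedVirt )
{
    bzero( sharedVirt, page_size );
    // CPU-side accesses go through sharedVirt; the hardware is handed
    // sharedPhys, e.g. in a chip register or a script table entry.
}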
Example #3
RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
{
    /*
     * Validate input.
     */
    AssertPtr(pPhys);
    Assert(cb > 0);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Allocate the memory and ensure that the API is still providing
     * memory that's always below 4GB.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    IOPhysicalAddress PhysAddr;
    void *pv = IOMallocContiguous(cb, PAGE_SIZE, &PhysAddr);
    if (pv)
    {
        if (PhysAddr + (cb - 1) <= (IOPhysicalAddress)0xffffffff)
        {
            if (!((uintptr_t)pv & PAGE_OFFSET_MASK))
            {
                *pPhys = PhysAddr;
                IPRT_DARWIN_RESTORE_EFL_AC();
                return pv;
            }
            AssertMsgFailed(("IOMallocContiguous didn't return a page aligned address - %p!\n", pv));
        }
        else
            AssertMsgFailed(("IOMallocContiguous returned high address! PhysAddr=%RX64 cb=%#zx\n", (uint64_t)PhysAddr, cb));
        IOFreeContiguous(pv, cb);
    }

    IPRT_DARWIN_RESTORE_EFL_AC();
    return NULL;
}
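From the caller's side, the function guarantees a page-aligned buffer whose physical address fits in 32 bits; RTMemContFree() is the documented IPRT counterpart. A usage sketch (the device programming is illustrative):

RTCCPHYS Phys;
void    *pv = RTMemContAlloc(&Phys, _4K);
if (pv)
{
    /* Phys is below 4GB and page aligned, so it can be handed directly
       to a 32-bit DMA engine. */
    /* ... program the device with Phys, use pv from the CPU ... */
    RTMemContFree(pv, _4K);
}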
Example #4
IOReturn AppleI386AGP::createAGPSpace( IOAGPDevice * master, 
				      IOOptionBits options,
				      IOPhysicalAddress * address, 
				      IOPhysicalLength * length )
{
    IOReturn		err;
    IOPCIAddressSpace 	target = getBridgeSpace();
    IOPhysicalLength	agpLength;
    UInt32		agpCtrl;
    
    enum { agpSpacePerPage = 4 * 1024 * 1024 };
    enum { agpBytesPerGartByte = 1024 };
    enum { alignLen = 4 * 1024 * 1024 - 1 };

    destroyAGPSpace( master );

    agpCommandMask = 0xffffffff;
    agpCommandMask &= ~kIOAGPFastWrite;
//  agpCommandMask &= ~kIOAGPSideBandAddresssing;

    {
	// There's an nVidia NV11 ROM (revision 1017) that says that it can do fast writes,
	// but can't, and can often lock the machine up when fast writes are enabled.
	
	#define kNVIDIANV11EntryName	"NVDA,NVMac"
	#define kNVROMRevPropertyName 	"rom-revision"
	#define kNVBadRevision			'1017'

	const UInt32    badRev = kNVBadRevision;
	OSData *	data;

	if( (0 == strcmp( kNVIDIANV11EntryName, master->getName()))
	 && (data = OSDynamicCast(OSData, master->getProperty(kNVROMRevPropertyName)))
	 && (data->isEqualTo( &badRev, sizeof(badRev) )))
	    agpCommandMask &= ~kIOAGPFastWrite;
    }

    agpLength = *length;
    if( !agpLength)
	agpLength = 32 * 1024 * 1024;

    agpLength = (agpLength + alignLen) & ~alignLen;

    err = kIOReturnVMError;
    do {

	gartLength = agpLength / agpBytesPerGartByte;
	gartArray = (volatile UInt32 *) IOMallocContiguous( 
				gartLength, 4096, &gartPhys );
	if( !gartArray)
	    continue;
	IOSetProcessorCacheMode(kernel_task, (vm_address_t) gartArray, gartLength, kIOInhibitCache);
        bzero( (void *) gartArray, gartLength);

//	IOUnmapPages( kernel_map, (vm_address_t) gartArray, gartLength );
	// is this std?
        systemBase	= configRead32( target, kiAPBASE ) & 0xfffffff0;
	DEBG("APSIZE: %08lx\n", (UInt32)configRead8(target, kiAPSIZE));
        systemLength	= (((configRead8( target, kiAPSIZE ) & 0x3f) ^ 0x3f) + 1) << 22;

	DEBG("sysB %08lx, sysL %08lx\n", systemBase, systemLength);

	if( !systemLength)
	    continue;

	if( systemLength > agpLength)
	    systemLength = agpLength;

	DEBG("sysB %08lx, sysL %08lx\n", systemBase, systemLength);

	agpRange = IORangeAllocator::withRange( agpLength, 4096 );
	if( !agpRange)
	    continue;

        *address = systemBase;
        *length = systemLength;

	agpCtrl = configRead32(target, kiAGPCTRL);
	agpCtrl &= ~(1 << 7);
	configWrite32( target, kiAGPCTRL, agpCtrl ); 		// b7 gtlb ena

//        configWrite32( target, kiAGPCTRL, 0 << 7 ); 		// b7 gtlb ena

//        assert( 0 == (gartPhys & 0xfff));

        configWrite32( target, kiATTBASE, gartPhys );

	agpCtrl = configRead32(target, kiAGPCTRL);
	//agpCtrl |= (1 << 7);
	configWrite32( target, kiAGPCTRL, agpCtrl ); 		// b7 gtlb ena

	DEBG("kiAGPCTRL %08lx, kiATTBASE %08lx\n", 
	    configRead32( target, kiAGPCTRL ), 
	    configRead32( target, kiATTBASE ));

        err = kIOReturnSuccess;

    } while( false );

    if( kIOReturnSuccess == err)
        setAGPEnable( master, true, 0 );
    else
	destroyAGPSpace( master );

    return( err );
}
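The agpBytesPerGartByte constant (1024) encodes the fact that each 32-bit GART entry maps one 4KB page of aperture: 4096 bytes of aperture per 4 bytes of table. A hypothetical helper that fills one entry under that assumption (the actual entry layout, here a physical page address plus a low valid bit, varies by chipset):

static inline void
gartSetEntry( volatile UInt32 * gart, UInt32 apertureOffset, IOPhysicalAddress page )
{
    // one entry per 4KB aperture page; bit 0 assumed to be the valid bit
    gart[ apertureOffset >> 12 ] = (page & ~((IOPhysicalAddress) 0xfff)) | 1;
}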
Example #5
bool
MolEnet::start( IOService * provider )
{
	IOPhysicalAddress tx_phys, rx_phys;
	IOPhysicalAddress tx_of_phys, rx_of_phys;
	int i, result;

	if( !super::start(provider) )
		return false;

	if( OSI_Enet2Open() )
		return false;
	is_open = 1;
	
	//transmitQueue = OSDynamicCast( IOGatedOutputQueue, getOutputQueue() );
	transmitQueue = OSDynamicCast( IOBasicOutputQueue, getOutputQueue() );
	if( !transmitQueue ) {
		printm("MolEnet: output queue initialization failed\n");
		return false;
	}
	transmitQueue->retain();

	// Allocate two IOMbufBigMemoryCursor instances. Currently, the maximum
	// number of segments is set to 2. The maximum length for each segment
	// is set to the maximum ethernet frame size (plus padding).

	txMBufCursor = IOMbufBigMemoryCursor::withSpecification( NETWORK_BUFSIZE, 2 );
	rxMBufCursor = IOMbufBigMemoryCursor::withSpecification( NETWORK_BUFSIZE, 2 );
	if( !txMBufCursor || !rxMBufCursor ) {
		printm("MolEnet: IOMbufBigMemoryCursor allocation failure\n");
		return false;
	}

	// Get a reference to the IOWorkLoop in our superclass.
	IOWorkLoop * myWorkLoop = getWorkLoop();
	assert(myWorkLoop);

	// Allocate an IOInterruptEventSource.
	_irq = IOInterruptEventSource::interruptEventSource( this, 
				     (IOInterruptEventAction)&MolEnet::rxIRQ, provider, 0);
	
	if( !_irq || (myWorkLoop->addEventSource(_irq) != kIOReturnSuccess) ) {
		printm("MolEnet: _irq init failure\n");
		return false;
	}

	// Allocate the ring descriptors
	rx_ring = (enet2_ring_t*)IOMallocContiguous( 2 * RX_NUM_EL * sizeof(enet2_ring_t), 
						     sizeof(enet2_ring_t), &rx_phys );
	tx_ring = (enet2_ring_t*)IOMallocContiguous( 2 * TX_NUM_EL * sizeof(enet2_ring_t),
						     sizeof(enet2_ring_t), &tx_phys );		
	if( !rx_ring || !tx_ring )
		return false;

	rx_of_ring = rx_ring + RX_NUM_EL;
	tx_of_ring = tx_ring + TX_NUM_EL;
	rx_of_phys = rx_phys + sizeof(enet2_ring_t) * RX_NUM_EL;
	tx_of_phys = tx_phys + sizeof(enet2_ring_t) * TX_NUM_EL;

	// Allocate receive buffers
	for( i=0; i<RX_NUM_EL; i++ ) {
		if( !(rxMBuf[i]=allocatePacket( NETWORK_BUFSIZE )) ) {
			printm("MolEnet: packet allocation failed\n");
			return false;
		}
		// reserve 2 bytes in front of the packet so that the IP header
		// (following the 14-byte ethernet header) lands 4-byte aligned
		rxMBuf[i]->m_data += 2;
		rxMBuf[i]->m_len -= 2;
	}

	OSI_Enet2Cntrl( kEnet2Reset );
	result = OSI_Enet2RingSetup( kEnet2SetupRXRing, rx_phys, RX_NUM_EL )
		|| OSI_Enet2RingSetup( kEnet2SetupTXRing, tx_phys, TX_NUM_EL )
		|| OSI_Enet2RingSetup( kEnet2SetupRXOverflowRing, rx_of_phys, RX_NUM_EL )
		|| OSI_Enet2RingSetup( kEnet2SetupTXOverflowRing, tx_of_phys, TX_NUM_EL );
	if( result )
		return false;

	if( !resetAndEnable(false) )
		return false;

	// Create a table of supported media types.
	if( !createMediumTables() )
		return false;

	// Attach an IOEthernetInterface client.
	if( !attachInterface( (IONetworkInterface**)&networkInterface, false ) )
		return false;

	// Ready to service interface requests.
	networkInterface->registerService();

	printm("Ethernet driver 1.1\n");
	return true;
}
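Note that the error returns above leave the rings allocated; the driver's stop()/free() path is expected to release them. A sketch of that cleanup for the contiguous allocations, mirroring the sizes used above:

	if( rx_ring )
		IOFreeContiguous( rx_ring, 2 * RX_NUM_EL * sizeof(enet2_ring_t) );
	if( tx_ring )
		IOFreeContiguous( tx_ring, 2 * TX_NUM_EL * sizeof(enet2_ring_t) );
	rx_ring = tx_ring = NULL;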
Example #6
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t	    inTask)
{
    vm_map_t map = 0;
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options   	  = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer	  = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffers should never auto-prepare; they should be prepared explicitly.
        // But that was never enforced, so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
            }

            kr = mach_make_memory_entry( map,
                                         &size, _ranges.v[0].address,
                                         memEntryCacheMode, &sharedMem,
                                         NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
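Seen from a caller, the option bits above pick the allocator: kIOMemoryPhysicallyContiguous routes to the IOMallocContiguous() branch. A typical kernel-task request (sizes illustrative):

IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::withOptions(
                                    kIODirectionInOut | kIOMemoryPhysicallyContiguous,
                                    page_size, page_size );
if ( bmd )
{
    void * va = bmd->getBytesNoCopy();   // wired, physically contiguous buffer
    /* ... use the buffer ... */
    bmd->release();
}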
Example #7
bool AppleMacIO::selfTest( void )
{
    IODBDMADescriptor			*dmaDescriptors;
    UInt32				dmaDescriptorsPhys;
    UInt32				i;
    UInt32				status;
    IODBDMADescriptor			*dmaDesc;
    volatile IODBDMAChannelRegisters	*ioBaseDMA;
    bool				ok = false;
    enum { 				kTestChannel = 0x8000 };

    ioBaseDMA = (volatile IODBDMAChannelRegisters *)
		(((UInt32)fMemory->getVirtualAddress())
		+ kTestChannel );

    do {
        dmaDescriptors = (IODBDMADescriptor *)IOMallocContiguous(page_size, 1, & dmaDescriptorsPhys);
        if (!dmaDescriptors)
	    continue;

        if ( (UInt32)dmaDescriptors & (page_size - 1) ) {
            IOLog("AppleMacIO::%s() - DMA Descriptor memory not page aligned!!", __FUNCTION__);
	    continue;
        }

        bzero( dmaDescriptors, page_size );

        IODBDMAReset( ioBaseDMA );

        dmaDesc = dmaDescriptors;

        IOMakeDBDMADescriptor( dmaDesc,
                            kdbdmaNop,
                            kdbdmaKeyStream0,
                            kdbdmaIntNever,
                            kdbdmaBranchNever,
                            kdbdmaWaitNever,
                            0,
                            0 );

        dmaDesc++;

        IOMakeDBDMADescriptorDep( dmaDesc,
                                kdbdmaStoreQuad,
                                kdbdmaKeySystem,
                                kdbdmaIntNever,
                                kdbdmaBranchNever,
                                kdbdmaWaitNever,
                                4,
                                dmaDescriptorsPhys+16*sizeof(IODBDMADescriptor),
                                0x12345678 );

        dmaDesc++;

        IOMakeDBDMADescriptor( dmaDesc,
                            kdbdmaStop,
                            kdbdmaKeyStream0,
                            kdbdmaIntNever,
                            kdbdmaBranchNever,
                            kdbdmaWaitNever,
                            0,
                            0 );


        for ( i = 0; (!ok) && (i < 3); i++ )
        {
            // clear the word that the kdbdmaStoreQuad descriptor will overwrite via DMA
            dmaDescriptors[16].operation = 0;

            IOSetDBDMACommandPtr( ioBaseDMA, dmaDescriptorsPhys );
            IODBDMAContinue( ioBaseDMA );

            IODelay( 200 );

            status = IOGetDBDMAChannelStatus( ioBaseDMA );

            if ( ((status & kdbdmaActive) == 0)
                &&  ((status & kdbdmaDead) == 0)
                    && (OSReadSwapInt32( &dmaDescriptors[16].operation, 0 ) == 0x12345678 ))
                ok = true;
        }

        IODBDMAReset( ioBaseDMA );

    } while (false);

    if (dmaDescriptors)
	IOFreeContiguous(dmaDescriptors, page_size);


    return ok;
}