Example No. 1
inline sval
ThinWireMgr::locked_updateAvailable()
{
    _ASSERT_HELD(lock);

#if defined(TARGET_powerpc)
    InterruptState is;
    sval rc;
    disableHardwareInterrupts(is);
    rc = ThinWireChan::thinwireSelect();
    if (_FAILURE(rc)) {
	enableHardwareInterrupts(is);
	return rc;
    }
    readAvailable |= rc;
    enableHardwareInterrupts(is);
#elif defined(TARGET_mips64)
    extern uval32 enabledThinwireSelect();
    readAvailable |= enabledThinwireSelect();
#elif defined(TARGET_amd64)
    InterruptState is; // like the powerpc case: bracket the select with interrupts disabled
    disableHardwareInterrupts(is);
    readAvailable |= thinwireSelect();
    enableHardwareInterrupts(is);
#elif defined(TARGET_generic64)
    extern uval32 enabledThinwireSelect();
    readAvailable |= enabledThinwireSelect();
#else /* #if defined(TARGET_powerpc) */
#error Need TARGET_specific code
#endif /* #if defined(TARGET_powerpc) */

    return 0;
}
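
Examples 1 and 2 repeat the same bracketing on every target: disable hardware interrupts, poll the thinwire channel, re-enable, and remember to re-enable on the early-return path as well. Below is a minimal sketch of that bracketing expressed as a scope guard; InterruptState, disableHardwareInterrupts, enableHardwareInterrupts and thinwireSelectStub are stand-ins defined inside the sketch, not the real K42 kernel definitions.

#include <cstdio>

// Hypothetical stand-ins for the kernel primitives used above.
struct InterruptState { int saved; };
static void disableHardwareInterrupts(InterruptState &is) { is.saved = 1; }
static void enableHardwareInterrupts(InterruptState &is)  { (void) is; }

// Scope guard: interrupts are re-enabled on every exit path,
// including the early return taken when the select fails.
class HardwareInterruptGuard {
    InterruptState is;
public:
    HardwareInterruptGuard()  { disableHardwareInterrupts(is); }
    ~HardwareInterruptGuard() { enableHardwareInterrupts(is); }
};

static long thinwireSelectStub() { return 0x1; }   // pretend channel 0 has data

static long locked_updateAvailableSketch(unsigned long &readAvailable)
{
    HardwareInterruptGuard guard;          // disabled for the duration of the scope
    long rc = thinwireSelectStub();
    if (rc < 0) return rc;                 // guard re-enables here as well
    readAvailable |= (unsigned long) rc;
    return 0;
}

int main()
{
    unsigned long readAvailable = 0;
    locked_updateAvailableSketch(readAvailable);
    std::printf("readAvailable = 0x%lx\n", readAvailable);
    return 0;
}
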
Example No. 2
inline sval
ThinWireMgr::locked_read(uval channel, char *buf, uval length)
{
    _ASSERT_HELD(lock);
    uval rc;

#if defined(TARGET_powerpc)
    InterruptState is;
    disableHardwareInterrupts(is);
    rc = ThinWireChan::twChannels[channel]->read(buf, length);
    enableHardwareInterrupts(is);
#elif defined(TARGET_mips64)
    extern uval enabledThinwireRead(uval channel, char * buf, uval length);
    rc = enabledThinwireRead(channel,buf,length);
#elif defined(TARGET_amd64)
    rc = 0;
    passertMsg(0, "woops not for amd64\n");
#elif defined(TARGET_generic64)
    extern uval enabledThinwireRead(uval channel, char * buf, uval length);
    rc = enabledThinwireRead(channel,buf,length);
#else /* #if defined(TARGET_powerpc) */
#error Need TARGET_specific code
#endif /* #if defined(TARGET_powerpc) */
    return rc;
}
Example No. 3
SysStatus
ProcessVPList::attachDispatcher(CPUDomainAnnex *cda, DispatcherID dspid,
				HATRef hatRef)
{
    SysStatus rc;
    VPInfo *vpInfo;
    ProcessAnnex *pa;

    tassertMsg(cda->getPP() == Scheduler::GetVP(), "CDA not on this pp.\n");

    RDNum rd; VPNum vp;
    SysTypes::UNPACK_DSPID(dspid, rd, vp);

    if (requests.enter() < 0) {
	return _SERROR(2644, 0, ESRCH);	// process being destroyed
    }

    rc = findProcessAnnex(rd, vp, vpInfo, pa);
    if (_FAILURE(rc)) {
	requests.leave();
	return rc;
    }

    vpInfo->lock.acquire();

    if (pa->pp != ProcessAnnex::NO_PHYS_PROC) {
	rc = _SERROR(2645, 0, EINVAL);
	goto CleanupAndReturn;
    }

    if (vpInfo->dspCounter == 0) {
	tassertMsg(vpInfo->pp == ProcessAnnex::NO_PHYS_PROC,
		   "VP not detached.\n");
	vpInfo->pp = cda->getPP();
	rc = DREF(hatRef)->attachVP(vp);
	tassertMsg(_SUCCESS(rc), "attachVP failed.\n");
	if (!KernelInfo::ControlFlagIsSet(KernelInfo::RUN_SILENT)) {
	    err_printf("Migrated pid 0x%lx, vp %ld to pp %ld.\n",
		       processID, vp, vpInfo->pp);
	}
    } else if (cda->getPP() != vpInfo->pp) {
	rc = _SERROR(2646, 0, EINVAL);
	goto CleanupAndReturn;
    }

    vpInfo->dspCounter++;

    InterruptState is;
    disableHardwareInterrupts(is);
    exceptionLocal.ipcTargetTable.enter(pa);
    pa->attach(cda);
    enableHardwareInterrupts(is);

    rc = 0;

CleanupAndReturn:
    vpInfo->lock.release();
    requests.leave();
    return rc;
}
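
attachDispatcher shows the locking shape used throughout ProcessVPList: enter the request counter so the process cannot be destroyed underneath us, take the per-VP lock, and funnel every exit through CleanupAndReturn so both are released. Here is a compressed sketch of that shape, with hypothetical stand-ins (RequestGate, VPInfoSketch) in place of the real RequestCountWithStop and VPInfo types.

#include <atomic>
#include <mutex>
#include <cstdio>

// Hypothetical stand-in for RequestCountWithStop: enter() fails once the
// object is being destroyed, leave() drops the reference.
struct RequestGate {
    std::atomic<long> count{0};
    std::atomic<bool> dying{false};
    long enter() { if (dying) return -1; ++count; return 0; }
    void leave() { --count; }
};

struct VPInfoSketch {
    std::mutex lock;
    int pp = -1;                 // physical processor; -1 plays NO_PHYS_PROC
    int dspCounter = 0;
};

static int attachSketch(RequestGate &requests, VPInfoSketch &vpInfo, int thisPP)
{
    if (requests.enter() < 0) return -1;           // process being destroyed

    int rc = 0;
    {
        std::lock_guard<std::mutex> guard(vpInfo.lock);
        if (vpInfo.dspCounter == 0) {
            vpInfo.pp = thisPP;                    // first dispatcher attaches the VP
        } else if (vpInfo.pp != thisPP) {
            rc = -2;                               // VP lives on another processor
        }
        if (rc == 0) vpInfo.dspCounter++;
    }                                              // lock released on every path

    requests.leave();
    return rc;
}

int main()
{
    RequestGate requests;
    VPInfoSketch vpInfo;
    int rc = attachSketch(requests, vpInfo, /*thisPP*/ 0);
    std::printf("attach rc = %d, dspCounter = %d\n", rc, vpInfo.dspCounter);
    return 0;
}
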
Example No. 4
bool RTL8139::disableAdapter( UInt32 currentLevel )
{
    bool success = false;

	ELG( 0, currentLevel, 'disA', "RTL8139::disableAdapter" );
    DEBUG_LOG( "disableAdapter() ===>\n" );
    DEBUG_LOG( "disable currentLevel %ld\n", currentLevel );

    switch ( currentLevel )
    {
	case kActivationLevel1:
		timerSrc->cancelTimeout();		// Stop the timer event source.
		initAdapter( kResetChip );		// Reset the hardware engine.

		phySetMedium( MEDIUM_INDEX_NONE );	// Power down the PHY

		if ( pciNub )
			pciNub->close( this );	// Close our provider.

		success = true;
		break;

	case kActivationLevel2:
		disableHardwareInterrupts();		// KDP doesn't use interrupts.
		workLoop->disableAllInterrupts();

			// Stop the transmit queue. outputPacket() will not get called
			// after this. KDP calls sendPacket() to send a packet in polled
			// mode and that is unaffected by the state of the output queue.

		fTransmitQueue->stop();
		fTransmitQueue->flush();

		setLinkStatus( kIONetworkLinkValid );	// Valid sans kIONetworkLinkActive
		success = true;
		break;
    }/* end SWITCH */

    if ( false == success )
        IOLog( "disable currentLevel %u failed\n", (unsigned int)currentLevel );

    DEBUG_LOG( "disableAdapter() <===\n" );

    return success;
}/* end disableAdapter */
Example No. 5
bool CLASS::decreaseActivationLevel( UInt32 currentLevel )
{
    bool success = true;

    switch (currentLevel)
    {
        case kActivationLevelKDP:

            hwStop();

            fWatchdogTimer->cancelTimeout();

            // Report link valid and down.

            setLinkStatus( kIONetworkLinkValid );

            // Flush all packets held in the queue and prevent it
            // from accumulating any additional packets.

            fTransmitQueue->setCapacity(0);
            fTransmitQueue->flush();

            if (fPCINub) fPCINub->close(this);

            // Free all runtime resources. Must make sure DMA
            // engine is stopped before this to avoid hard to
            // locate mbuf/memory corruptions.

            swFree();

            break;

        case kActivationLevelBSD:

            fTransmitQueue->stop();

            // Don't need interrupts for KDP, mask all sources.

            disableHardwareInterrupts();

            break;
    }

    return success;
}
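
Examples 4 and 5 both step a network adapter down one activation level at a time: the BSD level only quiesces the output queue and masks interrupts (KDP keeps working in polled mode), while the lower level stops the hardware and frees runtime resources. A toy sketch of that staged teardown follows; the level names and helpers are illustrative, not the IONetworkingFamily API.

#include <cstdio>

// Illustrative levels, mirroring the two-stage model above.
enum ActivationLevel { kLevelNone, kLevelKDP, kLevelBSD };

struct ToyAdapter {
    ActivationLevel level = kLevelBSD;

    void stopOutputQueue()     { std::puts("output queue stopped"); }
    void maskInterrupts()      { std::puts("interrupts masked"); }
    void stopHardwareAndFree() { std::puts("DMA stopped, resources freed"); }

    // Drop exactly one level; the order mirrors decreaseActivationLevel().
    bool decreaseLevel() {
        switch (level) {
        case kLevelBSD:                 // BSD -> KDP: quiesce, keep the polled path alive
            stopOutputQueue();
            maskInterrupts();
            level = kLevelKDP;
            return true;
        case kLevelKDP:                 // KDP -> off: stop DMA before freeing memory
            stopHardwareAndFree();
            level = kLevelNone;
            return true;
        default:
            return false;
        }
    }
};

int main()
{
    ToyAdapter a;
    while (a.decreaseLevel()) { /* step down until fully disabled */ }
    return 0;
}
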
Example No. 6
void CLASS::hwStop( void )
{
    int wait;

    disableHardwareInterrupts();

    WriteReg(CR, CR_RXD | CR_TXD);
    for (wait = kHardwareWaitTimeout; wait; wait--)
    {
        IOSleep(kHardwareWaitValue);
        if ((ReadReg(CR) & (CR_RXE | CR_TXE)) == 0)
            break;
    }

    if (!wait)
    {
        ERROR_LOG("%s: %s TIMEOUT\n", getName(), __FUNCTION__);
    }
}
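
hwStop() is a bounded polling loop: write the stop command, then repeatedly sleep and re-read the command register until the receive/transmit enable bits clear or the retry budget runs out. Below is a standalone sketch of the wait-for-bits-to-clear idiom; readCommandReg and the bit mask are simulated, not the RTL8139 registers.

#include <cstdio>
#include <chrono>
#include <thread>

// Simulated register access: pretend the engines take a few polls to stop.
static unsigned readCommandReg() {
    static int polls = 0;
    return (++polls < 3) ? 0x0Cu : 0x00u;   // 0x0C plays CR_RXE | CR_TXE still set
}

// Wait until the `mask` bits clear, polling at most `maxTries` times.
// Returns true on success, false on timeout.
static bool waitBitsClear(unsigned mask, int maxTries, int sleepMs)
{
    for (int wait = maxTries; wait; wait--) {
        std::this_thread::sleep_for(std::chrono::milliseconds(sleepMs));
        if ((readCommandReg() & mask) == 0)
            return true;
    }
    return false;                            // mirrors the !wait timeout branch
}

int main()
{
    const unsigned kRxTxEnable = 0x0Cu;      // stand-in for CR_RXE | CR_TXE
    if (!waitBitsClear(kRxTxEnable, /*maxTries*/ 10, /*sleepMs*/ 1))
        std::fprintf(stderr, "hwStop sketch: TIMEOUT\n");
    else
        std::puts("engines stopped");
    return 0;
}
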
Example No. 7
extern "C" void
ExceptionLocal_PPCPrimitiveAwaitRetry(ProcessAnnex *srcProc, CommID targetID)
{
    TraceOSExceptionAwaitPPCRetry(
        (uval64) CurrentThread, targetID);

    PRESERVE_PPC_PAGE();

    enableHardwareInterrupts();

    Scheduler::DelayMicrosecs(10000);

    disableHardwareInterrupts();

    RESTORE_PPC_PAGE();

    exceptionLocal.dispatchQueue.awaitDispatch(srcProc);
    TraceOSExceptionAwaitPPCRetryDone(srcProc->commID);
}
Example No. 8
void
ProcessVPList::VPInfo::deleteDispatchers()
{
    RDNum rd;
    ProcessAnnex *pa;

    tassertMsg((pp == Scheduler::GetVP()) ||
		    (pp == ProcessAnnex::NO_PHYS_PROC),
	       "Wrong processor.\n");

    for (rd = 0; rd < Scheduler::RDLimit; rd++) {
	pa = dspInfo[rd].pa;
	if (pa != NULL) {
	    tassertMsg((pa->pp == Scheduler::GetVP()) ||
			    (pa->pp == ProcessAnnex::NO_PHYS_PROC),
		       "Wrong processor.\n");
	    if (pa->pp != ProcessAnnex::NO_PHYS_PROC) {
		// N.B. Don't use the disableHardwareInterrupts(is) form here,
		//      since waitForTerminate() may enable/disable while
		//      blocking, and the restored value could become stale.
		disableHardwareInterrupts();
		exceptionLocal.ipcTargetTable.remove(pa);
		exceptionLocal.kernelTimer.remove(pa);
		exceptionLocal.ipcRetryManager.remove(pa);
		pa->waitForTerminate();
		pa->detach();
		enableHardwareInterrupts();
	    }
	    if (pa->pendingRemoteIPC != NULL) {
		ExceptionLocal::FreeRemoteIPCBuffer(pa->pendingRemoteIPC);
		pa->pendingRemoteIPC = NULL;
	    }
	    pa->awaitAndFreeAllNotifications();
	    pa->releaseDispatcherMemory();
	    delete pa;
	}
    }

    // Switch to the canonical kernel address space, just in case we are
    // currently "borrowing" the address space we're about to tear down.
    ((HATKernel*)(DREFGOBJK(TheKernelHATRef)))->switchToKernelAddressSpace();
}
Example No. 9
/*virtual*/ SysStatus
CPUDomainAnnex::_setWeight(__in uval wght)
{
    if (Scheduler::GetVP() != pp) {
	return _SERROR(2633, 0, EINVAL);
    }
    if ((wght < KernelScheduler::MIN_WEIGHT) ||
		    (wght > KernelScheduler::MAX_WEIGHT)) {
	return _SERROR(2634, 0, EINVAL);
    }

    InterruptState is;
    disableHardwareInterrupts(is);

    weight = wght;
    drag = KernelScheduler::MAX_WEIGHT / weight;

    enableHardwareInterrupts(is);

    return 0;
}
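
_setWeight validates the new weight against the scheduler's bounds and then derives drag as the integer ratio MAX_WEIGHT / weight, so heavier domains get proportionally less drag. The tiny illustration below shows that division at a few weights; the bound values are made up, not the real KernelScheduler constants.

#include <cstdio>

int main()
{
    // Illustrative bound only; the real KernelScheduler constants differ.
    const unsigned long MAX_WEIGHT = 256;
    const unsigned long weights[] = { 1, 16, MAX_WEIGHT };

    for (unsigned long weight : weights) {
        unsigned long drag = MAX_WEIGHT / weight;  // same integer division as _setWeight
        std::printf("weight %3lu -> drag %3lu\n", weight, drag);
    }
    return 0;
}
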
Example No. 10
/*static*/ SysStatus
ExceptionLocal::PPCAsyncRemote(ProcessAnnex *srcProc, CommID targetID,
                               XHandle xhandle, uval methnum,
                               uval length, uval *buf)
{
    SysStatus rc;
    uval wasActive;
    BaseProcessRef pref;

    TraceOSExceptionPPCAsyncRemote(
        (uval64) CurrentThread, targetID);

    enableHardwareInterrupts();

    wasActive = CurrentThread->isActive();
    if (!wasActive) {
        CurrentThread->activate();
    }

    // find out the process that matches targetID, and call
    rc = DREFGOBJ(TheProcessSetRef)->
         getRefFromPID(SysTypes::PID_FROM_COMMID(targetID), pref);

    if (_SUCCESS(rc)) {
        rc = DREF((ProcessRef)pref)->sendRemoteAsyncMsg(targetID,
                srcProc->commID,
                xhandle, methnum,
                length, buf);
    }

    if (!wasActive) {
        CurrentThread->deactivate();
    }

    disableHardwareInterrupts();

    TraceOSExceptionPPCAsyncRemoteDone(srcProc->commID);

    return rc;
}
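
PPCAsyncRemote uses a bracketing idiom that recurs in these exception-level paths: enable hardware interrupts, activate the current thread only if it was not already active (and deactivate it again afterwards so the bracketing nests), do the potentially blocking work, then disable interrupts before returning to exception level. Here is a skeletal sketch with a stubbed thread object; ToyThread and the empty interrupt functions are stand-ins, not the K42 scheduler API.

#include <cstdio>

// Simulated current-thread record; isActive/activate/deactivate are stand-ins.
struct ToyThread {
    bool active = false;
    bool isActive() const { return active; }
    void activate()   { active = true;  }
    void deactivate() { active = false; }
};
static ToyThread currentThread;

static void enableHardwareInterrupts()  { /* simulated */ }
static void disableHardwareInterrupts() { /* simulated */ }

static int doBlockingRemoteWork() { return 0; }   // placeholder for the real send

static int remoteCallSketch()
{
    enableHardwareInterrupts();

    // Only toggle activation if we were not already active, so the
    // bracketing composes when the caller has activated the thread itself.
    bool wasActive = currentThread.isActive();
    if (!wasActive) currentThread.activate();

    int rc = doBlockingRemoteWork();

    if (!wasActive) currentThread.deactivate();
    disableHardwareInterrupts();
    return rc;
}

int main()
{
    std::printf("rc = %d\n", remoteCallSketch());
    return 0;
}
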
Example No. 11
/*virtual*/ SysStatus
CPUDomainAnnex::_setPriorityClass(__in uval pclass)
{
    if (Scheduler::GetVP() != pp) {
	return _SERROR(2631, 0, EINVAL);
    }
    if ((pclass < KernelScheduler::PRIORITY_CLASS_KERNEL) ||
		(pclass > KernelScheduler::PRIORITY_CLASS_UNRUNNABLE)) {
	return _SERROR(2632, 0, EINVAL);
    }

    InterruptState is;
    disableHardwareInterrupts(is);

    priority.part.pclass = pclass;
    if (currentPA != NULL) {
	exceptionLocal.dispatchQueue.removeCPUDomainAnnex(this);
	exceptionLocal.dispatchQueue.addCPUDomainAnnex(this);
    }

    enableHardwareInterrupts(is);

    return 0;
}
Example No. 12
/*
 * PPC_ASYNC call in kernel implemented by just procedure call
 */
void
PPC_ASYNC(SysStatus &rc, CommID targetID, XHandle xhandle, uval methnum)
{
    InterruptState tmp;
    disableHardwareInterrupts(tmp);

    ProcessAnnex *const srcProc = exceptionLocal.kernelProcessAnnex;
    tassertMsg(exceptionLocal.currentProcessAnnex == srcProc,
               "Not in kernel!\n");

    ProcessAnnex *target = exceptionLocal.ipcTargetTable.lookupWild(targetID);

    if (target != NULL) {
        // local case
        rc = target->dispatcher->asyncBufferLocal.storeMsg(srcProc->commID,
                xhandle, methnum,
                GET_PPC_LENGTH(),
                PPCPAGE_DATA);
        TraceOSExceptionPPCAsyncLocal(target->commID, rc);
        RESET_PPC();			// done with PPC_PAGE
        if (_SUCCESS(rc)) {
            target->deliverInterrupt(SoftIntr::ASYNC_MSG);
        }
    } else {
        // remote case
        uval const length = GET_PPC_LENGTH();
        uval buf[AsyncBuffer::MAX_LENGTH];
        memcpy(buf, PPCPAGE_DATA, length);
        RESET_PPC();			// done with PPC_PAGE

        rc = ExceptionLocal::PPCAsyncRemote(srcProc, targetID,
                                            xhandle, methnum, length, buf);
    }

    enableHardwareInterrupts(tmp);
}
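
PPC_ASYNC chooses between two delivery paths by probing the local IPC target table: if the target dispatcher is on this processor, the message goes straight into its async buffer and a soft interrupt is posted; otherwise the PPC page is copied into a local buffer first (so the page can be released) and the remote path is taken. The schematic sketch below shows that decision with toy types standing in for the target table, dispatcher, and remote send.

#include <cstdio>
#include <cstring>
#include <map>
#include <vector>

// Toy "dispatcher": just an inbox plus a pending-interrupt flag.
struct ToyDispatcher {
    std::vector<char> inbox;
    bool asyncMsgPending = false;
};

// Toy local target table keyed by CommID.
static std::map<unsigned long, ToyDispatcher *> localTargets;

static int sendRemoteAsync(unsigned long /*targetID*/,
                           const char * /*buf*/, std::size_t /*len*/)
{ return 0; }                                   // placeholder for the remote path

static int ppcAsyncSketch(unsigned long targetID,
                          const char *ppcPage, std::size_t len)
{
    auto it = localTargets.find(targetID);
    if (it != localTargets.end()) {
        // Local case: store directly into the target's buffer and post
        // the equivalent of SoftIntr::ASYNC_MSG.
        ToyDispatcher *target = it->second;
        target->inbox.assign(ppcPage, ppcPage + len);
        target->asyncMsgPending = true;
        return 0;
    }
    // Remote case: copy the payload out of the shared page before releasing
    // it, then hand the copy to the remote-send path.
    char buf[256];
    std::size_t n = (len < sizeof(buf)) ? len : sizeof(buf);
    std::memcpy(buf, ppcPage, n);
    return sendRemoteAsync(targetID, buf, n);
}

int main()
{
    ToyDispatcher d;
    localTargets[0x10] = &d;
    const char msg[] = "hello";
    std::printf("local rc = %d, remote rc = %d\n",
                ppcAsyncSketch(0x10, msg, sizeof msg),
                ppcAsyncSketch(0x20, msg, sizeof msg));
    return 0;
}
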
Example No. 13
SysStatus
ProcessVPList::createDispatcher(CPUDomainAnnex *cda, DispatcherID dspid,
				EntryPointDesc entry, uval dispatcherAddr,
				uval initMsgLength, char *initMsg,
				ProcessRef procRef, HATRef hatRef)
{
    SysStatus rc;
    VPInfo *vpInfo;
    uval newLimit, size;
    DspTable *newTable;
    ProcessAnnex *pa;
    SegmentTable *segTable;
    Dispatcher *dsp, *dspUser;
    RegionRef dspRegRef;
    FCMRef dspFCMRef;
    uval dspOffset, dspAddrKern;

    tassertMsg(cda->getPP() == Scheduler::GetVP(), "CDA not on this pp.\n");

    RDNum rd; VPNum vp;
    SysTypes::UNPACK_DSPID(dspid, rd, vp);

    if (vp >= Scheduler::VPLimit) {
	return _SERROR(1752, 0, EINVAL);
    }

    if (rd >= Scheduler::RDLimit) {
	return _SERROR(1751, 0, EINVAL);
    }

    if (PAGE_ROUND_DOWN(dispatcherAddr) != dispatcherAddr) {
	return _SERROR(1327, 0, EINVAL);
    }

    if (requests.enter() < 0) {
	return _SERROR(1328, 0, ESRCH);	// process being destroyed
    }

    if ((vp < vpLimit) && (dspTable->vpInfo[vp] != NULL)) {
	vpInfo = dspTable->vpInfo[vp];
    } else {
	// We don't have a VPInfo structure for this vp.  Create one, guarded
	// by stop()'ing requests.  RequestCountWithStop doesn't support an
	// upgrade operation, so we have to "leave" before we can "stop".
	requests.leave();
	if (requests.stop() < 0) {
	    return _SERROR(2640, 0, ESRCH);	// process being destroyed
	}

	if (vp >= vpLimit) {
	    // We have to increase the size of the table.  We make the first
	    // increment larger than subsequent ones to lessen ramp-up costs.
	    newLimit = (vpLimit == 1) ? 16 : (vpLimit * 2);
	    // Make sure the newLimit is large enough to include vp.  We won't
	    // blow up because we know that vp < Scheduler::VPLimit.
	    while (vp >= newLimit) {
		newLimit *= 2;
	    }

	    // Allocate a new table.  DspTable includes space for one VPInfo
	    // pointer, hence the "newLimit - 1" in the following calculation.
	    size = sizeof(DspTable) + ((newLimit - 1) * sizeof(VPInfo *));
	    newTable = (DspTable *) AllocGlobalPadded::alloc(size);
	    tassertMsg(newTable != NULL, "DspTable allocation failed.\n");

	    // Copy content of the old table to the new, and initialize the
	    // rest of the new table.
	    for (uval i = 0; i < vpLimit; i++) {
		newTable->vpInfo[i] = dspTable->vpInfo[i];
	    }
	    for (uval i = vpLimit; i < newLimit; i++) {
		newTable->vpInfo[i] = NULL;
	    }

	    // Free the old table, unless it is the initial (pre-allocated)
	    // table.
	    if (vpLimit > 1) {
		size = sizeof(DspTable) + ((vpLimit - 1) * sizeof(VPInfo *));
		AllocGlobalPadded::free(dspTable, size);
	    }

	    // Install the new table.
	    dspTable = newTable;
	    vpLimit = newLimit;
	}

	// We have to check vpInfo[vp] again now that requests are stop'd.
	vpInfo = dspTable->vpInfo[vp];
	if (vpInfo == NULL) {
	    if (vp == 0) {
		// Space for the first VPInfo structure is pre-allocated.
		vpInfo = &vpInfo0;
	    } else {
		vpInfo = new VPInfo;
		tassertMsg(vpInfo != NULL, "VPInfo allocation failed.\n");
	    }
	    vpInfo->init(cda->getPP());
	    dspTable->vpInfo[vp] = vpInfo;
	    vpCounter++;
	    if (!KernelInfo::ControlFlagIsSet(KernelInfo::RUN_SILENT)) {
		err_printf("Mapping program %s, pid 0x%lx, vp %ld to pp %ld.\n",
			   name, processID, vp, vpInfo->pp);
	    }
	}

	// Restart and then re-enter the request counter.
	requests.restart();
	if (requests.enter() < 0) {
	    return _SERROR(2641, 0, ESRCH);	// process being destroyed
	}
    }

    /*
     * At this point the requests counter has been enter'd and vpInfo points
     * to a valid VPInfo structure for this vp.  All further processing is
     * done under the vp lock.
     */

    vpInfo->lock.acquire();

    if (vpInfo->pp != cda->getPP()) {
	// VP is not on this physical processor.
	rc = _SERROR(1750, 0, EINVAL);
	goto CleanupAndReturn;
    }

    if (vpInfo->dspInfo[rd].pa != NULL) {
	// Dispatcher already exists.
	rc = _SERROR(1329, 0, EEXIST);
	goto CleanupAndReturn;
    }

    dspUser = (Dispatcher *) dispatcherAddr;
    if (isKern) {
	dspFCMRef = NULL;
	dspOffset = 0;
	dsp = dspUser;
	// Set a bogus interrupt bit to make the dispatcher runnable.
	(void) dsp->interrupts.fetchAndSet(SoftIntr::PREEMPT);
    } else {
	rc = DREF(procRef)->vaddrToRegion(dispatcherAddr, dspRegRef);
	if (_FAILURE(rc)) goto CleanupAndReturn;
	rc = DREF(dspRegRef)->vaddrToFCM(vp, dispatcherAddr, 0,
					 dspFCMRef, dspOffset);
	if (_FAILURE(rc)) goto CleanupAndReturn;
	rc = DREF(dspFCMRef)->addReference();
	if (_FAILURE(rc)) goto CleanupAndReturn;
	rc = archAllocDispatcherPage(dispatcherAddr, dspAddrKern);
	tassertMsg(_SUCCESS(rc), "archAllocDispatcherPage failed.\n");
	rc = DREF(dspFCMRef)->establishPage(dspOffset, dspAddrKern, PAGE_SIZE);
	tassertMsg(_SUCCESS(rc), "establishPage failed.\n");
	dsp = (Dispatcher *) dspAddrKern;
	dsp->init(dspid);
	rc = dsp->asyncBufferLocal.storeMsg(_KERNEL_PID, 0,
					    0, initMsgLength, initMsg);
	if (_FAILURE(rc)) {
	    (void) DREF(dspFCMRef)->disEstablishPage(dspOffset, PAGE_SIZE);
	    (void) DREF(dspFCMRef)->removeReference();
	    goto CleanupAndReturn;
	}
	(void) dsp->interrupts.fetchAndSet(SoftIntr::ASYNC_MSG);
    }

    rc = DREF(hatRef)->getSegmentTable(vp, segTable);
    tassertMsg(_SUCCESS(rc), "getSegmentTable failed.\n");

    pa = new ProcessAnnex();
    tassertMsg(pa != NULL, "ProcessAnnex allocation failed.\n");
    pa->init(procRef, processID, userMode, isKern,
	     dspUser, dsp, dspFCMRef, dspOffset,
	     segTable, dspid);

    pa->setEntryPoint(RUN_ENTRY, entry);

    vpInfo->dspInfo[rd].pa = pa;
    vpInfo->dspCounter++;

    InterruptState is;
    disableHardwareInterrupts(is);
    exceptionLocal.ipcTargetTable.enter(pa);
    pa->attach(cda);
    enableHardwareInterrupts(is);

    rc = 0;

CleanupAndReturn:
    vpInfo->lock.release();
    requests.leave();
    return rc;
}
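
The growth step in the middle of createDispatcher doubles the dispatcher table: jump from 1 to 16 entries on the first growth to reduce ramp-up cost, keep doubling until the requested index fits, copy the old entries, NULL the new slots, and free the old table unless it was the pre-allocated initial one. Here is a self-contained sketch of just that step, with the VPInfo pointers reduced to ints and plain new/delete in place of AllocGlobalPadded.

#include <cstdio>
#include <cstddef>

// Grow a pointer table so that index `vp` fits, using the same doubling
// policy as createDispatcher: first growth jumps to 16, then powers of two.
static void growTable(int **&table, std::size_t &limit, std::size_t vp)
{
    if (vp < limit) return;                       // already big enough

    std::size_t newLimit = (limit == 1) ? 16 : (limit * 2);
    while (vp >= newLimit) newLimit *= 2;

    int **newTable = new int *[newLimit];
    for (std::size_t i = 0; i < limit; i++)       // copy existing entries
        newTable[i] = table[i];
    for (std::size_t i = limit; i < newLimit; i++)
        newTable[i] = nullptr;                    // initialize the rest

    delete[] table;                               // (the real code keeps the
    table = newTable;                             //  pre-allocated initial table)
    limit = newLimit;
}

int main()
{
    std::size_t limit = 1;
    int **table = new int *[1]{nullptr};
    growTable(table, limit, 37);                  // needs at least 38 slots
    std::printf("limit grew to %zu\n", limit);
    delete[] table;
    return 0;
}
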
Example No. 14
SysStatus
ProcessVPList::detachDispatcher(CPUDomainAnnex *cda, DispatcherID dspid,
				HATRef hatRef)
{
    SysStatus rc;
    VPInfo *vpInfo;
    ProcessAnnex *pa;
    uval64 ipcRetryIDs;

    tassertMsg(cda->getPP() == Scheduler::GetVP(), "CDA not on this pp.\n");

    RDNum rd; VPNum vp;
    SysTypes::UNPACK_DSPID(dspid, rd, vp);

    if (requests.enter() < 0) {
	return _SERROR(2642, 0, ESRCH);	// process being destroyed
    }

    rc = findProcessAnnex(rd, vp, vpInfo, pa);
    if (_FAILURE(rc)) {
	requests.leave();
	return rc;
    }

    if (!pa->isAttached(cda)) {
	requests.leave();
	return _SERROR(2643, 0, EINVAL);
    }

    vpInfo->lock.acquire();

    disableHardwareInterrupts();

    if (pa->reservedThread != NULL) {
	/*
	 * FIXME:  For now, don't try to detach a dispatcher that is currently
	 *         disabled.  We have to do better in the long run.
	 */
	enableHardwareInterrupts();
	rc = _SERROR(2312, 0, EAGAIN);
	goto CleanupAndReturn;
    }

    pa->detach();
    exceptionLocal.ipcTargetTable.remove(pa);

    if (KernelTimer::TimerRequestTime(pa) != SysTime(-1)) {
	/*
	 * PA has a timeout request registered.  Rather than try to reproduce
	 * it on the new processor, we simply generate a TIMER_EVENT soft
	 * interrupt so that the dispatcher can sort things out for itself.
	 */
	(void) pa->dispatcher->interrupts.fetchAndSet(SoftIntr::TIMER_EVENT);
    }
    exceptionLocal.kernelTimer.remove(pa);

    ipcRetryIDs = IPCRetryManager::GetIPCRetryIDs(pa);
    if (ipcRetryIDs != 0) {
	/*
	 * PA has IPCs waiting to be retried.  Simply generate notifications
	 * for all of them, to be delivered when the dispatcher runs.
	 */
	pa->dispatcher->ipcRetry |= ipcRetryIDs;
	(void) pa->dispatcher->interrupts.
				fetchAndSet(SoftIntr::IPC_RETRY_NOTIFY);
    }
    exceptionLocal.ipcRetryManager.remove(pa);

    enableHardwareInterrupts();

    vpInfo->dspCounter--;
    if (vpInfo->dspCounter > 0) {
	rc = 0;
	goto CleanupAndReturn;
    }

    /*
     * This VP's last dispatcher has now been detached, so detach the VP.
     * Switch to the canonical kernel address space, in case we're currently
     * "borrowing" the address space we're about to unmap.
     */
    ((HATKernel*)(DREFGOBJK(TheKernelHATRef)))->switchToKernelAddressSpace();

    rc = DREF(hatRef)->detachVP(vp);
    tassertMsg(_SUCCESS(rc), "hat->detachVP() failed.\n");

    vpInfo->pp = ProcessAnnex::NO_PHYS_PROC; // VP now ready for re-attachment
    rc = 0;

CleanupAndReturn:
    vpInfo->lock.release();
    requests.leave();
    return rc;
}
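
Rather than migrating kernel-held timer and IPC-retry state along with a dispatcher, detachDispatcher converts each piece of pending state into a soft-interrupt notification and lets the dispatcher recompute what it needs the next time it runs. A compact sketch of that convert-to-notification idea using a plain bitmask follows; the bit names mimic SoftIntr but are local to the sketch.

#include <cstdio>

// Local stand-ins for the soft-interrupt bits used above.
enum : unsigned { TIMER_EVENT = 1u << 0, IPC_RETRY_NOTIFY = 1u << 1 };

struct ToyDispatcherState {
    unsigned softInterrupts = 0;     // pending notification bits
    unsigned long ipcRetry  = 0;     // which IPCs should be retried
};

// Detach-side conversion: fold kernel-held state into notifications.
static void convertPendingState(ToyDispatcherState &d,
                                bool hasTimeout, unsigned long retryIDs)
{
    if (hasTimeout)
        d.softInterrupts |= TIMER_EVENT;        // let the dispatcher re-arm itself
    if (retryIDs != 0) {
        d.ipcRetry |= retryIDs;                 // record which IPCs to retry
        d.softInterrupts |= IPC_RETRY_NOTIFY;
    }
}

int main()
{
    ToyDispatcherState d;
    convertPendingState(d, /*hasTimeout*/ true, /*retryIDs*/ 0x5);
    std::printf("softInterrupts 0x%x, ipcRetry 0x%lx\n", d.softInterrupts, d.ipcRetry);
    return 0;
}
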
Example No. 15
/*static*/ void
ExceptionLocal::PrintStatus(uval dumpThreads)
{
    SysStatus rc;
    uval i, j, keyIter, numThreads;
    ProcessAnnex *pa, *prev;
    StubSchedulerService schedServ(StubObj::UNINITIALIZED);
    // MAX_THREADS is defined to be large enough to fill the PPC page,
    // but we get stack overflows if we use that much, so use half.
    uval const MAX_NUM_THREADS = SchedulerService::MAX_THREADS/2;
    Thread::Status threadStatus[MAX_NUM_THREADS];
    SysTime const tps = SchedulerTimer::TicksPerSecond();

    disableHardwareInterrupts();

    for (i = 0; i < exceptionLocal.ipcTargetTable._tableSize; i++) {

        pa = exceptionLocal.ipcTargetTable._table[i];

        while (pa != NULL) {

            err_printf("ProcessAnnex %p (%s), pid 0x%lx, rd %ld, vp %ld\n",
                       pa, pa->dispatcher->progName,
                       SysTypes::PID_FROM_COMMID(pa->commID),
                       SysTypes::RD_FROM_COMMID(pa->commID),
                       SysTypes::VP_FROM_COMMID(pa->commID));

            err_printf("    dispatcher %p, interrupts 0x%x\n",
                       pa->dispatcher,
                       pa->dispatcher->interrupts.flags);
#ifdef DEBUG_SOFT_INTERRUPTS
            uval any = 0;
            for (uval intr = 0; intr < SoftIntr::MAX_INTERRUPTS; intr++) {
                uval count = pa->dispatcher->interrupts.outstanding[intr];
                if (count != 0) {
                    if (!any) err_printf("    interrupt counts:");
                    err_printf(" 0x%lx(%ld)", intr, count);
                    any = 1;
                }
            }
            if (any) err_printf("\n");
#endif
            err_printf("    dispatchTime %lld.%09lld, "
                       "state offsets(exc,trap,user): 0x%lx 0x%lx 0x%lx\n",
                       pa->dispatchTicks / tps,
                       ((pa->dispatchTicks % tps) *
                        1000000000ull) / tps,
                       pa->excStateOffset,
                       pa->trapStateOffset,
                       pa->userStateOffset);
            err_printf("    terminator 0x%lx, reservedThread %p\n",
                       pa->terminator,
                       pa->reservedThread);
            err_printf("    ppcTargetID 0x%lx, ppcThreadID 0x%lx\n",
                       pa->ppcTargetID,
                       pa->ppcThreadID);

            if ((SysTypes::PID_FROM_COMMID(pa->commID) == _KERNEL_PID) &&
                    (SysTypes::RD_FROM_COMMID(pa->commID) != 0)) {
                err_printf("    idle loop\n");
            } else if (dumpThreads) {
                if (pa->reservedThread != NULL) {
                    err_printf("    cannot obtain thread status of "
                               "disabled vp\n");
                    err_printf("        sending PrintStatus request instead\n");
                    pa->deliverInterrupt(SoftIntr::PRINT_STATUS);
                } else {
                    // assume the pa commID is the right one
                    schedServ.initOHWithCommID(
                        SysTypes::WILD_COMMID(pa->commID),
                        XHANDLE_MAKE_NOSEQNO(CObjGlobals::SchedulerServiceIndex));

                    err_printf("    threads:\n");
                    keyIter = 0;
                    do {
                        enableHardwareInterrupts();
                        rc = schedServ._getStatus(keyIter, numThreads,
                                                  MAX_NUM_THREADS,
                                                  threadStatus);
                        disableHardwareInterrupts();
                        tassert(_SUCCESS(rc), err_printf("woops\n"));
                        for (j = 0; j < numThreads; j++) {
                            threadStatus[j].print();
                        }
                    } while (numThreads > 0);
                }
            }

            // Rescan the chain since we may have hardware-enabled for a while.
            prev = pa;
            pa = exceptionLocal.ipcTargetTable._table[i];
            while ((pa != NULL) && (pa != prev)) pa = pa->ipcTargetNext;
            if (pa == NULL) {
                // Prev no longer exists, so reprint the whole chain.
                pa = exceptionLocal.ipcTargetTable._table[i];
            } else {
                pa = pa->ipcTargetNext;
            }
        }
    }

    enableHardwareInterrupts();
}
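
PrintStatus has to drop hardware-interrupt exclusion around the _getStatus call, so the hash chain it is walking may change before it gets control back. The rescan at the bottom of the loop copes with that: search the chain again for the element just printed, continue from its successor if it is still present, and restart from the head if it is gone. Below is a small sketch of the same rescan on a singly linked list; the Node type is invented for illustration.

#include <cstdio>

struct Node { int id; Node *next; };

// Find the node after `prev` in the (possibly modified) chain, the way
// PrintStatus resumes after re-enabling interrupts: if `prev` is no longer
// on the chain, start over from the head.
static Node *resumeAfter(Node *head, Node *prev)
{
    Node *n = head;
    while (n != nullptr && n != prev) n = n->next;
    return (n == nullptr) ? head : n->next;
}

int main()
{
    Node c{3, nullptr}, b{2, &c}, a{1, &b};
    Node *head = &a;

    for (Node *n = head; n != nullptr; ) {
        std::printf("visiting %d\n", n->id);
        // ... exclusion dropped here; the chain may be modified ...
        n = resumeAfter(head, n);
    }
    return 0;
}
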
Example No. 16
extern "C" SysStatus
ExceptionLocal_IPCRemote(IPCRegsArch *ipcRegsP,
                         CommID targetID,
                         uval ipcType,
                         ProcessAnnex *srcProc)
{
    uval len, checkedLen;
    uval ipcBufSize;
    RemoteIPCBuffer **listp, *list, *ipcBuf;
    SysStatus rc;
    BaseProcessRef pref;

    TraceOSExceptionIPCRemote(
        (uval64) CurrentThread, ipcType, targetID);

    enableHardwareInterrupts();
    CurrentThread->activate();

    // Deallocate any remote IPC buffers that have been returned here.
    listp = &ExceptionLocal::OldRemoteIPCBuffers[exceptionLocal.vp];
    list = (RemoteIPCBuffer *) FetchAndClear((uval *) listp);
    while (list != NULL) {
        ipcBuf = list;
        list = list->next;
        AllocPinnedGlobalPadded::free(ipcBuf, ipcBuf->size);
    }

    len = GET_PPC_LENGTH();
    checkedLen = (len <= PPCPAGE_LENGTH_MAX) ? len : PPCPAGE_LENGTH_MAX;

    ipcBufSize = sizeof(RemoteIPCBuffer) + checkedLen;
    ipcBuf = (RemoteIPCBuffer *) AllocPinnedGlobalPadded::alloc(ipcBufSize);
    if (ipcBuf != NULL) {
        // remember source and size for deallocation
        ipcBuf->sourceVP = exceptionLocal.vp;
        ipcBuf->size = ipcBufSize;

        ipcBuf->ipcType = ipcType;
        ipcBuf->callerID = srcProc->commID;
        ipcBuf->ipcRegs = *ipcRegsP;

        ipcBuf->ipcPageLength = len;
        memcpy((char *) (ipcBuf + 1), PPCPAGE_DATA, checkedLen);
        RESET_PPC();

        // Find the process that matches targetID, and call it.
        rc = DREFGOBJ(TheProcessSetRef)->
             getRefFromPID(SysTypes::PID_FROM_COMMID(targetID), pref);

        if (_SUCCESS(rc)) {
            rc = DREF((ProcessRef) pref)->sendRemoteIPC(targetID, ipcBuf);
        }

        if (_FAILURE(rc)) {
            /*
             * Restore the PPC page and deallocate the ipcBuffer if the
             * remote call failed.  If it succeeded, the target end is
             * responsible for the buffer and its content.
             */
            SET_PPC_LENGTH(len);
            memcpy(PPCPAGE_DATA, (char *) (ipcBuf + 1), checkedLen);
            AllocPinnedGlobalPadded::free(ipcBuf, ipcBufSize);
            /*
             * Add ipcType into the return code, so that the sender's IPC
             * fault handler will know how to process the fault.
             */
            rc = _SERROR(_SERRCD(rc), ipcType, _SGENCD(rc));
        }
    } else {
        rc = _SERROR(2310, ipcType, EAGAIN);
    }

    CurrentThread->deactivate();
    disableHardwareInterrupts();

    if (_FAILURE(rc) && (_SGENCD(rc) == EAGAIN)) {
        exceptionLocal.ipcRetryManager.
        requestNotificationRemote(srcProc, targetID);
    }

    TraceOSExceptionIPCRemoteDone(srcProc->commID);

    return rc;
}
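
ExceptionLocal_IPCRemote copies the shared PPC page into a freshly allocated buffer before taking the remote path; on success the buffer belongs to the target from then on, and on failure the page is restored from the copy and the buffer is freed locally. Here is a minimal sketch of that transfer-or-rollback ownership rule, using plain malloc/free instead of AllocPinnedGlobalPadded.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Placeholder remote send: pretend it fails so the rollback path runs.
static int sendRemoteIPC(void * /*buf*/, std::size_t /*size*/) { return -1; }

static int ipcRemoteSketch(char *sharedPage, std::size_t len)
{
    // Copy the shared page so it can be reused while the IPC is in flight.
    char *buf = static_cast<char *>(std::malloc(len));
    if (buf == nullptr) return -1;               // EAGAIN-style failure
    std::memcpy(buf, sharedPage, len);

    int rc = sendRemoteIPC(buf, len);
    if (rc != 0) {
        // Failure: restore the page and reclaim the buffer locally.
        std::memcpy(sharedPage, buf, len);
        std::free(buf);
    }
    // Success: the receiver is now responsible for freeing `buf`.
    return rc;
}

int main()
{
    char page[16] = "ping";
    std::printf("rc = %d, page = %s\n", ipcRemoteSketch(page, sizeof page), page);
    return 0;
}
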
Example No. 17
extern "C" SysStatus
ExceptionLocal_PgfltHandler(ProcessAnnex *srcProc,
                            uval faultInfo, uval faultAddr, uval noReflection)
{
    SysStatus rc;
    uval wasActive;
    PageFaultNotification *pn;

#ifdef COLLECT_FAULT_STATS
    //So that setPFBit works right
    uval oldTSU = Scheduler::GetThreadSpecificUvalSelf();
    Scheduler::SetThreadSpecificUvalSelf(0);
#endif // COLLECT_FAULT_STATS


    TraceOSExceptionPgflt((uval64) CurrentThread, faultAddr,
                          (uval64) srcProc->commID,
                          (uval64) srcProc->excStatePtr()->codeAddr(),
                          (uval64) faultInfo);

#ifndef NDEBUG
    /*
     * Check for segment fault in borrowed kernel address space.
     */
    if (exceptionLocal.currentSegmentTable->
            checkKernelSegmentFault(faultAddr)) {
        // FIXME: it's hard to test this code.
        // This printf is here because Marc wants to know if this
        // code ever gets executed.
        err_printf("Tell Marc: segment fault %lx k %x\n",
                   faultAddr, srcProc->isKernel);
        return 0;	// we may re-fault for the page itself
    }
#endif
    StatTimer t1(ExceptionCode);
    PRESERVE_PPC_PAGE();

    enableHardwareInterrupts();

    tassertMsg(faultAddr != checkAddr, "got it\n");
#ifdef COLLECT_FAULT_STATS
    if (faultInfo & DSIStatusReg::writeFault) {
        setPFBit(NeedWrite);
    }
#endif // COLLECT_FAULT_STATS

    wasActive = CurrentThread->isActive();
    if (!wasActive) {
        CurrentThread->activate();
    }

    if (noReflection) {
        pn = NULL;
    } else {
        pn = srcProc->fnMgr.alloc(srcProc);
#ifndef NDEBUG
        if (pn != NULL) pn->vaddr = faultAddr;
#endif
    }

    rc = DREF(srcProc->processRef)->
         handleFault(faultInfo, faultAddr, pn,
                     SysTypes::VP_FROM_COMMID(srcProc->commID));

    // Free the notification object for bad-address or in-core page faults.
    if ((pn != NULL) && (_FAILURE(rc) || (_SGETUVAL(rc) == 0))) {
        srcProc->fnMgr.free(pn);
    }

    if (!wasActive) {
        CurrentThread->deactivate();
    }

    tassertWrn(srcProc->isKernel || _SUCCESS(rc),
               "User-mode bad-address fault: "
               "commID 0x%lx, pc %p, addr %lx, rc %lx.\n",
               srcProc->commID,
               srcProc->excStatePtr()->codeAddr(), faultAddr, rc);

    disableHardwareInterrupts();

    RESTORE_PPC_PAGE();

    t1.record();

    TraceOSExceptionPgfltDone((uval) srcProc->commID,
                              (uval64) CurrentThread, faultAddr, rc,
                              Scheduler::GetThreadSpecificUvalSelf());
#ifdef COLLECT_FAULT_STATS
    Scheduler::SetThreadSpecificUvalSelf(oldTSU);
#endif // COLLECT_FAULT_STATS

    return rc;
}