/**
 * Write to an MMIO register.
 *
 * @returns VBox status code suitable for scheduling.
 * @param   pDevIns     The device instance.
 * @param   pvUser      A user argument (ignored).
 * @param   GCPhysAddr  The physical address being written to. (This is within our MMIO memory range.)
 * @param   pv          Pointer to the data being written.
 * @param   cb          The size of the data being written.
 */
PDMBOTHCBDECL(int) ox958MmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
{
    PDEVOX958 pThis  = PDMINS_2_DATA(pDevIns, PDEVOX958);
    uint32_t  offReg = (GCPhysAddr - pThis->GCPhysMMIO);
    int       rc     = VINF_SUCCESS;

    RT_NOREF1(pvUser);

    if (offReg < OX958_REG_UART_REGION_OFFSET)
    {
        const uint32_t u32 = *(const uint32_t *)pv;

        Assert(cb == 4);

        switch (offReg)
        {
            case OX958_REG_UART_IRQ_ENABLE:
                ASMAtomicOrU32(&pThis->u32RegIrqEnGlob, u32);
                ox958IrqUpdate(pThis);
                break;
            case OX958_REG_UART_IRQ_DISABLE:
                ASMAtomicAndU32(&pThis->u32RegIrqEnGlob, ~u32);
                ox958IrqUpdate(pThis);
                break;
            case OX958_REG_UART_WAKE_IRQ_ENABLE:
                ASMAtomicOrU32(&pThis->u32RegIrqEnWake, u32);
                break;
            case OX958_REG_UART_WAKE_IRQ_DISABLE:
                ASMAtomicAndU32(&pThis->u32RegIrqEnWake, ~u32);
                break;
            case OX958_REG_UART_IRQ_STS: /* Readonly */
            case OX958_REG_CC_REV_ID:    /* Readonly */
            case OX958_REG_UART_CNT:     /* Readonly */
            default:
                rc = VINF_SUCCESS;
        }
    }
    else
    {
        /* Figure out the UART accessed from the offset. */
        offReg -= OX958_REG_UART_REGION_OFFSET;
        uint32_t iUart      = offReg / OX958_REG_UART_REGION_SIZE;
        uint32_t offUartReg = offReg % OX958_REG_UART_REGION_SIZE;
        if (iUart < pThis->cUarts)
        {
            POX958UART pUart = &pThis->aUarts[iUart];

            rc = ox958UartRegWrite(pThis, pUart, offUartReg, pv, cb);
            if (rc == VINF_IOM_R3_IOPORT_WRITE)
                rc = VINF_IOM_R3_MMIO_WRITE;
        }
    }

    return rc;
}
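/*
 * Illustrative sketch, not part of the original source: how a handler like ox958MmioWrite
 * is typically hooked up together with a read counterpart when the MMIO region gets mapped.
 * The wrapper function itself, ox958MmioRead, OX958_MMIO_SIZE and the fFlags value are
 * assumptions for the example; PDMDevHlpMMIORegister follows the usual PDM pattern of this
 * code base.
 */
static int ox958MmioMapSketch(PPDMDEVINS pDevIns, PDEVOX958 pThis, RTGCPHYS GCPhysAddress)
{
    /* The write handler above subtracts this base address to compute offReg. */
    pThis->GCPhysMMIO = GCPhysAddress;
    return PDMDevHlpMMIORegister(pDevIns, GCPhysAddress, OX958_MMIO_SIZE, NULL /*pvUser*/,
                                 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
                                 ox958MmioWrite, ox958MmioRead, "OXPCIe958 MMIO");
}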
/**
 * Terminates the thread.
 * Called by the thread wrapper function when the thread terminates.
 *
 * @param   pThread     The thread structure.
 * @param   rc          The thread result code.
 */
DECLHIDDEN(void) rtThreadTerminate(PRTTHREADINT pThread, int rc)
{
    Assert(pThread->cRefs >= 1);

#ifdef IPRT_WITH_GENERIC_TLS
    /*
     * Destroy TLS entries.
     */
    rtThreadTlsDestruction(pThread);
#endif /* IPRT_WITH_GENERIC_TLS */

    /*
     * Set the rc, mark it terminated and signal anyone waiting.
     */
    pThread->rc = rc;
    rtThreadSetState(pThread, RTTHREADSTATE_TERMINATED);
    ASMAtomicOrU32(&pThread->fIntFlags, RTTHREADINT_FLAGS_TERMINATED);
    if (pThread->EventTerminated != NIL_RTSEMEVENTMULTI)
        RTSemEventMultiSignal(pThread->EventTerminated);

    /*
     * Remove the thread from the tree so that there will be no
     * key clashes in the AVL tree and release our reference to ourself.
     */
    rtThreadRemove(pThread);
    rtThreadRelease(pThread);
}
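/*
 * Illustrative sketch, not the original code: the platform thread wrapper mentioned in the
 * doc comment runs the user thread function and hands its status to rtThreadTerminate.
 * The wrapper name and the pfnThread/pvUser member names are assumptions following the
 * IPRT pattern; the real per-platform wrappers differ in detail.
 */
static int rtThreadMainSketch(PRTTHREADINT pThread, RTNATIVETHREAD NativeThread)
{
    rtThreadInsert(pThread, NativeThread);              /* make the handle resolvable */
    rtThreadSetState(pThread, RTTHREADSTATE_RUNNING);

    int rc = pThread->pfnThread(pThread, pThread->pvUser); /* run the user code */

    rtThreadTerminate(pThread, rc);                     /* publish rc, wake waiters, drop reference */
    return rc;
}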
/**
 * UART core IRQ request callback.
 *
 * @returns nothing.
 * @param   pDevIns     The device instance.
 * @param   pUart       The UART requesting an IRQ update.
 * @param   iLUN        The UART index.
 * @param   iLvl        IRQ level requested.
 */
PDMBOTHCBDECL(void) ox958IrqReq(PPDMDEVINS pDevIns, PUARTCORE pUart, unsigned iLUN, int iLvl)
{
    RT_NOREF(pUart);
    PDEVOX958 pThis = PDMINS_2_DATA(pDevIns, PDEVOX958);

    if (iLvl)
        ASMAtomicOrU32(&pThis->u32RegIrqStsGlob, RT_BIT_32(iLUN));
    else
        ASMAtomicAndU32(&pThis->u32RegIrqStsGlob, ~RT_BIT_32(iLUN));
    ox958IrqUpdate(pThis);
}
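/*
 * Illustrative sketch, not the original implementation: ox958IrqUpdate is expected to
 * combine the global IRQ status bits with the global IRQ enable mask and drive the shared
 * PCI interrupt line accordingly.  Passing pDevIns explicitly and the function name are
 * assumptions for this example; PDMDevHlpPCISetIrq and the PDM_IRQ_LEVEL_* constants are
 * the standard PDM way of raising a device IRQ.
 */
static void ox958IrqUpdateSketch(PPDMDEVINS pDevIns, PDEVOX958 pThis)
{
    uint32_t fIrqSts = ASMAtomicReadU32(&pThis->u32RegIrqStsGlob);
    uint32_t fIrqEn  = ASMAtomicReadU32(&pThis->u32RegIrqEnGlob);

    /* Any pending source that is also enabled drives the interrupt line high. */
    PDMDevHlpPCISetIrq(pDevIns, 0 /*iIrq*/, (fIrqSts & fIrqEn) ? PDM_IRQ_LEVEL_HIGH : PDM_IRQ_LEVEL_LOW);
}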
/**
 * Inserts the per thread data structure into the tree.
 *
 * This can be called from both the thread itself and the parent,
 * thus it must handle insertion failures in a nice manner.
 *
 * @param   pThread         Pointer to thread structure allocated by rtThreadAlloc().
 * @param   NativeThread    The native thread id.
 */
DECLHIDDEN(void) rtThreadInsert(PRTTHREADINT pThread, RTNATIVETHREAD NativeThread)
{
    Assert(pThread);
    Assert(pThread->u32Magic == RTTHREADINT_MAGIC);

    {
        RT_THREAD_LOCK_RW();

        /*
         * Do not insert a terminated thread.
         *
         * This may happen if the thread finishes before the RTThreadCreate call
         * gets this far. Since the OS may quickly reuse the native thread ID
         * it should not be reinserted at this point.
         */
        if (rtThreadGetState(pThread) != RTTHREADSTATE_TERMINATED)
        {
            /*
             * Before inserting we must check if there is a thread with this id
             * in the tree already. We're racing parent and child on insert here
             * so that the handle is valid in both ends when they return / start.
             *
             * If it's not ourself we find, it's a dead alien thread and we will
             * unlink it from the tree. Alien threads will be released at this point.
             */
            PRTTHREADINT pThreadOther = (PRTTHREADINT)RTAvlPVGet(&g_ThreadTree, (void *)NativeThread);
            if (pThreadOther != pThread)
            {
                bool fRc;

                /* remove dead alien if any */
                if (pThreadOther)
                {
                    AssertMsg(pThreadOther->fIntFlags & RTTHREADINT_FLAGS_ALIEN,
                              ("%p:%s; %p:%s\n", pThread, pThread->szName, pThreadOther, pThreadOther->szName));
                    ASMAtomicBitClear(&pThread->fIntFlags, RTTHREADINT_FLAG_IN_TREE_BIT);
                    rtThreadRemoveLocked(pThreadOther);
                    if (pThreadOther->fIntFlags & RTTHREADINT_FLAGS_ALIEN)
                        rtThreadRelease(pThreadOther);
                }

                /* insert the thread */
                ASMAtomicWritePtr(&pThread->Core.Key, (void *)NativeThread);
                fRc = RTAvlPVInsert(&g_ThreadTree, &pThread->Core);
                ASMAtomicOrU32(&pThread->fIntFlags, RTTHREADINT_FLAG_IN_TREE);
                if (fRc)
                    ASMAtomicIncU32(&g_cThreadInTree);

                AssertReleaseMsg(fRc, ("Lock problem? %p (%RTnthrd) %s\n", pThread, NativeThread, pThread->szName));
                NOREF(fRc);
            }
        }

        RT_THREAD_UNLOCK_RW();
    }
}
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal  = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */

#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);
        AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);
        AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
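/*
 * Illustrative usage sketch, not part of the original source: PDMCritSectLeave pairs with
 * PDMCritSectEnter.  A handler that may run in ring-0 or raw mode passes an rcBusy status
 * so contention can defer the access to ring-3.  The PDEVEXAMPLE type, the CritSect member
 * and the handler shape are assumptions made for the example.
 */
static int exampleDevHandler(PDEVEXAMPLE pThis)
{
    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_R3_MMIO_WRITE /* rcBusy */);
    if (rc == VINF_SUCCESS)
    {
        /* ... manipulate shared device state under the lock ... */
        PDMCritSectLeave(&pThis->CritSect);
    }
    /* On contention rc is VINF_IOM_R3_MMIO_WRITE, telling IOM to retry the access in ring-3. */
    return rc;
}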