/**
 * Stops a running timer, removing its cyclic and freeing the per-mode state.
 *
 * @returns VINF_SUCCESS on success, VERR_TIMER_SUSPENDED if already stopped.
 * @param   pTimer  The timer to stop.
 *
 * Fix: the original freed pSingleTimer / pOmniTimer but left the stale
 * pointers (and hCyclicId) in place, creating dangling-pointer hazards for
 * any later inspection of the structure.  We now clear them after freeing.
 */
RTDECL(int) RTTimerStop(PRTTIMER pTimer)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (pTimer->fSuspended)
        return VERR_TIMER_SUSPENDED;

    pTimer->fSuspended = true;
    if (pTimer->pSingleTimer)
    {
        /* cyclic_remove must be called under cpu_lock. */
        mutex_enter(&cpu_lock);
        cyclic_remove(pTimer->hCyclicId);
        mutex_exit(&cpu_lock);

        RTMemFree(pTimer->pSingleTimer);
        pTimer->pSingleTimer = NULL;    /* avoid dangling pointer */
    }
    else if (pTimer->pOmniTimer)
    {
        mutex_enter(&cpu_lock);
        cyclic_remove(pTimer->hCyclicId);
        mutex_exit(&cpu_lock);

        RTMemFree(pTimer->pOmniTimer->au64Ticks);
        RTMemFree(pTimer->pOmniTimer);
        pTimer->pOmniTimer = NULL;      /* avoid dangling pointer */
    }
    pTimer->hCyclicId = CYCLIC_NONE;    /* the cyclic handle is no longer valid */

    return VINF_SUCCESS;
}
/**
 * Runs @a pfnWorker on the CPU identified by @a idCpu via a cross call.
 *
 * @returns VINF_SUCCESS if the worker ran exactly once on the target CPU,
 *          VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND otherwise.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();

    /* Validate the CPU id before doing any work. */
    if (idCpu >= ncpus)
        return VERR_CPU_NOT_FOUND;
    if (RT_UNLIKELY(!RTMpIsCpuOnline(idCpu)))
        return RTMpIsCpuPresent(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;

    RTMPARGS Args;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = idCpu;
    Args.cHits     = 0;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /* Build a CPU set containing only the target CPU. */
    RTSOLCPUSET SingleCpuSet;
    for (int iWord = 0; iWord < IPRT_SOL_SET_WORDS; iWord++)
        SingleCpuSet.auCpus[iWord] = 0;
    BT_SET(SingleCpuSet.auCpus, idCpu);

    rtMpSolCrossCall(&SingleCpuSet, rtMpSolOnSpecificCpuWrapper, &Args);

    RTThreadPreemptRestore(&PreemptState);

    /* The wrapper bumps cHits when it actually ran on the right CPU. */
    Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);
    return ASMAtomicUoReadU32(&Args.cHits) == 1 ? VINF_SUCCESS : VERR_CPU_NOT_FOUND;
}
/**
 * Destroys a Darwin multi-release event semaphore, aborting any waiters.
 *
 * @returns VINF_SUCCESS (also for a nil handle), VERR_INVALID_HANDLE on
 *          a bad pointer or magic.
 */
RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;

    /* Destroying a nil handle is a no-op by convention. */
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    Assert(pThis->cRefs > 0);
    RT_ASSERT_INTS_ON();

    lck_spin_lock(pThis->pSpinlock);

    /* Invalidate the handle and clear the state bits, keeping the generation. */
    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC);
    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTIDARWIN_GEN_MASK);

    /* Abort any threads blocked on the semaphore. */
    if (pThis->fHaveBlockedThreads)
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_RESTART);

    lck_spin_unlock(pThis->pSpinlock);

    /* Drop the creation reference. */
    rtR0SemEventMultiDarwinRelease(pThis);
    return VINF_SUCCESS;
}
/**
 * Signals a Darwin multi-release event semaphore, waking all waiters.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_HANDLE for a bad handle.
 */
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /* Advance the generation counter and raise the signaled state in one write. */
    uint32_t fStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fStateAndGen += 1 << RTSEMEVENTMULTIDARWIN_GEN_SHIFT;
    fStateAndGen |= RTSEMEVENTMULTIDARWIN_STATE_MASK;
    ASMAtomicWriteU32(&pThis->fStateAndGen, fStateAndGen);

    /* Wake every blocked thread, clearing the blocked flag first. */
    if (pThis->fHaveBlockedThreads)
    {
        ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, false);
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_AWAKENED);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
/**
 * Resets a Solaris multi-release event semaphore to the non-signaled state.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_HANDLE for a bad handle.
 */
RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    RT_ASSERT_PREEMPT_CPUID_VAR();
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiSolRetain(pThis);
    rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
    Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);

    /* Clear the state bits; the atomic op would be safe without the mutex,
       but we take it anyway to play safe. */
    ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTISOL_STATE_MASK);

    mutex_exit(&pThis->Mtx);
    rtR0SemEventMultiSolRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
/**
 * Signals a Solaris multi-release event semaphore and broadcasts to waiters.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_HANDLE for a bad handle.
 */
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    RT_ASSERT_PREEMPT_CPUID_VAR();
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiSolRetain(pThis);
    rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
    Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);

    /* Bump the generation and raise the signaled state in a single write,
       then wake everyone waiting on the condition variable. */
    uint32_t fStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fStateAndGen += 1 << RTSEMEVENTMULTISOL_GEN_SHIFT;
    fStateAndGen |= RTSEMEVENTMULTISOL_STATE_MASK;
    ASMAtomicWriteU32(&pThis->fStateAndGen, fStateAndGen);

    cv_broadcast(&pThis->Cnd);

    mutex_exit(&pThis->Mtx);
    rtR0SemEventMultiSolRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
/**
 * Destroys a Solaris timer: invalidates it, stops it if active, and drops
 * the reference.
 *
 * @returns VINF_SUCCESS (also for a NULL timer),
 *          VERR_INVALID_CONTEXT when called from the timer callback itself.
 */
RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    if (!pTimer)
        return VINF_SUCCESS;
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * Destroying a timer from its own callback is not possible; cyclic
     * makes that impossible (or at least extremely risky).
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);

    /*
     * Invalidate the handle first, then make sure the timer is stopped
     * before releasing the resources.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    if (   !pTimer->fSuspended
        || pTimer->hCyclicId != CYCLIC_NONE) /* the 2nd condition shouldn't happen */
        rtTimerSolStopIt(pTimer);

    rtTimerSolRelease(pTimer);
    return VINF_SUCCESS;
}
/**
 * Pokes (interrupts) the specified CPU.
 *
 * @returns VINF_SUCCESS on success, VERR_CPU_NOT_FOUND if @a idCpu is out
 *          of range.
 * @param   idCpu   The CPU to poke.
 *
 * Fix: the original silently returned VINF_SUCCESS for an out-of-range CPU
 * id; report VERR_CPU_NOT_FOUND instead, matching the convention used by
 * RTMpOnSpecific in this backend.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    RT_ASSERT_INTS_ON();
    if (idCpu >= ncpus)
        return VERR_CPU_NOT_FOUND;
    poke_cpu(idCpu);
    return VINF_SUCCESS;
}
/**
 * Runs @a pfnWorker on every online CPU except the current one.
 *
 * @returns VINF_SUCCESS.
 *
 * Fix: the set-initialization loop wrote CpuSet.auCpus[0] on every
 * iteration instead of auCpus[i], leaving all but the first word of the
 * CPU set uninitialized (compare the correct loop in RTMpOnAll).  On
 * systems with more CPUs than fit in one set word, some CPUs would be
 * cross-called (or not) depending on stack garbage.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RTMPARGS Args;
    RT_ASSERT_INTS_ON();

    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = RTMpCpuId();
    Args.cHits     = 0;

    /* The caller is supposed to have disabled preemption, but take no chances. */
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /* All CPUs except the current one. */
    RTSOLCPUSET CpuSet;
    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
        CpuSet.auCpus[i] = (ulong_t)-1L;    /* was auCpus[0] - bug */
    BT_CLEAR(CpuSet.auCpus, RTMpCpuId());

    rtMpSolCrossCall(&CpuSet, rtMpSolOnOtherCpusWrapper, &Args);

    RTThreadPreemptRestore(&PreemptState);
    return VINF_SUCCESS;
}
/**
 * Destroys a Darwin mutex semaphore.
 *
 * Invalidates the magic, wakes all waiters with THREAD_RESTART so they can
 * observe the dead handle, and drops the reference taken at creation.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_PARAMETER for a nil handle, or
 *          VERR_INVALID_HANDLE for a bad pointer/magic.
 */
RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    if (pThis == NIL_RTSEMMUTEX)
        return VERR_INVALID_PARAMETER;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC,
                    ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Kill it, wake up all waiting threads and release the reference.
     * The CmpXchg doubles as a guard against two concurrent destroy calls:
     * only the caller that flips the magic proceeds.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMMUTEX_MAGIC, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
    lck_spin_lock(pThis->pSpinlock);

    if (pThis->cWaiters > 0)
        thread_wakeup_prim((event_t)pThis, FALSE /* one_thread */, THREAD_RESTART);

    /* NOTE(review): when the last reference is dropped here, the struct is
       freed while the spinlock is still held - rtSemMutexDarwinFree is
       presumably responsible for unlocking/destroying the lock; confirm
       against its definition. */
    if (ASMAtomicDecU32(&pThis->cRefs) == 0)
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Pokes (interrupts) the specified CPU (Darwin).
 *
 * @returns VINF_SUCCESS on success, VERR_NOT_SUPPORTED when the interrupt
 *          primitive could not be resolved.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    RT_ASSERT_INTS_ON();

    /* The CPU-interrupt function is resolved dynamically and may be absent. */
    if (!g_pfnR0DarwinCpuInterrupt)
        return VERR_NOT_SUPPORTED;

    g_pfnR0DarwinCpuInterrupt(idCpu);
    return VINF_SUCCESS;
}
/**
 * Copies @a cb bytes from kernel memory to a ring-3 user address.
 *
 * @returns VINF_SUCCESS on success, VERR_ACCESS_DENIED when ddi_copyout
 *          reports failure (non-zero return).
 */
RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
{
    RT_ASSERT_INTS_ON();

    int rc = ddi_copyout(pvSrc, (void *)R3PtrDst, cb, 0 /*flags*/);
    return RT_LIKELY(rc == 0) ? VINF_SUCCESS : VERR_ACCESS_DENIED;
}
/**
 * Releases a Solaris fast mutex (implemented as an rwlock).
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_HANDLE for a bad handle.
 */
RTDECL(int) RTSemFastMutexRelease(RTSEMFASTMUTEX hFastMtx)
{
    PRTSEMFASTMUTEXINTERNAL pFastMtxInt = hFastMtx;
    AssertPtrReturn(pFastMtxInt, VERR_INVALID_HANDLE);
    AssertMsgReturn(pFastMtxInt->u32Magic == RTSEMFASTMUTEX_MAGIC,
                    ("%p: u32Magic=%RX32\n", pFastMtxInt, pFastMtxInt->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    rw_exit(&pFastMtxInt->Mtx);
    return VINF_SUCCESS;
}
/**
 * Copies @a cb bytes from a ring-3 user address into kernel memory.
 *
 * @returns VINF_SUCCESS on success, VERR_ACCESS_DENIED when ddi_copyin
 *          reports failure (non-zero return).
 */
RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
{
    RT_ASSERT_INTS_ON();

    int rc = ddi_copyin((const char *)R3PtrSrc, pvDst, cb, 0 /*flags*/);
    return RT_LIKELY(rc == 0) ? VINF_SUCCESS : VERR_ACCESS_DENIED;
}
/**
 * Pokes (interrupts) the specified CPU (Darwin, EFL.AC-preserving variant).
 *
 * @returns VINF_SUCCESS on success, VERR_NOT_SUPPORTED when the interrupt
 *          primitive could not be resolved.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    RT_ASSERT_INTS_ON();

    /* The CPU-interrupt function is resolved dynamically and may be absent. */
    if (!g_pfnR0DarwinCpuInterrupt)
        return VERR_NOT_SUPPORTED;

    IPRT_DARWIN_SAVE_EFL_AC(); /* paranoia */
    g_pfnR0DarwinCpuInterrupt(idCpu);
    IPRT_DARWIN_RESTORE_EFL_AC();

    return VINF_SUCCESS;
}
/**
 * Runs @a pfnWorker on every CPU via an MP rendezvous (Darwin).
 *
 * @returns VINF_SUCCESS.
 *
 * Fix: added the IPRT_DARWIN_SAVE_EFL_AC / RESTORE pair around the
 * rendezvous for consistency with the sibling RTMpOnOthers, which already
 * protects EFL.AC across the same kind of call.
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    RTMPARGS Args;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = NIL_RTCPUID;   /* all CPUs, no specific target */
    Args.cHits     = 0;
    mp_rendezvous_no_intrs(rtmpOnAllDarwinWrapper, &Args);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Destroys a Solaris fast mutex.
 *
 * @returns VINF_SUCCESS (also for a nil handle), VERR_INVALID_HANDLE for a
 *          bad pointer or magic.
 */
RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
{
    PRTSEMFASTMUTEXINTERNAL pFastMtxInt = hFastMtx;

    /* Nil handle: nothing to do. */
    if (pFastMtxInt == NIL_RTSEMFASTMUTEX)
        return VINF_SUCCESS;
    AssertPtrReturn(pFastMtxInt, VERR_INVALID_HANDLE);
    AssertMsgReturn(pFastMtxInt->u32Magic == RTSEMFASTMUTEX_MAGIC,
                    ("%p: u32Magic=%RX32\n", pFastMtxInt, pFastMtxInt->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    /* Mark the handle dead before tearing down the lock and freeing. */
    ASMAtomicXchgU32(&pFastMtxInt->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
    rw_destroy(&pFastMtxInt->Mtx);
    RTMemFree(pFastMtxInt);

    return VINF_SUCCESS;
}
/**
 * Destroys a timer: stops it if running, invalidates it, and frees it.
 *
 * @returns VINF_SUCCESS (also for a NULL timer).
 */
RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    if (!pTimer)
        return VINF_SUCCESS;
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * Stop the timer (a no-op error if already suspended), invalidate the
     * magic, and release the memory.
     */
    RTTimerStop(pTimer);
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    RTMemFree(pTimer);

    return VINF_SUCCESS;
}
/**
 * Runs @a pfnWorker on the CPU identified by @a idCpu (Darwin rendezvous).
 *
 * @returns VINF_SUCCESS if the worker ran exactly once on the target CPU,
 *          VERR_CPU_NOT_FOUND otherwise.
 *
 * Fix: removed the unused local `int rc;`.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();

    RTMPARGS Args;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = idCpu;
    Args.cHits     = 0;
    mp_rendezvous_no_intrs(rtmpOnSpecificDarwinWrapper, &Args);

    /* The wrapper bumps cHits when it ran on the requested CPU. */
    return Args.cHits == 1 ? VINF_SUCCESS : VERR_CPU_NOT_FOUND;
}
/**
 * Runs @a pfnWorker on every CPU except the current one (Darwin rendezvous).
 *
 * @returns VINF_SUCCESS.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    /* idCpu records the CPU to skip; the wrapper compares against it. */
    RTMPARGS Args;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = RTMpCpuId();
    Args.cHits     = 0;
    mp_rendezvous_no_intrs(rtmpOnOthersDarwinWrapper, &Args);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Deregisters a power-notification callback previously registered with the
 * matching (pfnCallback, pvUser) pair.
 *
 * @returns VINF_SUCCESS on success, VERR_NOT_FOUND when no matching record
 *          exists, VERR_INVALID_POINTER / VERR_WRONG_ORDER on bad input.
 */
RTDECL(int) RTPowerNotificationDeregister(PFNRTPOWERNOTIFICATION pfnCallback, void *pvUser)
{
    /*
     * Validation.
     */
    AssertPtrReturn(pfnCallback, VERR_INVALID_POINTER);
    AssertReturn(g_hRTPowerNotifySpinLock != NIL_RTSPINLOCK, VERR_WRONG_ORDER);
    RT_ASSERT_INTS_ON();

    /*
     * Search for a matching record and unlink it under the spinlock.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquire(g_hRTPowerNotifySpinLock, &Tmp);

    PRTPOWERNOTIFYREG pPrev = NULL;
    PRTPOWERNOTIFYREG pCur  = g_pRTPowerCallbackHead;
    while (pCur)
    {
        if (   pCur->pvUser      == pvUser
            && pCur->pfnCallback == pfnCallback)
            break;
        pPrev = pCur;
        pCur  = pCur->pNext;
    }
    if (pCur)
    {
        if (pPrev)
            pPrev->pNext = pCur->pNext;
        else
            g_pRTPowerCallbackHead = pCur->pNext;
        /* Bump the generation so concurrent walkers notice the change. */
        ASMAtomicIncU32(&g_iRTPowerGeneration);
    }
    RTSpinlockRelease(g_hRTPowerNotifySpinLock, &Tmp);

    if (!pCur)
        return VERR_NOT_FOUND;

    /*
     * Scrub and free the record outside the lock.
     */
    pCur->pNext = NULL;
    pCur->pfnCallback = NULL;
    RTMemFree(pCur);

    return VINF_SUCCESS;
}
/**
 * Destroys a Darwin fast mutex.
 *
 * @returns VINF_SUCCESS (also for a nil handle), VERR_INVALID_HANDLE for a
 *          bad pointer or magic.
 */
RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
{
    PRTSEMFASTMUTEXINTERNAL pFastMtxInt = hFastMtx;

    /* Nil handle: nothing to do. */
    if (pFastMtxInt == NIL_RTSEMFASTMUTEX)
        return VINF_SUCCESS;
    AssertPtrReturn(pFastMtxInt, VERR_INVALID_HANDLE);
    AssertMsgReturn(pFastMtxInt->u32Magic == RTSEMFASTMUTEX_MAGIC,
                    ("%p: u32Magic=%RX32\n", pFastMtxInt, pFastMtxInt->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    /* Mark dead, free the Mach mutex, then free the structure. */
    ASMAtomicWriteU32(&pFastMtxInt->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
    Assert(g_pDarwinLockGroup);
    lck_mtx_free(pFastMtxInt->pMtx, g_pDarwinLockGroup);
    pFastMtxInt->pMtx = NULL;
    RTMemFree(pFastMtxInt);

    return VINF_SUCCESS;
}
/**
 * Stops a Solaris timer.
 *
 * @returns VINF_SUCCESS on success, VERR_TIMER_SUSPENDED if already stopped.
 */
RTDECL(int) RTTimerStop(PRTTIMER pTimer)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (pTimer->fSuspended)
        return VERR_TIMER_SUSPENDED;

    /*
     * Taking cpu_lock and calling cyclic_remove from inside the timer
     * callback may deadlock the system, so in that case just flag the
     * timer as suspended and let the callback wrapper deal with it.
     */
    if (rtTimerSolIsCallingFromTimerProc(pTimer))
        pTimer->fSuspendedFromTimer = true;
    else
        rtTimerSolStopIt(pTimer);

    return VINF_SUCCESS;
}
/**
 * Resets a Darwin multi-release event semaphore to the non-signaled state.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_HANDLE for a bad handle.
 */
RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /* Clear the signaled state bits, leaving the generation intact. */
    ASMAtomicAndU32(&pThis->fStateAndGen, ~RTSEMEVENTMULTIDARWIN_STATE_MASK);

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
/**
 * Destroys a spinlock.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_PARAMETER for a NULL
 *          handle or bad magic.
 */
RTDECL(int) RTSpinlockDestroy(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;

    /*
     * Validate input.
     */
    RT_ASSERT_INTS_ON();
    if (!pThis)
        return VERR_INVALID_PARAMETER;
    AssertMsgReturn(pThis->u32Magic == RTSPINLOCK_MAGIC,
                    ("Invalid spinlock %p magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_PARAMETER);

    /*
     * Invalidate the magic (any change away from RTSPINLOCK_MAGIC does)
     * and release the memory.
     */
    ASMAtomicIncU32(&pThis->u32Magic);
    RTMemFree(pThis);

    return VINF_SUCCESS;
}
/**
 * Destroys a Solaris multi-release event semaphore, waking any waiters so
 * they can observe the dead handle.
 *
 * @returns VINF_SUCCESS (also for a nil handle), VERR_INVALID_HANDLE for a
 *          bad pointer, magic, or reference count.
 */
RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;

    /* Destroying a nil handle is a no-op by convention. */
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->cRefs > 0,
                    ("pThis=%p cRefs=%d\n", pThis, pThis->cRefs),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    mutex_enter(&pThis->Mtx);

    /* Invalidate the handle and wake up all threads that might be waiting
       on the semaphore. */
    Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);
    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMEVENTMULTI_MAGIC_DEAD);
    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTISOL_GEN_MASK);
    cv_broadcast(&pThis->Cnd);

    mutex_exit(&pThis->Mtx);

    /* Drop the reference from RTSemEventMultiCreateEx. */
    rtR0SemEventMultiSolRelease(pThis);
    return VINF_SUCCESS;
}
/**
 * Runs @a pfnWorker on every CPU via a Solaris cross call.
 *
 * @returns VINF_SUCCESS.
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RTMPARGS Args;
    RT_ASSERT_INTS_ON();

    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = NIL_RTCPUID;   /* no specific target */
    Args.cHits     = 0;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /* Build a set covering every CPU. */
    RTSOLCPUSET AllCpusSet;
    for (int iWord = 0; iWord < IPRT_SOL_SET_WORDS; iWord++)
        AllCpusSet.auCpus[iWord] = (ulong_t)-1L;

    rtMpSolCrossCall(&AllCpusSet, rtMpSolOnAllCpuWrapper, &Args);

    RTThreadPreemptRestore(&PreemptState);
    return VINF_SUCCESS;
}
/**
 * Starts a Solaris timer, either as an omni (all-CPU) cyclic or as a
 * single-CPU cyclic, optionally bound to a specific CPU.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_CONTEXT when called from
 *          the timer callback, VERR_TIMER_ACTIVE if already running,
 *          VERR_CPU_OFFLINE if the requested CPU is not online.
 * @param   pTimer      The timer to start.
 * @param   u64First    Nanoseconds until the first firing; 0 means use the
 *                      configured interval (omni case).
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * It's not possible to restart a one-shot time from it's callback function,
     * at least not at the moment.
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);

    /* cpu_lock is required for all cyclic_* calls below. */
    mutex_enter(&cpu_lock);

    /*
     * Make sure it's not active already.  If it was suspended from a timer
     * callback function, we need to do some cleanup work here before we can
     * restart the timer.
     */
    if (!pTimer->fSuspended)
    {
        if (!pTimer->fSuspendedFromTimer)
        {
            mutex_exit(&cpu_lock);
            return VERR_TIMER_ACTIVE;
        }
        /* The callback-side suspend left the cyclic installed; remove it now. */
        cyclic_remove(pTimer->hCyclicId);
        pTimer->hCyclicId = CYCLIC_NONE;
    }

    pTimer->fSuspended = false;
    pTimer->fSuspendedFromTimer = false;
    pTimer->fIntervalChanged = false;
    if (pTimer->fAllCpus)
    {
        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->u.Omni.u64When = RTTimeSystemNanoTS() + (u64First ? u64First : pTimer->cNsInterval);

        cyc_omni_handler_t HandlerOmni;
        HandlerOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        HandlerOmni.cyo_offline = NULL;
        HandlerOmni.cyo_arg     = pTimer;

        pTimer->hCyclicId = cyclic_add_omni(&HandlerOmni);
    }
    else
    {
        cyc_handler_t Handler;
        cyc_time_t    FireTime;

        /*
         * Setup a single CPU timer.  If a specific CPU was requested, it
         * must be online or the timer cannot start.
         */
        if (   pTimer->fSpecificCpu
            && !RTMpIsCpuOnline(pTimer->iCpu)) /* ASSUMES: index == cpuid */
        {
            pTimer->fSuspended = true;
            mutex_exit(&cpu_lock);
            return VERR_CPU_OFFLINE;
        }

        Handler.cyh_func  = (cyc_func_t)rtTimerSolSingleCallbackWrapper;
        Handler.cyh_arg   = pTimer;
        Handler.cyh_level = CY_LOCK_LEVEL;

        /*
         * Use a large interval (1 hour) so that we don't get a timer-callback between
         * cyclic_add() and cyclic_bind(). Program the correct interval once cyclic_bind() is done.
         * See @bugref{7691#c20}.
         */
        if (!pTimer->fSpecificCpu)
            FireTime.cyt_when = RTTimeSystemNanoTS() + u64First;
        else
            FireTime.cyt_when = RTTimeSystemNanoTS() + u64First + RT_NS_1HOUR;
        FireTime.cyt_interval = pTimer->cNsInterval != 0
                              ? pTimer->cNsInterval
                              : CY_INFINITY /* Special value, see cyclic_fire(). */;
        pTimer->u.Single.u64Tick = 0;
        pTimer->u.Single.nsNextTick = 0;

        pTimer->hCyclicId = cyclic_add(&Handler, &FireTime);
        if (pTimer->fSpecificCpu)
        {
            /* Bind to the requested CPU, then reprogram the real first-fire
               time that the 1-hour safety offset above pushed out. */
            cyclic_bind(pTimer->hCyclicId, cpu[pTimer->iCpu], NULL /* cpupart */);
            cyclic_reprogram(pTimer->hCyclicId, RTTimeSystemNanoTS() + u64First);
        }
    }

    mutex_exit(&cpu_lock);
    return VINF_SUCCESS;
}
/**
 * Applies Mach scheduling policies matching the IPRT thread type.
 *
 * @returns VINF_SUCCESS (kernel call failures are asserted but ignored),
 *          VERR_INVALID_PARAMETER for an unknown @a enmType.
 * @param   pThread     The thread; Core.Key holds the Mach thread_t.
 * @param   enmType     The IPRT thread type to map to Mach policies.
 */
DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
{
    /*
     * Convert the priority type to scheduling policies.
     * (This is really just guess work.)
     */
    bool                          fSetExtended = false;
    thread_extended_policy        Extended = { true };
    bool                          fSetTimeContstraint = false;
    thread_time_constraint_policy TimeConstraint = { 0, 0, 0, true };
    thread_precedence_policy      Precedence = { 0 };
    switch (enmType)
    {
        case RTTHREADTYPE_INFREQUENT_POLLER:
            Precedence.importance = 1;
            break;

        case RTTHREADTYPE_EMULATION:
            Precedence.importance = 30;
            break;

        case RTTHREADTYPE_DEFAULT:
            Precedence.importance = 31;
            break;

        case RTTHREADTYPE_MSG_PUMP:
            Precedence.importance = 34;
            break;

        case RTTHREADTYPE_IO:
            Precedence.importance = 98;
            break;

        case RTTHREADTYPE_TIMER:
            /* Timer threads get max precedence plus a non-timeshare,
               time-constraint (real-time-ish) policy. */
            Precedence.importance = 0x7fffffff;

            fSetExtended = true;
            Extended.timeshare = FALSE;

            fSetTimeContstraint = true;
            TimeConstraint.period = 0; /* not really true for a real timer thread, but we've really no idea. */
            TimeConstraint.computation = rtDarwinAbsTimeFromNano(100000); /* 100 us*/
            TimeConstraint.constraint = rtDarwinAbsTimeFromNano(500000); /* 500 us */
            TimeConstraint.preemptible = FALSE;
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
    RT_ASSERT_INTS_ON();

    /*
     * Do the actual modification.  Precedence is always set; the extended
     * and time-constraint policies only for RTTHREADTYPE_TIMER.
     */
    kern_return_t kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_PRECEDENCE_POLICY,
                                         (thread_policy_t)&Precedence, THREAD_PRECEDENCE_POLICY_COUNT);
    AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr)); NOREF(kr);

    if (fSetExtended)
    {
        kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_EXTENDED_POLICY,
                               (thread_policy_t)&Extended, THREAD_EXTENDED_POLICY_COUNT);
        AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr));
    }

    if (fSetTimeContstraint)
    {
        kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_TIME_CONSTRAINT_POLICY,
                               (thread_policy_t)&TimeConstraint, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
        AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr));
    }

    return VINF_SUCCESS; /* ignore any errors for now */
}
/**
 * Starts a Solaris timer (older single/omni allocation scheme).
 *
 * @returns VINF_SUCCESS, VERR_TIMER_ACTIVE, VERR_NOT_SUPPORTED (one-shot),
 *          VERR_NO_MEMORY, or VERR_CPU_OFFLINE.
 * @param   pTimer      The timer to start.
 * @param   u64First    Nanoseconds until the first firing.
 *
 * Fixes:
 *  - All error paths after clearing fSuspended now restore it; previously a
 *    failed start left the timer marked as running with no cyclic installed,
 *    so a later RTTimerStop would call cyclic_remove on a stale handle.
 *  - Removed the dead one-shot (interval == 0) branch in the single-CPU
 *    path: interval == 0 is rejected with VERR_NOT_SUPPORTED up front.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (!pTimer->fSuspended)
        return VERR_TIMER_ACTIVE;

    /* One-shot timers are not supported by the cyclic system. */
    if (pTimer->interval == 0)
        return VERR_NOT_SUPPORTED;

    pTimer->fSuspended = false;
    if (pTimer->fAllCpu)
    {
        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
        if (RT_UNLIKELY(!pOmniTimer))
        {
            pTimer->fSuspended = true;  /* restore state on failure */
            return VERR_NO_MEMORY;
        }

        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
        {
            RTMemFree(pOmniTimer);
            pTimer->fSuspended = true;  /* restore state on failure */
            return VERR_NO_MEMORY;
        }

        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->pOmniTimer = pOmniTimer;
        pOmniTimer->u64When = pTimer->interval + RTTimeNanoTS();

        cyc_omni_handler_t hOmni;
        hOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        hOmni.cyo_offline = NULL;
        hOmni.cyo_arg     = pTimer;

        mutex_enter(&cpu_lock);
        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
        mutex_exit(&cpu_lock);
    }
    else
    {
        int iCpu = SOL_TIMER_ANY_CPU;
        if (pTimer->fSpecificCpu)
        {
            iCpu = pTimer->iCpu;
            if (!RTMpIsCpuOnline(iCpu)) /* ASSUMES: index == cpuid */
            {
                pTimer->fSuspended = true;  /* restore state on failure */
                return VERR_CPU_OFFLINE;
            }
        }

        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
        if (RT_UNLIKELY(!pSingleTimer))
        {
            pTimer->fSuspended = true;  /* restore state on failure */
            return VERR_NO_MEMORY;
        }

        pTimer->pSingleTimer = pSingleTimer;
        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
        pSingleTimer->hHandler.cyh_arg   = pTimer;
        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;

        mutex_enter(&cpu_lock);
        /* Re-check under cpu_lock; the CPU may have gone offline meanwhile. */
        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
        {
            mutex_exit(&cpu_lock);
            RTMemFree(pSingleTimer);
            pTimer->pSingleTimer = NULL;
            pTimer->fSuspended = true;  /* restore state on failure */
            return VERR_CPU_OFFLINE;
        }

        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
        /* interval == 0 (one-shot) was rejected above, so this is always periodic. */
        pSingleTimer->hFireTime.cyt_interval = pTimer->interval;

        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
        if (iCpu != SOL_TIMER_ANY_CPU)
            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);

        mutex_exit(&cpu_lock);
    }

    return VINF_SUCCESS;
}