/**
 * Creates a timer handle (Solaris cyclic backend).
 *
 * Only validates the flags and fills in the handle; the actual cyclic is
 * armed later by RTTimerStart.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER for bad flag combinations.
 * @retval  VERR_CPU_NOT_FOUND if a specific CPU was requested that isn't possible.
 * @retval  VERR_NOT_SUPPORTED for one-shot (interval 0) omni timers.
 *
 * @param   ppTimer         Where to store the timer handle (set to NULL on failure).
 * @param   u64NanoInterval The interval in nanoseconds, 0 for one-shot.
 * @param   fFlags          RTTIMER_FLAGS_XXX.
 * @param   pfnTimer        Callback to invoke on each tick.
 * @param   pvUser          User argument for the callback.
 */
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    RT_ASSERT_PREEMPTIBLE();
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;

    bool const fOmni     = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    bool const fSpecific = !fOmni && (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) != 0;
    if (   fSpecific
        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /* One-shot omni timers are not supported. */
    if (fOmni && u64NanoInterval == 0)
        return VERR_NOT_SUPPORTED;

    /*
     * Allocate and initialize the timer handle.
     * (Plain alloc is fine; every member is assigned below.)
     */
    PRTTIMER pTimer = (PRTTIMER)RTMemAlloc(sizeof(*pTimer));
    if (!pTimer)
        return VERR_NO_MEMORY;

    pTimer->u32Magic     = RTTIMER_MAGIC;
    pTimer->fSuspended   = true;
    pTimer->fAllCpu      = fOmni;
    pTimer->fSpecificCpu = fSpecific;
    pTimer->iCpu         = fSpecific ? (fFlags & RTTIMER_FLAGS_CPU_MASK) /* ASSUMES: index == cpuid */ : 255;
    pTimer->interval     = u64NanoInterval;
    pTimer->pfnTimer     = pfnTimer;
    pTimer->pvUser       = pvUser;
    pTimer->pSingleTimer = NULL;
    pTimer->pOmniTimer   = NULL;
    pTimer->hCyclicId    = CYCLIC_NONE;

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
/**
 * Memory bandwidth probe over the CPU topology.
 *
 * For each (possible) CPU the thread binds there, allocates a 32 MB page
 * buffer, and then measures write, read, and copy throughput against that
 * buffer from every other CPU, reporting MB/s figures plus the total time
 * via RTTestIValueF.
 *
 * @param   hTest   The test handle (unused; the implicit RTTESTI handle is used).
 */
static void doTest(RTTEST hTest)
{
    NOREF(hTest);
    for (uint32_t idxAlloc = 0; idxAlloc < RTCPUSET_MAX_CPUS; idxAlloc = getNextCpu(idxAlloc))
    {
        const uint32_t cbChunk = _1M * 32;
        const uint32_t cLoops  = 384;

        /*
         * Bind to the allocation CPU and grab a chunk of memory there.
         */
        RTTESTI_CHECK_RC_OK_RETV(RTThreadSetAffinityToCpu(RTMpCpuIdFromSetIndex(idxAlloc)));
        void *pvChunk = RTMemPageAlloc(cbChunk); /* may be leaked, who cares */
        RTTESTI_CHECK_RETV(pvChunk != NULL);
        memset(pvChunk, 0xef, cbChunk);

        /*
         * Access the chunk from every CPU in turn.
         */
        for (uint32_t idxAccess = 0; idxAccess < RTCPUSET_MAX_CPUS; idxAccess = getNextCpu(idxAccess))
        {
            RTTESTI_CHECK_RC_OK_RETV(RTThreadSetAffinityToCpu(RTMpCpuIdFromSetIndex(idxAccess)));

            /*
             * The write test.
             */
            RTTimeNanoTS();
            RTThreadYield();
            uint64_t nsStart = RTTimeNanoTS();
            for (uint32_t i = 0; i < cLoops; i++)
            {
                ASMCompilerBarrier(); /* paranoia */
                memset(pvChunk, i, cbChunk);
            }
            uint64_t const cNsWrite = RTTimeNanoTS() - nsStart;
            uint64_t cMBps = (uint64_t)(  ((uint64_t)cLoops * cbChunk)                  /* bytes */
                                        / ((long double)cNsWrite / RT_NS_1SEC_64)       /* seconds */
                                        / _1M);                                         /* MB */
            RTTestIValueF(cMBps, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-write", idxAlloc, idxAccess);

            /*
             * The read test - sums the buffer through volatile pointers so the
             * loads cannot be optimized away.
             */
            memset(pvChunk, 0, cbChunk);
            RTTimeNanoTS();
            RTThreadYield();
            nsStart = RTTimeNanoTS();
            for (uint32_t i = 0; i < cLoops; i++)
            {
                size_t           uSum  = 0;
                size_t volatile *puCur = (size_t volatile *)pvChunk;
                size_t volatile *puEnd = puCur + cbChunk / sizeof(size_t);
                while (puCur != puEnd)
                    uSum += *puCur++;
            }
            uint64_t const cNsRead = RTTimeNanoTS() - nsStart;
            cMBps = (uint64_t)(  ((uint64_t)cLoops * cbChunk)                  /* bytes */
                               / ((long double)cNsRead / RT_NS_1SEC_64)        /* seconds */
                               / _1M);                                         /* MB */
            RTTestIValueF(cMBps, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-read", idxAlloc, idxAccess);

            /*
             * The read/write test - copies the upper half onto the lower half.
             */
            RTTimeNanoTS();
            RTThreadYield();
            nsStart = RTTimeNanoTS();
            for (uint32_t i = 0; i < cLoops; i++)
            {
                ASMCompilerBarrier(); /* paranoia */
                memcpy(pvChunk, (uint8_t *)pvChunk + cbChunk / 2, cbChunk / 2);
            }
            uint64_t const cNsCopy = RTTimeNanoTS() - nsStart;
            cMBps = (uint64_t)(  ((uint64_t)cLoops * cbChunk)                  /* bytes */
                               / ((long double)cNsCopy / RT_NS_1SEC_64)        /* seconds */
                               / _1M);                                         /* MB */
            RTTestIValueF(cMBps, RTTESTUNIT_MEGABYTES_PER_SEC, "cpu%02u-mem%02u-read-write", idxAlloc, idxAccess);

            /*
             * Total time.
             */
            RTTestIValueF(cNsRead + cNsWrite + cNsCopy, RTTESTUNIT_NS, "cpu%02u-mem%02u-time", idxAlloc, idxAccess);
        }

        /*
         * Clean up before moving to the next allocation CPU.
         */
        RTMemPageFree(pvChunk, cbChunk);
    }
}
int main() { RTTEST hTest; RTEXITCODE rcExit = RTTestInitAndCreate("tstRTMp-1", &hTest); if (rcExit != RTEXITCODE_SUCCESS) return rcExit; RTTestBanner(hTest); /* * Present and possible CPUs. */ RTCPUID cCpus = RTMpGetCount(); if (cCpus > 0) RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetCount -> %u\n", cCpus); else { RTTestIFailed("RTMpGetCount returned zero"); cCpus = 1; } RTCPUID cCoreCpus = RTMpGetCoreCount(); if (cCoreCpus > 0) RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetCoreCount -> %d\n", (int)cCoreCpus); else { RTTestIFailed("RTMpGetCoreCount returned zero"); cCoreCpus = 1; } RTTESTI_CHECK(cCoreCpus <= cCpus); RTCPUSET Set; PRTCPUSET pSet = RTMpGetSet(&Set); RTTESTI_CHECK(pSet == &Set); if (pSet == &Set) { RTTESTI_CHECK((RTCPUID)RTCpuSetCount(&Set) == cCpus); RTTestIPrintf(RTTESTLVL_ALWAYS, "Possible CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); if (RTCpuSetIsMemberByIndex(&Set, iCpu)) { RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu)); if (RTMpIsCpuPresent(idCpu)) RTTestIPrintf(RTTESTLVL_ALWAYS, RTMpIsCpuOnline(idCpu) ? 
" online\n" : " offline\n"); else { if (!RTMpIsCpuOnline(idCpu)) RTTestIPrintf(RTTESTLVL_ALWAYS, " absent\n"); else { RTTestIPrintf(RTTESTLVL_ALWAYS, " online but absent!\n"); RTTestIFailed("Cpu with index %d is report as !RTIsCpuPresent while RTIsCpuOnline returns true!\n", iCpu); } } if (!RTMpIsCpuPossible(idCpu)) RTTestIFailed("Cpu with index %d is returned by RTCpuSet but not RTMpIsCpuPossible!\n", iCpu); } else if (RTMpIsCpuPossible(idCpu)) RTTestIFailed("Cpu with index %d is returned by RTMpIsCpuPossible but not RTCpuSet!\n", iCpu); else if (RTMpGetCurFrequency(idCpu) != 0) RTTestIFailed("RTMpGetCurFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu); else if (RTMpGetMaxFrequency(idCpu) != 0) RTTestIFailed("RTMpGetMaxFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu); } } else { RTCpuSetEmpty(&Set); RTCpuSetAdd(&Set, RTMpCpuIdFromSetIndex(0)); } /* * Online CPUs. */ RTCPUID cCpusOnline = RTMpGetOnlineCount(); if (cCpusOnline > 0) { if (cCpusOnline <= cCpus) RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetOnlineCount -> %d\n", (int)cCpusOnline); else { RTTestIFailed("RTMpGetOnlineCount -> %d, expected <= %d\n", (int)cCpusOnline, (int)cCpus); cCpusOnline = cCpus; } } else { RTTestIFailed("RTMpGetOnlineCount -> %d\n", (int)cCpusOnline); cCpusOnline = 1; } RTCPUID cCoresOnline = RTMpGetOnlineCoreCount(); if (cCoresOnline > 0) RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetOnlineCoreCount -> %d\n", (int)cCoresOnline); else { RTTestIFailed("RTMpGetOnlineCoreCount -> %d, expected <= %d\n", (int)cCoresOnline, (int)cCpusOnline); cCoresOnline = 1; } RTTESTI_CHECK(cCoresOnline <= cCpusOnline); RTCPUSET SetOnline; pSet = RTMpGetOnlineSet(&SetOnline); if (pSet == &SetOnline) { if (RTCpuSetCount(&SetOnline) <= 0) RTTestIFailed("RTMpGetOnlineSet returned an empty set!\n"); else if ((RTCPUID)RTCpuSetCount(&SetOnline) > cCpus) RTTestIFailed("RTMpGetOnlineSet returned a too high value; %d, expected <= %d\n", RTCpuSetCount(&SetOnline), cCpus); 
RTTestIPrintf(RTTESTLVL_ALWAYS, "Online CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) if (RTCpuSetIsMemberByIndex(&SetOnline, iCpu)) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu), RTMpIsCpuOnline(idCpu) ? "online" : "offline"); if (!RTCpuSetIsMemberByIndex(&Set, iCpu)) RTTestIFailed("online cpu with index %2d is not a member of the possible cpu set!\n", iCpu); } /* There isn't any sane way of testing RTMpIsCpuOnline really... :-/ */ } else RTTestIFailed("RTMpGetOnlineSet -> %p, expected %p\n", pSet, &Set); /* * Present CPUs. */ RTCPUID cCpusPresent = RTMpGetPresentCount(); if (cCpusPresent > 0) { if ( cCpusPresent <= cCpus && cCpusPresent >= cCpusOnline) RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetPresentCount -> %d\n", (int)cCpusPresent); else RTTestIFailed("RTMpGetPresentCount -> %d, expected <= %d and >= %d\n", (int)cCpusPresent, (int)cCpus, (int)cCpusOnline); } else { RTTestIFailed("RTMpGetPresentCount -> %d\n", (int)cCpusPresent); cCpusPresent = 1; } RTCPUSET SetPresent; pSet = RTMpGetPresentSet(&SetPresent); if (pSet == &SetPresent) { if (RTCpuSetCount(&SetPresent) <= 0) RTTestIFailed("RTMpGetPresentSet returned an empty set!\n"); else if ((RTCPUID)RTCpuSetCount(&SetPresent) != cCpusPresent) RTTestIFailed("RTMpGetPresentSet returned a bad value; %d, expected = %d\n", RTCpuSetCount(&SetPresent), cCpusPresent); RTTestIPrintf(RTTESTLVL_ALWAYS, "Present CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) if (RTCpuSetIsMemberByIndex(&SetPresent, iCpu)) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu), RTMpIsCpuPresent(idCpu) ? 
"present" : "absent"); if (!RTCpuSetIsMemberByIndex(&Set, iCpu)) RTTestIFailed("online cpu with index %2d is not a member of the possible cpu set!\n", iCpu); } /* There isn't any sane way of testing RTMpIsCpuPresent really... :-/ */ } else RTTestIFailed("RTMpGetPresentSet -> %p, expected %p\n", pSet, &Set); /* Find an online cpu for the next test. */ RTCPUID idCpuOnline; for (idCpuOnline = 0; idCpuOnline < RTCPUSET_MAX_CPUS; idCpuOnline++) if (RTMpIsCpuOnline(idCpuOnline)) break; /* * Quick test of RTMpGetDescription. */ char szBuf[64]; int rc = RTMpGetDescription(idCpuOnline, &szBuf[0], sizeof(szBuf)); if (RT_SUCCESS(rc)) { RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetDescription -> '%s'\n", szBuf); size_t cch = strlen(szBuf); rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch); if (rc != VERR_BUFFER_OVERFLOW) RTTestIFailed("RTMpGetDescription -> %Rrc, expected VERR_BUFFER_OVERFLOW\n", rc); rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch + 1); if (RT_FAILURE(rc)) RTTestIFailed("RTMpGetDescription -> %Rrc, expected VINF_SUCCESS\n", rc); } else RTTestIFailed("RTMpGetDescription -> %Rrc\n", rc); return RTTestSummaryAndDestroy(hTest); }
int main() { RTR3InitExeNoArguments(0); RTPrintf("tstMp-1: TESTING...\n"); /* * Present and possible CPUs. */ RTCPUID cCpus = RTMpGetCount(); if (cCpus > 0) RTPrintf("tstMp-1: RTMpGetCount -> %d\n", (int)cCpus); else { RTPrintf("tstMp-1: FAILURE: RTMpGetCount -> %d\n", (int)cCpus); g_cErrors++; cCpus = 1; } RTCPUSET Set; PRTCPUSET pSet = RTMpGetSet(&Set); if (pSet == &Set) { if ((RTCPUID)RTCpuSetCount(&Set) != cCpus) { RTPrintf("tstMp-1: FAILURE: RTMpGetSet returned a set with a different cpu count; %d, expected %d\n", RTCpuSetCount(&Set), cCpus); g_cErrors++; } RTPrintf("tstMp-1: Possible CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); if (RTCpuSetIsMemberByIndex(&Set, iCpu)) { RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu)); if (RTMpIsCpuPresent(idCpu)) RTPrintf(RTMpIsCpuOnline(idCpu) ? " online\n" : " offline\n"); else { if (!RTMpIsCpuOnline(idCpu)) RTPrintf(" absent\n"); else { RTPrintf(" online but absent!\n"); RTPrintf("tstMp-1: FAILURE: Cpu with index %d is report as !RTIsCpuPresent while RTIsCpuOnline returns true!\n", iCpu); g_cErrors++; } } if (!RTMpIsCpuPossible(idCpu)) { RTPrintf("tstMp-1: FAILURE: Cpu with index %d is returned by RTCpuSet but not RTMpIsCpuPossible!\n", iCpu); g_cErrors++; } } else if (RTMpIsCpuPossible(idCpu)) { RTPrintf("tstMp-1: FAILURE: Cpu with index %d is returned by RTMpIsCpuPossible but not RTCpuSet!\n", iCpu); g_cErrors++; } else if (RTMpGetCurFrequency(idCpu) != 0) { RTPrintf("tstMp-1: FAILURE: RTMpGetCurFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu); g_cErrors++; } else if (RTMpGetMaxFrequency(idCpu) != 0) { RTPrintf("tstMp-1: FAILURE: RTMpGetMaxFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu); g_cErrors++; } } } else { RTPrintf("tstMp-1: FAILURE: RTMpGetSet -> %p, expected %p\n", pSet, &Set); g_cErrors++; RTCpuSetEmpty(&Set); 
RTCpuSetAdd(&Set, RTMpCpuIdFromSetIndex(0)); } /* * Online CPUs. */ RTCPUID cCpusOnline = RTMpGetOnlineCount(); if (cCpusOnline > 0) { if (cCpusOnline <= cCpus) RTPrintf("tstMp-1: RTMpGetOnlineCount -> %d\n", (int)cCpusOnline); else { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineCount -> %d, expected <= %d\n", (int)cCpusOnline, (int)cCpus); g_cErrors++; cCpusOnline = cCpus; } } else { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineCount -> %d\n", (int)cCpusOnline); g_cErrors++; cCpusOnline = 1; } RTCPUSET SetOnline; pSet = RTMpGetOnlineSet(&SetOnline); if (pSet == &SetOnline) { if (RTCpuSetCount(&SetOnline) <= 0) { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet returned an empty set!\n"); g_cErrors++; } else if ((RTCPUID)RTCpuSetCount(&SetOnline) > cCpus) { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet returned a too high value; %d, expected <= %d\n", RTCpuSetCount(&SetOnline), cCpus); g_cErrors++; } RTPrintf("tstMp-1: Online CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) if (RTCpuSetIsMemberByIndex(&SetOnline, iCpu)) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu), RTMpIsCpuOnline(idCpu) ? "online" : "offline"); if (!RTCpuSetIsMemberByIndex(&Set, iCpu)) { RTPrintf("tstMp-1: FAILURE: online cpu with index %2d is not a member of the possible cpu set!\n", iCpu); g_cErrors++; } } /* There isn't any sane way of testing RTMpIsCpuOnline really... :-/ */ } else { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet -> %p, expected %p\n", pSet, &Set); g_cErrors++; } /* * Present CPUs. 
*/ RTCPUID cCpusPresent = RTMpGetPresentCount(); if (cCpusPresent > 0) { if ( cCpusPresent <= cCpus && cCpusPresent >= cCpusOnline) RTPrintf("tstMp-1: RTMpGetPresentCount -> %d\n", (int)cCpusPresent); else { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentCount -> %d, expected <= %d and >= %d\n", (int)cCpusPresent, (int)cCpus, (int)cCpusOnline); g_cErrors++; } } else { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentCount -> %d\n", (int)cCpusPresent); g_cErrors++; cCpusPresent = 1; } RTCPUSET SetPresent; pSet = RTMpGetPresentSet(&SetPresent); if (pSet == &SetPresent) { if (RTCpuSetCount(&SetPresent) <= 0) { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet returned an empty set!\n"); g_cErrors++; } else if ((RTCPUID)RTCpuSetCount(&SetPresent) != cCpusPresent) { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet returned a bad value; %d, expected = %d\n", RTCpuSetCount(&SetPresent), cCpusPresent); g_cErrors++; } RTPrintf("tstMp-1: Present CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) if (RTCpuSetIsMemberByIndex(&SetPresent, iCpu)) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu), RTMpIsCpuPresent(idCpu) ? "present" : "absent"); if (!RTCpuSetIsMemberByIndex(&Set, iCpu)) { RTPrintf("tstMp-1: FAILURE: online cpu with index %2d is not a member of the possible cpu set!\n", iCpu); g_cErrors++; } } /* There isn't any sane way of testing RTMpIsCpuPresent really... :-/ */ } else { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet -> %p, expected %p\n", pSet, &Set); g_cErrors++; } /* Find an online cpu for the next test. */ RTCPUID idCpuOnline; for (idCpuOnline = 0; idCpuOnline < RTCPUSET_MAX_CPUS; idCpuOnline++) if (RTMpIsCpuOnline(idCpuOnline)) break; /* * Quick test of RTMpGetDescription. 
*/ char szBuf[64]; int rc = RTMpGetDescription(idCpuOnline, &szBuf[0], sizeof(szBuf)); if (RT_SUCCESS(rc)) { RTPrintf("tstMp-1: RTMpGetDescription -> '%s'\n", szBuf); size_t cch = strlen(szBuf); rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch); if (rc != VERR_BUFFER_OVERFLOW) { RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc, expected VERR_BUFFER_OVERFLOW\n", rc); g_cErrors++; } rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch + 1); if (RT_FAILURE(rc)) { RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc, expected VINF_SUCCESS\n", rc); g_cErrors++; } } else { RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc\n", rc); g_cErrors++; } if (!g_cErrors) RTPrintf("tstMp-1: SUCCESS\n", g_cErrors); else RTPrintf("tstMp-1: FAILURE - %d errors\n", g_cErrors); return !!g_cErrors; }
/**
 * Creates a timer (NT kernel backend).
 *
 * The handle carries one "sub-timer" per CPU for the omni (all-CPU) case and
 * a single sub-timer otherwise; each sub-timer owns the DPC that fires on its
 * target processor.  The NT timer itself is armed later by RTTimerStart.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER for bad flag combinations.
 * @retval  VERR_CPU_NOT_FOUND if a specific CPU was requested that isn't possible.
 *
 * @param   ppTimer         Where to store the timer handle (set to NULL on failure).
 * @param   u64NanoInterval The interval in nanoseconds, 0 for one-shot.
 * @param   fFlags          RTTIMER_FLAGS_XXX.
 * @param   pfnTimer        Callback to invoke on each tick.
 * @param   pvUser          User argument for the callback.
 */
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;
    if (   (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /*
     * Allocate the timer handler.  Omni timers get one sub-timer per possible
     * CPU (the structure ends in a variable-size sub-timer array).
     */
    RTCPUID cSubTimers = 1;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cSubTimers = RTMpGetMaxCpuId() + 1;
        Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
    }

    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_OFFSETOF(RTTIMER, aSubTimers[cSubTimers]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->fSuspended = true;
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
    pTimer->cSubTimers = cSubTimers;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
    KeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);

    if (pTimer->fOmniTimer)
    {
        /*
         * Initialize the per-cpu "sub-timers", select the first online cpu
         * to be the master.
         * ASSUMES that no cpus will ever go offline.
         */
        pTimer->idCpu = NIL_RTCPUID;
        for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        {
            pTimer->aSubTimers[iCpu].iTick = 0;
            pTimer->aSubTimers[iCpu].pParent = pTimer;

            /* The first online CPU becomes the master (gets the master
               callback and has its id recorded); all others are slaves. */
            if (   pTimer->idCpu == NIL_RTCPUID
                && RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(iCpu)))
            {
                pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
            }
            else
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
            KeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
            /* Pin each DPC to its own processor. */
            KeSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, (int)RTMpCpuIdFromSetIndex(iCpu));
        }
        Assert(pTimer->idCpu != NIL_RTCPUID); /* at least one CPU must be online */
    }
    else
    {
        /*
         * Initialize the first "sub-timer", target the DPC on a specific processor
         * if requested to do so.
         */
        pTimer->aSubTimers[0].iTick = 0;
        pTimer->aSubTimers[0].pParent = pTimer;

        KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
        KeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
        if (pTimer->fSpecificCpu)
            KeSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, (int)pTimer->idCpu);
    }

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
/**
 * Creates a timer handle (Solaris cyclic backend, refcounted variant).
 *
 * Only validates the flags and fills in the handle; the actual cyclic is
 * armed later by RTTimerStart.
 *
 * @returns IPRT status code.
 * @retval  VERR_INVALID_PARAMETER for bad flag combinations.
 * @retval  VERR_CPU_NOT_FOUND if a specific CPU was requested that isn't possible.
 * @retval  VERR_NOT_SUPPORTED for one-shot (interval 0) omni timers, which the
 *          cyclic system cannot do.
 *
 * @param   ppTimer         Where to store the timer handle (set to NULL on failure).
 * @param   u64NanoInterval The interval in nanoseconds, 0 for one-shot.
 * @param   fFlags          RTTIMER_FLAGS_XXX.
 * @param   pfnTimer        Callback to invoke on each tick.
 * @param   pvUser          User argument for the callback.
 */
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    RT_ASSERT_PREEMPTIBLE();
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;

    bool const fOmni     = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    bool const fSpecific = !fOmni && (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) != 0;
    if (   fSpecific
        && !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /* One-shot omni timers are not supported by the cyclic system. */
    if (fOmni && u64NanoInterval == 0)
        return VERR_NOT_SUPPORTED;

    /*
     * Allocate and initialize the timer handle.  The omni variant has a
     * variable sized array of tick counts at the end, thus the dynamic size.
     */
    size_t const cbTimer = fOmni
                         ? RT_UOFFSETOF_DYN(RTTIMER, u.Omni.aPerCpu[RTMpGetCount()])
                         : sizeof(RTTIMER);
    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(cbTimer);
    if (!pTimer)
        return VERR_NO_MEMORY;

    pTimer->u32Magic            = RTTIMER_MAGIC;
    pTimer->cRefs               = 1;
    pTimer->fSuspended          = true;
    pTimer->fSuspendedFromTimer = false;
    pTimer->fIntervalChanged    = false;
    pTimer->fAllCpus            = fOmni;
    pTimer->fSpecificCpu        = fSpecific;
    pTimer->iCpu                = fSpecific ? (fFlags & RTTIMER_FLAGS_CPU_MASK) /* ASSUMES: index == cpuid */ : UINT32_MAX;
    pTimer->cNsInterval         = u64NanoInterval;
    pTimer->pfnTimer            = pfnTimer;
    pTimer->pvUser              = pvUser;
    pTimer->hCyclicId           = CYCLIC_NONE;

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}