int rtPathFromNative(const char **ppszPath, const char *pszNativePath, const char *pszBasePath) { *ppszPath = NULL; int rc = RTOnce(&g_OnceInitPathConv, rtPathConvInitOnce, NULL); if (RT_SUCCESS(rc)) { if (g_fPassthruUtf8 || !*pszNativePath) { size_t cCpsIgnored; size_t cchNativePath; rc = rtUtf8Length(pszNativePath, RTSTR_MAX, &cCpsIgnored, &cchNativePath); if (RT_SUCCESS(rc)) { char *pszPath; *ppszPath = pszPath = RTStrAlloc(cchNativePath + 1); if (pszPath) memcpy(pszPath, pszNativePath, cchNativePath + 1); else rc = VERR_NO_STR_MEMORY; } } else rc = rtStrConvert(pszNativePath, strlen(pszNativePath), g_szFsCodeset, (char **)ppszPath, 0, "UTF-8", 2, g_enmFsToUtf8Idx); } NOREF(pszBasePath); /* We don't query the FS for codeset preferences. */ return rc; }
/**
 * Gets the number of physical CPU cores in the system.
 *
 * @returns The core count (cached in g_cRtMpWinMaxCpuCores).
 */
RTDECL(RTCPUID) RTMpGetCoreCount(void)
{
    /* Lazy init, then refresh the GIP-derived globals before reading them. */
    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
    RTMPWIN_UPDATE_GIP_GLOBALS();

    RTCPUID const cCores = g_cRtMpWinMaxCpuCores;
    return cCores;
}
/**
 * Gets the maximum number of CPU groups the system may have.
 *
 * @returns The group count (cached in g_cRtMpWinMaxCpuGroups).
 */
RTDECL(uint32_t) RTMpGetMaxCpuGroupCount(void)
{
    /* Lazy init, then refresh the GIP-derived globals before reading them. */
    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
    RTMPWIN_UPDATE_GIP_GLOBALS();

    uint32_t const cGroups = g_cRtMpWinMaxCpuGroups;
    return cGroups;
}
RTDECL(RTCPUID) RTMpCpuId(void) { RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL); RTMPWIN_UPDATE_GIP_GLOBALS(); PROCESSOR_NUMBER ProcNum; ProcNum.Group = 0; ProcNum.Number = 0xff; if (g_pfnGetCurrentProcessorNumberEx) g_pfnGetCurrentProcessorNumberEx(&ProcNum); else if (g_pfnGetCurrentProcessorNumber) { DWORD iCpu = g_pfnGetCurrentProcessorNumber(); Assert(iCpu < g_cRtMpWinMaxCpus); ProcNum.Number = iCpu; } else { #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) ProcNum.Number = ASMGetApicId(); #else # error "Not ported to this architecture." return NIL_RTCPUID; #endif } #ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER return RTMPCPUID_FROM_GROUP_AND_NUMBER(ProcNum.Group, ProcNum.Number); #else return RTMpSetIndexFromCpuGroupMember(ProcNum.Group, ProcNum.Number); #endif }
/**
 * Converts a CPU set index to a CPU identifier.
 *
 * @returns CPU id, NIL_RTCPUID if the index is out of range.
 * @param   iCpu    The CPU set index.
 */
RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
{
    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
    /* Fix: the strict check below reads pGip, so use the macro variant that
       declares it; plain RTMPWIN_UPDATE_GIP_GLOBALS() does not. */
    RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP();

    if ((unsigned)iCpu < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx))
    {
        RTCPUID idCpu = g_aidRtMpWinByCpuSetIdx[iCpu];

#if defined(IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER) && defined(RT_STRICT)
        /* Check the correctness of the mapping table against the GIP. */
        RTCPUID idCpuGip = NIL_RTCPUID;
        if (   pGip
            && (unsigned)iCpu < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx))
        {
            /* Fix: was 'idxGuess', an identifier not declared anywhere in this
               function; the set index being converted is iCpu. */
            unsigned idxSupCpu = pGip->aiCpuFromCpuSetIdx[iCpu];
            if (idxSupCpu < pGip->cCpus)
                if (pGip->aCPUs[idxSupCpu].enmState != SUPGIPCPUSTATE_INVALID)
                    idCpuGip = pGip->aCPUs[idxSupCpu].idCpu;
        }
        AssertMsg(idCpu == idCpuGip, ("table:%#x gip:%#x\n", idCpu, idCpuGip));
#endif
        return idCpu;
    }
    return NIL_RTCPUID;
}
/**
 * Checks whether the given CPU id is one the system could have.
 *
 * @returns true if possible, false otherwise.
 * @param   idCpu   The CPU id to check.
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
    RTMPWIN_UPDATE_GIP_GLOBALS();

    /* Any CPU between 0 and g_cRtMpWinMaxCpus are possible. */
    bool const fPossible = idCpu < g_cRtMpWinMaxCpus;
    return fPossible;
}
/**
 * Translates a (group, member) pair into a CPU set index.
 *
 * @returns The CPU set index, -1 if either index is out of range.
 * @param   idxGroup    The CPU group index.
 * @param   idxMember   The member index within the group.
 */
RTDECL(int) RTMpSetIndexFromCpuGroupMember(uint32_t idxGroup, uint32_t idxMember)
{
    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
    RTMPWIN_UPDATE_GIP_GLOBALS();

    if (   idxGroup < g_cRtMpWinMaxCpuGroups
        && idxMember < g_aRtMpWinCpuGroups[idxGroup].cMaxCpus)
        return g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember];
    return -1;
}
/**
 * Tells whether offline (non-display) rendering is appropriate on this host.
 *
 * The 3D engine may ask repeatedly, and the renderer count is assumed to be
 * constant, so the answer is computed exactly once and cached.  RTOnce makes
 * the one-time computation thread-safe.
 *
 * @returns Cached appropriateness flag.
 */
bool RTCALL VBoxOglIsOfflineRenderingAppropriate(void)
{
    static RTONCE s_OnceGuard = RTONCE_INITIALIZER;
    static bool   s_fResult   = false;

    int rc = RTOnce(&s_OnceGuard, vboxOglIsOfflineRenderingAppropriateOnce, &s_fResult);
    AssertRC(rc);

    return s_fResult;
}
/**
 * Gets the highest possible CPU identifier.
 *
 * @returns Last member of the last group, or the last set index, depending on
 *          the IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER configuration.
 */
RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
{
    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
    RTMPWIN_UPDATE_GIP_GLOBALS();

#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
    /* Highest member of the highest group. */
    uint32_t const idxLastGroup = g_cRtMpWinMaxCpuGroups - 1;
    return RTMPCPUID_FROM_GROUP_AND_NUMBER(idxLastGroup, g_aRtMpWinCpuGroups[idxLastGroup].cMaxCpus - 1);
#else
    return g_cRtMpWinMaxCpus - 1;
#endif
}
/**
 * Converts a CPU identifier to a CPU set index.
 *
 * @returns The set index, -1 for NIL/out-of-range ids.
 * @param   idCpu   The CPU id to convert.
 */
RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
{
    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
    RTMPWIN_UPDATE_GIP_GLOBALS();

#ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER
    if (idCpu == NIL_RTCPUID)
        return -1;
    /* Decompose the id into group + member and look up the set index. */
    return RTMpSetIndexFromCpuGroupMember(rtMpCpuIdGetGroup(idCpu), rtMpCpuIdGetGroupMember(idCpu));
#else
    /* 1:1 mapping, just do range checking. */
    if (idCpu < g_cRtMpWinMaxCpus)
        return idCpu;
    return -1;
#endif
}
/**
 * Gets the CPU counts for a processor group.
 *
 * @returns The maximum CPU count of the group, 0 if idxGroup is out of range.
 * @param   idxGroup    The group index.
 * @param   pcActive    Where to return the active CPU count.  Optional; set to
 *                      0 for out-of-range groups.
 */
RTDECL(uint32_t) RTMpGetCpuGroupCounts(uint32_t idxGroup, uint32_t *pcActive)
{
    RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL);
    RTMPWIN_UPDATE_GIP_GLOBALS();

    uint32_t cMax    = 0;
    uint32_t cActive = 0;
    if (idxGroup < g_cRtMpWinMaxCpuGroups)
    {
        cMax    = g_aRtMpWinCpuGroups[idxGroup].cMaxCpus;
        cActive = g_aRtMpWinCpuGroups[idxGroup].cActiveCpus;
    }
    if (pcActive)
        *pcActive = cActive;
    return cMax;
}
/**
 * Converts a native path to UTF-8, always returning a freshly allocated copy.
 *
 * @returns IPRT status code.
 * @param   ppszPath        Where to store the allocated UTF-8 path.
 * @param   pszNativePath   The native path to convert.
 * @param   pszBasePath     Base path hint; unused here as we do not query the
 *                          FS for codeset preferences.
 */
int rtPathFromNativeDup(char **ppszPath, const char *pszNativePath, const char *pszBasePath)
{
    /* Make sure the codeset globals are initialized before consulting them. */
    int rc = RTOnce(&g_OnceInitPathConv, rtPathConvInitOnce, NULL);
    if (RT_SUCCESS(rc))
    {
        if (!g_fPassthruUtf8 && *pszNativePath)
            /* Real conversion from the filesystem codeset to UTF-8. */
            rc = rtStrConvert(pszNativePath, strlen(pszNativePath), g_szFsCodeset,
                              ppszPath, 0, "UTF-8", 2, g_enmFsToUtf8Idx);
        else
            /* Passthru (or empty string): a plain duplicate will do. */
            rc = RTStrDupEx(ppszPath, pszNativePath);
    }

    NOREF(pszBasePath); /* We don't query the FS for codeset preferences. */
    return rc;
}
/**
 * Converts a UTF-8 path to the native (filesystem) codeset.
 *
 * @returns IPRT status code.
 * @param   ppszNativePath  Where to store the native path.  NOTE: in the
 *                          passthru case this aliases pszPath itself (no
 *                          allocation); set to NULL on failure.
 * @param   pszPath         The UTF-8 path to convert.
 * @param   pszBasePath     Base path hint; unused here as we do not query the
 *                          FS for codeset preferences.
 */
int rtPathToNative(char const **ppszNativePath, const char *pszPath, const char *pszBasePath)
{
    *ppszNativePath = NULL;

    /* Make sure the codeset globals are initialized before consulting them. */
    int rc = RTOnce(&g_OnceInitPathConv, rtPathConvInitOnce, NULL);
    if (RT_SUCCESS(rc))
    {
        if (!g_fPassthruUtf8 && *pszPath)
            /* Real conversion from UTF-8 to the filesystem codeset. */
            rc = rtStrConvert(pszPath, strlen(pszPath), "UTF-8",
                              (char **)ppszNativePath, 0, g_szFsCodeset, 2, g_enmUtf8ToFsIdx);
        else
            /* Passthru (or empty string): hand back the input pointer as-is. */
            *ppszNativePath = pszPath;
    }

    NOREF(pszBasePath); /* We don't query the FS for codeset preferences. */
    return rc;
}
/**
 * Worker for RTMpGetCurFrequency and RTMpGetMaxFrequency.
 *
 * Reads the named cpu_info kstat for the given CPU under the module critical
 * section, coping with the several integer types kstat may hand back.
 *
 * @returns The desired frequency on success, 0 on failure.
 *
 * @param idCpu The CPU ID.
 * @param pszStatName The cpu_info stat name.
 */
static uint64_t rtMpSolarisGetFrequency(RTCPUID idCpu, char *pszStatName)
{
    uint64_t u64 = 0;
    /* Lazy-init the kstat control handle and per-CPU kstat pointers. */
    int rc = RTOnce(&g_MpSolarisOnce, rtMpSolarisOnce, NULL, NULL);
    if (RT_SUCCESS(rc))
    {
        if (   idCpu < g_capCpuInfo
            && g_papCpuInfo[idCpu])
        {
            /* kstat handles are not thread-safe; serialize all access. */
            rc = RTCritSectEnter(&g_MpSolarisCritSect);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                /* Refresh the kstat data before looking anything up. */
                if (kstat_read(g_pKsCtl, g_papCpuInfo[idCpu], 0) != -1)
                {
                    kstat_named_t *pStat = (kstat_named_t *)kstat_data_lookup(g_papCpuInfo[idCpu], pszStatName);
                    if (pStat)
                    {
                        /* NOTE(review): the assert admits KSTAT_DATA_LONG but the
                           switch handles KSTAT_DATA_INT32 instead — presumably
                           intentional on ILP32 where they coincide; confirm. */
                        Assert(pStat->data_type == KSTAT_DATA_UINT64 || pStat->data_type == KSTAT_DATA_LONG);
                        switch (pStat->data_type)
                        {
                            case KSTAT_DATA_UINT64: u64 = pStat->value.ui64; break; /* current_clock_Hz */
                            case KSTAT_DATA_INT32:  u64 = pStat->value.i32; break;  /* clock_MHz */
                            /* just in case... */
                            case KSTAT_DATA_UINT32: u64 = pStat->value.ui32; break;
                            case KSTAT_DATA_INT64:  u64 = pStat->value.i64; break;
                            default: AssertMsgFailed(("%d\n", pStat->data_type)); break;
                        }
                    }
                    else
                        Log(("kstat_data_lookup(%s) -> %d\n", pszStatName, errno));
                }
                else
                    Log(("kstat_read() -> %d\n", errno));
                RTCritSectLeave(&g_MpSolarisCritSect);
            }
        }
        else
            Log(("invalid idCpu: %d (g_capCpuInfo=%d)\n", (int)idCpu, (int)g_capCpuInfo));
    }

    /* 0 signals failure to the callers. */
    return u64;
}
/**
 * @callback_method_impl{FNPDMDEVREQHANDLERR0}
 */
PDMBOTHCBDECL(int) devR0SmcReqHandler(PPDMDEVINS pDevIns, uint32_t uOperation, uint64_t u64Arg)
{
    PDEVSMC pThis = PDMINS_2_DATA(pDevIns, PDEVSMC);

    /* Reading the OSK is the only operation this ring-0 handler knows. */
    if (uOperation != SMC_CALLR0_READ_OSK)
        return VERR_INVALID_FUNCTION;

    int rc = RTOnce(&g_SmcR0Once, devR0SmcInitOnce, NULL);
    if (   RT_SUCCESS(rc)
        && g_fHaveOsk)
    {
        /* Copy the cached OSK into the device state, forcing termination. */
        AssertCompile(sizeof(g_abOsk0And1) + 1 == sizeof(pThis->szOsk0And1));
        memcpy(pThis->szOsk0And1, g_abOsk0And1, sizeof(pThis->szOsk0And1) - 1);
        pThis->szOsk0And1[sizeof(pThis->szOsk0And1) - 1] = '\0';
    }
    return rc;
}
int rtPathFromNativeCopy(char *pszPath, size_t cbPath, const char *pszNativePath, const char *pszBasePath) { int rc = RTOnce(&g_OnceInitPathConv, rtPathConvInitOnce, NULL); if (RT_SUCCESS(rc)) { if (g_fPassthruUtf8 || !*pszNativePath) rc = RTStrCopy(pszPath, cbPath, pszNativePath); else if (cbPath) rc = rtStrConvert(pszNativePath, strlen(pszNativePath), g_szFsCodeset, &pszPath, cbPath, "UTF-8", 2, g_enmFsToUtf8Idx); else rc = VERR_BUFFER_OVERFLOW; } NOREF(pszBasePath); /* We don't query the FS for codeset preferences. */ return rc; }
/**
 * Worker thread for the concurrent RTOnce test: all threads wait on a shared
 * event, then race to run the Once2CB initializer and verify its effect.
 *
 * @returns IPRT status code of the wait or the RTOnce call.
 */
static DECLCALLBACK(int) Once2Thread(RTTHREAD hThread, void *pvUser)
{
    NOREF(hThread);
    NOREF(pvUser);

    /* Block until the main thread releases all workers simultaneously. */
    int rc = RTSemEventMultiWait(g_hEventMulti, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc))
        return rc;

    rc = RTOnce(&g_Once2, Once2CB, (void *)42);
    if (   RT_SUCCESS(rc)
        && !ASMAtomicUoReadBool(&g_fOnce2Ready))
    {
        /* RTOnce returned before the initializer's side effect was visible. */
        RTPrintf("tstOnce: ERROR - Once2CB: Not initialized!\n");
        g_cErrors++;
    }
    return rc;
}
static struct VBEGLTLS *getTls(void) { struct VBEGLTLS *pTls; RTOnce(&g_tlsOnce, tlsInitOnce, NULL); pTls = (struct VBEGLTLS *)RTTlsGet(g_tls); if (RT_LIKELY(pTls)) return pTls; pTls = (struct VBEGLTLS *)RTMemAlloc(sizeof(*pTls)); if (!VALID_PTR(pTls)) return NULL; pTls->cErr = EGL_SUCCESS; pTls->enmAPI = EGL_NONE; pTls->hCurrent = EGL_NO_CONTEXT; pTls->hCurrentDisplay = EGL_NO_DISPLAY; pTls->hCurrentDraw = EGL_NO_SURFACE; pTls->hCurrentRead = EGL_NO_SURFACE; RTTlsSet(g_tls, pTls); return pTls; }
/**
 * EGL entry point: resolves a native display handle to an EGLDisplay.
 *
 * @returns The display on success, EGL_NO_DISPLAY if init fails, the display
 *          is unusable, or GLX is older than 1.3.
 * @param   hDisplay    Native display handle or EGL_DEFAULT_DISPLAY.
 */
DECLEXPORT(EGLDisplay) eglGetDisplay(EGLNativeDisplayType hDisplay)
{
    Display *pDisplay;
    int rc, cError, cEvent, cMajor, cMinor;

    rc = RTR3InitDll(RTR3INIT_FLAGS_UNOBTRUSIVE);
    if (RT_FAILURE(rc))
        return EGL_NO_DISPLAY;
    if (!clearEGLError()) /* Set up our tls. */
        return EGL_NO_DISPLAY;

    if (hDisplay == EGL_DEFAULT_DISPLAY)
    {
        /* Open (once) and reuse the process-wide default X display. */
        RTOnce(&g_defaultDisplayOnce, defaultDisplayInitOnce, NULL);
        pDisplay = g_pDefaultDisplay;
    }
    else
        pDisplay = hDisplay;

    /* Require a working GLX of at least version 1.3. */
    if (pDisplay && glXQueryExtension(pDisplay, &cError, &cEvent))
        if (glXQueryVersion(pDisplay, &cMajor, &cMinor))
            if (cMajor > 1 || (cMajor == 1 && cMinor >= 3))
                return (EGLDisplay)pDisplay;
    return EGL_NO_DISPLAY;
}
/** * @callback_method_impl{FNRTONCE, Updates globals with information from GIP.} */ static DECLCALLBACK(int32_t) rtMpWinInitOnceGip(void *pvUser) { RT_NOREF(pvUser); RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL); PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage; if ( pGip && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC) { /* * Update globals. */ if (g_cRtMpWinMaxCpus != pGip->cPossibleCpus) g_cRtMpWinMaxCpus = pGip->cPossibleCpus; if (g_cRtMpWinActiveCpus != pGip->cOnlineCpus) g_cRtMpWinActiveCpus = pGip->cOnlineCpus; Assert(g_cRtMpWinMaxCpuGroups == pGip->cPossibleCpuGroups); if (g_cRtMpWinMaxCpuGroups != pGip->cPossibleCpuGroups) { g_cRtMpWinMaxCpuGroups = pGip->cPossibleCpuGroups; g_cbRtMpWinGrpRelBuf = sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX) + (g_cRtMpWinMaxCpuGroups + 2) * sizeof(PROCESSOR_GROUP_INFO); } /* * Update CPU set IDs. */ for (unsigned i = g_cRtMpWinMaxCpus; i < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx); i++) g_aidRtMpWinByCpuSetIdx[i] = NIL_RTCPUID; unsigned const cbGip = pGip->cPages * PAGE_SIZE; for (uint32_t idxGroup = 0; idxGroup < g_cRtMpWinMaxCpus; idxGroup++) { uint32_t idxMember; unsigned offCpuGroup = pGip->aoffCpuGroup[idxGroup]; if (offCpuGroup < cbGip) { PSUPGIPCPUGROUP pGipCpuGrp = (PSUPGIPCPUGROUP)((uintptr_t)pGip + offCpuGroup); uint32_t cMaxMembers = pGipCpuGrp->cMaxMembers; AssertStmt(cMaxMembers < RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers), cMaxMembers = RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers)); g_aRtMpWinCpuGroups[idxGroup].cMaxCpus = cMaxMembers; g_aRtMpWinCpuGroups[idxGroup].cActiveCpus = RT_MIN(pGipCpuGrp->cMembers, cMaxMembers); for (idxMember = 0; idxMember < cMaxMembers; idxMember++) { int16_t idxSet = pGipCpuGrp->aiCpuSetIdxs[idxMember]; g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = idxSet; if ((unsigned)idxSet < RT_ELEMENTS(g_aidRtMpWinByCpuSetIdx)) # ifdef IPRT_WITH_RTCPUID_AS_GROUP_AND_NUMBER g_aidRtMpWinByCpuSetIdx[idxSet] = RTMPCPUID_FROM_GROUP_AND_NUMBER(idxGroup, idxMember); # 
else g_aidRtMpWinByCpuSetIdx[idxSet] = idxSet; # endif } } else idxMember = 0; for (; idxMember < RT_ELEMENTS(g_aRtMpWinCpuGroups[0].aidxCpuSetMembers); idxMember++) g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember] = -1; } } return VINF_SUCCESS; }
int main() { RTR3InitExeNoArguments(0); /* * Just a simple testcase. */ RTPrintf("tstOnce: TESTING - smoke...\n"); RTONCE Once1 = RTONCE_INITIALIZER; g_fOnceCB1 = false; int rc = RTOnce(&Once1, Once1CB, (void *)1); if (rc != VINF_SUCCESS) RTPrintf("tstOnce: ERROR - Once1, 1 failed, rc=%Rrc\n", rc); g_fOnceCB1 = false; rc = RTOnce(&Once1, Once1CB, (void *)1); if (rc != VINF_SUCCESS) RTPrintf("tstOnce: ERROR - Once1, 2 failed, rc=%Rrc\n", rc); /* * Throw a bunch of threads up against a init once thing. */ RTPrintf("tstOnce: TESTING - bunch of threads...\n"); /* create the semaphore they'll be waiting on. */ rc = RTSemEventMultiCreate(&g_hEventMulti); if (RT_FAILURE(rc)) { RTPrintf("tstOnce: FATAL ERROR - RTSemEventMultiCreate returned %Rrc\n", rc); return 1; } /* create the threads */ RTTHREAD aThreads[32]; for (unsigned i = 0; i < RT_ELEMENTS(aThreads); i++) { char szName[16]; RTStrPrintf(szName, sizeof(szName), "ONCE2-%d\n", i); rc = RTThreadCreate(&aThreads[i], Once2Thread, NULL, 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, szName); if (RT_FAILURE(rc)) { RTPrintf("tstOnce: ERROR - failed to create thread #%d\n", i); g_cErrors++; } } /* kick them off and yield */ rc = RTSemEventMultiSignal(g_hEventMulti); if (RT_FAILURE(rc)) { RTPrintf("tstOnce: FATAL ERROR - RTSemEventMultiSignal returned %Rrc\n", rc); return 1; } RTThreadYield(); /* wait for all of them to finish up, 30 seconds each. */ for (unsigned i = 0; i < RT_ELEMENTS(aThreads); i++) if (aThreads[i] != NIL_RTTHREAD) { int rc2; rc = RTThreadWait(aThreads[i], 30*1000, &rc2); if (RT_FAILURE(rc)) { RTPrintf("tstOnce: ERROR - RTThreadWait on thread #%u returned %Rrc\n", i, rc); g_cErrors++; } else if (RT_FAILURE(rc2)) { RTPrintf("tstOnce: ERROR - Thread #%u returned %Rrc\n", i, rc2); g_cErrors++; } } /* * Summary. */ if (!g_cErrors) RTPrintf("tstOnce: SUCCESS\n"); else RTPrintf("tstOnce: FAILURE - %d errors\n", g_cErrors); return !!g_cErrors; }
/**
 * Performs lazy, thread-safe initialization of the debug module subsystem.
 *
 * @returns IPRT status code from the (at most one) rtDbgModInitOnce run.
 */
DECLINLINE(int) rtDbgModLazyInit(void)
{
    /* RTOnce guarantees rtDbgModInitOnce executes exactly once process-wide. */
    return RTOnce(&g_rtDbgModOnce, rtDbgModInitOnce, NULL, NULL);
}
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet) { RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL); #ifdef IPRT_WITH_GIP_MP_INFO RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP(); if (pGip) { *pSet = pGip->OnlineCpuSet; return pSet; } #endif if (g_pfnGetLogicalProcessorInformationEx) { /* * Get the group relation info. * * In addition to the ASSUMPTIONS that are documented in rtMpWinInitOnce, * we ASSUME that PROCESSOR_GROUP_INFO::MaximumProcessorCount gives the * active processor mask width. */ /** @todo this is not correct for WOW64 */ DWORD cbInfo = g_cbRtMpWinGrpRelBuf; SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)alloca(cbInfo); AssertFatalMsg(g_pfnGetLogicalProcessorInformationEx(RelationGroup, pInfo, &cbInfo) != FALSE, ("last error = %u, cbInfo = %u (in %u)\n", GetLastError(), cbInfo, g_cbRtMpWinGrpRelBuf)); AssertFatalMsg(pInfo->Relationship == RelationGroup, ("Relationship = %u, expected %u!\n", pInfo->Relationship, RelationGroup)); AssertFatalMsg(pInfo->Group.MaximumGroupCount == g_cRtMpWinMaxCpuGroups, ("MaximumGroupCount is %u, expected %u!\n", pInfo->Group.MaximumGroupCount, g_cRtMpWinMaxCpuGroups)); RTCpuSetEmpty(pSet); for (uint32_t idxGroup = 0; idxGroup < pInfo->Group.MaximumGroupCount; idxGroup++) { Assert(pInfo->Group.GroupInfo[idxGroup].MaximumProcessorCount == g_aRtMpWinCpuGroups[idxGroup].cMaxCpus); Assert(pInfo->Group.GroupInfo[idxGroup].ActiveProcessorCount <= g_aRtMpWinCpuGroups[idxGroup].cMaxCpus); KAFFINITY fActive = pInfo->Group.GroupInfo[idxGroup].ActiveProcessorMask; if (fActive != 0) { #ifdef RT_STRICT uint32_t cMembersLeft = pInfo->Group.GroupInfo[idxGroup].ActiveProcessorCount; #endif int const cMembers = g_aRtMpWinCpuGroups[idxGroup].cMaxCpus; for (int idxMember = 0; idxMember < cMembers; idxMember++) { if (fActive & 1) { #ifdef RT_STRICT cMembersLeft--; #endif RTCpuSetAddByIndex(pSet, g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember]); fActive >>= 1; if (!fActive) break; } else { 
fActive >>= 1; } } Assert(cMembersLeft == 0); }