/**
 * Gets the set of all CPUs known to the system (generic implementation).
 *
 * Every set index from 0 up to RTMpGetCount() - 1 is marked present.
 *
 * @returns pSet.
 * @param   pSet    Where to store the CPU set; emptied first.
 */
RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    RTCPUID const cCpus = RTMpGetCount();
    for (RTCPUID idxCpu = 0; idxCpu < cCpus; idxCpu++)
        RTCpuSetAddByIndex(pSet, idxCpu);
    return pSet;
}
/**
 * Gets the affinity mask of the calling thread (Linux implementation).
 *
 * Queries the pthread affinity and translates the Linux cpu_set_t into an
 * IPRT CPU set, capped at whichever of CPU_SETSIZE / RTCPUSET_MAX_CPUS is
 * smaller.
 *
 * @returns IPRT status code (VINF_SUCCESS on success).
 * @param   pCpuSet     Where to store the affinity set; emptied first.
 */
RTR3DECL(int) RTThreadGetAffinity(PRTCPUSET pCpuSet)
{
    cpu_set_t LnxCpuSet;
    int rc = pthread_getaffinity_np(pthread_self(), sizeof(LnxCpuSet), &LnxCpuSet);
    if (rc != 0)
        /* pthread APIs return the error number directly and do NOT set errno,
           so convert rc rather than the (possibly stale) errno value. */
        return RTErrConvertFromErrno(rc);

    /* convert */
    RTCpuSetEmpty(pCpuSet);
    for (unsigned iCpu = 0; iCpu < RT_MIN(CPU_SETSIZE, RTCPUSET_MAX_CPUS); iCpu++)
        if (CPU_ISSET(iCpu, &LnxCpuSet))
            RTCpuSetAddByIndex(pCpuSet, iCpu);
    return VINF_SUCCESS;
}
/**
 * Binds the calling thread to a single CPU, or to all CPUs.
 *
 * @returns IPRT status code.
 * @retval  VERR_CPU_NOT_FOUND if idCpu has no valid set index.
 * @param   idCpu   The CPU to bind to, or NIL_RTCPUID to clear the
 *                  restriction (affinity to all CPUs).
 */
RTR3DECL(int) RTThreadSetAffinityToCpu(RTCPUID idCpu)
{
    /* NIL means "no restriction": hand a NULL set to RTThreadSetAffinity. */
    if (idCpu == NIL_RTCPUID)
        return RTThreadSetAffinity(NULL);

    int const iCpu = RTMpCpuIdToSetIndex(idCpu);
    if (iCpu < 0)
        return VERR_CPU_NOT_FOUND;

    /* Build a one-CPU set and apply it. */
    RTCPUSET CpuSet;
    RTCpuSetEmpty(&CpuSet);
    RTCpuSetAddByIndex(&CpuSet, iCpu);
    return RTThreadSetAffinity(&CpuSet);
}
/**
 * Gets the affinity mask of the calling thread (generic stub).
 *
 * This fallback cannot query the OS, so it always reports a set
 * containing only CPU index 0.
 *
 * @returns VINF_SUCCESS.
 * @param   pCpuSet     Where to store the affinity set; emptied first.
 */
RTR3DECL(int) RTThreadGetAffinity(PRTCPUSET pCpuSet)
{
    RTCpuSetEmpty(pCpuSet);
    RTCpuSetAddByIndex(pCpuSet, 0 /* first CPU only */);
    return VINF_SUCCESS;
}
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet) { RTOnce(&g_MpInitOnce, rtMpWinInitOnce, NULL); #ifdef IPRT_WITH_GIP_MP_INFO RTMPWIN_UPDATE_GIP_GLOBALS_AND_GET_PGIP(); if (pGip) { *pSet = pGip->OnlineCpuSet; return pSet; } #endif if (g_pfnGetLogicalProcessorInformationEx) { /* * Get the group relation info. * * In addition to the ASSUMPTIONS that are documented in rtMpWinInitOnce, * we ASSUME that PROCESSOR_GROUP_INFO::MaximumProcessorCount gives the * active processor mask width. */ /** @todo this is not correct for WOW64 */ DWORD cbInfo = g_cbRtMpWinGrpRelBuf; SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)alloca(cbInfo); AssertFatalMsg(g_pfnGetLogicalProcessorInformationEx(RelationGroup, pInfo, &cbInfo) != FALSE, ("last error = %u, cbInfo = %u (in %u)\n", GetLastError(), cbInfo, g_cbRtMpWinGrpRelBuf)); AssertFatalMsg(pInfo->Relationship == RelationGroup, ("Relationship = %u, expected %u!\n", pInfo->Relationship, RelationGroup)); AssertFatalMsg(pInfo->Group.MaximumGroupCount == g_cRtMpWinMaxCpuGroups, ("MaximumGroupCount is %u, expected %u!\n", pInfo->Group.MaximumGroupCount, g_cRtMpWinMaxCpuGroups)); RTCpuSetEmpty(pSet); for (uint32_t idxGroup = 0; idxGroup < pInfo->Group.MaximumGroupCount; idxGroup++) { Assert(pInfo->Group.GroupInfo[idxGroup].MaximumProcessorCount == g_aRtMpWinCpuGroups[idxGroup].cMaxCpus); Assert(pInfo->Group.GroupInfo[idxGroup].ActiveProcessorCount <= g_aRtMpWinCpuGroups[idxGroup].cMaxCpus); KAFFINITY fActive = pInfo->Group.GroupInfo[idxGroup].ActiveProcessorMask; if (fActive != 0) { #ifdef RT_STRICT uint32_t cMembersLeft = pInfo->Group.GroupInfo[idxGroup].ActiveProcessorCount; #endif int const cMembers = g_aRtMpWinCpuGroups[idxGroup].cMaxCpus; for (int idxMember = 0; idxMember < cMembers; idxMember++) { if (fActive & 1) { #ifdef RT_STRICT cMembersLeft--; #endif RTCpuSetAddByIndex(pSet, g_aRtMpWinCpuGroups[idxGroup].aidxCpuSetMembers[idxMember]); fActive >>= 1; if (!fActive) break; } else { 
fActive >>= 1; } } Assert(cMembersLeft == 0); }