/**
 * Removes an endpoint from an I/O manager by raising the close blocking event
 * and waiting until the manager thread has processed it.
 *
 * @returns VBox status code.
 * @param   pAioMgr     The I/O manager the endpoint is detached from.
 * @param   pEndpoint   The endpoint to close.
 */
static int pdmacFileAioMgrCloseEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    /* Only one blocking event may be outstanding at a time. */
    int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
    AssertRCReturn(rc, rc);

    /* Publish the endpoint for the manager thread, wait, then clear it again. */
    ASMAtomicWritePtr(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint, pEndpoint);
    rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT);
    ASMAtomicWriteNullPtr(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint);

    RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
    return rc;
}
/**
 * Copies the currently cached credentials into @a pCred and then wipes the
 * cache.
 *
 * @returns true if any of user / password / domain was non-empty,
 *          false otherwise.
 * @param   pCred   Credential object to update.  Must not be NULL.
 */
bool VBoxCredPoller::QueryCredentials(VBoxCredential *pCred)
{
    AssertPtr(pCred);

    RTCritSectEnter(&m_csCredUpate);
    pCred->Update(m_pszUser, m_pszPw, m_pszDomain);

    /* Evaluate the strings while still holding the lock; the original code did
       this after RTCritSectLeave, racing against credentialsReset() wiping the
       members from another thread. */
    bool bRet = (   (m_pszUser   && strlen(m_pszUser))
                 || (m_pszPw     && strlen(m_pszPw))
                 || (m_pszDomain && strlen(m_pszDomain)));
    RTCritSectLeave(&m_csCredUpate);

    /* Securely wipe and free the cached credentials (takes the lock itself). */
    credentialsReset();
    return bRet;
}
/**
 * Shuts down an async I/O manager by raising the shutdown blocking event and
 * waiting for the manager thread to acknowledge it.
 *
 * @returns VBox status code.
 * @param   pAioMgr     The I/O manager to shut down.
 */
static int pdmacFileAioMgrShutdown(PPDMACEPFILEMGR pAioMgr)
{
    /* Serialize against other blocking-event requesters. */
    int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
    AssertRCReturn(rc, rc);

    rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN);

    RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
    return rc;
}
/**
 * Adjusts the maximum transfer rate of a network bandwidth group.
 *
 * @returns VBox status code.  NOTE(review): an unknown group name is not
 *          reported as an error -- the function returns the critsect status
 *          (usually success) in that case; confirm callers expect this.
 * @param   pVM                     Pointer to the VM.
 * @param   pcszBwGroup             Name (identifier) of the bandwidth group.
 * @param   cbTransferPerSecMax     New maximum number of bytes per second.
 */
VMMR3DECL(int) PDMR3NsBwGroupSetLimit(PVM pVM, const char *pcszBwGroup, uint64_t cbTransferPerSecMax)
{
    PUVM pUVM = pVM->pUVM;
    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;

    /* Hold the shaper lock so the group list stays stable while searching. */
    int rc = RTCritSectEnter(&pShaper->cs);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        PPDMNSBWGROUP pBwGroup = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
        if (pBwGroup)
        {
            /* The group's own lock protects its limit and token-bucket state. */
            rc = RTCritSectEnter(&pBwGroup->cs);
            AssertRC(rc);

            pdmNsBwGroupSetLimit(pBwGroup, cbTransferPerSecMax);

            /* Drop extra tokens */
            if (pBwGroup->cbTokensLast > pBwGroup->cbBucketSize)
                pBwGroup->cbTokensLast = pBwGroup->cbBucketSize;

            rc = RTCritSectLeave(&pBwGroup->cs);
            AssertRC(rc);
        }
        rc = RTCritSectLeave(&pShaper->cs);
        AssertRC(rc);
    }
    return rc;
}
/**
 * Creates and registers a new wait event, indexed both by context ID and by
 * each subscribed event type for faster per-group lookup.
 *
 * @returns VBox status code.
 * @param   uSessionID  Session ID used when generating the context ID.
 * @param   uObjectID   Object ID used when generating the context ID.
 * @param   lstEvents   Event types the new wait event subscribes to.
 * @param   ppEvent     Where to return the created wait event on success.
 *
 * @note    NOTE(review): if an insertion throws std::bad_alloc mid-loop, the
 *          event is leaked and left partially registered in the group map --
 *          confirm whether this window matters in practice.
 */
int GuestBase::registerWaitEvent(uint32_t uSessionID, uint32_t uObjectID, const GuestEventTypes &lstEvents, GuestWaitEvent **ppEvent)
{
    AssertPtrReturn(ppEvent, VERR_INVALID_POINTER);

    /* Derive a unique context ID from session + object. */
    uint32_t uContextID;
    int rc = generateContextID(uSessionID, uObjectID, &uContextID);
    if (RT_FAILURE(rc))
        return rc;

    rc = RTCritSectEnter(&mWaitEventCritSect);
    if (RT_SUCCESS(rc))
    {
        try
        {
            GuestWaitEvent *pEvent = new GuestWaitEvent(uContextID, lstEvents);
            AssertPtr(pEvent);

            LogFlowThisFunc(("New event=%p, CID=%RU32\n", pEvent, uContextID));

            /* Insert event into matching event group. This is for faster per-group
             * lookup of all events later. */
            for (GuestEventTypes::const_iterator itEvents = lstEvents.begin();
                 itEvents != lstEvents.end(); itEvents++)
            {
                mWaitEventGroups[(*itEvents)].insert(
                   std::pair<uint32_t, GuestWaitEvent*>(uContextID, pEvent));
                /** @todo Check for key collision. */
            }

            /* Register event in regular event list. */
            /** @todo Check for key collisions. */
            mWaitEvents[uContextID] = pEvent;

            *ppEvent = pEvent;
        }
        catch(std::bad_alloc &)
        {
            rc = VERR_NO_MEMORY;
        }

        /* A failed leave only overrides success, never an earlier failure. */
        int rc2 = RTCritSectLeave(&mWaitEventCritSect);
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
/**
 * Call when the reference count reaches 0.
 *
 * Closes the OS handles, releases and deletes the critsect the caller owns,
 * and frees the instance memory.
 *
 * @param   pThis   The instance to destroy.  Caller owns pThis->CritSect.
 */
static void rtLocalIpcSessionWinDestroy(PRTLOCALIPCSESSIONINT pThis)
{
    /* Close the named pipe handle. */
    BOOL fSuccess = CloseHandle(pThis->hNmPipe);
    AssertMsg(fSuccess, ("%d\n", GetLastError()));
    NOREF(fSuccess);
    pThis->hNmPipe = INVALID_HANDLE_VALUE;

    /* Close the event handle. */
    fSuccess = CloseHandle(pThis->hEvent);
    AssertMsg(fSuccess, ("%d\n", GetLastError()));
    NOREF(fSuccess);
    pThis->hEvent = NULL;

    /* Drop the critsect we entered with, tear it down, free the instance. */
    RTCritSectLeave(&pThis->CritSect);
    RTCritSectDelete(&pThis->CritSect);
    RTMemFree(pThis);
}
/**
 * Called for VM power off.
 *
 * Destroys every PDM thread on the VM's thread list.
 *
 * @param   pVM     Pointer to the VM.
 */
void pdmR3ThreadDestroyAll(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    RTCritSectEnter(&pUVM->pdm.s.ListCritSect);

    for (PPDMTHREAD pCur = pUVM->pdm.s.pThreads; pCur; /* advanced below */)
    {
        /* Grab the successor first -- destroying pCur unlinks it. */
        PPDMTHREAD pNext = pCur->Internal.s.pNext;
        int rc2 = PDMR3ThreadDestroy(pCur, NULL);
        AssertRC(rc2);
        pCur = pNext;
    }
    Assert(!pUVM->pdm.s.pThreads && !pUVM->pdm.s.pThreadsTail);

    RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
}
/**
 * Returns the cached list of DNS name servers as a COM safe array of strings.
 *
 * @returns S_OK.  NOTE(review): the RTCritSectEnter status is not checked.
 * @param   aNameServers    Where to return the name server list (COM out array).
 */
STDMETHODIMP HostDnsService::COMGETTER(NameServers)(ComSafeArrayOut(BSTR, aNameServers))
{
    /* Lock the cached DNS information while copying it out. */
    RTCritSectEnter(&m_hCritSect);

    com::SafeArray<BSTR> nameServers(m_llNameServers.size());

    Utf8StrListIterator it;
    int i = 0;
    for (it = m_llNameServers.begin(); it != m_llNameServers.end(); ++it, ++i)
        (*it).cloneTo(&nameServers[i]);

    /* Hand ownership of the array over to the caller. */
    nameServers.detachTo(ComSafeArrayOutArg(aNameServers));

    RTCritSectLeave(&m_hCritSect);
    return S_OK;
}
/**
 * Sets the current client socket in a safe manner.
 *
 * @returns NIL_RTSOCKET if consumed, otherwise hTcpClient.
 * @param   hTcpClient      The client socket.
 * @param   fFromServer     Set if server type connection.
 */
static RTSOCKET txsTcpSetClient(RTSOCKET hTcpClient, bool fFromServer)
{
    RTCritSectEnter(&g_TcpCritSect);
    if (   g_hTcpClient == NIL_RTSOCKET
        && !g_fTcpStopConnecting
        && g_hThreadMain != NIL_RTTHREAD
       )
    {
        /* Record how the connection was established.  The original code
           hard-coded 'true' here, discarding the caller's fFromServer flag. */
        g_fTcpClientFromServer = fFromServer;
        g_hTcpClient = hTcpClient;

        /* Wake up the main thread waiting for a client. */
        int rc = RTThreadUserSignal(g_hThreadMain);
        AssertRC(rc);

        hTcpClient = NIL_RTSOCKET; /* consumed */
    }
    RTCritSectLeave(&g_TcpCritSect);
    return hTcpClient;
}
/**
 * Leave all critical sections the calling thread owns.
 *
 * This is only used when entering guru meditation in order to prevent other
 * EMTs and I/O threads from deadlocking.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(void) PDMR3CritSectLeaveAll(PVM pVM)
{
    RTNATIVETHREAD const hNativeSelf = RTThreadNativeSelf();

    PUVM pUVM = pVM->pUVM;
    /* The list lock keeps the critsect list stable while we walk it. */
    RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
    for (PPDMCRITSECTINT pCur = pUVM->pdm.s.pCritSects; pCur; pCur = pCur->pNext)
    {
        /* Unwind every nesting level this thread holds on the section. */
        while (   pCur->Core.NativeThreadOwner == hNativeSelf
               && pCur->Core.cNestings > 0)
            PDMCritSectLeave((PPDMCRITSECT)pCur);
    }
    RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
}
/**
 * Worker for RTMpGetCurFrequency and RTMpGetMaxFrequency.
 *
 * @returns The desired frequency on success, 0 on failure.
 *
 * @param   idCpu           The CPU ID.
 * @param   pszStatName     The cpu_info stat name.
 */
static uint64_t rtMpSolarisGetFrequency(RTCPUID idCpu, const char *pszStatName)
{
    uint64_t u64 = 0;
    /* Lazy one-time init/cleanup registration of the kstat state. */
    int rc = RTOnceEx(&g_MpSolarisOnce, rtMpSolarisOnce, rtMpSolarisCleanUp, NULL /* pvUser */);
    if (RT_SUCCESS(rc))
    {
        if (   idCpu < g_capCpuInfo
            && g_papCpuInfo[idCpu])
        {
            /* Serialize all kstat access through the global critsect. */
            rc = RTCritSectEnter(&g_MpSolarisCritSect);
            AssertRC(rc);
            if (RT_SUCCESS(rc))
            {
                if (kstat_read(g_pKsCtl, g_papCpuInfo[idCpu], 0) != -1)
                {
                    /* Solaris really need to fix their APIs. Explicitly cast for now. */
                    kstat_named_t *pStat = (kstat_named_t *)kstat_data_lookup(g_papCpuInfo[idCpu], (char*)pszStatName);
                    if (pStat)
                    {
                        /* NOTE(review): the assert accepts KSTAT_DATA_LONG while the
                           switch handles INT32/INT64 -- presumably LONG aliases one of
                           those depending on the data model; confirm against kstat.h. */
                        Assert(pStat->data_type == KSTAT_DATA_UINT64 || pStat->data_type == KSTAT_DATA_LONG);
                        switch (pStat->data_type)
                        {
                            case KSTAT_DATA_UINT64: u64 = pStat->value.ui64; break; /* current_clock_Hz */
                            case KSTAT_DATA_INT32:  u64 = pStat->value.i32;  break; /* clock_MHz */

                            /* just in case... */
                            case KSTAT_DATA_UINT32: u64 = pStat->value.ui32; break;
                            case KSTAT_DATA_INT64:  u64 = pStat->value.i64;  break;
                            default:
                                AssertMsgFailed(("%d\n", pStat->data_type));
                                break;
                        }
                    }
                    else
                        Log(("kstat_data_lookup(%s) -> %d\n", pszStatName, errno));
                }
                else
                    Log(("kstat_read() -> %d\n", errno));
                RTCritSectLeave(&g_MpSolarisCritSect);
            }
        }
        else
            Log(("invalid idCpu: %d (g_capCpuInfo=%d)\n", (int)idCpu, (int)g_capCpuInfo));
    }

    return u64;
}
/**
 * Deletes all remaining critical sections.
 *
 * This is called at the very end of the termination process. It is also called
 * at the end of vmR3CreateU failure cleanup, which may cause it to be called
 * twice depending on where vmR3CreateU actually failed. We have to do the
 * latter call because other components expect the critical sections to be
 * automatically deleted.
 *
 * @returns VBox status.
 *          First error code, rest is lost.
 * @param   pVM     Pointer to the VM.
 * @remark  Don't confuse this with PDMR3CritSectDelete.
 */
VMMDECL(int) PDMR3CritSectTerm(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;
    int rc = VINF_SUCCESS;

    RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
    /* Keep deleting the list head until the list is empty; the delete worker
       unlinks the entry.  Only the first failure status is preserved. */
    while (pUVM->pdm.s.pCritSects)
    {
        int rc2 = pdmR3CritSectDeleteOne(pVM, pUVM, pUVM->pdm.s.pCritSects, NULL, true /* final */);
        AssertRC(rc2);
        if (RT_FAILURE(rc2) && RT_SUCCESS(rc))
            rc = rc2;
    }
    RTCritSectLeave(&pUVM->pdm.s.ListCritSect);

    return rc;
}
/**
 * Returns the cached list of DNS search strings as a COM safe array.
 *
 * @returns S_OK.  NOTE(review): the RTCritSectEnter status is not checked.
 * @param   aSearchStrings  Where to return the search string list (COM out array).
 */
STDMETHODIMP HostDnsService::COMGETTER(SearchStrings)(ComSafeArrayOut(BSTR, aSearchStrings))
{
    /* Lock the cached DNS information while copying it out. */
    RTCritSectEnter(&m_hCritSect);

    com::SafeArray<BSTR> searchString(m_llSearchStrings.size());

    Utf8StrListIterator it;
    int i = 0;
    for (it = m_llSearchStrings.begin(); it != m_llSearchStrings.end(); ++it, ++i)
        (*it).cloneTo(&searchString[i]);

    /* Hand ownership of the array over to the caller. */
    searchString.detachTo(ComSafeArrayOutArg(aSearchStrings));

    RTCritSectLeave(&m_hCritSect);
    return S_OK;
}
/**
 * Assigns an endpoint to the given I/O manager and waits until the manager
 * thread has picked it up.
 *
 * @returns VBox status code.
 * @param   pAioMgr     The I/O manager to add the endpoint to.
 * @param   pEndpoint   The endpoint being added.
 */
int pdmacFileAioMgrAddEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p{%s}\n", pAioMgr, pEndpoint, pEndpoint->Core.pszUri));

    /* Update the assigned I/O manager. */
    ASMAtomicWritePtr(&pEndpoint->pAioMgr, pAioMgr);

    /* Only one blocking event may be outstanding at a time. */
    int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
    AssertRCReturn(rc, rc);

    /* Publish the endpoint for the manager thread, wait, then clear it again. */
    ASMAtomicWritePtr(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint, pEndpoint);
    rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT);
    ASMAtomicWriteNullPtr(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint);

    RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
    return rc;
}
/**
 * Looks up a bandwidth group by its name.
 *
 * @returns Pointer to the group, or NULL if not found or pcszId is invalid.
 * @param   pShaper     The network shaper owning the group list.
 * @param   pcszId      Name (identifier) of the group to find.
 */
static PPDMNSBWGROUP pdmNsBwGroupFindById(PPDMNETSHAPER pShaper, const char *pcszId)
{
    PPDMNSBWGROUP pGroup = NULL;

    if (RT_VALID_PTR(pcszId))
    {
        /* Walk the list under the shaper lock until the name matches. */
        int rc = RTCritSectEnter(&pShaper->cs);
        AssertRC(rc);

        for (pGroup = pShaper->pBwGroupsHead; pGroup; pGroup = pGroup->pNext)
            if (!RTStrCmp(pGroup->pszName, pcszId))
                break;

        rc = RTCritSectLeave(&pShaper->cs);
        AssertRC(rc);
    }

    return pGroup;
}
/**
 * Acquires the window's compositor, leaving CompositorLock held on success so
 * the caller can use the compositor safely.
 *
 * @returns The compositor with the lock held, or NULL (lock not held) when no
 *          compositor is set or the lock could not be entered.
 * @param   window  The window whose compositor to acquire.
 */
struct VBOXVR_SCR_COMPOSITOR * renderspuVBoxCompositorAcquire( WindowInfo *window)
{
    int rc = RTCritSectEnter(&window->CompositorLock);
    if (RT_FAILURE(rc))
    {
        crWarning("RTCritSectEnter failed rc %d", rc);
        return NULL;
    }

    struct VBOXVR_SCR_COMPOSITOR *pCompositor = window->pCompositor;
    if (pCompositor)
        return pCompositor; /* lock intentionally stays held for the caller */

    /* if no compositor is set, release the lock and return */
    RTCritSectLeave(&window->CompositorLock);
    return NULL;
}
/** * Logs a verbose message. * * @param pszFormat The message text. * @param va Format arguments. */ void VGSvcLogV(const char *pszFormat, va_list va) { #ifdef DEBUG int rc = RTCritSectEnter(&g_csLog); if (RT_SUCCESS(rc)) { #endif char *psz = NULL; RTStrAPrintfV(&psz, pszFormat, va); AssertPtr(psz); LogRel(("%s", psz)); RTStrFree(psz); #ifdef DEBUG RTCritSectLeave(&g_csLog); } #endif }
int AutostartDb::removeAutostopVM(const char *pszVMId) { int rc = VINF_SUCCESS; #if defined(RT_OS_LINUX) NOREF(pszVMId); /* Not needed */ RTCritSectEnter(&this->CritSect); rc = autostartModifyDb(false /* fAutostart */, false /* fAddVM */); RTCritSectLeave(&this->CritSect); #elif defined(RT_OS_DARWIN) NOREF(pszVMId); /* Not needed */ rc = VINF_SUCCESS; #else NOREF(pszVMId); rc = VERR_NOT_SUPPORTED; #endif return rc; }
/**
 * Tries to acquire the window's compositor without blocking.
 *
 * @returns VINF_SUCCESS with the lock held and *ppCompositor set,
 *          VERR_INVALID_STATE when no compositor is set (lock released), or
 *          the RTCritSectTryEnter failure status (*ppCompositor set to NULL).
 * @param   window          The window to acquire the compositor of.
 * @param   ppCompositor    Where to return the compositor.
 */
int renderspuVBoxCompositorTryAcquire(WindowInfo *window, struct VBOXVR_SCR_COMPOSITOR **ppCompositor)
{
    int rc = RTCritSectTryEnter(&window->CompositorLock);
    if (RT_FAILURE(rc))
    {
        *ppCompositor = NULL;
        return rc;
    }

    *ppCompositor = window->pCompositor;
    if (*ppCompositor)
        return VINF_SUCCESS; /* lock intentionally stays held for the caller */

    /* if no compositor is set, release the lock and return */
    RTCritSectLeave(&window->CompositorLock);
    return VERR_INVALID_STATE;
}
/**
 * I/O thread for pending TX.
 *
 * Periodically walks all bandwidth groups and kicks their pending transmits.
 *
 * @returns VINF_SUCCESS (ignored).
 * @param   pVM     Pointer to the VM.
 * @param   pThread The PDM thread data.
 */
static DECLCALLBACK(int) pdmR3NsTxThread(PVM pVM, PPDMTHREAD pThread)
{
    PPDMNETSHAPER pShaper = (PPDMNETSHAPER)pThread->pvUser;
    LogFlow(("pdmR3NsTxThread: pShaper=%p\n", pShaper));

    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
    {
        RTThreadSleep(PDM_NETSHAPER_MAX_LATENCY);

        /* Go over all bandwidth groups/filters calling pfnXmitPending */
        int rc = RTCritSectEnter(&pShaper->cs);
        AssertRC(rc);

        for (PPDMNSBWGROUP pCur = pShaper->pBwGroupsHead; pCur; pCur = pCur->pNext)
            pdmNsBwGroupXmitPending(pCur);

        rc = RTCritSectLeave(&pShaper->cs);
        AssertRC(rc);
    }
    return VINF_SUCCESS;
}
/** * Releases memory allocated with MMR3UkHeapAlloc() and MMR3UkHeapAllocZ() * * @param pVM The cross context VM structure. * @param pv Pointer to the memory block to free. * @param enmTag The allocation accounting tag. */ VMMR3DECL(void) MMR3UkHeapFree(PVM pVM, void *pv, MMTAG enmTag) { /* Ignore NULL pointers. */ if (!pv) return; PMMUKHEAP pHeap = pVM->pUVM->mm.s.pUkHeap; RTCritSectEnter(&pHeap->Lock); /* * Find the sub-heap and block */ #ifdef MMUKHEAP_WITH_STATISTICS size_t cbActual = 0; #endif PMMUKHEAPSUB pSubHeap = pHeap->pSubHeapHead; while (pSubHeap) { if ((uintptr_t)pv - (uintptr_t)pSubHeap->pv < pSubHeap->cb) { #ifdef MMUKHEAP_WITH_STATISTICS cbActual = RTHeapSimpleSize(pSubHeap->hSimple, pv); PMMUKHEAPSTAT pStat = (PMMUKHEAPSTAT)RTAvlULGet(&pHeap->pStatTree, (AVLULKEY)enmTag); if (pStat) { pStat->cFrees++; pStat->cbCurAllocated -= cbActual; pStat->cbFreed += cbActual; } pHeap->Stat.cFrees++; pHeap->Stat.cbFreed += cbActual; pHeap->Stat.cbCurAllocated -= cbActual; #else RT_NOREF_PV(enmTag); #endif RTHeapSimpleFree(pSubHeap->hSimple, pv); RTCritSectLeave(&pHeap->Lock); return; } } AssertMsgFailed(("pv=%p\n", pv)); }
/**
 * Installs a new compositor on the window.
 *
 * renderspuVBoxCompositorSet can be invoked from the chromium thread only and
 * is not reentrant; the lock is not needed to serialize setters, only to stay
 * in sync with the redraw thread.
 *
 * @param   window       The window to update.
 * @param   pCompositor  The compositor to install (may be the current one).
 */
void renderspuVBoxCompositorSet( WindowInfo *window, struct VBOXVR_SCR_COMPOSITOR * pCompositor)
{
    /* Nothing to do when the compositor is unchanged. */
    if (window->pCompositor == pCompositor)
        return;

    int rc = RTCritSectEnter(&window->CompositorLock);
    if (RT_FAILURE(rc))
    {
        crWarning("RTCritSectEnter failed rc %d", rc);
        return;
    }

    window->pCompositor = pCompositor;
    RTCritSectLeave(&window->CompositorLock);
}
void VBoxCredPoller::credentialsReset(void) { RTCritSectEnter(&m_csCredUpate); if (m_pszUser) SecureZeroMemory(m_pszUser, strlen(m_pszUser) * sizeof(char)); if (m_pszPw) SecureZeroMemory(m_pszPw, strlen(m_pszPw) * sizeof(char)); if (m_pszDomain) SecureZeroMemory(m_pszDomain, strlen(m_pszDomain) * sizeof(char)); RTStrFree(m_pszUser); m_pszUser = NULL; RTStrFree(m_pszPw); m_pszPw = NULL; RTStrFree(m_pszDomain); m_pszDomain = NULL; RTCritSectLeave(&m_csCredUpate); }
/**
 * Unlinks a bandwidth group from its shaper's group list.
 *
 * @param   pBwGroup    The group to unlink; expected to be on the list.
 */
static void pdmNsBwGroupUnlink(PPDMNSBWGROUP pBwGroup)
{
    PPDMNETSHAPER pShaper = pBwGroup->pShaper;

    int rc = RTCritSectEnter(&pShaper->cs);
    AssertRC(rc);

    if (pBwGroup == pShaper->pBwGroupsHead)
        pShaper->pBwGroupsHead = pBwGroup->pNext;
    else
    {
        /* Find the predecessor of the group. */
        PPDMNSBWGROUP pPrev = pShaper->pBwGroupsHead;
        while (   pPrev
               && pPrev->pNext != pBwGroup)
            pPrev = pPrev->pNext;

        AssertPtr(pPrev);
        /* The original dereferenced pPrev unconditionally; AssertPtr is a
           no-op in release builds, so guard against a group that isn't on
           the list instead of crashing. */
        if (pPrev)
            pPrev->pNext = pBwGroup->pNext;
    }

    rc = RTCritSectLeave(&pShaper->cs);
    AssertRC(rc);
}
/**
 * Resumes all threads not running.
 *
 * This is called by PDMR3Resume() and PDMR3PowerOn() after all the devices
 * and drivers have been notified about the resume / power on.
 *
 * @return VBox status code.
 * @param  pVM      Pointer to the VM.
 */
int pdmR3ThreadResumeAll(PVM pVM)
{
    PUVM pUVM = pVM->pUVM;

    RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
    for (PPDMTHREAD pThread = pUVM->pdm.s.pThreads; pThread; pThread = pThread->Internal.s.pNext)
        switch (pThread->enmState)
        {
            case PDMTHREADSTATE_SUSPENDED:
            {
                int rc = PDMR3ThreadResume(pThread);
                AssertRC(rc);
                if (RT_FAILURE(rc))
                {
                    /* The original AssertRCReturn bailed out while still
                       owning ListCritSect; release it before returning. */
                    RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
                    return rc;
                }
                break;
            }

            default:
                AssertMsgFailed(("pThread=%p enmState=%d\n", pThread, pThread->enmState));
                break;
        }
    RTCritSectLeave(&pUVM->pdm.s.ListCritSect);

    return VINF_SUCCESS;
}
/**
 * Queries how many bytes are immediately available for reading from the pipe.
 *
 * @returns VBox status code.
 * @param   hPipe           The read end of the pipe.
 * @param   pcbReadable     Where to store the number of readable bytes.
 */
RTDECL(int) RTPipeQueryReadable(RTPIPE hPipe, size_t *pcbReadable)
{
    /* Validate the handle and the request. */
    RTPIPEINTERNAL *pThis = hPipe;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTPIPE_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(pThis->fRead, VERR_PIPE_NOT_READ);
    AssertPtrReturn(pcbReadable, VERR_INVALID_POINTER);

    int rc = RTCritSectEnter(&pThis->CritSect);
    if (RT_FAILURE(rc))
        return rc;

    /* PeekNamedPipe reports the number of bytes currently buffered. */
    DWORD cbAvailable = 0;
    if (PeekNamedPipe(pThis->hPipe, NULL, 0, NULL, &cbAvailable, NULL))
        *pcbReadable = cbAvailable;
    else
        rc = RTErrConvertFromWin32(GetLastError());

    RTCritSectLeave(&pThis->CritSect);
    return rc;
}
/**
 * Detaches a network filter from its bandwidth group.
 *
 * @returns VBox status code.
 * @param   pVM      Pointer to the VM.
 * @param   pDrvIns  The driver instance owning the filter.
 * @param   pFilter  The filter to detach; must currently be attached.
 */
VMMR3DECL(int) PDMR3NsDetach(PVM pVM, PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
{
    VM_ASSERT_EMT(pVM);
    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
    AssertPtrReturn(pFilter->pBwGroupR3, VERR_INVALID_POINTER);

    PUVM          pUVM    = pVM->pUVM;
    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;

    int rc = RTCritSectEnter(&pShaper->cs);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        pdmNsFilterUnlink(pFilter);

        /* Atomically take the group pointer away from the filter, then drop
           the reference the attachment held. */
        PPDMNSBWGROUP pBwGroup = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, NULL, PPDMNSBWGROUP);
        if (pBwGroup)
            pdmNsBwGroupUnref(pBwGroup);

        int rc2 = RTCritSectLeave(&pShaper->cs);
        AssertRC(rc2);
    }
    return rc;
}
/**
 * Creates a wait event and subscribes it to the given VBox event types.
 *
 * @returns VBox status code.
 * @param   uSessionID  Session ID used when generating the context ID.
 * @param   uObjectID   Object ID used when generating the context ID.
 * @param   lstEvents   Event types the new wait event listens for.
 * @param   ppEvent     Where to return the created wait event on success.
 *
 * @note    NOTE(review): mWaitEvents is indexed by event type here, whereas
 *          registerWaitEvent indexes its event map by context ID -- confirm
 *          these are intentionally different containers/overloads.
 */
int GuestBase::registerEvent(uint32_t uSessionID, uint32_t uObjectID, const std::list<VBoxEventType_T> &lstEvents, GuestWaitEvent **ppEvent)
{
    AssertPtrReturn(ppEvent, VERR_INVALID_POINTER);

    /* Derive a unique context ID from session + object. */
    uint32_t uContextID;
    int rc = generateContextID(uSessionID, uObjectID, &uContextID);
    if (RT_FAILURE(rc))
        return rc;

    rc = RTCritSectEnter(&mWaitEventCritSect);
    if (RT_SUCCESS(rc))
    {
        try
        {
            GuestWaitEvent *pEvent = new GuestWaitEvent(uContextID, lstEvents);
            AssertPtr(pEvent);

            /* Register the event under every type it subscribes to. */
            for (std::list<VBoxEventType_T>::const_iterator itEvents = lstEvents.begin();
                 itEvents != lstEvents.end(); itEvents++)
            {
                mWaitEvents[(*itEvents)].push_back(pEvent);
            }

            *ppEvent = pEvent;
        }
        catch(std::bad_alloc &)
        {
            rc = VERR_NO_MEMORY;
        }

        /* A failed leave only overrides success, never an earlier failure. */
        int rc2 = RTCritSectLeave(&mWaitEventCritSect);
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
/**
 * Counts how many physical cores currently have at least one online
 * hardware thread.
 *
 * @returns The number of online cores, 0 on failure.
 */
RTDECL(RTCPUID) RTMpGetOnlineCoreCount(void)
{
    RTCPUID uOnlineCores = 0;
    int rc = RTOnceEx(&g_MpSolarisOnce, rtMpSolarisOnce, rtMpSolarisCleanUp, NULL /* pvUser */);
    if (RT_SUCCESS(rc))
    {
        rc = RTCritSectEnter(&g_MpSolarisCritSect);
        AssertRC(rc);

        /*
         * For each core in the system, count how many are currently online.
         */
        for (RTCPUID j = 0; j < g_cCores; j++)
        {
            uint64_t u64CoreId = g_pu64CoreIds[j];
            for (RTCPUID idCpu = 0; idCpu < g_capCpuInfo; idCpu++)
            {
                rc = kstat_read(g_pKsCtl, g_papCpuInfo[idCpu], 0);
                if (rc == -1)
                {
                    /* The original AssertReturn bailed out without leaving
                       the critsect; release it before returning. */
                    AssertFailed();
                    RTCritSectLeave(&g_MpSolarisCritSect);
                    return 0;
                }

                uint64_t u64ThreadCoreId = rtMpSolarisGetCoreId(idCpu);
                if (u64ThreadCoreId == u64CoreId)
                {
                    kstat_named_t *pStat = (kstat_named_t *)kstat_data_lookup(g_papCpuInfo[idCpu], (char *)"state");
                    /* The original dereferenced pStat without a NULL check. */
                    if (pStat)
                    {
                        Assert(pStat->data_type == KSTAT_DATA_CHAR);
                        if(   !RTStrNCmp(pStat->value.c, PS_ONLINE, sizeof(PS_ONLINE) - 1)
                           || !RTStrNCmp(pStat->value.c, PS_NOINTR, sizeof(PS_NOINTR) - 1))
                        {
                            uOnlineCores++;
                            break; /* Move to the next core. We have at least 1 hyperthread online in the current core. */
                        }
                    }
                }
            }
        }

        RTCritSectLeave(&g_MpSolarisCritSect);
    }

    return uOnlineCores;
}
/**
 * Internal deregistration helper.
 *
 * @returns VBox status code (VERR_FILE_NOT_FOUND when no matching handler
 *          is registered).
 * @param   pVM         Pointer to the VM.
 * @param   pszName     The identifier of the info.
 * @param   enmType     The info owner type.
 */
static int dbgfR3InfoDeregister(PVM pVM, const char *pszName, DBGFINFOTYPE enmType)
{
    /*
     * Validate input.
     */
    if (!pszName)
    {
        AssertMsgFailed(("!pszName\n"));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Walk the handler list, unlink and free the matching entry.
     */
    size_t const cchName = strlen(pszName);
    int rc = RTCritSectEnter(&pVM->dbgf.s.InfoCritSect);
    AssertRC(rc);

    rc = VERR_FILE_NOT_FOUND;
    PDBGFINFO pPrev = NULL;
    PDBGFINFO pInfo = pVM->dbgf.s.pInfoFirst;
    while (pInfo)
    {
        if (    pInfo->cchName == cchName
            &&  !strcmp(pInfo->szName, pszName)
            &&  pInfo->enmType == enmType)
        {
            if (pPrev)
                pPrev->pNext = pInfo->pNext;
            else
                pVM->dbgf.s.pInfoFirst = pInfo->pNext;
            MMR3HeapFree(pInfo);
            rc = VINF_SUCCESS;
            break;
        }
        pPrev = pInfo;
        pInfo = pInfo->pNext;
    }

    int rc2 = RTCritSectLeave(&pVM->dbgf.s.InfoCritSect);
    AssertRC(rc2);
    AssertRC(rc);
    LogFlow(("dbgfR3InfoDeregister: returns %Rrc\n", rc));
    return rc;
}