/**
 * @interface_method_impl{PDMINETWORKUP,pfnAllocBuf}
 */
PDMBOTHCBDECL(int) drvNetShaperUp_AllocBuf(PPDMINETWORKUP pInterface, size_t cbMin,
                                           PCPDMNETWORKGSO pGso, PPPDMSCATTERGATHER ppSgBuf)
{
    PDRVNETSHAPER pThis = RT_FROM_MEMBER(pInterface, DRVNETSHAPER, CTX_SUFF(INetworkUp));
    if (RT_UNLIKELY(!pThis->CTX_SUFF(pIBelowNet)))
        return VERR_NET_DOWN;
    //LogFlow(("drvNetShaperUp_AllocBuf: cb=%d\n", cbMin));
    STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesRequested, cbMin);
    STAM_REL_COUNTER_INC(&pThis->StatXmitPktsRequested);
#if defined(IN_RING3) || defined(IN_RING0)
    if (!PDMNsAllocateBandwidth(&pThis->Filter, cbMin))
    {
        STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesDenied, cbMin);
        STAM_REL_COUNTER_INC(&pThis->StatXmitPktsDenied);
        return VERR_TRY_AGAIN;
    }
#endif
    STAM_REL_COUNTER_ADD(&pThis->StatXmitBytesGranted, cbMin);
    STAM_REL_COUNTER_INC(&pThis->StatXmitPktsGranted);
    //LogFlow(("drvNetShaperUp_AllocBuf: got cb=%d\n", cbMin));
    return pThis->CTX_SUFF(pIBelowNet)->pfnAllocBuf(pThis->CTX_SUFF(pIBelowNet), cbMin, pGso, ppSgBuf);
}
RTDECL(int) RTPoll(RTPOLLSET hPollSet, RTMSINTERVAL cMillies, uint32_t *pfEvents, uint32_t *pid)
{
    RTPOLLSETINTERNAL *pThis = hPollSet;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTPOLLSET_MAGIC, VERR_INVALID_HANDLE);
    AssertPtrNull(pfEvents);
    AssertPtrNull(pid);

    /*
     * Set the busy flag and do the job.
     */
    AssertReturn(ASMAtomicCmpXchgBool(&pThis->fBusy, true, false), VERR_CONCURRENT_ACCESS);

    int rc;
    if (cMillies == RT_INDEFINITE_WAIT || cMillies == 0)
    {
        do
            rc = rtPollNoResumeWorker(pThis, cMillies, pfEvents, pid);
        while (rc == VERR_INTERRUPTED);
    }
    else
    {
        uint64_t MsStart = RTTimeMilliTS();
        rc = rtPollNoResumeWorker(pThis, cMillies, pfEvents, pid);
        while (RT_UNLIKELY(rc == VERR_INTERRUPTED))
        {
            if (RTTimeMilliTS() - MsStart >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }
            rc = rtPollNoResumeWorker(pThis, cMillies, pfEvents, pid);
        }
    }

    ASMAtomicWriteBool(&pThis->fBusy, false);
    return rc;
}
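/*
 * Aside (not IPRT code): the same resume-on-interrupt pattern around POSIX
 * poll(2), as a minimal user-land sketch.  The helper names (pollResume,
 * nowMs) are illustrative only.  Unlike RTPoll above, which re-passes the
 * full cMillies to the worker on every resume, this sketch re-arms the wait
 * with the remaining time.
 */
#include <errno.h>
#include <poll.h>
#include <stdint.h>
#include <time.h>

static uint64_t nowMs(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

/* cMillies < 0 waits indefinitely; returns what poll(2) returns. */
static int pollResume(struct pollfd *paFds, nfds_t cFds, int cMillies)
{
    if (cMillies <= 0)
    {
        int rc;
        do
            rc = poll(paFds, cFds, cMillies);
        while (rc < 0 && errno == EINTR);
        return rc;
    }

    uint64_t const msStart = nowMs();
    for (;;)
    {
        uint64_t msElapsed = nowMs() - msStart;
        if (msElapsed >= (uint64_t)cMillies)
            return 0;                                   /* timed out */
        int rc = poll(paFds, cFds, (int)((uint64_t)cMillies - msElapsed));
        if (rc >= 0 || errno != EINTR)
            return rc;
        /* EINTR: loop and retry with the remaining time. */
    }
}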
/**
 * @interface_method_impl{PDMIHOSTPARALLELCONNECTOR,pfnRead}
 */
static DECLCALLBACK(int) drvHostParallelRead(PPDMIHOSTPARALLELCONNECTOR pInterface, void *pvBuf, size_t cbRead,
                                             PDMPARALLELPORTMODE enmMode)
{
    PDRVHOSTPARALLEL pThis = RT_FROM_MEMBER(pInterface, DRVHOSTPARALLEL, CTX_SUFF(IHostParallelConnector));
    int rc = VINF_SUCCESS;

# ifndef VBOX_WITH_WIN_PARPORT_SUP
    int rcLnx = 0;
    LogFlowFunc(("pvBuf=%#p cbRead=%d\n", pvBuf, cbRead));

    rc = drvHostParallelSetMode(pThis, enmMode);
    if (RT_FAILURE(rc))
        return rc;

    if (enmMode == PDM_PARALLEL_PORT_MODE_SPP)
    {
        /* Access the data lines directly. */
        rcLnx = ioctl(RTFileToNative(pThis->hFileDevice), PPWDATA, pvBuf);
    }
    else
    {
        /* Use the read interface. */
        rcLnx = read(RTFileToNative(pThis->hFileDevice), pvBuf, cbRead);
    }
    if (RT_UNLIKELY(rcLnx < 0))
        rc = RTErrConvertFromErrno(errno);
# else  /* VBOX_WITH_WIN_PARPORT_SUP */
    /** @todo r=klaus this code assumes cbRead==1, which may not be guaranteed forever */
    *((uint8_t *)pvBuf) = 0; /* Initialize the buffer. */
    LogFlowFunc(("calling R0 to read from parallel port\n"));
    if (pThis->fParportAvail)
    {
        rc = PDMDrvHlpCallR0(pThis->CTX_SUFF(pDrvIns), DRVHOSTPARALLELR0OP_READ, 0);
        AssertRC(rc);
        *(uint8_t *)pvBuf = (uint8_t)pThis->u8ReadIn;
    }
# endif /* VBOX_WITH_WIN_PARPORT_SUP */
    return rc;
}
void crUnpackDrawPixels(PCrUnpackerState pState)
{
    CHECK_BUFFER_SIZE_STATIC_LAST(pState, sizeof(int) + 20, GLint);

    GLsizei width     = READ_DATA(pState, sizeof(int) + 0, GLsizei);
    GLsizei height    = READ_DATA(pState, sizeof(int) + 4, GLsizei);
    GLenum  format    = READ_DATA(pState, sizeof(int) + 8, GLenum);
    GLenum  type      = READ_DATA(pState, sizeof(int) + 12, GLenum);
    GLint   noimagedata = READ_DATA(pState, sizeof(int) + 16, GLint);
    GLvoid *pixels;

    if (noimagedata && !crStateIsBufferBound(pState->pStateTracker, GL_PIXEL_UNPACK_BUFFER_ARB))
        return;

    if (noimagedata)
        pixels = (void *)(uintptr_t)READ_DATA(pState, sizeof(int) + 20, GLint);
    else
    {
        size_t cbImg = crImageSize(format, type, width, height);
        if (RT_UNLIKELY(cbImg == 0))
        {
            pState->rcUnpack = VERR_INVALID_PARAMETER;
            return;
        }

        pixels = DATA_POINTER(pState, sizeof(int) + 24, GLvoid);
        CHECK_ARRAY_SIZE_FROM_PTR_UPDATE_LAST(pState, pixels, cbImg, GLubyte);
    }

    pState->pDispatchTbl->PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    pState->pDispatchTbl->PixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
    pState->pDispatchTbl->PixelStorei(GL_UNPACK_SKIP_ROWS, 0);
    pState->pDispatchTbl->PixelStorei(GL_UNPACK_ALIGNMENT, 1);
    pState->pDispatchTbl->DrawPixels(width, height, format, type, pixels);

    INCR_VAR_PTR(pState);
}
/**
 * Updates Hyper-V's reference TSC page.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   u64Offset   The computed TSC offset.
 * @thread  EMT.
 */
VMM_INT_DECL(int) gimR0HvUpdateParavirtTsc(PVM pVM, uint64_t u64Offset)
{
    Assert(GIMIsEnabled(pVM));
    bool fHvTscEnabled = MSR_GIM_HV_REF_TSC_IS_ENABLED(pVM->gim.s.u.Hv.u64TscPageMsr);
    if (RT_UNLIKELY(!fHvTscEnabled))
        return VERR_GIM_PVTSC_NOT_ENABLED;

    PCGIMHV          pcHv     = &pVM->gim.s.u.Hv;
    PCGIMMMIO2REGION pcRegion = &pcHv->aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
    PGIMHVREFTSC     pRefTsc  = (PGIMHVREFTSC)pcRegion->CTX_SUFF(pvPage);
    Assert(pRefTsc);

    /*
     * Hyper-V reports the reference time in 100 nanosecond units.
     */
    uint64_t u64Tsc100Ns = TMCpuTicksPerSecond(pVM) / RT_NS_10MS;
    int64_t i64TscOffset = (int64_t)u64Offset / u64Tsc100Ns;

    /*
     * The TSC page can be simultaneously read by other VCPUs in the guest. The
     * spinlock is only for protecting simultaneous hypervisor writes from other
     * EMTs.
     */
    RTSpinlockAcquire(pcHv->hSpinlockR0);
    if (pRefTsc->i64TscOffset != i64TscOffset)
    {
        if (pRefTsc->u32TscSequence < UINT32_C(0xfffffffe))
            ASMAtomicIncU32(&pRefTsc->u32TscSequence);
        else
            ASMAtomicWriteU32(&pRefTsc->u32TscSequence, 1);
        ASMAtomicWriteS64(&pRefTsc->i64TscOffset, i64TscOffset);
    }
    RTSpinlockRelease(pcHv->hSpinlockR0);

    Assert(pRefTsc->u32TscSequence != 0);
    Assert(pRefTsc->u32TscSequence != UINT32_C(0xffffffff));
    return VINF_SUCCESS;
}
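/*
 * Aside (not VirtualBox code): the guest-side counterpart of the sequence
 * counter used above follows the usual seqlock discipline - read the
 * sequence, read the data, re-read the sequence and retry if it changed,
 * treating 0 and 0xffffffff as "page not usable".  A minimal C11 sketch with
 * an illustrative layout (the real page layout is defined by the Hyper-V TLFS):
 */
#include <stdatomic.h>
#include <stdint.h>

typedef struct REFPAGE
{
    _Atomic uint32_t u32Sequence;   /* 0 / 0xffffffff: invalid, caller must fall back. */
    _Atomic int64_t  i64Offset;
} REFPAGE;

static int refPageReadOffset(const REFPAGE *pPage, int64_t *pi64Offset)
{
    for (;;)
    {
        uint32_t uSeq1 = atomic_load_explicit(&pPage->u32Sequence, memory_order_acquire);
        if (uSeq1 == 0 || uSeq1 == UINT32_MAX)
            return -1;                                  /* fall back to another clock source */

        int64_t  i64   = atomic_load_explicit(&pPage->i64Offset,   memory_order_acquire);
        uint32_t uSeq2 = atomic_load_explicit(&pPage->u32Sequence, memory_order_acquire);
        if (uSeq1 == uSeq2)                             /* no writer slipped in between the loads */
        {
            *pi64Offset = i64;
            return 0;
        }
        /* A writer bumped the sequence while we were reading; retry. */
    }
}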
/**
 * Virtio Pci put queue routine. Deactivates the queue and frees its associated
 * resources.
 *
 * @param pDevice    Pointer to the Virtio device instance.
 * @param pQueue     Pointer to the queue.
 */
static void VirtioPciPutQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciPutQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturnVoid(pDevice);
    AssertReturnVoid(pQueue);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturnVoid(pPci);

    virtio_pci_queue_t *pPciQueue = pQueue->pvData;
    if (RT_UNLIKELY(!pPciQueue))
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciPutQueue missing Pci queue.\n"));
        return;
    }

    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);
    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), 0);

    ddi_dma_unbind_handle(pPciQueue->hDMA);
    ddi_dma_mem_free(&pPciQueue->hIO);
    ddi_dma_free_handle(&pPciQueue->hDMA);
    RTMemFree(pPciQueue);
}
RTDECL(uint32_t) RTMpGetArraySize(void)
{
    /*
     * Cache the result here.  The whole point of this function is that it
     * will always return the same value, so that should be safe.
     *
     * Note! Because RTCPUSET may be too small to represent all the CPUs, we
     *       check with RTMpGetCount() as well.
     */
    static uint32_t s_cMaxCpus = 0;
    uint32_t cCpus = s_cMaxCpus;
    if (RT_UNLIKELY(cCpus == 0))
    {
        RTCPUSET    CpuSet;
        uint32_t    cCpus1 = RTCpuLastIndex(RTMpGetSet(&CpuSet)) + 1;
        uint32_t    cCpus2 = RTMpGetCount();
        cCpus = RT_MAX(cCpus1, cCpus2);
        ASMAtomicCmpXchgU32(&s_cMaxCpus, cCpus, 0);
        return cCpus;
    }
    return s_cMaxCpus;
}
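/*
 * Aside (not IPRT code): the caching idiom above - every caller computes the
 * same value, so a lost compare-exchange is harmless - in portable C11.  The
 * names computeValue/getCachedValue are hypothetical stand-ins for the
 * RTMpGetSet/RTMpGetCount query and RTMpGetArraySize itself.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <unistd.h>

static uint32_t computeValue(void)
{
    /* The expensive, stable query; e.g. the configured CPU count. */
    return (uint32_t)sysconf(_SC_NPROCESSORS_CONF);
}

static _Atomic uint32_t s_uCached;              /* 0 = not computed yet */

uint32_t getCachedValue(void)
{
    uint32_t u = atomic_load_explicit(&s_uCached, memory_order_relaxed);
    if (u != 0)
        return u;

    /* Racing callers all compute the same value; whoever wins the
       compare-exchange publishes it, and the result is correct either way. */
    u = computeValue();
    uint32_t uExpected = 0;
    atomic_compare_exchange_strong(&s_uCached, &uExpected, u);
    return u;
}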
int vbglR3GRAlloc(VMMDevRequestHeader **ppReq, uint32_t cb, VMMDevRequestType enmReqType)
{
    VMMDevRequestHeader *pReq;

    AssertPtrReturn(ppReq, VERR_INVALID_PARAMETER);
    AssertMsgReturn(cb >= sizeof(VMMDevRequestHeader), ("%#x vs %#zx\n", cb, sizeof(VMMDevRequestHeader)),
                    VERR_INVALID_PARAMETER);

    pReq = (VMMDevRequestHeader *)RTMemTmpAlloc(cb);
    if (RT_UNLIKELY(!pReq))
        return VERR_NO_MEMORY;

    pReq->size        = cb;
    pReq->version     = VMMDEV_REQUEST_HEADER_VERSION;
    pReq->requestType = enmReqType;
    pReq->rc          = VERR_GENERAL_FAILURE;
    pReq->reserved1   = 0;
    pReq->reserved2   = 0;

    *ppReq = pReq;
    return VINF_SUCCESS;
}
/**
 * @interface_method_impl{PDMIHOSTPARALLELCONNECTOR,pfnWriteControl}
 */
static DECLCALLBACK(int) drvHostParallelWriteControl(PPDMIHOSTPARALLELCONNECTOR pInterface, uint8_t fReg)
{
    PDRVHOSTPARALLEL pThis = RT_FROM_MEMBER(pInterface, DRVHOSTPARALLEL, CTX_SUFF(IHostParallelConnector));
    int rc = VINF_SUCCESS;
    int rcLnx = 0;

    LogFlowFunc(("fReg=%#x\n", fReg));
# ifndef VBOX_WITH_WIN_PARPORT_SUP
    rcLnx = ioctl(RTFileToNative(pThis->hFileDevice), PPWCONTROL, &fReg);
    if (RT_UNLIKELY(rcLnx < 0))
        rc = RTErrConvertFromErrno(errno);
# else  /* VBOX_WITH_WIN_PARPORT_SUP */
    uint64_t u64Data;
    u64Data = (uint8_t)fReg;
    if (pThis->fParportAvail)
    {
        LogFlowFunc(("calling R0 to write CTRL, data=%#x\n", u64Data));
        rc = PDMDrvHlpCallR0(pThis->CTX_SUFF(pDrvIns), DRVHOSTPARALLELR0OP_WRITECONTROL, u64Data);
        AssertRC(rc);
    }
# endif /* VBOX_WITH_WIN_PARPORT_SUP */
    return rc;
}
/**
 * Gets an entry.
 *
 * @returns IPRT status code.
 * @param   pThis               The manifest to work with.
 * @param   pszEntry            The entry name.
 * @param   fNeedNormalization  Whether rtManifestValidateNameEntry said it
 *                              needed normalization.
 * @param   cchEntry            The length of the name.
 * @param   ppEntry             Where to return the entry pointer on success.
 */
static int rtManifestGetEntry(RTMANIFESTINT *pThis, const char *pszEntry, bool fNeedNormalization, size_t cchEntry,
                              PRTMANIFESTENTRY *ppEntry)
{
    PRTMANIFESTENTRY pEntry;

    AssertCompileMemberOffset(RTMANIFESTATTR, StrCore, 0);
    if (!fNeedNormalization)
        pEntry = (PRTMANIFESTENTRY)RTStrSpaceGet(&pThis->Entries, pszEntry);
    else
    {
        char *pszCopy = (char *)RTMemTmpAlloc(cchEntry + 1);
        if (RT_UNLIKELY(!pszCopy))
            return VERR_NO_TMP_MEMORY;
        memcpy(pszCopy, pszEntry, cchEntry + 1);
        rtManifestNormalizeEntry(pszCopy);

        pEntry = (PRTMANIFESTENTRY)RTStrSpaceGet(&pThis->Entries, pszCopy);
        RTMemTmpFree(pszCopy);
    }

    *ppEntry = pEntry;
    return pEntry ? VINF_SUCCESS : VERR_NOT_FOUND;
}
/**
 * Reads one object ID component, returning its value and encoded length.
 *
 * @returns The encoded length (positive) on success, negative IPRT status code
 *          on failure.
 * @param   pbContent   The start of the component to parse.
 * @param   cbContent   The number of content bytes left.
 * @param   puValue     Where to return the value.
 */
static int rtAsn1ObjId_ReadComponent(uint8_t const *pbContent, uint32_t cbContent, uint32_t *puValue)
{
    if (cbContent >= 1)
    {
        /* The first byte. */
        uint8_t b = *pbContent;
        if (!(b & 0x80))
        {
            *puValue = b;
            return 1;
        }

        /* Encoded as more than one byte.  Make sure that it's efficiently
           encoded as 8.19.2 indicates it must. */
        if (b != 0x80)
        {
            uint32_t off    = 1;
            uint32_t uValue = b & 0x7f;
            while (off < cbContent)
            {
                b = pbContent[off++];
                uValue <<= 7;
                uValue  |= b & 0x7f;
                if (!(b & 0x80))
                {
                    *puValue = uValue;
                    return (int)off;
                }
                if (RT_UNLIKELY(uValue & UINT32_C(0x0e000000)))
                    return VERR_ASN1_OBJID_COMPONENT_TOO_BIG;
            }
        }
        return VERR_ASN1_INVALID_OBJID_ENCODING;
    }
    return VERR_NO_DATA;
}
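/*
 * Aside (not IPRT code): the component format parsed above is plain base-128
 * (X.690 8.19.2): seven value bits per byte, bit 7 set on every byte except
 * the last.  A self-contained encoder for illustration; the decoder is simply
 * the loop above.
 */
#include <stdint.h>
#include <stdio.h>

/* Returns the number of bytes written (at most 5 for a 32-bit component). */
static unsigned oidEncodeComponent(uint32_t uValue, uint8_t *pbDst)
{
    uint8_t  abGroups[5];
    unsigned cGroups = 0;
    do
    {
        abGroups[cGroups++] = uValue & 0x7f;    /* least significant 7-bit group first */
        uValue >>= 7;
    } while (uValue);

    for (unsigned i = 0; i < cGroups; i++)      /* emit most significant group first */
        pbDst[i] = abGroups[cGroups - 1 - i] | (i + 1 < cGroups ? 0x80 : 0x00);
    return cGroups;
}

int main(void)
{
    uint8_t ab[5];
    unsigned cb = oidEncodeComponent(113549 /* the 'rsadsi' arc of 1.2.840.113549 */, ab);
    for (unsigned i = 0; i < cb; i++)
        printf("%02x ", ab[i]);                 /* prints: 86 f7 0d */
    printf("\n");
    return 0;
}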
/**
 * Attach entry point, to attach a device to the system or resume it.
 *
 * @param   pDip        The module structure instance.
 * @param   enmCmd      Attach type (ddi_attach_cmd_t)
 *
 * @returns The corresponding Solaris error code.
 */
static int VBoxUSBMonSolarisAttach(dev_info_t *pDip, ddi_attach_cmd_t enmCmd)
{
    LogFunc((DEVICE_NAME ":VBoxUSBMonSolarisAttach pDip=%p enmCmd=%d\n", pDip, enmCmd));
    switch (enmCmd)
    {
        case DDI_ATTACH:
        {
            if (RT_UNLIKELY(g_pDip))
            {
                LogRel((DEVICE_NAME ":VBoxUSBMonSolarisAttach global instance already initialized.\n"));
                return DDI_FAILURE;
            }

            g_pDip = pDip;
            int instance = ddi_get_instance(pDip);
            int rc = ddi_create_priv_minor_node(pDip, DEVICE_NAME, S_IFCHR, instance, DDI_PSEUDO, 0,
                                                "none", "none", 0660);
            if (rc == DDI_SUCCESS)
            {
                ddi_report_dev(pDip);
                return rc;
            }
            else
                LogRel((DEVICE_NAME ":VBoxUSBMonSolarisAttach ddi_create_priv_minor_node failed! rc=%d\n", rc));
            return DDI_FAILURE;
        }

        case DDI_RESUME:
        {
            /* We don't have to bother about power management. */
            return DDI_SUCCESS;
        }

        default:
            return DDI_FAILURE;
    }
}
/**
 * Physical access handler for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address.
 */
VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore,
                                         RTGCPHYS GCPhysFault)
{
    /*
     * We don't have a range here, so look it up before calling the common function.
     */
    int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
#ifndef IN_RING3
    if (rc2 == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
    if (RT_UNLIKELY(!pRange))
    {
        IOM_UNLOCK_SHARED(pVM);
        return VERR_IOM_MMIO_RANGE_NOT_FOUND;
    }
    iomMmioRetainRange(pRange);
    IOM_UNLOCK_SHARED(pVM);

    VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);

    iomMmioReleaseRange(pVM, pRange);
    return VBOXSTRICTRC_VAL(rcStrict);
}
/**
 * The client driver IOCtl wrapper function.
 *
 * @returns VBox status code.
 * @param   pDevSol     The Solaris device instance.
 * @param   Function    The IOCtl function to perform.
 * @param   pvData      Opaque pointer to the data.
 * @param   cbData      Size of the data pointed to by pvData.
 */
static int usbProxySolarisIOCtl(PUSBPROXYDEVSOL pDevSol, unsigned Function, void *pvData, size_t cbData)
{
    if (RT_UNLIKELY(pDevSol->hFile == NIL_RTFILE))
    {
        LogFlow((USBPROXY ":usbProxySolarisIOCtl connection to driver gone!\n"));
        return VERR_VUSB_DEVICE_NOT_ATTACHED;
    }

    VBOXUSBREQ Req;
    Req.u32Magic = VBOXUSB_MAGIC;
    Req.rc       = -1;
    Req.cbData   = cbData;
    Req.pvDataR3 = pvData;

    int Ret = -1;
    int rc = RTFileIoCtl(pDevSol->hFile, Function, &Req, sizeof(Req), &Ret);
    if (RT_SUCCESS(rc))
    {
        if (RT_FAILURE(Req.rc))
        {
            if (Req.rc == VERR_VUSB_DEVICE_NOT_ATTACHED)
            {
                pDevSol->pProxyDev->fDetached = true;
                usbProxySolarisCloseFile(pDevSol);
                LogRel((USBPROXY ":Command %#x failed, USB Device '%s' disconnected!\n", Function,
                        pDevSol->pProxyDev->pUsbIns->pszName));
            }
            else
                LogRel((USBPROXY ":Command %#x failed. Req.rc=%Rrc\n", Function, Req.rc));
        }
        return Req.rc;
    }

    LogRel((USBPROXY ":Function %#x failed. rc=%Rrc\n", Function, rc));
    return rc;
}
/**
 * Write a character to the stream.
 *
 * @returns IPRT status code.
 * @param   pStream             The stream.  Must be in write mode.
 * @param   ch                  The character to write.
 */
int ScmStreamPutCh(PSCMSTREAM pStream, char ch)
{
    AssertReturn(pStream->fWriteOrRead, VERR_ACCESS_DENIED);
    if (RT_FAILURE(pStream->rc))
        return pStream->rc;

    /*
     * Only deal with the simple cases here, use ScmStreamWrite for the
     * annoying stuff.
     */
    size_t off = pStream->off;
    if (   ch == '\n'
        || RT_UNLIKELY(off + 1 > pStream->cbAllocated))
        return ScmStreamWrite(pStream, &ch, 1);

    /*
     * Just append it.
     */
    pStream->pch[off] = ch;
    pStream->off = off + 1;
    pStream->paLines[pStream->iLine].cch++;

    return VINF_SUCCESS;
}
static DECLCALLBACK(int) drvHostParallelSetPortDirection(PPDMIHOSTPARALLELCONNECTOR pInterface, bool fForward)
{
    PDRVHOSTPARALLEL pThis = RT_FROM_MEMBER(pInterface, DRVHOSTPARALLEL, CTX_SUFF(IHostParallelConnector));
    int rc = VINF_SUCCESS;

    int iMode = 0;
    if (!fForward)
        iMode = 1;
# ifndef VBOX_WITH_WIN_PARPORT_SUP
    int rcLnx = ioctl(RTFileToNative(pThis->hFileDevice), PPDATADIR, &iMode);
    if (RT_UNLIKELY(rcLnx < 0))
        rc = RTErrConvertFromErrno(errno);
# else  /* VBOX_WITH_WIN_PARPORT_SUP */
    uint64_t u64Data;
    u64Data = (uint8_t)iMode;
    if (pThis->fParportAvail)
    {
        LogFlowFunc(("calling R0 to set port direction, data=%#x\n", u64Data));
        rc = PDMDrvHlpCallR0(pThis->CTX_SUFF(pDrvIns), DRVHOSTPARALLELR0OP_SETPORTDIRECTION, u64Data);
        AssertRC(rc);
    }
# endif /* VBOX_WITH_WIN_PARPORT_SUP */
    return rc;
}
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNestings > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

#ifdef RTSEMMUTEX_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies)
    {
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif

    /*
     * Convert timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End     = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout   = &ts;
    }

    /*
     * Lock the mutex.
     * Optimize for the uncontended case (makes 1-2 ns difference).
     */
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0)))
    {
        for (;;)
        {
            int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2);

            /*
             * Was the lock released in the meantime? This is unlikely (but possible).
             */
            if (RT_UNLIKELY(iOld == 0))
                break;

            /*
             * Go to sleep.
             */
            if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec ))
            {
#ifdef RTSEMMUTEX_STRICT
                int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
                if (RT_FAILURE(rc9))
                    return rc9;
#else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
            }

            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0);

            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC))
                return VERR_SEM_DESTROYED;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == 0)
                /* we'll leave the loop now unless another thread is faster */;
            else if (rc == -EWOULDBLOCK)
                /* retry with new value. */;
            else if (rc == -EINTR)
            {
                if (!fAutoResume)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }

            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64End - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                {
                    rc = VERR_TIMEOUT;
                    break;
                }
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
        }

        /*
         * When leaving this loop, iState is set to 2.  This means that we gained the
         * lock and there are _possibly_ some waiters.  We don't know exactly as another
         * thread might have entered this loop at nearly the same time.  Therefore we will
         * call futex_wakeup once too often (if _no_ other thread entered this loop).
         * The key problem is the simple futex_wait test for x != y (iState != 2 in our
         * case).
         */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNestings, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
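/*
 * Aside (not IPRT code): the 0/1/2 futex protocol used above (0 = free,
 * 1 = owned, 2 = owned with possible waiters) in its minimal form, without
 * timeouts, recursion or lock validation.  This is the classic scheme from
 * Drepper's "Futexes Are Tricky"; the names are illustrative only.
 */
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

typedef struct FUTEXMUTEX { _Atomic int iState; } FUTEXMUTEX;   /* initialise to 0 */

static long futexOp(_Atomic int *piWord, int iOp, int iVal)
{
    return syscall(SYS_futex, piWord, iOp, iVal, NULL, NULL, 0);
}

static void futexMutexLock(FUTEXMUTEX *pMtx)
{
    int c = 0;
    if (atomic_compare_exchange_strong(&pMtx->iState, &c, 1))
        return;                                     /* uncontended fast path: 0 -> 1 */

    /* Contended: advertise waiters (state 2), then sleep until we take it. */
    if (c != 2)
        c = atomic_exchange(&pMtx->iState, 2);
    while (c != 0)
    {
        futexOp(&pMtx->iState, FUTEX_WAIT, 2);      /* spurious/EINTR wakeups just loop */
        c = atomic_exchange(&pMtx->iState, 2);
    }
}

static void futexMutexUnlock(FUTEXMUTEX *pMtx)
{
    if (atomic_exchange(&pMtx->iState, 0) == 2)     /* there may be waiters: wake one */
        futexOp(&pMtx->iState, FUTEX_WAKE, 1);
}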
int main(int argc, char **argv)
{
    int rcRet = 0;
    int i;
    int rc;
    int cIterations = argc > 1 ? RTStrToUInt32(argv[1]) : 32;
    if (cIterations == 0)
        cIterations = 64;

    /*
     * Init.
     */
    RTR3InitExe(argc, &argv, 0);
    PSUPDRVSESSION pSession;
    rc = SUPR3Init(&pSession);
    rcRet += rc != 0;
    RTPrintf("tstInt: SUPR3Init -> rc=%Rrc\n", rc);
    char szFile[RTPATH_MAX];
    if (!rc)
    {
        rc = RTPathExecDir(szFile, sizeof(szFile) - sizeof("/VMMR0.r0"));
    }
    char szAbsFile[RTPATH_MAX];
    if (RT_SUCCESS(rc))
    {
        strcat(szFile, "/VMMR0.r0");
        rc = RTPathAbs(szFile, szAbsFile, sizeof(szAbsFile));
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Load VMM code.
         */
        rc = SUPR3LoadVMM(szAbsFile);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create a fake 'VM'.
             */
            PVMR0 pVMR0 = NIL_RTR0PTR;
            PVM pVM = NULL;
            const unsigned cPages = RT_ALIGN_Z(sizeof(*pVM), PAGE_SIZE) >> PAGE_SHIFT;
            PSUPPAGE paPages = (PSUPPAGE)RTMemAllocZ(cPages * sizeof(SUPPAGE));
            if (paPages)
                rc = SUPR3LowAlloc(cPages, (void **)&pVM, &pVMR0, &paPages[0]);
            else
                rc = VERR_NO_MEMORY;
            if (RT_SUCCESS(rc))
            {
                pVM->pVMRC = 0;
                pVM->pVMR3 = pVM;
                pVM->pVMR0 = pVMR0;
                pVM->paVMPagesR3 = paPages;
                pVM->pSession = pSession;
                pVM->enmVMState = VMSTATE_CREATED;
                rc = SUPR3SetVMForFastIOCtl(pVMR0);
                if (!rc)
                {
                    /*
                     * Call VMM code (the slow NOP) a number of times.
                     */
                    for (i = cIterations; i > 0; i--)
                    {
                        rc = SUPR3CallVMMR0(pVMR0, NIL_VMCPUID, VMMR0_DO_SLOW_NOP, NULL);
                        if (rc != VINF_SUCCESS)
                        {
                            RTPrintf("tstInt: SUPR3CallVMMR0 -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i);
                            rcRet++;
                            break;
                        }
                    }
                    RTPrintf("tstInt: Performed SUPR3CallVMMR0 %d times (rc=%Rrc)\n", cIterations, rc);

                    /*
                     * The fast path.
                     */
                    if (rc == VINF_SUCCESS)
                    {
                        RTTimeNanoTS();
                        uint64_t StartTS = RTTimeNanoTS();
                        uint64_t StartTick = ASMReadTSC();
                        uint64_t MinTicks = UINT64_MAX;
                        for (i = 0; i < 1000000; i++)
                        {
                            uint64_t OneStartTick = ASMReadTSC();
                            rc = SUPR3CallVMMR0Fast(pVMR0, VMMR0_DO_NOP, 0);
                            uint64_t Ticks = ASMReadTSC() - OneStartTick;
                            if (Ticks < MinTicks)
                                MinTicks = Ticks;
                            if (RT_UNLIKELY(rc != VINF_SUCCESS))
                            {
                                RTPrintf("tstInt: SUPR3CallVMMR0Fast -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i);
                                rcRet++;
                                break;
                            }
                        }
                        uint64_t Ticks = ASMReadTSC() - StartTick;
                        uint64_t NanoSecs = RTTimeNanoTS() - StartTS;
                        RTPrintf("tstInt: SUPR3CallVMMR0Fast - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
                                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);

                        /*
                         * The ordinary path.
                         */
                        RTTimeNanoTS();
                        StartTS = RTTimeNanoTS();
                        StartTick = ASMReadTSC();
                        MinTicks = UINT64_MAX;
                        for (i = 0; i < 1000000; i++)
                        {
                            uint64_t OneStartTick = ASMReadTSC();
                            rc = SUPR3CallVMMR0Ex(pVMR0, NIL_VMCPUID, VMMR0_DO_SLOW_NOP, 0, NULL);
                            uint64_t OneTicks = ASMReadTSC() - OneStartTick;
                            if (OneTicks < MinTicks)
                                MinTicks = OneTicks;
                            if (RT_UNLIKELY(rc != VINF_SUCCESS))
                            {
                                RTPrintf("tstInt: SUPR3CallVMMR0Ex -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i);
                                rcRet++;
                                break;
                            }
                        }
                        Ticks = ASMReadTSC() - StartTick;
                        NanoSecs = RTTimeNanoTS() - StartTS;
                        RTPrintf("tstInt: SUPR3CallVMMR0Ex - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
                                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);
                    }
                }
                else
                {
                    RTPrintf("tstInt: SUPR3SetVMForFastIOCtl failed: %Rrc\n", rc);
                    rcRet++;
                }
            }
            else
            {
                RTPrintf("tstInt: SUPR3LowAlloc(%#zx,,) failed\n", sizeof(*pVM));
                rcRet++;
            }

            /*
             * Unload VMM.
             */
            rc = SUPR3UnloadVMM();
            if (rc)
            {
                RTPrintf("tstInt: SUPR3UnloadVMM failed with rc=%Rrc\n", rc);
                rcRet++;
            }
        }
        else
        {
            RTPrintf("tstInt: SUPR3LoadVMM failed with rc=%Rrc\n", rc);
            rcRet++;
        }

        /*
         * Terminate.
         */
        rc = SUPR3Term(false /*fForced*/);
        rcRet += rc != 0;
        RTPrintf("tstInt: SUPR3Term -> rc=%Rrc\n", rc);
    }

    return !!rcRet;
}
/**
 * Worker for RTSemEventWaitEx and RTSemEventWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis           The event semaphore.
 * @param   fFlags          See RTSemEventWaitEx.
 * @param   uTimeout        See RTSemEventWaitEx.
 * @param   pSrcPos         The source code position of the wait.
 */
static int rtR0SemEventLnxWait(PRTSEMEVENTINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout, PCRTLOCKVALSRCPOS pSrcPos)
{
    int rc;

    /*
     * Validate the input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    rtR0SemEventLnxRetain(pThis);

    /*
     * Try grab the event without setting up the wait.
     */
    if (   1 /** @todo check if there are someone waiting already - waitqueue_active, but then what do we do below? */
        && ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
        rc = VINF_SUCCESS;
    else
    {
        /*
         * We have to wait.
         */
        RTR0SEMLNXWAIT Wait;
        rc = rtR0SemLnxWaitInit(&Wait, fFlags, uTimeout, &pThis->Head);
        if (RT_SUCCESS(rc))
        {
            IPRT_DEBUG_SEMS_STATE(pThis, 'E');
            for (;;)
            {
                /* The destruction test. */
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
                    rc = VERR_SEM_DESTROYED;
                else
                {
                    rtR0SemLnxWaitPrepare(&Wait);

                    /* Check the exit conditions. */
                    if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENT_MAGIC))
                        rc = VERR_SEM_DESTROYED;
                    else if (ASMAtomicCmpXchgU32(&pThis->fState, 0, 1))
                        rc = VINF_SUCCESS;
                    else if (rtR0SemLnxWaitHasTimedOut(&Wait))
                        rc = VERR_TIMEOUT;
                    else if (rtR0SemLnxWaitWasInterrupted(&Wait))
                        rc = VERR_INTERRUPTED;
                    else
                    {
                        /* Do the wait and then recheck the conditions. */
                        rtR0SemLnxWaitDoIt(&Wait);
                        continue;
                    }
                }
                break;
            }
            rtR0SemLnxWaitDelete(&Wait);
            IPRT_DEBUG_SEMS_STATE_RC(pThis, 'E', rc);
        }
    }

    rtR0SemEventLnxRelease(pThis);
    return rc;
}
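/*
 * Aside (not IPRT code): the wait loop above re-checks all of its exit
 * conditions after every wakeup before deciding to sleep again.  The same
 * shape in plain POSIX, as a tiny auto-reset event (illustrative names, no
 * destruction handling):
 */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

typedef struct EVENT
{
    pthread_mutex_t Mtx;
    pthread_cond_t  Cond;
    bool            fSignalled;
} EVENT;

/* Returns 0 on success, ETIMEDOUT if pAbsDeadline (CLOCK_REALTIME) passed. */
int eventWait(EVENT *pEvt, const struct timespec *pAbsDeadline)
{
    int rc = 0;
    pthread_mutex_lock(&pEvt->Mtx);
    while (!pEvt->fSignalled && rc == 0)            /* re-check after every wakeup */
        rc = pAbsDeadline
           ? pthread_cond_timedwait(&pEvt->Cond, &pEvt->Mtx, pAbsDeadline)
           : pthread_cond_wait(&pEvt->Cond, &pEvt->Mtx);
    if (pEvt->fSignalled)
    {
        pEvt->fSignalled = false;                   /* auto-reset: consume the signal */
        rc = 0;
    }
    pthread_mutex_unlock(&pEvt->Mtx);
    return rc;
}

void eventSignal(EVENT *pEvt)
{
    pthread_mutex_lock(&pEvt->Mtx);
    pEvt->fSignalled = true;
    pthread_cond_signal(&pEvt->Cond);
    pthread_mutex_unlock(&pEvt->Mtx);
}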
static int VBoxUSBMonSolarisIOCtl(dev_t Dev, int Cmd, intptr_t pArg, int Mode, cred_t *pCred, int *pVal)
{
    LogFunc((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl Dev=%d Cmd=%d pArg=%p Mode=%d\n", Dev, Cmd, pArg));

    /*
     * Get the session from the soft state item.
     */
    vboxusbmon_state_t *pState = ddi_get_soft_state(g_pVBoxUSBMonSolarisState, getminor(Dev));
    if (!pState)
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: no state data for %d\n", getminor(Dev)));
        return EINVAL;
    }

    /*
     * Read the request wrapper.  We don't strictly need the wrapper struct
     * yet, but it leaves room for the future, as Solaris isn't generous
     * regarding the size.
     */
    VBOXUSBREQ ReqWrap;
    if (IOCPARM_LEN(Cmd) != sizeof(ReqWrap))
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisIOCtl: bad request %#x size=%d expected=%d\n", Cmd, IOCPARM_LEN(Cmd),
                sizeof(ReqWrap)));
        return ENOTTY;
    }

    int rc = ddi_copyin((void *)pArg, &ReqWrap, sizeof(ReqWrap), Mode);
    if (RT_UNLIKELY(rc))
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisIOCtl: ddi_copyin failed to read header pArg=%p Cmd=%d. rc=%d.\n", pArg, Cmd, rc));
        return EINVAL;
    }

    if (ReqWrap.u32Magic != VBOXUSBMON_MAGIC)
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisIOCtl: bad magic %#x; pArg=%p Cmd=%d.\n", ReqWrap.u32Magic, pArg, Cmd));
        return EINVAL;
    }
    if (RT_UNLIKELY(   ReqWrap.cbData == 0
                    || ReqWrap.cbData > _1M*16))
    {
        LogRel((DEVICE_NAME ": VBoxUSBMonSolarisIOCtl: bad size %#x; pArg=%p Cmd=%d.\n", ReqWrap.cbData, pArg, Cmd));
        return EINVAL;
    }

    /*
     * Read the request.
     */
    void *pvBuf = RTMemTmpAlloc(ReqWrap.cbData);
    if (RT_UNLIKELY(!pvBuf))
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: RTMemTmpAlloc failed to alloc %d bytes.\n", ReqWrap.cbData));
        return ENOMEM;
    }

    rc = ddi_copyin((void *)(uintptr_t)ReqWrap.pvDataR3, pvBuf, ReqWrap.cbData, Mode);
    if (RT_UNLIKELY(rc))
    {
        RTMemTmpFree(pvBuf);
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: ddi_copyin failed; pvBuf=%p pArg=%p Cmd=%d. rc=%d\n", pvBuf, pArg, Cmd, rc));
        return EFAULT;
    }
    if (RT_UNLIKELY(   ReqWrap.cbData != 0
                    && !VALID_PTR(pvBuf)))
    {
        RTMemTmpFree(pvBuf);
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: pvBuf invalid pointer %p\n", pvBuf));
        return EINVAL;
    }
    Log((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: pid=%d.\n", (int)RTProcSelf()));

    /*
     * Process the IOCtl.
     */
    size_t cbDataReturned;
    rc = vboxUSBMonSolarisProcessIOCtl(Cmd, pState, pvBuf, ReqWrap.cbData, &cbDataReturned);
    ReqWrap.rc = rc;
    rc = 0;

    if (RT_UNLIKELY(cbDataReturned > ReqWrap.cbData))
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: too much output data %d expected %d\n", cbDataReturned, ReqWrap.cbData));
        cbDataReturned = ReqWrap.cbData;
    }

    ReqWrap.cbData = cbDataReturned;

    /*
     * Copy the request back to user space.
     */
    rc = ddi_copyout(&ReqWrap, (void *)pArg, sizeof(ReqWrap), Mode);
    if (RT_LIKELY(!rc))
    {
        /*
         * Copy the payload (if any) back to user space.
         */
        if (cbDataReturned > 0)
        {
            rc = ddi_copyout(pvBuf, (void *)(uintptr_t)ReqWrap.pvDataR3, cbDataReturned, Mode);
            if (RT_UNLIKELY(rc))
            {
                LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: ddi_copyout failed; pvBuf=%p pArg=%p Cmd=%d. rc=%d\n", pvBuf, pArg,
                        Cmd, rc));
                rc = EFAULT;
            }
        }
    }
    else
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisIOCtl: ddi_copyout(1) failed pArg=%p Cmd=%d\n", pArg, Cmd));
        rc = EFAULT;
    }

    *pVal = rc;
    RTMemTmpFree(pvBuf);
    return rc;
}
/**
 * @interface_method_impl{PDMDEVREG,pfnConstruct}
 */
static DECLCALLBACK(int) gimdevR3Construct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
{
    PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
    RT_NOREF2(iInstance, pCfg);
    Assert(iInstance == 0);
    PGIMDEV pThis = PDMINS_2_DATA(pDevIns, PGIMDEV);

    /*
     * Initialize relevant state bits.
     */
    pThis->pDevInsR3 = pDevIns;
    pThis->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
    pThis->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);

    /*
     * Get debug setup requirements from GIM.
     */
    PVM pVM = PDMDevHlpGetVM(pDevIns);
    int rc = GIMR3GetDebugSetup(pVM, &pThis->DbgSetup);
    if (   RT_SUCCESS(rc)
        && pThis->DbgSetup.cbDbgRecvBuf > 0)
    {
        /*
         * Attach the stream driver for the debug connection.
         */
        PPDMISTREAM pDbgDrvStream = NULL;
        pThis->IDbgBase.pfnQueryInterface = gimdevR3QueryInterface;
        rc = PDMDevHlpDriverAttach(pDevIns, GIMDEV_DEBUG_LUN, &pThis->IDbgBase, &pThis->pDbgDrvBase, "GIM Debug Port");
        if (RT_SUCCESS(rc))
        {
            pDbgDrvStream = PDMIBASE_QUERY_INTERFACE(pThis->pDbgDrvBase, PDMISTREAM);
            if (pDbgDrvStream)
                LogRel(("GIMDev: LUN#%u: Debug port configured\n", GIMDEV_DEBUG_LUN));
            else
            {
                LogRel(("GIMDev: LUN#%u: No unit\n", GIMDEV_DEBUG_LUN));
                rc = VERR_INTERNAL_ERROR_2;
            }
        }
        else
        {
            pThis->pDbgDrvBase = NULL;
            LogRel(("GIMDev: LUN#%u: No debug port configured! rc=%Rrc\n", GIMDEV_DEBUG_LUN, rc));
        }

        if (!pDbgDrvStream)
        {
            Assert(rc != VINF_SUCCESS);
            return PDMDevHlpVMSetError(pDevIns, rc, RT_SRC_POS,
                                       N_("Debug port configuration expected when GIM configured with debugging support"));
        }

        void *pvDbgRecvBuf = RTMemAllocZ(pThis->DbgSetup.cbDbgRecvBuf);
        if (RT_UNLIKELY(!pvDbgRecvBuf))
        {
            LogRel(("GIMDev: Failed to alloc %u bytes for debug receive buffer\n", pThis->DbgSetup.cbDbgRecvBuf));
            return VERR_NO_MEMORY;
        }

        /*
         * Update the shared debug struct.
         */
        pThis->Dbg.pDbgDrvStream    = pDbgDrvStream;
        pThis->Dbg.pvDbgRecvBuf     = pvDbgRecvBuf;
        pThis->Dbg.cbDbgRecvBufRead = 0;
        pThis->Dbg.fDbgRecvBufRead  = false;

        /*
         * Create the semaphore and the debug receive thread itself.
         */
        rc = RTSemEventMultiCreate(&pThis->Dbg.hDbgRecvThreadSem);
        if (RT_SUCCESS(rc))
        {
            rc = RTThreadCreate(&pThis->hDbgRecvThread, gimDevR3DbgRecvThread, pDevIns, 0 /*cbStack*/,
                                RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "GIMDebugRecv");
            if (RT_FAILURE(rc))
            {
                RTSemEventMultiDestroy(pThis->Dbg.hDbgRecvThreadSem);
                pThis->Dbg.hDbgRecvThreadSem = NIL_RTSEMEVENTMULTI;

                RTMemFree(pThis->Dbg.pvDbgRecvBuf);
                pThis->Dbg.pvDbgRecvBuf = NULL;
                return rc;
            }
        }
        else
            return rc;
    }

    /*
     * Register this device with the GIM component.
     */
    GIMR3GimDeviceRegister(pVM, pDevIns, pThis->DbgSetup.cbDbgRecvBuf ? &pThis->Dbg : NULL);

    /*
     * Get the MMIO2 regions from the GIM provider.
     */
    uint32_t cRegions = 0;
    PGIMMMIO2REGION pRegionsR3 = GIMR3GetMmio2Regions(pVM, &cRegions);
    if (   cRegions
        && pRegionsR3)
    {
        /*
         * Register the MMIO2 regions.
         */
        PGIMMMIO2REGION pCur = pRegionsR3;
        for (uint32_t i = 0; i < cRegions; i++, pCur++)
        {
            Assert(!pCur->fRegistered);
            rc = PDMDevHlpMMIO2Register(pDevIns, NULL, pCur->iRegion, pCur->cbRegion, 0 /* fFlags */, &pCur->pvPageR3,
                                        pCur->szDescription);
            if (RT_FAILURE(rc))
                return rc;

            pCur->fRegistered = true;

#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
            RTR0PTR pR0Mapping = 0;
            rc = PDMDevHlpMMIO2MapKernel(pDevIns, NULL, pCur->iRegion, 0 /* off */, pCur->cbRegion, pCur->szDescription,
                                         &pR0Mapping);
            AssertLogRelMsgRCReturn(rc, ("PDMDevHlpMapMMIO2IntoR0(%#x,) -> %Rrc\n", pCur->cbRegion, rc), rc);
            pCur->pvPageR0 = pR0Mapping;
#else
            pCur->pvPageR0 = (RTR0PTR)pCur->pvPageR3;
#endif

            /*
             * Map into RC if required.
             */
            if (pCur->fRCMapping)
            {
                RTRCPTR pRCMapping = 0;
                rc = PDMDevHlpMMHyperMapMMIO2(pDevIns, NULL, pCur->iRegion, 0 /* off */, pCur->cbRegion, pCur->szDescription,
                                              &pRCMapping);
                AssertLogRelMsgRCReturn(rc, ("PDMDevHlpMMHyperMapMMIO2(%#x,) -> %Rrc\n", pCur->cbRegion, rc), rc);
                pCur->pvPageRC = pRCMapping;
            }
            else
                pCur->pvPageRC = NIL_RTRCPTR;

            LogRel(("GIMDev: Registered %s\n", pCur->szDescription));
        }
    }

    /** @todo Register SSM: PDMDevHlpSSMRegister(). */
    /** @todo Register statistics: STAM_REG(). */
    /** @todo Register DBGFInfo: PDMDevHlpDBGFInfoRegister(). */

    return VINF_SUCCESS;
}
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (!pTimer->fSuspended)
        return VERR_TIMER_ACTIVE;

    /* One-shot timers are not supported by the cyclic system. */
    if (pTimer->interval == 0)
        return VERR_NOT_SUPPORTED;

    pTimer->fSuspended = false;
    if (pTimer->fAllCpu)
    {
        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
        if (RT_UNLIKELY(!pOmniTimer))
            return VERR_NO_MEMORY;

        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
        {
            RTMemFree(pOmniTimer);
            return VERR_NO_MEMORY;
        }

        /*
         * Setup omni (all CPU) timer. The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->pOmniTimer = pOmniTimer;
        pOmniTimer->u64When = pTimer->interval + RTTimeNanoTS();

        cyc_omni_handler_t hOmni;
        hOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        hOmni.cyo_offline = NULL;
        hOmni.cyo_arg     = pTimer;

        mutex_enter(&cpu_lock);
        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
        mutex_exit(&cpu_lock);
    }
    else
    {
        int iCpu = SOL_TIMER_ANY_CPU;
        if (pTimer->fSpecificCpu)
        {
            iCpu = pTimer->iCpu;
            if (!RTMpIsCpuOnline(iCpu))    /* ASSUMES: index == cpuid */
                return VERR_CPU_OFFLINE;
        }

        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
        if (RT_UNLIKELY(!pSingleTimer))
            return VERR_NO_MEMORY;

        pTimer->pSingleTimer = pSingleTimer;
        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
        pSingleTimer->hHandler.cyh_arg   = pTimer;
        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;

        mutex_enter(&cpu_lock);
        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
        {
            mutex_exit(&cpu_lock);
            RTMemFree(pSingleTimer);
            pTimer->pSingleTimer = NULL;
            return VERR_CPU_OFFLINE;
        }

        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
        if (pTimer->interval == 0)
        {
            /** @todo use gethrtime_max instead of LLONG_MAX? */
            AssertCompileSize(pSingleTimer->hFireTime.cyt_interval, sizeof(long long));
            pSingleTimer->hFireTime.cyt_interval = LLONG_MAX - pSingleTimer->hFireTime.cyt_when;
        }
        else
            pSingleTimer->hFireTime.cyt_interval = pTimer->interval;

        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
        if (iCpu != SOL_TIMER_ANY_CPU)
            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);

        mutex_exit(&cpu_lock);
    }

    return VINF_SUCCESS;
}
RTDECL(int) RTAsn1Integer_CheckSanity(PCRTASN1INTEGER pThis, uint32_t fFlags, PRTERRINFO pErrInfo, const char *pszErrorTag)
{
    if (RT_UNLIKELY(!RTAsn1Integer_IsPresent(pThis)))
        return RTErrInfoSetF(pErrInfo, VERR_ASN1_NOT_PRESENT, "%s: Missing (INTEGER).", pszErrorTag);
    return VINF_SUCCESS;
}
DECLR0VBGL(int) VbglGR0Verify(const VMMDevRequestHeader *pReq, size_t cbReq)
{
    size_t cbReqExpected;

    if (RT_UNLIKELY(!pReq || cbReq < sizeof(VMMDevRequestHeader)))
    {
        dprintf(("VbglGR0Verify: Invalid parameter: pReq = %p, cbReq = %zu\n", pReq, cbReq));
        return VERR_INVALID_PARAMETER;
    }

    if (RT_UNLIKELY(pReq->size > cbReq))
    {
        dprintf(("VbglGR0Verify: request size %u > buffer size %zu\n", pReq->size, cbReq));
        return VERR_INVALID_PARAMETER;
    }

    /* The request size must correspond to the request type. */
    cbReqExpected = vmmdevGetRequestSize(pReq->requestType);
    if (RT_UNLIKELY(cbReq < cbReqExpected))
    {
        dprintf(("VbglGR0Verify: buffer size %zu < expected size %zu\n", cbReq, cbReqExpected));
        return VERR_INVALID_PARAMETER;
    }

    if (cbReqExpected == cbReq)
    {
        /*
         * This is most likely a fixed size request, and in this case the
         * request size must be also equal to the expected size.
         */
        if (RT_UNLIKELY(pReq->size != cbReqExpected))
        {
            dprintf(("VbglGR0Verify: request size %u != expected size %zu\n", pReq->size, cbReqExpected));
            return VERR_INVALID_PARAMETER;
        }

        return VINF_SUCCESS;
    }

    /*
     * This can be a variable size request. Check the request type and limit the size
     * to VMMDEV_MAX_VMMDEVREQ_SIZE, which is max size supported by the host.
     *
     * Note: Keep this list sorted for easier human lookup!
     */
    if (   pReq->requestType == VMMDevReq_ChangeMemBalloon
        || pReq->requestType == VMMDevReq_GetDisplayChangeRequestMulti
#ifdef VBOX_WITH_64_BITS_GUESTS
        || pReq->requestType == VMMDevReq_HGCMCall64
#endif
        || pReq->requestType == VMMDevReq_HGCMCall32
        || pReq->requestType == VMMDevReq_RegisterSharedModule
        || pReq->requestType == VMMDevReq_ReportGuestUserState
        || pReq->requestType == VMMDevReq_LogString
        || pReq->requestType == VMMDevReq_SetPointerShape
        || pReq->requestType == VMMDevReq_VideoSetVisibleRegion)
    {
        if (RT_UNLIKELY(cbReq > VMMDEV_MAX_VMMDEVREQ_SIZE))
        {
            dprintf(("VbglGR0Verify: VMMDevReq_LogString: buffer size %zu too big\n", cbReq));
            return VERR_BUFFER_OVERFLOW; /** @todo is this error code ok? */
        }
    }
    else
    {
        dprintf(("VbglGR0Verify: request size %u > buffer size %zu\n", pReq->size, cbReq));
        return VERR_IO_BAD_LENGTH; /** @todo is this error code ok? */
    }

    return VINF_SUCCESS;
}
/**
 * Internal worker for the sleep scenario.
 *
 * Called owning the spinlock, returns without it.
 *
 * @returns IPRT status code.
 * @param   pThis           The mutex instance.
 * @param   cMillies        The timeout.
 * @param   fInterruptible  Whether it's interruptible
 *                          (RTSemMutexRequestNoResume) or not
 *                          (RTSemMutexRequest).
 * @param   hNativeSelf     The thread handle of the caller.
 */
static int rtR0SemMutexDarwinRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                          wait_interrupt_t fInterruptible, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Grab a reference and indicate that we're waiting.
     */
    pThis->cWaiters++;
    ASMAtomicIncU32(&pThis->cRefs);

    /*
     * Go to sleep, use the address of the mutex instance as sleep/blocking/event id.
     */
    wait_result_t rcWait;
    if (cMillies == RT_INDEFINITE_WAIT)
        rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
    else
    {
        uint64_t u64AbsTime;
        nanoseconds_to_absolutetime(cMillies * UINT64_C(1000000), &u64AbsTime);
        u64AbsTime += mach_absolute_time();

        rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                         (event_t)pThis, fInterruptible, u64AbsTime);
    }

    /*
     * Translate the rc.
     */
    int rc;
    switch (rcWait)
    {
        case THREAD_AWAKENED:
            if (RT_LIKELY(pThis->u32Magic == RTSEMMUTEX_MAGIC))
            {
                if (RT_LIKELY(   pThis->cRecursions  == 0
                              && pThis->hNativeOwner == NIL_RTNATIVETHREAD))
                {
                    pThis->cRecursions  = 1;
                    pThis->hNativeOwner = hNativeSelf;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    Assert(pThis->cRecursions  == 0);
                    Assert(pThis->hNativeOwner == NIL_RTNATIVETHREAD);
                    rc = VERR_INTERNAL_ERROR_3;
                }
            }
            else
                rc = VERR_SEM_DESTROYED;
            break;

        case THREAD_TIMED_OUT:
            Assert(cMillies != RT_INDEFINITE_WAIT);
            rc = VERR_TIMEOUT;
            break;

        case THREAD_INTERRUPTED:
            Assert(fInterruptible);
            rc = VERR_INTERRUPTED;
            break;

        case THREAD_RESTART:
            Assert(pThis->u32Magic == ~RTSEMMUTEX_MAGIC);
            rc = VERR_SEM_DESTROYED;
            break;

        default:
            AssertMsgFailed(("rcWait=%d\n", rcWait));
            rc = VERR_GENERAL_FAILURE;
            break;
    }

    /*
     * Dereference it and quit the lock.
     */
    Assert(pThis->cWaiters > 0);
    pThis->cWaiters--;

    Assert(pThis->cRefs > 0);
    if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);
    return rc;
}
static int vboxguestLinuxIOCtl(struct inode *pInode, struct file *pFilp, unsigned int uCmd, unsigned long ulArg)
#endif
{
    PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)pFilp->private_data;
    uint32_t cbData = _IOC_SIZE(uCmd);
    void *pvBufFree;
    void *pvBuf;
    int rc;
    uint64_t au64Buf[32/sizeof(uint64_t)];

    Log6(("vboxguestLinuxIOCtl: pFilp=%p uCmd=%#x ulArg=%p pid=%d/%d\n", pFilp, uCmd, (void *)ulArg, RTProcSelf(), current->pid));

    /*
     * Buffer the request.
     */
    if (cbData <= sizeof(au64Buf))
    {
        pvBufFree = NULL;
        pvBuf = &au64Buf[0];
    }
    else
    {
        pvBufFree = pvBuf = RTMemTmpAlloc(cbData);
        if (RT_UNLIKELY(!pvBuf))
        {
            LogRel((DEVICE_NAME "::IOCtl: RTMemTmpAlloc failed to alloc %u bytes.\n", cbData));
            return -ENOMEM;
        }
    }
    if (RT_LIKELY(copy_from_user(pvBuf, (void *)ulArg, cbData) == 0))
    {
        /*
         * Process the IOCtl.
         */
        size_t cbDataReturned;
        rc = VbgdCommonIoCtl(uCmd, &g_DevExt, pSession, pvBuf, cbData, &cbDataReturned);

        /*
         * Copy ioctl data and output buffer back to user space.
         */
        if (RT_SUCCESS(rc))
        {
            rc = 0;
            if (RT_UNLIKELY(cbDataReturned > cbData))
            {
                LogRel((DEVICE_NAME "::IOCtl: too much output data %u expected %u\n", cbDataReturned, cbData));
                cbDataReturned = cbData;
            }
            if (cbDataReturned > 0)
            {
                if (RT_UNLIKELY(copy_to_user((void *)ulArg, pvBuf, cbDataReturned) != 0))
                {
                    LogRel((DEVICE_NAME "::IOCtl: copy_to_user failed; pvBuf=%p ulArg=%p cbDataReturned=%u uCmd=%d\n",
                            pvBuf, (void *)ulArg, cbDataReturned, uCmd));
                    rc = -EFAULT;
                }
            }
        }
        else
        {
            Log(("vboxguestLinuxIOCtl: pFilp=%p uCmd=%#x ulArg=%p failed, rc=%d\n", pFilp, uCmd, (void *)ulArg, rc));
            rc = -rc; Assert(rc > 0); /* Positive returns == negated VBox error status codes. */
        }
    }
    else
    {
        Log((DEVICE_NAME "::IOCtl: copy_from_user(,%#lx, %#x) failed; uCmd=%#x.\n", ulArg, cbData, uCmd));
        rc = -EFAULT;
    }
    if (pvBufFree)
        RTMemFree(pvBufFree);

    Log6(("vboxguestLinuxIOCtl: returns %d (pid=%d/%d)\n", rc, RTProcSelf(), current->pid));
    return rc;
}
static int VBoxUSBMonSolarisOpen(dev_t *pDev, int fFlag, int fType, cred_t *pCred)
{
    vboxusbmon_state_t *pState = NULL;
    unsigned iOpenInstance;

    LogFunc((DEVICE_NAME ":VBoxUSBMonSolarisOpen\n"));

    /*
     * Verify we are being opened as a character device.
     */
    if (fType != OTYP_CHR)
        return EINVAL;

    /*
     * Verify that we're called after attach.
     */
    if (!g_pDip)
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisOpen invalid state for opening.\n"));
        return ENXIO;
    }

    mutex_enter(&g_VBoxUSBMonSolarisMtx);
    if (!g_cVBoxUSBMonSolarisClient)
    {
        mutex_exit(&g_VBoxUSBMonSolarisMtx);
        int rc = usb_register_dev_driver(g_pDip, VBoxUSBMonSolarisElectDriver);
        if (RT_UNLIKELY(rc != DDI_SUCCESS))
        {
            LogRel((DEVICE_NAME ":Failed to register driver election callback with USBA rc=%d\n", rc));
            return EINVAL;
        }
        Log((DEVICE_NAME ":Successfully registered election callback with USBA\n"));
        mutex_enter(&g_VBoxUSBMonSolarisMtx);
    }
    g_cVBoxUSBMonSolarisClient++;
    mutex_exit(&g_VBoxUSBMonSolarisMtx);

    for (iOpenInstance = 0; iOpenInstance < 4096; iOpenInstance++)
    {
        if (    !ddi_get_soft_state(g_pVBoxUSBMonSolarisState, iOpenInstance) /* faster */
            &&  ddi_soft_state_zalloc(g_pVBoxUSBMonSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pVBoxUSBMonSolarisState, iOpenInstance);
            break;
        }
    }
    if (!pState)
    {
        LogRel((DEVICE_NAME ":VBoxUSBMonSolarisOpen: too many open instances."));
        mutex_enter(&g_VBoxUSBMonSolarisMtx);
        g_cVBoxUSBMonSolarisClient--;
        mutex_exit(&g_VBoxUSBMonSolarisMtx);
        return ENXIO;
    }

    pState->Process = RTProcSelf();
    *pDev = makedevice(getmajor(*pDev), iOpenInstance);

    NOREF(fFlag);
    NOREF(pCred);

    return 0;
}
/**
 * OS specific allocation function.
 */
DECLHIDDEN(int) rtR0MemAllocEx(size_t cb, uint32_t fFlags, PRTMEMHDR *ppHdr)
{
    PRTMEMHDR pHdr;
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Allocate.
     */
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
        if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
            return VERR_NOT_SUPPORTED;

#if defined(RT_ARCH_AMD64)
# ifdef RTMEMALLOC_EXEC_HEAP
        if (g_HeapExec != NIL_RTHEAPSIMPLE)
        {
            RTSpinlockAcquire(g_HeapExecSpinlock);
            pHdr = (PRTMEMHDR)RTHeapSimpleAlloc(g_HeapExec, cb + sizeof(*pHdr), 0);
            RTSpinlockRelease(g_HeapExecSpinlock);
            fFlags |= RTMEMHDR_FLAG_EXEC_HEAP;
        }
        else
            pHdr = NULL;

# elif defined(RTMEMALLOC_EXEC_VM_AREA)
        pHdr = rtR0MemAllocExecVmArea(cb);
        fFlags |= RTMEMHDR_FLAG_EXEC_VM_AREA;

# else  /* !RTMEMALLOC_EXEC_HEAP */
#  error "you do not want to go here..."
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
# endif /* !RTMEMALLOC_EXEC_HEAP */

#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
#else
        pHdr = (PRTMEMHDR)vmalloc(cb + sizeof(*pHdr));
#endif
    }
    else
    {
        if (
#if 1 /* vmalloc has serious performance issues, avoid it. */
               cb <= PAGE_SIZE*16 - sizeof(*pHdr)
#else
               cb <= PAGE_SIZE
#endif
            || (fFlags & RTMEMHDR_FLAG_ANY_CTX)
           )
        {
            fFlags |= RTMEMHDR_FLAG_KMALLOC;
            pHdr = kmalloc(cb + sizeof(*pHdr),
                           (fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC) ? (GFP_ATOMIC | __GFP_NOWARN)
                                                                  : (GFP_KERNEL | __GFP_NOWARN));
            if (RT_UNLIKELY(   !pHdr
                            && cb > PAGE_SIZE
                            && !(fFlags & RTMEMHDR_FLAG_ANY_CTX)))
            {
                fFlags &= ~RTMEMHDR_FLAG_KMALLOC;
                pHdr = vmalloc(cb + sizeof(*pHdr));
            }
        }
        else
            pHdr = vmalloc(cb + sizeof(*pHdr));
    }
    if (RT_UNLIKELY(!pHdr))
    {
        IPRT_LINUX_RESTORE_EFL_AC();
        return VERR_NO_MEMORY;
    }

    /*
     * Initialize.
     */
    pHdr->u32Magic = RTMEMHDR_MAGIC;
    pHdr->fFlags   = fFlags;
    pHdr->cb       = cb;
    pHdr->cbReq    = cb;

    *ppHdr = pHdr;
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
HRESULT HostDnsServiceWin::updateInfo()
{
    HostDnsInformation info;

    LONG lrc;
    int rc;

    std::string strDomain;
    std::string strSearchList;  /* NB: comma separated, no spaces */

    /*
     * We ignore the "DhcpDomain" key here since it's not stable.  If
     * there are two active interfaces that use DHCP (in particular
     * when the host uses OpenVPN) then DHCP ACKs will take turns updating
     * that key.  Instead we call GetAdaptersAddresses() below (which
     * is what ipconfig.exe seems to do).
     */
    for (DWORD regIndex = 0; /**/; ++regIndex)
    {
        char keyName[256];
        DWORD cbKeyName = sizeof(keyName);
        DWORD keyType = 0;
        char keyData[1024];
        DWORD cbKeyData = sizeof(keyData);

        lrc = RegEnumValueA(m->hKeyTcpipParameters, regIndex,
                            keyName, &cbKeyName, 0,
                            &keyType, (LPBYTE)keyData, &cbKeyData);
        if (lrc == ERROR_NO_MORE_ITEMS)
            break;

        if (lrc == ERROR_MORE_DATA) /* buffer too small; handle? */
            continue;

        if (lrc != ERROR_SUCCESS)
        {
            LogRel2(("HostDnsServiceWin: RegEnumValue error %d\n", (int)lrc));
            return E_FAIL;
        }

        if (keyType != REG_SZ)
            continue;

        if (cbKeyData > 0 && keyData[cbKeyData - 1] == '\0')
            --cbKeyData;        /* don't count trailing NUL if present */

        if (RTStrICmp("Domain", keyName) == 0)
        {
            strDomain.assign(keyData, cbKeyData);
            LogRel2(("HostDnsServiceWin: Domain=\"%s\"\n", strDomain.c_str()));
        }
        else if (RTStrICmp("DhcpDomain", keyName) == 0)
        {
            std::string strDhcpDomain(keyData, cbKeyData);
            LogRel2(("HostDnsServiceWin: DhcpDomain=\"%s\"\n", strDhcpDomain.c_str()));
        }
        else if (RTStrICmp("SearchList", keyName) == 0)
        {
            strSearchList.assign(keyData, cbKeyData);
            LogRel2(("HostDnsServiceWin: SearchList=\"%s\"\n", strSearchList.c_str()));
        }
    }

    /* statically configured domain name */
    if (!strDomain.empty())
    {
        info.domain = strDomain;
        info.searchList.push_back(strDomain);
    }

    /* statically configured search list */
    if (!strSearchList.empty())
    {
        vappend(info.searchList, strSearchList, ',');
    }

    /*
     * When name servers are configured statically it seems that the
     * value of Tcpip\Parameters\NameServer is NOT set, only the
     * interface specific NameServer value is (which triggers notification
     * for us to pick up the change).  Fortunately, DnsApi seems to do the
     * right thing there.
     */
    DNS_STATUS status;
    PIP4_ARRAY pIp4Array = NULL;

    // NB: must be set on input it seems, despite docs' claim to the contrary.
    DWORD cbBuffer = sizeof(&pIp4Array);

    status = DnsQueryConfig(DnsConfigDnsServerList,
                            DNS_CONFIG_FLAG_ALLOC,
                            NULL,
                            NULL,
                            &pIp4Array,
                            &cbBuffer);
    if (status == NO_ERROR && pIp4Array != NULL)
    {
        for (DWORD i = 0; i < pIp4Array->AddrCount; ++i)
        {
            char szAddrStr[16] = "";
            RTStrPrintf(szAddrStr, sizeof(szAddrStr), "%RTnaipv4", pIp4Array->AddrArray[i]);

            LogRel2(("HostDnsServiceWin: server %d: %s\n", i+1, szAddrStr));
            info.servers.push_back(szAddrStr);
        }

        LocalFree(pIp4Array);
    }

    /*
     * DnsQueryConfig(DnsConfigSearchList, ...) is not implemented.
     * Call GetAdaptersAddresses() that orders the returned list
     * appropriately and collect IP_ADAPTER_ADDRESSES::DnsSuffix.
     */
    do {
        PIP_ADAPTER_ADDRESSES pAddrBuf = NULL;
        ULONG cbAddrBuf = 8 * 1024;
        bool fReallocated = false;
        ULONG err;

        pAddrBuf = (PIP_ADAPTER_ADDRESSES) malloc(cbAddrBuf);
        if (pAddrBuf == NULL)
        {
            LogRel2(("HostDnsServiceWin: failed to allocate %zu bytes"
                     " of GetAdaptersAddresses buffer\n",
                     (size_t)cbAddrBuf));
            break;
        }

        while (pAddrBuf != NULL)
        {
            ULONG cbAddrBufProvided = cbAddrBuf;

            err = GetAdaptersAddresses(AF_UNSPEC,
                                       GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST,
                                       NULL,
                                       pAddrBuf, &cbAddrBuf);
            if (err == NO_ERROR)
            {
                break;
            }
            else if (err == ERROR_BUFFER_OVERFLOW)
            {
                LogRel2(("HostDnsServiceWin: provided GetAdaptersAddresses with %zu"
                         " but asked again for %zu bytes\n",
                         (size_t)cbAddrBufProvided, (size_t)cbAddrBuf));

                if (RT_UNLIKELY(fReallocated)) /* what? again?! */
                {
                    LogRel2(("HostDnsServiceWin: ... not going to realloc again\n"));
                    free(pAddrBuf);
                    pAddrBuf = NULL;
                    break;
                }

                PIP_ADAPTER_ADDRESSES pNewBuf = (PIP_ADAPTER_ADDRESSES) realloc(pAddrBuf, cbAddrBuf);
                if (pNewBuf == NULL)
                {
                    LogRel2(("HostDnsServiceWin: failed to reallocate %zu bytes\n", (size_t)cbAddrBuf));
                    free(pAddrBuf);
                    pAddrBuf = NULL;
                    break;
                }

                /* try again */
                pAddrBuf = pNewBuf; /* cbAddrBuf already updated */
                fReallocated = true;
            }
            else
            {
                LogRel2(("HostDnsServiceWin: GetAdaptersAddresses error %d\n", err));
                free(pAddrBuf);
                pAddrBuf = NULL;
                break;
            }
        }

        if (pAddrBuf == NULL)
            break;

        for (PIP_ADAPTER_ADDRESSES pAdp = pAddrBuf; pAdp != NULL; pAdp = pAdp->Next)
        {
            LogRel2(("HostDnsServiceWin: %ls (status %u) ...\n",
                     pAdp->FriendlyName ? pAdp->FriendlyName : L"(null)",
                     pAdp->OperStatus));

            if (pAdp->OperStatus != IfOperStatusUp)
                continue;

            if (pAdp->DnsSuffix == NULL || *pAdp->DnsSuffix == L'\0')
                continue;

            char *pszDnsSuffix = NULL;
            rc = RTUtf16ToUtf8Ex(pAdp->DnsSuffix, RTSTR_MAX,
                                 &pszDnsSuffix, 0, /* allocate */
                                 NULL);
            if (RT_FAILURE(rc))
            {
                LogRel2(("HostDnsServiceWin: failed to convert DNS suffix \"%ls\": %Rrc\n",
                         pAdp->DnsSuffix, rc));
                continue;
            }

            AssertContinue(pszDnsSuffix != NULL);
            AssertContinue(*pszDnsSuffix != '\0');

            LogRel2(("HostDnsServiceWin: ... suffix = \"%s\"\n", pszDnsSuffix));

            vappend(info.searchList, pszDnsSuffix);
            RTStrFree(pszDnsSuffix);
        }

        free(pAddrBuf);
    } while (0);

    if (info.domain.empty() && !info.searchList.empty())
        info.domain = info.searchList[0];

    if (info.searchList.size() == 1)
        info.searchList.clear();

    HostDnsMonitor::setInfo(info);

    return S_OK;
}
/**
 * Releases a reference to the event semaphore.
 *
 * @param   pThis       The event semaphore.
 */
DECLINLINE(void) rtR0SemEventLnxRelease(PRTSEMEVENTINTERNAL pThis)
{
    if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
        RTMemFree(pThis);
}