/**
 * Patches the instructions necessary for making a hypercall to the hypervisor.
 * Used by GIM.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pvBuf       The buffer in the hypercall page(s) to be patched.
 * @param   cbBuf       The size of the buffer.
 * @param   pcbWritten  Where to store the number of bytes patched. This is
 *                      reliably updated only when this function returns
 *                      VINF_SUCCESS.
 */
VMM_INT_DECL(int) VMMPatchHypercall(PVM pVM, void *pvBuf, size_t cbBuf, size_t *pcbWritten)
{
    AssertReturn(pvBuf, VERR_INVALID_POINTER);
    AssertReturn(pcbWritten, VERR_INVALID_POINTER);

    if (ASMIsAmdCpu())
    {
        uint8_t abHypercall[] = { 0x0F, 0x01, 0xD9 };   /* VMMCALL */
        if (RT_LIKELY(cbBuf >= sizeof(abHypercall)))
        {
            memcpy(pvBuf, abHypercall, sizeof(abHypercall));
            *pcbWritten = sizeof(abHypercall);
            return VINF_SUCCESS;
        }
        return VERR_BUFFER_OVERFLOW;
    }
    else
    {
        AssertReturn(ASMIsIntelCpu() || ASMIsViaCentaurCpu(), VERR_UNSUPPORTED_CPU);
        uint8_t abHypercall[] = { 0x0F, 0x01, 0xC1 };   /* VMCALL */
        if (RT_LIKELY(cbBuf >= sizeof(abHypercall)))
        {
            memcpy(pvBuf, abHypercall, sizeof(abHypercall));
            *pcbWritten = sizeof(abHypercall);
            return VINF_SUCCESS;
        }
        return VERR_BUFFER_OVERFLOW;
    }
}
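/*
 * Usage sketch (illustrative, not part of the surrounding sources): a
 * hypothetical GIM-style caller patching the start of a hypercall page.
 * The pVM pointer and the page buffer are assumed to come from the caller.
 */
static int gimExamplePatchHypercallPage(PVM pVM, void *pvPage, size_t cbPage)
{
    size_t cbWritten = 0;
    int rc = VMMPatchHypercall(pVM, pvPage, cbPage, &cbWritten);
    if (RT_SUCCESS(rc))
        Assert(cbWritten == 3); /* Both VMMCALL and VMCALL encode in 3 bytes. */
    return rc;
}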
/**
 * Registers a client driver.
 *
 * @returns VBox status code.
 * @param   pClientDip   The device info node of the client driver.
 * @param   pClientInfo  The client driver instance info.
 */
int VBoxUSBMonSolarisRegisterClient(dev_info_t *pClientDip, PVBOXUSB_CLIENT_INFO pClientInfo)
{
    LogFunc((DEVICE_NAME ":VBoxUSBMonSolarisRegisterClient pClientDip=%p pClientInfo=%p\n", pClientDip, pClientInfo));
    AssertPtrReturn(pClientInfo, VERR_INVALID_PARAMETER);

    if (RT_LIKELY(g_pDip))
    {
        vboxusbmon_client_t *pClient = RTMemAllocZ(sizeof(vboxusbmon_client_t));
        if (RT_LIKELY(pClient))
        {
            pClient->Info.Instance = pClientInfo->Instance;
            strncpy(pClient->Info.szClientPath, pClientInfo->szClientPath, sizeof(pClient->Info.szClientPath));
            strncpy(pClient->Info.szDeviceIdent, pClientInfo->szDeviceIdent, sizeof(pClient->Info.szDeviceIdent));
            pClient->Info.pfnSetConsumerCredentials = pClientInfo->pfnSetConsumerCredentials;
            pClient->pDip = pClientDip;

            mutex_enter(&g_VBoxUSBMonSolarisMtx);
            pClient->pNext = g_pVBoxUSBMonSolarisClients;
            g_pVBoxUSBMonSolarisClients = pClient;
            mutex_exit(&g_VBoxUSBMonSolarisMtx);

            Log((DEVICE_NAME ":VBoxUSBMonSolarisRegisterClient registered. %d %s %s\n",
                 pClient->Info.Instance, pClient->Info.szClientPath, pClient->Info.szDeviceIdent));
            return VINF_SUCCESS;
        }
        else
            return VERR_NO_MEMORY;
    }
    else
        return VERR_INVALID_STATE;
}
#ifdef HAVE_UNLOCKED_IOCTL
static long VBoxDrvLinuxIOCtl(struct file *pFilp, unsigned int uCmd, unsigned long ulArg)
#else
static int VBoxDrvLinuxIOCtl(struct inode *pInode, struct file *pFilp, unsigned int uCmd, unsigned long ulArg)
#endif
{
    /*
     * Deal with the high-speed IOCtls that take their arguments from
     * the session and uCmd and only return a VBox status code.
     */
#ifdef HAVE_UNLOCKED_IOCTL
    if (RT_LIKELY(   uCmd == SUP_IOCTL_FAST_DO_RAW_RUN
                  || uCmd == SUP_IOCTL_FAST_DO_HWACC_RUN
                  || uCmd == SUP_IOCTL_FAST_DO_NOP))
        return supdrvIOCtlFast(uCmd, ulArg, &g_DevExt, (PSUPDRVSESSION)pFilp->private_data);
    return VBoxDrvLinuxIOCtlSlow(pFilp, uCmd, ulArg);

#else  /* !HAVE_UNLOCKED_IOCTL */
    int rc;
    unlock_kernel();
    if (RT_LIKELY(   uCmd == SUP_IOCTL_FAST_DO_RAW_RUN
                  || uCmd == SUP_IOCTL_FAST_DO_HWACC_RUN
                  || uCmd == SUP_IOCTL_FAST_DO_NOP))
        rc = supdrvIOCtlFast(uCmd, ulArg, &g_DevExt, (PSUPDRVSESSION)pFilp->private_data);
    else
        rc = VBoxDrvLinuxIOCtlSlow(pFilp, uCmd, ulArg);
    lock_kernel();
    return rc;
#endif /* !HAVE_UNLOCKED_IOCTL */
}
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC value.
 * @param   pVCpu         The cross context virtual CPU structure.
 * @param   fCheckTimers  Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
            u64 = SUPReadTsc();
        else
            u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
        u64 -= pVCpu->tm.s.offTSCRawSrc;

        /* Always return a value higher than what the guest has already seen. */
        if (RT_LIKELY(u64 > pVCpu->tm.s.u64TSCLastSeen))
            pVCpu->tm.s.u64TSCLastSeen = u64;
        else
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
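/*
 * Standalone model (illustrative only, not part of TM) of the monotonicity
 * clamp above: the last value handed to the guest is tracked, and a reading
 * that does not move forward is replaced by a small artificial step past it.
 */
static uint64_t exampleTscMonotonicClamp(uint64_t u64New, uint64_t *pu64LastSeen)
{
    if (u64New > *pu64LastSeen)
        *pu64LastSeen = u64New;         /* Normal case: the clock moved forward. */
    else
        u64New = *pu64LastSeen += 64;   /* Underflow: nudge past the last seen value. */
    return u64New;
}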
/**
 * Virtio Net private data allocation routine.
 *
 * @param   pDevice  Pointer to the Virtio device instance.
 *
 * @return  Allocated private data that must only be freed by calling
 *          VirtioNetDevFree(); NULL on failure.
 */
static void *VirtioNetDevAlloc(PVIRTIODEVICE pDevice)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioNetDevAlloc pDevice=%p\n", pDevice));

    AssertReturn(pDevice, NULL);
    virtio_net_t *pNet = RTMemAllocZ(sizeof(virtio_net_t));
    if (RT_LIKELY(pNet))
    {
        /*
         * Create a kernel memory cache for frequently allocated/deallocated
         * buffers.
         */
        char szCachename[KSTAT_STRLEN];
        RTStrPrintf(szCachename, sizeof(szCachename), "VirtioNet_Cache_%d", ddi_get_instance(pDevice->pDip));
        pNet->pTxCache = kmem_cache_create(szCachename,                 /* Cache name */
                                           sizeof(virtio_net_txbuf_t),  /* Size of buffers in cache */
                                           0,                           /* Align */
                                           VirtioNetTxBufCreate,        /* Buffer constructor */
                                           VirtioNetTxBufDestroy,       /* Buffer destructor */
                                           NULL,                        /* pfnReclaim */
                                           pDevice,                     /* Private data */
                                           NULL,                        /* "vmp", MBZ (man page) */
                                           0                            /* "cflags", MBZ (man page) */
                                           );
        if (RT_LIKELY(pNet->pTxCache))
            return pNet;
        LogRel((VIRTIOLOGNAME ":kmem_cache_create failed.\n"));
    }
    else
        LogRel((VIRTIOLOGNAME ":failed to alloc %u bytes for Net instance.\n", sizeof(virtio_net_t)));
    return NULL;
}
/**
 * Gets the next deadline in host CPU clock ticks and the TSC offset if we can
 * use the raw TSC.
 *
 * @returns The number of host CPU clock ticks to the next timer deadline.
 * @param   pVCpu           The current CPU.
 * @param   pfOffsettedTsc  Where to return whether the raw TSC can be used
 *                          with an offset.
 * @param   poffRealTSC     The offset against the TSC of the current CPU.
 * @thread  EMT(pVCpu).
 * @remarks Superset of TMCpuTickCanUseRealTSC.
 */
VMM_INT_DECL(uint64_t) TMCpuTickGetDeadlineAndTscOffset(PVMCPU pVCpu, bool *pfOffsettedTsc, uint64_t *poffRealTSC)
{
    PVM      pVM = pVCpu->CTX_SUFF(pVM);
    uint64_t cTicksToDeadline;

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.fMaybeUseOffsettedHostTSC
        && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
        && (   pVM->tm.s.fTSCUseRealTSC
            || (   !pVM->tm.s.fVirtualSyncCatchUp
                && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                && !pVM->tm.s.fVirtualWarpDrive)))
    {
        *pfOffsettedTsc = true;
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            uint64_t cNsToDeadline;
            uint64_t u64NowVirtSync = TMVirtualSyncGetWithDeadlineNoCheck(pVM, &cNsToDeadline);
            uint64_t u64Now = u64NowVirtSync != TMCLOCK_FREQ_VIRTUAL /* what's the use of this? */
                            ? ASMMultU64ByU32DivByU32(u64NowVirtSync, pVM->tm.s.cTSCTicksPerSecond, TMCLOCK_FREQ_VIRTUAL)
                            : u64NowVirtSync;
            u64Now -= pVCpu->tm.s.offTSCRawSrc;
            *poffRealTSC = u64Now - ASMReadTSC();
            cTicksToDeadline = tmCpuCalcTicksToDeadline(cNsToDeadline);
        }
        else
        {
            /* The source is the real TSC. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
            cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
        }
    }
    else
    {
#ifdef VBOX_WITH_STATISTICS
        tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
        *pfOffsettedTsc  = false;
        *poffRealTSC     = 0;
        cTicksToDeadline = tmCpuCalcTicksToDeadline(TMVirtualSyncGetNsToDeadline(pVM));
    }
    return cTicksToDeadline;
}
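/*
 * tmCpuCalcTicksToDeadline() is not part of this excerpt; a plausible sketch
 * of the nanoseconds-to-TSC-ticks conversion it must perform is shown below
 * (assuming the ticks-per-second value fits in 32 bits, i.e. a host TSC
 * below ~4.2 GHz).
 */
DECLINLINE(uint64_t) exampleNsToTscTicks(uint64_t cNsToDeadline, uint32_t cTscTicksPerSecond)
{
    /* ticks = ns * Hz / 1e9, using the overflow-safe 64x32/32 helper. */
    return ASMMultU64ByU32DivByU32(cNsToDeadline, cTscTicksPerSecond, UINT32_C(1000000000));
}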
/**
 * Callback wrapper for single-CPU timers.
 *
 * @param   pvArg  Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
 *          cyclic subsystem here, neither should pfnTimer().
 */
static void rtTimerSolSingleCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!pTimer->fAllCpus);

    /* Make sure one-shots do not fire another time. */
    Assert(   !pTimer->fSuspended
           || pTimer->cNsInterval != 0);

    if (!pTimer->fSuspendedFromTimer)
    {
        /* Make sure we are firing on the right CPU. */
        Assert(   !pTimer->fSpecificCpu
               || pTimer->iCpu == RTMpCpuId());

        /* For one-shot, we may allow the callback to restart them. */
        if (pTimer->cNsInterval == 0)
            pTimer->fSuspendedFromTimer = true;

        /*
         * Perform the callout.
         */
        pTimer->u.Single.pActiveThread = curthread;

        uint64_t u64Tick = ++pTimer->u.Single.u64Tick;
        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Single.pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            if (   !pTimer->fIntervalChanged
                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning. This comes at a slight cost,
             * which is why we don't do it all the time.
             */
            if (pTimer->u.Single.nsNextTick)
                pTimer->u.Single.nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Single.nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Single.nsNextTick);
            return;
        }

        /*
         * The timer has been suspended, set expiration time to infinity.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
/**
 * Checks if AMD-V / VT-x can use an offsetted hardware TSC or not.
 *
 * @returns true/false accordingly.
 * @param   pVCpu        The VMCPU to operate on.
 * @param   poffRealTSC  The offset against the TSC of the current CPU.
 *                       Can be NULL.
 * @thread  EMT.
 */
VMM_INT_DECL(bool) TMCpuTickCanUseRealTSC(PVMCPU pVCpu, uint64_t *poffRealTSC)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * We require:
     *     1. A fixed TSC, this is checked at init time.
     *     2. That the TSC is ticking (we shouldn't be here if it isn't)
     *     3. Either that we're using the real TSC as time source or
     *        a) we don't have any lag to catch up, and
     *        b) the virtual sync clock hasn't been halted by an expired timer, and
     *        c) we're not using warp drive (accelerated virtual guest time).
     */
    if (   pVM->tm.s.fMaybeUseOffsettedHostTSC
        && RT_LIKELY(pVCpu->tm.s.fTSCTicking)
        && (   pVM->tm.s.fTSCUseRealTSC
            || (   !pVM->tm.s.fVirtualSyncCatchUp
                && RT_LIKELY(pVM->tm.s.fVirtualSyncTicking)
                && !pVM->tm.s.fVirtualWarpDrive)))
    {
        if (!pVM->tm.s.fTSCUseRealTSC)
        {
            /* The source is the timer synchronous virtual clock. */
            Assert(pVM->tm.s.fTSCVirtualized);

            if (poffRealTSC)
            {
                uint64_t u64Now = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                - pVCpu->tm.s.offTSCRawSrc;
                /** @todo When we start collecting statistics on how much time we spend executing
                 * guest code before exiting, we should check this against the next virtual sync
                 * timer timeout. If it's lower than the avg. length, we should trap rdtsc to increase
                 * the chance that we'll get interrupted right after the timer expired. */
                *poffRealTSC = u64Now - ASMReadTSC();
            }
        }
        else if (poffRealTSC)
        {
            /* The source is the real TSC. */
            if (pVM->tm.s.fTSCVirtualized)
                *poffRealTSC = pVCpu->tm.s.offTSCRawSrc;
            else
                *poffRealTSC = 0;
        }
        /** @todo count this? */
        return true;
    }

#ifdef VBOX_WITH_STATISTICS
    tmCpuTickRecordOffsettedTscRefusal(pVM, pVCpu);
#endif
    return false;
}
/**
 * Callback wrapper for omni-CPU timers.
 *
 * @param   pvArg  Opaque pointer to the timer.
 *
 * @remarks This will be executed in interrupt context but only at the specified
 *          level i.e. CY_LOCK_LEVEL in our case. We -CANNOT- call into the
 *          cyclic subsystem here, neither should pfnTimer().
 */
static void rtTimerSolOmniCallbackWrapper(void *pvArg)
{
    PRTTIMER pTimer = (PRTTIMER)pvArg;
    AssertPtrReturnVoid(pTimer);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(pTimer->fAllCpus);

    if (!pTimer->fSuspendedFromTimer)
    {
        /*
         * Perform the callout.
         */
        uint32_t const iCpu = CPU->cpu_id;
        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = curthread;
        uint64_t u64Tick = ++pTimer->u.Omni.aPerCpu[iCpu].u64Tick;

        pTimer->pfnTimer(pTimer, pTimer->pvUser, u64Tick);

        pTimer->u.Omni.aPerCpu[iCpu].pActiveThread = NULL;

        if (RT_LIKELY(!pTimer->fSuspendedFromTimer))
        {
            if (   !pTimer->fIntervalChanged
                || RT_UNLIKELY(pTimer->hCyclicId == CYCLIC_NONE))
                return;

            /*
             * The interval was changed, we need to set the expiration time
             * ourselves before returning. This comes at a slight cost,
             * which is why we don't do it all the time.
             *
             * Note! The cyclic_reprogram call only affects the omni cyclic
             *       component for this CPU.
             */
            if (pTimer->u.Omni.aPerCpu[iCpu].nsNextTick)
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick += ASMAtomicUoReadU64(&pTimer->cNsInterval);
            else
                pTimer->u.Omni.aPerCpu[iCpu].nsNextTick = RTTimeSystemNanoTS() + ASMAtomicUoReadU64(&pTimer->cNsInterval);
            cyclic_reprogram(pTimer->hCyclicId, pTimer->u.Omni.aPerCpu[iCpu].nsNextTick);
            return;
        }

        /*
         * The timer has been suspended, set expiration time to infinity.
         */
    }
    if (RT_LIKELY(pTimer->hCyclicId != CYCLIC_NONE))
        cyclic_reprogram(pTimer->hCyclicId, CY_INFINITY);
}
/**
 * Internal worker for getting the GIP CPU array index for the calling CPU.
 *
 * @returns Index into SUPGLOBALINFOPAGE::aCPUs or UINT16_MAX.
 * @param   pGip  The GIP.
 */
DECLINLINE(uint16_t) supGetGipCpuIndex(PSUPGLOBALINFOPAGE pGip)
{
    uint16_t iGipCpu;
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very fast (ring-0 has encoded the CPU
           set index into the IDT limit for us to recover here). */
        uint16_t cbLim   = ASMGetIdtrLimit();
        uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
        iCpuSet &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us what we need and more. */
        uint32_t iCpuSet;
        ASMReadTscWithAux(&iCpuSet);
        iCpuSet &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else
    {
        /* Get the APIC ID via the slow CPUID instruction. */
        uint8_t idApic = ASMGetApicId();
        iGipCpu = pGip->aiCpuFromApicId[idApic];
    }

#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId() (disables interrupts to avoid host OS assertions
       about unsafe CPU number usage). */
    RTCCUINTREG uFlags  = ASMIntDisableFlags();
    int         iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    ASMSetFlags(uFlags);

#elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU. */
    uint32_t iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif
    return iGipCpu;
}
RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMEVENT_STRICT
    if (pThis->fEverHadSignallers)
    {
        int rc9 = RTLockValidatorRecSharedCheckSignaller(&pThis->Signallers, NIL_RTTHREAD);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    ASMAtomicWriteU32(&pThis->fSignalled, 1);
    if (ASMAtomicReadS32(&pThis->cWaiters) < 1)
        return VINF_SUCCESS;

    /* Somebody is waiting, try wake up one of them. */
    long cWoken = sys_futex(&pThis->fSignalled, FUTEX_WAKE, 1, NULL, NULL, 0);
    if (RT_LIKELY(cWoken >= 0))
        return VINF_SUCCESS;

    if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        return VERR_SEM_DESTROYED;

    return VERR_INVALID_PARAMETER;
}
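/*
 * Usage sketch (illustrative): the standard IPRT pairing for the signal
 * routine above. One thread blocks in RTSemEventWait(), another wakes
 * exactly one waiter with RTSemEventSignal().
 */
#include <iprt/semaphore.h>

static int exampleWaiterThread(RTSEMEVENT hEvent)
{
    /* Blocks until signalled; auto-reset semantics release one waiter. */
    return RTSemEventWait(hEvent, RT_INDEFINITE_WAIT);
}

static int exampleSignallerThread(RTSEMEVENT hEvent)
{
    return RTSemEventSignal(hEvent);
}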
int suplibOsIOCtl(PSUPLIBDATA pThis, uintptr_t uFunction, void *pvReq, size_t cbReq)
{
    AssertMsg(pThis->hDevice != (intptr_t)NIL_RTFILE, ("SUPLIB not initiated successfully!\n"));
    NOREF(cbReq);

    /*
     * Issue the device I/O control.
     */
    if (RT_LIKELY(ioctl(pThis->hDevice, uFunction, pvReq) >= 0))
        return VINF_SUCCESS;

    /* This is the reverse operation of the one found in SUPDrv-linux.c */
    switch (errno)
    {
        case EACCES: return VERR_GENERAL_FAILURE;
        case EINVAL: return VERR_INVALID_PARAMETER;
        case EILSEQ: return VERR_INVALID_MAGIC;
        case ENXIO:  return VERR_INVALID_HANDLE;
        case EFAULT: return VERR_INVALID_POINTER;
        case ENOLCK: return VERR_LOCK_FAILED;
        case EEXIST: return VERR_ALREADY_LOADED;
        case EPERM:  return VERR_PERMISSION_DENIED;
        case ENOSYS: return VERR_VERSION_MISMATCH;
        case 1000:   return VERR_IDT_FAILED;
    }
    return RTErrConvertFromErrno(errno);
}
static int VBoxGuestSolarisPoll(dev_t Dev, short fEvents, int fAnyYet, short *pReqEvents, struct pollhead **ppPollHead)
{
    LogFlow((DEVICE_NAME "::Poll: fEvents=%d fAnyYet=%d\n", fEvents, fAnyYet));

    vboxguest_state_t *pState = ddi_get_soft_state(g_pVBoxGuestSolarisState, getminor(Dev));
    if (RT_LIKELY(pState))
    {
        PVBOXGUESTSESSION pSession  = (PVBOXGUESTSESSION)pState->pSession;
        uint32_t          u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);
        if (pSession->u32MousePosChangedSeq != u32CurSeq)
        {
            *pReqEvents |= (POLLIN | POLLRDNORM);
            pSession->u32MousePosChangedSeq = u32CurSeq;
        }
        else
        {
            *pReqEvents = 0;
            if (!fAnyYet)
                *ppPollHead = &g_PollHead;
        }
        return 0;
    }
    else
    {
        Log((DEVICE_NAME "::Poll: no state data for %d\n", getminor(Dev)));
        return EINVAL;
    }
}
/**
 * Allocates and acquires the lock for the stream.
 *
 * @returns IPRT status code.
 * @param   pStream  The stream (valid).
 */
static int rtStrmAllocLock(PRTSTREAM pStream)
{
    Assert(pStream->pCritSect == NULL);

    PRTCRITSECT pCritSect = (PRTCRITSECT)RTMemAlloc(sizeof(*pCritSect));
    if (!pCritSect)
        return VERR_NO_MEMORY;

    /* The native stream locks are normally not recursive. */
    int rc = RTCritSectInitEx(pCritSect, RTCRITSECT_FLAGS_NO_NESTING,
                              NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemSpinMutex");
    if (RT_SUCCESS(rc))
    {
        rc = RTCritSectEnter(pCritSect);
        if (RT_SUCCESS(rc))
        {
            if (RT_LIKELY(ASMAtomicCmpXchgPtr(&pStream->pCritSect, pCritSect, NULL)))
                return VINF_SUCCESS;

            RTCritSectLeave(pCritSect);
        }
        RTCritSectDelete(pCritSect);
    }
    RTMemFree(pCritSect);

    /* Handle the lost race case... */
    pCritSect = ASMAtomicReadPtrT(&pStream->pCritSect, PRTCRITSECT);
    if (pCritSect)
        return RTCritSectEnter(pCritSect);

    return rc;
}
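/*
 * The publish-or-join pattern above in generic form (illustrative C11 sketch;
 * ExampleLock, lockAlloc and lockFree are hypothetical): allocate, try to
 * publish with a CAS, and on a lost race free our copy and join the winner's.
 */
#include <stdatomic.h>

typedef struct ExampleLock ExampleLock;
extern ExampleLock *lockAlloc(void);
extern void         lockFree(ExampleLock *pLock);

static ExampleLock *exampleGetSharedLock(ExampleLock *_Atomic *ppLock)
{
    ExampleLock *pNew      = lockAlloc();
    ExampleLock *pExpected = NULL;
    if (atomic_compare_exchange_strong(ppLock, &pExpected, pNew))
        return pNew;        /* We published ours. */
    lockFree(pNew);         /* Lost the race; pExpected now holds the winner's lock. */
    return pExpected;
}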
/**
 * A variant of Utf8Str::copyFromN that does not throw any exceptions but
 * returns E_OUTOFMEMORY instead.
 *
 * @param   a_pcszSrc  The source string.
 * @param   a_offSrc   Start offset to copy from.
 * @param   a_cchSrc   The number of characters to copy from the source.
 * @returns S_OK or E_OUTOFMEMORY.
 *
 * @remarks This calls cleanup() first, so the caller doesn't have to. (Saves
 *          code space.)
 */
HRESULT Utf8Str::copyFromExNComRC(const char *a_pcszSrc, size_t a_offSrc, size_t a_cchSrc)
{
    cleanup();
    if (a_cchSrc)
    {
        m_psz = RTStrAlloc(a_cchSrc + 1);
        if (RT_LIKELY(m_psz))
        {
            m_cch         = a_cchSrc;
            m_cbAllocated = a_cchSrc + 1;
            memcpy(m_psz, a_pcszSrc + a_offSrc, a_cchSrc);
            m_psz[a_cchSrc] = '\0';
        }
        else
        {
            m_cch         = 0;
            m_cbAllocated = 0;
            return E_OUTOFMEMORY;
        }
    }
    else
    {
        m_cch         = 0;
        m_cbAllocated = 0;
        m_psz         = NULL;
    }
    return S_OK;
}
RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax, uint32_t fFlags)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt;
    AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);
    AssertReturn(!(fFlags & ~RTFILEAIOCTX_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);

    pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOCTXINTERNAL));
    if (RT_UNLIKELY(!pCtxInt))
        return VERR_NO_MEMORY;

    /* Init the event handle. */
    pCtxInt->iPort = port_create();
    if (RT_LIKELY(pCtxInt->iPort > 0))
    {
        pCtxInt->fFlags   = fFlags;
        pCtxInt->u32Magic = RTFILEAIOCTX_MAGIC;
        *phAioCtx = (RTFILEAIOCTX)pCtxInt;
    }
    else
    {
        RTMemFree(pCtxInt);
        rc = RTErrConvertFromErrno(errno);
    }

    return rc;
}
/**
 * Sets an IRQ on the PIC and I/O APIC.
 *
 * @returns true if delivered, false if postponed.
 * @param   pVM      Pointer to the VM.
 * @param   iIrq     The IRQ.
 * @param   iLevel   The new level.
 * @param   uTagSrc  The IRQ tag and source.
 *
 * @remarks The caller holds the PDM lock.
 */
static bool pdmR0IsaSetIrq(PVM pVM, int iIrq, int iLevel, uint32_t uTagSrc)
{
    if (RT_LIKELY(   (   pVM->pdm.s.IoApic.pDevInsR0
                      || !pVM->pdm.s.IoApic.pDevInsR3)
                  && (   pVM->pdm.s.Pic.pDevInsR0
                      || !pVM->pdm.s.Pic.pDevInsR3)))
    {
        if (pVM->pdm.s.Pic.pDevInsR0)
            pVM->pdm.s.Pic.pfnSetIrqR0(pVM->pdm.s.Pic.pDevInsR0, iIrq, iLevel, uTagSrc);
        if (pVM->pdm.s.IoApic.pDevInsR0)
            pVM->pdm.s.IoApic.pfnSetIrqR0(pVM->pdm.s.IoApic.pDevInsR0, iIrq, iLevel, uTagSrc);
        return true;
    }

    /* Queue for ring-3 execution. */
    PPDMDEVHLPTASK pTask = (PPDMDEVHLPTASK)PDMQueueAlloc(pVM->pdm.s.pDevHlpQueueR0);
    AssertReturn(pTask, false);

    pTask->enmOp            = PDMDEVHLPTASKOP_ISA_SET_IRQ;
    pTask->pDevInsR3        = NIL_RTR3PTR; /* not required */
    pTask->u.SetIRQ.iIrq    = iIrq;
    pTask->u.SetIRQ.iLevel  = iLevel;
    pTask->u.SetIRQ.uTagSrc = uTagSrc;

    PDMQueueInsertEx(pVM->pdm.s.pDevHlpQueueR0, &pTask->Core, 0);
    return false;
}
RTR0DECL(int) RTR0MemKernelCopyTo(void *pvDst, void const *pvSrc, size_t cb)
{
    int rc = kcopy(pvSrc, pvDst, cb);
    if (RT_LIKELY(rc == 0))
        return VINF_SUCCESS;
    return VERR_ACCESS_DENIED;
}
RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt;
    AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);

    pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOCTXINTERNAL));
    if (RT_UNLIKELY(!pCtxInt))
        return VERR_NO_MEMORY;

    /* Init the event handle. */
    pCtxInt->iKQueue = kqueue();
    if (RT_LIKELY(pCtxInt->iKQueue > 0))
    {
        pCtxInt->u32Magic = RTFILEAIOCTX_MAGIC;
        *phAioCtx = (RTFILEAIOCTX)pCtxInt;
    }
    else
    {
        RTMemFree(pCtxInt);
        rc = RTErrConvertFromErrno(errno);
    }

    return rc;
}
DECLASM(int) VBoxDrvIOCtlFast(uint16_t sfn, uint8_t iFunction)
{
    /*
     * Find the session.
     */
    const RTPROCESS Process = RTProcSelf();
    const unsigned  iHash   = SESSION_HASH(sfn);
    PSUPDRVSESSION  pSession;

    RTSpinlockAcquire(g_Spinlock);
    pSession = g_apSessionHashTab[iHash];
    if (pSession && pSession->Process != Process)
    {
        do pSession = pSession->pNextHash;
        while (   pSession
               && (   pSession->sfn != sfn
                   || pSession->Process != Process));
    }
    /* Retain the session while we still hold the spinlock; it is released
       after the dispatch below. */
    if (RT_LIKELY(pSession))
        supdrvSessionRetain(pSession);
    RTSpinlockReleaseNoInts(g_Spinlock);
    if (RT_UNLIKELY(!pSession))
    {
        OSDBGPRINT(("VBoxDrvIoctl: WHUT?!? pSession == NULL! This must be a mistake... pid=%d\n", (int)Process));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Dispatch the fast IOCtl.
     */
    supdrvIOCtlFast(iFunction, 0, &g_DevExt, pSession);
    supdrvSessionRelease(pSession);
    return 0;
}
RTDECL(char *) RTStrToUpper(char *psz)
{
    /*
     * Loop the code points in the string, converting them one by one.
     *
     * ASSUMES that the folded code points have an encoding that is equal or
     * shorter than the original (this is presently correct).
     */
    const char *pszSrc = psz;
    char       *pszDst = psz;
    RTUNICP     uc;
    do
    {
        int rc = RTStrGetCpEx(&pszSrc, &uc);
        if (RT_SUCCESS(rc))
        {
            RTUNICP uc2 = RTUniCpToUpper(uc);
            if (RT_LIKELY(   uc2 == uc
                          || RTUniCpCalcUtf8Len(uc2) == RTUniCpCalcUtf8Len(uc)))
                pszDst = RTStrPutCp(pszDst, uc2);
            else
                pszDst = RTStrPutCp(pszDst, uc);
        }
        else
        {
            /* Bad encoding, just copy it quietly (uc == RTUNICP_INVALID (!= 0)). */
            AssertRC(rc);
            *pszDst++ = pszSrc[-1];
        }
        Assert((uintptr_t)pszDst <= (uintptr_t)pszSrc);
    } while (uc != 0);

    return psz;
}
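/*
 * Minimal usage sketch (illustrative): RTStrToUpper() converts in place and
 * returns its argument.
 */
#include <iprt/string.h>

static void exampleUpperCase(void)
{
    char szName[] = "VirtualBox";
    RTStrToUpper(szName);   /* szName now holds "VIRTUALBOX". */
}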
RTR0DECL(int) RTR0MemUserCopyFrom(void *pvDst, RTR3PTR R3PtrSrc, size_t cb)
{
    int rc = copyin((const void *)R3PtrSrc, pvDst, cb);
    if (RT_LIKELY(rc == 0))
        return VINF_SUCCESS;
    return VERR_ACCESS_DENIED;
}
/**
 * Allocates one page.
 *
 * @param   virtAddr  The virtual address to which this page may be mapped in
 *                    the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t offPage;
    seg_t      KernelSeg;

    /*
     * 16777215 terabytes of total memory for all VMs or
     * restart 8000 1GB VMs 2147483 times until wraparound!
     */
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}
/**
 * Outbound TTL/HOPL check.
 */
static int pxudp_ttl_expired(struct pbuf *p)
{
    int ttl;

    if (ip_current_is_v6()) {
        ttl = IP6H_HOPLIM(ip6_current_header());
    }
    else {
        ttl = IPH_TTL(ip_current_header());
    }

    if (RT_UNLIKELY(ttl <= 1)) {
        int status = pbuf_header(p, ip_current_header_tot_len() + UDP_HLEN);
        if (RT_LIKELY(status == 0)) {
            if (ip_current_is_v6()) {
                icmp6_time_exceeded(p, ICMP6_TE_HL);
            }
            else {
                icmp_time_exceeded(p, ICMP_TE_TTL);
            }
        }
        pbuf_free(p);
        return 1;
    }

    return 0;
}
/**
 * Read the current CPU timestamp counter.
 *
 * @returns The CPU TSC value.
 * @param   pVCpu         The VMCPU to operate on.
 * @param   fCheckTimers  Whether to check timers.
 */
DECLINLINE(uint64_t) tmCpuTickGetInternal(PVMCPU pVCpu, bool fCheckTimers)
{
    uint64_t u64;

    if (RT_LIKELY(pVCpu->tm.s.fTSCTicking))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        if (pVM->tm.s.fTSCVirtualized)
        {
            if (pVM->tm.s.fTSCUseRealTSC)
                u64 = ASMReadTSC();
            else
                u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
            u64 -= pVCpu->tm.s.offTSCRawSrc;
        }
        else
            u64 = ASMReadTSC();

        /* Never return a value lower than what the guest has already seen. */
        if (u64 < pVCpu->tm.s.u64TSCLastSeen)
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatTSCUnderflow);
            pVCpu->tm.s.u64TSCLastSeen += 64;   /** @todo choose a good increment here */
            u64 = pVCpu->tm.s.u64TSCLastSeen;
        }
    }
    else
        u64 = pVCpu->tm.s.u64TSC;
    return u64;
}
/**
 * Listener thread loop.
 *
 * @returns VINF_SUCCESS
 * @param   hThreadSelf  Thread handle to this thread.
 * @param   pvUser       User argument.
 */
static DECLCALLBACK(int) drvTCPListenLoop(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    PDRVTCP pThis = (PDRVTCP)pvUser;

    while (RT_LIKELY(!pThis->fShutdown))
    {
        RTSOCKET hTcpSockNew = NIL_RTSOCKET;
        int rc = RTTcpServerListen2(pThis->hTcpServ, &hTcpSockNew);
        if (RT_SUCCESS(rc))
        {
            if (pThis->hTcpSock != NIL_RTSOCKET)
            {
                LogRel(("DrvTCP%d: only a single connection is supported\n", pThis->pDrvIns->iInstance));
                RTTcpServerDisconnectClient2(hTcpSockNew);
            }
            else
            {
                pThis->hTcpSock = hTcpSockNew;
                /* Inform the poller about the new socket. */
                drvTcpPollerKick(pThis, DRVTCP_WAKEUP_REASON_NEW_CONNECTION);
            }
        }
    }

    return VINF_SUCCESS;
}
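/*
 * Sketch (illustrative; the construction code is not part of this excerpt) of
 * how such a listener loop is typically started with the IPRT thread API: an
 * I/O-type, waitable thread so the destructor can join it on shutdown.
 */
#include <iprt/thread.h>

static int exampleStartListener(PDRVTCP pThis, PRTTHREAD phThread)
{
    return RTThreadCreate(phThread, drvTCPListenLoop, pThis, 0 /* default stack */,
                          RTTHREADTYPE_IO, RTTHREADFLAGS_WAITABLE, "TCPLISTEN");
}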
RTR0DECL(int) RTR0MemUserCopyTo(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb)
{
    int rc = copyout(pvSrc, (void *)R3PtrDst, cb);
    if (RT_LIKELY(rc == 0))
        return VINF_SUCCESS;
    return VERR_ACCESS_DENIED;
}
DECLINLINE(int) rtSemEventMultiPosixWait(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
                                         PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTMULTIINTERNAL *pThis = hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    uint32_t u32 = pThis->u32State;
    AssertReturn(u32 == EVENTMULTI_STATE_NOT_SIGNALED || u32 == EVENTMULTI_STATE_SIGNALED, VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    /*
     * Optimize the case where the event is signalled.
     */
    if (ASMAtomicUoReadU32(&pThis->u32State) == EVENTMULTI_STATE_SIGNALED)
    {
        int rc = rtSemEventMultiPosixWaitPoll(pThis);
        if (RT_LIKELY(rc != VERR_TIMEOUT))
            return rc;
    }

    /*
     * Indefinite or timed wait?
     */
    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
        return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);
    return rtSemEventMultiPosixWaitTimed(pThis, fFlags, uTimeout, pSrcPos);
}
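/*
 * Usage sketch (illustrative): the public RTSemEventMultiWaitEx() API drives
 * workers like the one above; e.g. a resumable, relative 500 ms wait.
 */
#include <iprt/semaphore.h>

static int exampleTimedWait(RTSEMEVENTMULTI hEventMulti)
{
    return RTSemEventMultiWaitEx(hEventMulti,
                                 RTSEMWAIT_FLAGS_RESUME | RTSEMWAIT_FLAGS_RELATIVE | RTSEMWAIT_FLAGS_MILLIES,
                                 500 /* ms */);
}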
RTDECL(void *) RTHeapOffsetAllocZ(RTHEAPOFFSET hHeap, size_t cb, size_t cbAlignment)
{
    PRTHEAPOFFSETINTERNAL pHeapInt = hHeap;
    PRTHEAPOFFSETBLOCK    pBlock;

    /*
     * Validate and adjust the input.
     */
    AssertPtrReturn(pHeapInt, NULL);
    if (cb < RTHEAPOFFSET_MIN_BLOCK)
        cb = RTHEAPOFFSET_MIN_BLOCK;
    else
        cb = RT_ALIGN_Z(cb, RTHEAPOFFSET_ALIGNMENT);
    if (!cbAlignment)
        cbAlignment = RTHEAPOFFSET_ALIGNMENT;
    else
    {
        Assert(!(cbAlignment & (cbAlignment - 1)));
        Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
        if (cbAlignment < RTHEAPOFFSET_ALIGNMENT)
            cbAlignment = RTHEAPOFFSET_ALIGNMENT;
    }

    /*
     * Do the allocation.
     */
    pBlock = rtHeapOffsetAllocBlock(pHeapInt, cb, cbAlignment);
    if (RT_LIKELY(pBlock))
    {
        void *pv = pBlock + 1;
        memset(pv, 0, cb);
        return pv;
    }
    return NULL;
}
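/*
 * Usage sketch (illustrative): carve an offset-based heap out of a static
 * buffer with RTHeapOffsetInit(), then take a zeroed, 32-byte aligned block.
 */
#include <iprt/heap.h>

static uint8_t s_abHeapMem[65536];

static void *exampleHeapAllocZ(void)
{
    RTHEAPOFFSET hHeap;
    int rc = RTHeapOffsetInit(&hHeap, s_abHeapMem, sizeof(s_abHeapMem));
    if (RT_FAILURE(rc))
        return NULL;
    return RTHeapOffsetAllocZ(hHeap, 128 /* cb */, 32 /* cbAlignment */);
}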
static int vboxPciLinuxDevRegisterWithIommu(PVBOXRAWPCIINS pIns)
{
#ifdef VBOX_WITH_IOMMU
    int rc = VINF_SUCCESS;
    struct pci_dev *pPciDev = pIns->pPciDev;
    PVBOXRAWPCIDRVVM pData = VBOX_DRV_VMDATA(pIns);
    IPRT_LINUX_SAVE_EFL_AC();

    if (RT_LIKELY(pData))
    {
        if (RT_LIKELY(pData->pIommuDomain))
        {
            /** @todo KVM checks IOMMU_CAP_CACHE_COHERENCY and sets the
             *        IOMMU_CACHE flag later used when mapping physical
             *        addresses, which could improve performance. */
            int rcLnx = iommu_attach_device(pData->pIommuDomain, &pPciDev->dev);
            if (!rcLnx)
            {
                vbpci_printk(KERN_DEBUG, pPciDev, "attached to IOMMU\n");
                pIns->fIommuUsed = true;
                rc = VINF_SUCCESS;
            }
            else
            {
                vbpci_printk(KERN_DEBUG, pPciDev, "failed to attach to IOMMU, error %d\n", rcLnx);
                rc = VERR_INTERNAL_ERROR;
            }
        }
        else
        {
            vbpci_printk(KERN_DEBUG, pIns->pPciDev, "cannot attach to IOMMU, no domain\n");
            rc = VERR_NOT_FOUND;
        }
    }
    else
    {
        vbpci_printk(KERN_DEBUG, pPciDev, "cannot attach to IOMMU, no VM data\n");
        rc = VERR_INVALID_PARAMETER;
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
#else
    return VERR_NOT_SUPPORTED;
#endif
}