/** @copydoc fuse_operations::write */
static int vboxfuseOp_write(const char *pszPath, const char *pbBuf, size_t cbBuf,
                            off_t offFile, struct fuse_file_info *pInfo)
{
    /* paranoia */
    AssertReturn((int)cbBuf >= 0, -EINVAL);
    AssertReturn((unsigned)cbBuf == cbBuf, -EINVAL);
    AssertReturn(offFile >= 0, -EINVAL);
    AssertReturn((off_t)(offFile + cbBuf) >= offFile, -EINVAL);

    PVBOXFUSENODE pNode = (PVBOXFUSENODE)(uintptr_t)pInfo->fh;
    AssertPtr(pNode);
    switch (pNode->enmType)
    {
        case VBOXFUSETYPE_DIRECTORY:
            return -ENOTSUP;

        case VBOXFUSETYPE_FLAT_IMAGE:
        {
            PVBOXFUSEFLATIMAGE pFlatImage = (PVBOXFUSEFLATIMAGE)(uintptr_t)pInfo->fh;
            LogFlow(("vboxfuseOp_write: offFile=%#llx cbBuf=%#zx pszPath=\"%s\"\n", (uint64_t)offFile, cbBuf, pszPath));
            vboxfuseNodeLock(&pFlatImage->Node);

            int rc;
            if ((off_t)(offFile + cbBuf) < offFile)
                rc = -EINVAL;
            else if (offFile >= pFlatImage->Node.cbPrimary)
                rc = 0;
            else if (!cbBuf)
                rc = 0;
            else
            {
                /* Adjust for EOF. */
                if ((off_t)(offFile + cbBuf) >= pFlatImage->Node.cbPrimary)
                    cbBuf = pFlatImage->Node.cbPrimary - offFile;

                /* The unaligned path below consumes cbBuf while working through
                   head/middle/tail, so remember the clamped request size now:
                   FUSE expects the number of bytes written on success.  (The old
                   code returned the leftover cbBuf, which was wrong for
                   unaligned multi-block writes.) */
                size_t const cbToWrite = cbBuf;

                /*
                 * Aligned write?
                 */
                int rc2;
                if (   !(offFile & VBOXFUSE_MIN_SIZE_MASK_OFF)
                    && !(cbBuf   & VBOXFUSE_MIN_SIZE_MASK_OFF))
                    rc2 = VDWrite(pFlatImage->pDisk, offFile, pbBuf, cbBuf);
                else
                {
                    /*
                     * Unaligned write - lots of extra work.
                     */
                    uint8_t abBlock[VBOXFUSE_MIN_SIZE];
                    if (    ((offFile + cbBuf) & VBOXFUSE_MIN_SIZE_MASK_BLK)
                        ==  (offFile & VBOXFUSE_MIN_SIZE_MASK_BLK))
                    {
                        /* a single partial block - read-modify-write it. */
                        rc2 = VDRead(pFlatImage->pDisk, offFile & VBOXFUSE_MIN_SIZE_MASK_BLK, abBlock, VBOXFUSE_MIN_SIZE);
                        if (RT_SUCCESS(rc2))
                        {
                            memcpy(&abBlock[offFile & VBOXFUSE_MIN_SIZE_MASK_OFF], pbBuf, cbBuf);
                            /* Update the block */
                            rc2 = VDWrite(pFlatImage->pDisk, offFile & VBOXFUSE_MIN_SIZE_MASK_BLK, abBlock, VBOXFUSE_MIN_SIZE);
                        }
                    }
                    else
                    {
                        /* read-modify-write the unaligned head block. */
                        rc2 = VINF_SUCCESS;
                        if (offFile & VBOXFUSE_MIN_SIZE_MASK_OFF)
                        {
                            /* Capture the block base before advancing offFile.  The old
                               code recomputed it from the already-advanced offset,
                               writing the patched head block to the FOLLOWING block. */
                            off_t const offBlock = offFile & VBOXFUSE_MIN_SIZE_MASK_BLK;
                            rc2 = VDRead(pFlatImage->pDisk, offBlock, abBlock, VBOXFUSE_MIN_SIZE);
                            if (RT_SUCCESS(rc2))
                            {
                                size_t cbCopy = VBOXFUSE_MIN_SIZE - (offFile & VBOXFUSE_MIN_SIZE_MASK_OFF);
                                memcpy(&abBlock[offFile & VBOXFUSE_MIN_SIZE_MASK_OFF], pbBuf, cbCopy);
                                pbBuf   += cbCopy;
                                offFile += cbCopy;
                                cbBuf   -= cbCopy;
                                rc2 = VDWrite(pFlatImage->pDisk, offBlock, abBlock, VBOXFUSE_MIN_SIZE);
                            }
                        }

                        /* write the aligned middle. */
                        Assert(!(offFile & VBOXFUSE_MIN_SIZE_MASK_OFF));
                        if (   cbBuf >= VBOXFUSE_MIN_SIZE
                            && RT_SUCCESS(rc2))
                        {
                            size_t cbWrite = cbBuf & VBOXFUSE_MIN_SIZE_MASK_BLK;
                            rc2 = VDWrite(pFlatImage->pDisk, offFile, pbBuf, cbWrite);
                            if (RT_SUCCESS(rc2))
                            {
                                pbBuf   += cbWrite;
                                offFile += cbWrite;
                                cbBuf   -= cbWrite;
                            }
                        }

                        /* read-modify-write the unaligned tail block. */
                        Assert(cbBuf < VBOXFUSE_MIN_SIZE);
                        Assert(!(offFile & VBOXFUSE_MIN_SIZE_MASK_OFF));
                        if (   cbBuf
                            && RT_SUCCESS(rc2))
                        {
                            rc2 = VDRead(pFlatImage->pDisk, offFile, abBlock, VBOXFUSE_MIN_SIZE);
                            if (RT_SUCCESS(rc2))
                            {
                                memcpy(&abBlock[0], pbBuf, cbBuf);
                                rc2 = VDWrite(pFlatImage->pDisk, offFile, abBlock, VBOXFUSE_MIN_SIZE);
                            }
                        }
                    }
                }

                /* convert the return code */
                if (RT_SUCCESS(rc2))
                    rc = (int)cbToWrite;
                else
                    rc = -RTErrConvertToErrno(rc2);
            }
            vboxfuseNodeUnlock(&pFlatImage->Node);
            return rc;
        }

        case VBOXFUSETYPE_CONTROL_PIPE:
            return -ENOTSUP;

        default:
            AssertMsgFailed(("%s\n", pszPath));
            return -EDOOFUS;
    }
}
/**
 * Convert from zlib to IPRT status codes.
 *
 * This will also set the fFatalError flag when appropriate.
 *
 * @returns IPRT status code.
 * @param   pThis   The gzip I/O stream instance data.
 * @param   rc      Zlib error code.
 */
static int rtZipGzipConvertErrFromZlib(PRTZIPGZIPSTREAM pThis, int rc)
{
    /* The harmless cases first: stream is fine, keep going. */
    if (rc == Z_OK)
        return VINF_SUCCESS;
    if (rc == Z_BUF_ERROR) /* This isn't fatal. */
        return VINF_SUCCESS; /** @todo The code in zip.cpp treats Z_BUF_ERROR as fatal... */

    /* Everything below (except the positive default case) is fatal, so the
       flag is set in one place after picking the status to return. */
    int rcRet;
    switch (rc)
    {
        case Z_STREAM_ERROR:
            rcRet = VERR_ZIP_CORRUPTED;
            break;

        case Z_DATA_ERROR:
            rcRet = pThis->fDecompress ? VERR_ZIP_CORRUPTED : VERR_ZIP_ERROR;
            break;

        case Z_MEM_ERROR:
            rcRet = VERR_ZIP_NO_MEMORY;
            break;

        case Z_VERSION_ERROR:
            rcRet = VERR_ZIP_UNSUPPORTED_VERSION;
            break;

        case Z_ERRNO: /* We shouldn't see this status! */
        default:
            AssertMsgFailed(("%d\n", rc));
            if (rc >= 0)
                return VINF_SUCCESS; /* positive codes aren't errors - don't flag. */
            rcRet = VERR_ZIP_ERROR;
            break;
    }
    pThis->fFatalError = true;
    return rcRet;
}
/* StorageSlot <= QString: */ template<> StorageSlot fromString<StorageSlot>(const QString &strStorageSlot) { QHash<int, QString> list; list[0] = QApplication::translate("VBoxGlobal", "IDE Primary Master", "StorageSlot"); list[1] = QApplication::translate("VBoxGlobal", "IDE Primary Slave", "StorageSlot"); list[2] = QApplication::translate("VBoxGlobal", "IDE Secondary Master", "StorageSlot"); list[3] = QApplication::translate("VBoxGlobal", "IDE Secondary Slave", "StorageSlot"); list[4] = QApplication::translate("VBoxGlobal", "SATA Port %1", "StorageSlot"); list[5] = QApplication::translate("VBoxGlobal", "SCSI Port %1", "StorageSlot"); list[6] = QApplication::translate("VBoxGlobal", "SAS Port %1", "StorageSlot"); list[7] = QApplication::translate("VBoxGlobal", "Floppy Device %1", "StorageSlot"); int index = -1; QRegExp regExp; for (int i = 0; i < list.size(); ++i) { regExp = QRegExp(i >= 0 && i <= 3 ? list[i] : list[i].arg("(\\d+)")); if (regExp.indexIn(strStorageSlot) != -1) { index = i; break; } } StorageSlot result; switch (index) { case 0: case 1: case 2: case 3: { KStorageBus bus = KStorageBus_IDE; int iMaxPort = vboxGlobal().virtualBox().GetSystemProperties().GetMaxPortCountForStorageBus(bus); int iMaxDevice = vboxGlobal().virtualBox().GetSystemProperties().GetMaxDevicesPerPortForStorageBus(bus); LONG iPort = index / iMaxPort; LONG iDevice = index % iMaxPort; if (iPort < 0 || iPort > iMaxPort) { AssertMsgFailed(("No storage slot for text='%s'", strStorageSlot.toAscii().constData())); break; } if (iDevice < 0 || iDevice > iMaxDevice) { AssertMsgFailed(("No storage slot for text='%s'", strStorageSlot.toAscii().constData())); break; } result.bus = bus; result.port = iPort; result.device = iDevice; break; } case 4: { KStorageBus bus = KStorageBus_SATA; int iMaxPort = vboxGlobal().virtualBox().GetSystemProperties().GetMaxPortCountForStorageBus(bus); LONG iPort = regExp.cap(1).toInt(); LONG iDevice = 0; if (iPort < 0 || iPort > iMaxPort) { AssertMsgFailed(("No 
storage slot for text='%s'", strStorageSlot.toAscii().constData())); break; } result.bus = bus; result.port = iPort; result.device = iDevice; break; } case 5: { KStorageBus bus = KStorageBus_SCSI; int iMaxPort = vboxGlobal().virtualBox().GetSystemProperties().GetMaxPortCountForStorageBus(bus); LONG iPort = regExp.cap(1).toInt(); LONG iDevice = 0; if (iPort < 0 || iPort > iMaxPort) { AssertMsgFailed(("No storage slot for text='%s'", strStorageSlot.toAscii().constData())); break; } result.bus = bus; result.port = iPort; result.device = iDevice; break; } case 6: { KStorageBus bus = KStorageBus_SAS; int iMaxPort = vboxGlobal().virtualBox().GetSystemProperties().GetMaxPortCountForStorageBus(bus); LONG iPort = regExp.cap(1).toInt(); LONG iDevice = 0; if (iPort < 0 || iPort > iMaxPort) { AssertMsgFailed(("No storage slot for text='%s'", strStorageSlot.toAscii().constData())); break; } result.bus = bus; result.port = iPort; result.device = iDevice; break; } case 7: { KStorageBus bus = KStorageBus_Floppy; int iMaxDevice = vboxGlobal().virtualBox().GetSystemProperties().GetMaxDevicesPerPortForStorageBus(bus); LONG iPort = 0; LONG iDevice = regExp.cap(1).toInt(); if (iDevice < 0 || iDevice > iMaxDevice) { AssertMsgFailed(("No storage slot for text='%s'", strStorageSlot.toAscii().constData())); break; } result.bus = bus; result.port = iPort; result.device = iDevice; break; } default: { AssertMsgFailed(("No storage slot for text='%s'", strStorageSlot.toAscii().constData())); break; } } return result; }
/* static */ QIcon UIIconPool::defaultIcon(UIDefaultIconType defaultIconType, const QWidget *pWidget /* = 0 */) { QIcon icon; QStyle *pStyle = pWidget ? pWidget->style() : QApplication::style(); switch (defaultIconType) { case UIDefaultIconType_MessageBoxInformation: { icon = pStyle->standardIcon(QStyle::SP_MessageBoxInformation, 0, pWidget); break; } case UIDefaultIconType_MessageBoxQuestion: { icon = pStyle->standardIcon(QStyle::SP_MessageBoxQuestion, 0, pWidget); break; } case UIDefaultIconType_MessageBoxWarning: { #ifdef Q_WS_MAC /* At least in Qt 4.3.4/4.4 RC1 SP_MessageBoxWarning is the application * icon. So change this to the critical icon. (Maybe this would be * fixed in a later Qt version) */ icon = pStyle->standardIcon(QStyle::SP_MessageBoxCritical, 0, pWidget); #else /* Q_WS_MAC */ icon = pStyle->standardIcon(QStyle::SP_MessageBoxWarning, 0, pWidget); #endif /* !Q_WS_MAC */ break; } case UIDefaultIconType_MessageBoxCritical: { icon = pStyle->standardIcon(QStyle::SP_MessageBoxCritical, 0, pWidget); break; } case UIDefaultIconType_DialogCancel: { icon = pStyle->standardIcon(QStyle::SP_DialogCancelButton, 0, pWidget); if (icon.isNull()) icon = iconSet(":/cancel_16px.png"); break; } case UIDefaultIconType_DialogHelp: { icon = pStyle->standardIcon(QStyle::SP_DialogHelpButton, 0, pWidget); if (icon.isNull()) icon = iconSet(":/help_16px.png"); break; } case UIDefaultIconType_ArrowBack: { icon = pStyle->standardIcon(QStyle::SP_ArrowBack, 0, pWidget); if (icon.isNull()) icon = iconSet(":/list_moveup_16px.png", ":/list_moveup_disabled_16px.png"); break; } case UIDefaultIconType_ArrowForward: { icon = pStyle->standardIcon(QStyle::SP_ArrowForward, 0, pWidget); if (icon.isNull()) icon = iconSet(":/list_movedown_16px.png", ":/list_movedown_disabled_16px.png"); break; } default: { AssertMsgFailed(("Unknown default icon type!")); break; } } return icon; }
/**
 * Maps a range of physical pages at a given virtual address
 * in the guest context.
 *
 * The GC virtual address range must be within an existing mapping.
 *
 * @returns VBox status code.
 * @param   pVM     The virtual machine.
 * @param   GCPtr   Where to map the page(s). Must be page aligned.
 * @param   HCPhys  Start of the range of physical pages. Must be page aligned.
 * @param   cbPages Number of bytes to map. Must be page aligned.
 * @param   fFlags  Page flags (X86_PTE_*).
 */
VMMDECL(int) PGMMap(PVM pVM, RTGCUINTPTR GCPtr, RTHCPHYS HCPhys, uint32_t cbPages, unsigned fFlags)
{
    AssertMsg(pVM->pgm.s.offVM, ("Bad init order\n"));

    /*
     * Validate input.
     */
    AssertMsg(RT_ALIGN_T(GCPtr, PAGE_SIZE, RTGCUINTPTR) == GCPtr, ("Invalid alignment GCPtr=%#x\n", GCPtr));
    AssertMsg(cbPages > 0 && RT_ALIGN_32(cbPages, PAGE_SIZE) == cbPages, ("Invalid cbPages=%#x\n", cbPages));
    AssertMsg(!(fFlags & X86_PDE_PG_MASK), ("Invalid flags %#x\n", fFlags));

    /* hypervisor defaults */
    if (!fFlags)
        fFlags = X86_PTE_P | X86_PTE_A | X86_PTE_D;

    /*
     * Find the mapping covering GCPtr by walking the (context-specific)
     * linked list of mappings.
     */
    PPGMMAPPING pCur = pVM->pgm.s.CTX_SUFF(pMappings);
    while (pCur)
    {
        /* Unsigned-wrap trick: true only when pCur->GCPtr <= GCPtr < pCur->GCPtr + cb. */
        if (GCPtr - pCur->GCPtr < pCur->cb)
        {
            /* The whole requested range must fit inside this one mapping. */
            if (GCPtr + cbPages - 1 > pCur->GCPtrLast)
            {
                AssertMsgFailed(("Invalid range!!\n"));
                return VERR_INVALID_PARAMETER;
            }

            /*
             * Setup PTE: flags in the low bits plus the (PAE-masked) physical address.
             */
            X86PTEPAE Pte;
            Pte.u = fFlags | (HCPhys & X86_PTE_PAE_PG_MASK);

            /*
             * Update the page tables, one page per iteration, keeping the
             * 32-bit and PAE shadow tables in sync.
             */
            for (;;)
            {
                /* Offset into the mapping determines which PT and which entry. */
                RTGCUINTPTR off = GCPtr - pCur->GCPtr;
                const unsigned iPT = off >> X86_PD_SHIFT;
                const unsigned iPageNo = (off >> PAGE_SHIFT) & X86_PT_MASK;

                /* 32-bit */
                pCur->aPTs[iPT].CTX_SUFF(pPT)->a[iPageNo].u = (uint32_t)Pte.u;      /* ASSUMES HCPhys < 4GB and/or that we're never gonna do 32-bit on a PAE host! */

                /* pae - 512 entries per PAE PT vs 1024 per 32-bit PT, hence the /512 and %512. */
                PGMSHWPTEPAE_SET(pCur->aPTs[iPT].CTX_SUFF(paPaePTs)[iPageNo / 512].a[iPageNo % 512], Pte.u);

                /* next page: advancing Pte.u by PAGE_SIZE bumps the physical
                   address while leaving the low flag bits untouched. */
                cbPages -= PAGE_SIZE;
                if (!cbPages)
                    break;
                GCPtr += PAGE_SIZE;
                Pte.u += PAGE_SIZE;
            }

            return VINF_SUCCESS;
        }

        /* next */
        pCur = pCur->CTX_SUFF(pNext);
    }

    AssertMsgFailed(("GCPtr=%#x was not found in any mapping ranges!\n", GCPtr));
    return VERR_INVALID_PARAMETER;
}
/**
 * Reap URBs in-flight on a device.
 *
 * Moves any freshly queued URBs into the wait arrays, then waits on all of
 * their overlapped-I/O events (plus the wakeup event) and returns the first
 * completed URB, if any.
 *
 * @returns Pointer to a completed URB.
 * @returns NULL if no URB was completed.
 * @param   pProxyDev   The device.
 * @param   cMillies    Number of milliseconds to wait. Use 0 to not
 *                      wait at all.
 */
static DECLCALLBACK(PVUSBURB) usbProxyWinUrbReap(PUSBPROXYDEV pProxyDev, RTMSINTERVAL cMillies)
{
    PPRIV_USBW32 pPriv = USBPROXYDEV_2_DATA(pProxyDev, PPRIV_USBW32);
    AssertReturn(pPriv, NULL);

    /*
     * There are some unnecessary calls, just return immediately or
     * WaitForMultipleObjects will fail.
     */
    if (   pPriv->cQueuedUrbs <= 0
        && pPriv->cPendingUrbs == 0)
    {
        if (   cMillies != 0
            && pPriv->cPendingUrbs == 0)
        {
            /* Wait for the wakeup call; the wait result is intentionally
               ignored - whatever happens we have nothing to reap. */
            DWORD cMilliesWait = cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies;
            DWORD rc = WaitForMultipleObjects(1, &pPriv->hEventWakeup, FALSE, cMilliesWait);
        }
        return NULL;
    }

again:
    /* Check for pending URBs and move them into the queued arrays. */
    if (pPriv->cPendingUrbs)
    {
        RTCritSectEnter(&pPriv->CritSect);

        /* Ensure we've got sufficient space in the arrays. */
        if (pPriv->cQueuedUrbs + pPriv->cPendingUrbs + 1 > pPriv->cAllocatedUrbs)
        {
            unsigned cNewMax = pPriv->cAllocatedUrbs + pPriv->cPendingUrbs + 1;
            void *pv = RTMemRealloc(pPriv->paHandles, sizeof(pPriv->paHandles[0]) * (cNewMax + 1)); /* One extra for the wakeup event. */
            if (!pv)
            {
                /* NOTE(review): in release builds the assertion is a no-op and
                 * execution continues, assigning the NULL below and crashing
                 * later; the '//break;' remnants suggest this used to live in
                 * a loop with proper bail-out - needs fixing. */
                AssertMsgFailed(("RTMemRealloc failed for paHandles[%d]", cNewMax));
                //break;
            }
            pPriv->paHandles = (PHANDLE)pv;

            pv = RTMemRealloc(pPriv->paQueuedUrbs, sizeof(pPriv->paQueuedUrbs[0]) * cNewMax);
            if (!pv)
            {
                AssertMsgFailed(("RTMemRealloc failed for paQueuedUrbs[%d]", cNewMax));
                //break;
            }
            pPriv->paQueuedUrbs = (PQUEUED_URB *)pv;
            pPriv->cAllocatedUrbs = cNewMax;
        }

        /* Copy the pending URBs over. */
        for (unsigned i = 0; i < pPriv->cPendingUrbs; i++)
        {
            pPriv->paHandles[pPriv->cQueuedUrbs + i] = pPriv->aPendingUrbs[i]->overlapped.hEvent;
            pPriv->paQueuedUrbs[pPriv->cQueuedUrbs + i] = pPriv->aPendingUrbs[i];
        }
        pPriv->cQueuedUrbs += pPriv->cPendingUrbs;
        pPriv->cPendingUrbs = 0;
        /* The wakeup event always sits right after the URB events, followed
           by an invalid-handle terminator. */
        pPriv->paHandles[pPriv->cQueuedUrbs] = pPriv->hEventWakeup;
        pPriv->paHandles[pPriv->cQueuedUrbs + 1] = INVALID_HANDLE_VALUE;

        RTCritSectLeave(&pPriv->CritSect);
    }

    /*
     * Wait/poll.
     *
     * ASSUMPTIONS:
     *   1. The usbProxyWinUrbReap can not be run concurrently with each other
     *      so racing the cQueuedUrbs access/modification can not occur.
     *   2. The usbProxyWinUrbReap can not be run concurrently with
     *      usbProxyWinUrbQueue so they can not race the pPriv->paHandles
     *      access/realloc.
     */
    unsigned cQueuedUrbs = ASMAtomicReadU32((volatile uint32_t *)&pPriv->cQueuedUrbs);
    DWORD cMilliesWait = cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies;
    PVUSBURB pUrb = NULL;
    DWORD rc = WaitForMultipleObjects(cQueuedUrbs + 1, pPriv->paHandles, FALSE, cMilliesWait);

    /* If the wakeup event fired return immediately. */
    if (rc == WAIT_OBJECT_0 + cQueuedUrbs)
    {
        /* ...unless new URBs arrived in the meantime, then go pick them up. */
        if (pPriv->cPendingUrbs)
            goto again;
        return NULL;
    }

    if (rc >= WAIT_OBJECT_0 && rc < WAIT_OBJECT_0 + cQueuedUrbs)
    {
        RTCritSectEnter(&pPriv->CritSect);
        unsigned iUrb = rc - WAIT_OBJECT_0;
        PQUEUED_URB pQUrbWin = pPriv->paQueuedUrbs[iUrb];
        pUrb = pQUrbWin->urb;

        /*
         * Remove it from the arrays.
         */
        cQueuedUrbs = --pPriv->cQueuedUrbs;
        if (cQueuedUrbs != iUrb)
        {
            /* Move the array forward */
            for (unsigned i=iUrb;i<cQueuedUrbs;i++)
            {
                pPriv->paHandles[i] = pPriv->paHandles[i+1];
                pPriv->paQueuedUrbs[i] = pPriv->paQueuedUrbs[i+1];
            }
        }
        /* Re-establish the wakeup-event/terminator tail. */
        pPriv->paHandles[cQueuedUrbs] = pPriv->hEventWakeup;
        pPriv->paHandles[cQueuedUrbs + 1] = INVALID_HANDLE_VALUE;
        pPriv->paQueuedUrbs[cQueuedUrbs] = NULL;
        RTCritSectLeave(&pPriv->CritSect);
        Assert(cQueuedUrbs == pPriv->cQueuedUrbs);

        /*
         * Update the urb with status and actual length from the Windows URB.
         */
        pUrb->enmStatus = usbProxyWinStatusToVUsbStatus(pQUrbWin->urbwin.error);
        pUrb->cbData = (uint32_t)pQUrbWin->urbwin.len;
        if (pUrb->enmType == VUSBXFERTYPE_ISOC)
        {
            for (unsigned i = 0; i < pUrb->cIsocPkts; ++i)
            {
                /* NB: Windows won't change the packet offsets, but the packets may
                 * be only partially filled or completely empty. */
                pUrb->aIsocPkts[i].enmStatus = usbProxyWinStatusToVUsbStatus(pQUrbWin->urbwin.aIsoPkts[i].stat);
                pUrb->aIsocPkts[i].cb = pQUrbWin->urbwin.aIsoPkts[i].cb;
            }
        }
        Log(("usbproxy: pUrb=%p (#%d) ep=%d cbData=%d status=%d cIsocPkts=%d ready\n",
             pUrb, rc - WAIT_OBJECT_0, pQUrbWin->urb->EndPt, pQUrbWin->urb->cbData, pUrb->enmStatus, pUrb->cIsocPkts));

        /* free the urb queuing structure */
        if (pQUrbWin->overlapped.hEvent != INVALID_HANDLE_VALUE)
        {
            CloseHandle(pQUrbWin->overlapped.hEvent);
            pQUrbWin->overlapped.hEvent = INVALID_HANDLE_VALUE;
        }
        RTMemFree(pQUrbWin);
    }
    else if (   rc == WAIT_FAILED
             || (rc >= WAIT_ABANDONED_0 && rc < WAIT_ABANDONED_0 + cQueuedUrbs))
        AssertMsgFailed(("USB: WaitForMultipleObjects %d objects failed with rc=%d and last error %d\n", cQueuedUrbs, rc, GetLastError()));

    return pUrb;
}
/**
 * Internal worker for the sleep scenario.
 *
 * Called owning the spinlock, returns without it.
 *
 * @returns IPRT status code.
 * @param   pThis           The mutex instance.
 * @param   cMillies        The timeout.
 * @param   fInterruptible  Whether it's interruptible
 *                          (RTSemMutexRequestNoResume) or not
 *                          (RTSemMutexRequest).
 * @param   hNativeSelf     The thread handle of the caller.
 */
static int rtR0SemMutexDarwinRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                          wait_interrupt_t fInterruptible, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Grab a reference and indicate that we're waiting.
     * The extra reference keeps the instance alive across the sleep even if
     * it gets destroyed while we're blocked.
     */
    pThis->cWaiters++;
    ASMAtomicIncU32(&pThis->cRefs);

    /*
     * Go to sleep, use the address of the mutex instance as sleep/blocking/event id.
     * lck_spin_sleep* atomically drops the spinlock while sleeping and
     * re-acquires it before returning.
     */
    wait_result_t rcWait;
    if (cMillies == RT_INDEFINITE_WAIT)
        rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
    else
    {
        /* Convert the relative millisecond timeout to an absolute mach deadline. */
        uint64_t u64AbsTime;
        nanoseconds_to_absolutetime(cMillies * UINT64_C(1000000), &u64AbsTime);
        u64AbsTime += mach_absolute_time();

        rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                         (event_t)pThis, fInterruptible, u64AbsTime);
    }

    /*
     * Translate the rc.
     */
    int rc;
    switch (rcWait)
    {
        case THREAD_AWAKENED:
            /* Woken by the signaller; check the instance wasn't destroyed and
               claim ownership. */
            if (RT_LIKELY(pThis->u32Magic == RTSEMMUTEX_MAGIC))
            {
                if (RT_LIKELY(   pThis->cRecursions == 0
                              && pThis->hNativeOwner == NIL_RTNATIVETHREAD))
                {
                    pThis->cRecursions = 1;
                    pThis->hNativeOwner = hNativeSelf;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    /* Someone else owns it even though we were woken - internal error. */
                    Assert(pThis->cRecursions == 0);
                    Assert(pThis->hNativeOwner == NIL_RTNATIVETHREAD);
                    rc = VERR_INTERNAL_ERROR_3;
                }
            }
            else
                rc = VERR_SEM_DESTROYED;
            break;

        case THREAD_TIMED_OUT:
            Assert(cMillies != RT_INDEFINITE_WAIT);
            rc = VERR_TIMEOUT;
            break;

        case THREAD_INTERRUPTED:
            Assert(fInterruptible);
            rc = VERR_INTERRUPTED;
            break;

        case THREAD_RESTART:
            /* The magic is inverted on destruction, hence the ~ check. */
            Assert(pThis->u32Magic == ~RTSEMMUTEX_MAGIC);
            rc = VERR_SEM_DESTROYED;
            break;

        default:
            AssertMsgFailed(("rcWait=%d\n", rcWait));
            rc = VERR_GENERAL_FAILURE;
            break;
    }

    /*
     * Dereference it and quit the lock.
     * If ours was the last reference the instance is freed (which also
     * disposes of the spinlock); otherwise just drop the spinlock.
     */
    Assert(pThis->cWaiters > 0);
    pThis->cWaiters--;
    Assert(pThis->cRefs > 0);
    if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);

    return rc;
}
/**
 * Extracts the path component of a file:// URI, percent-decoded and converted
 * to the requested slash convention.
 *
 * @returns Newly allocated path string (caller frees), or NULL if pszUri is
 *          not a file URI / has no path / the format is unknown.
 * @param   pszUri      The URI to parse.
 * @param   uFormat     URI_FILE_FORMAT_AUTO/UNIX/WIN.
 * @param   cchMax      Maximum number of characters of pszUri to consider.
 */
RTR3DECL(char *) RTUriFileNPath(const char *pszUri, uint32_t uFormat, size_t cchMax)
{
    AssertPtrReturn(pszUri, NULL);

    size_t iPos1;
    size_t cbLen = RT_MIN(strlen(pszUri), cchMax);

    /* Find the end of the scheme. */
    if (!rtUriFindSchemeEnd(pszUri, 0, cbLen, &iPos1))
        return NULL; /* no URI */
    else
        ++iPos1; /* Skip ':' */

    /* Check that this is a file Uri */
    if (RTStrNICmp(pszUri, "file:", iPos1) != 0)
        return NULL;

    size_t iPos2;
    size_t iPos3 = iPos1; /* Skip if no authority is found */

    /* Find the start of the authority. */
    if (rtUriCheckAuthorityStart(pszUri, iPos1, cbLen - iPos1, &iPos2))
    {
        /* Find the end of the authority. If not found, then there is no path
         * component, cause the authority is the rest of the string. */
        if (!rtUriFindAuthorityEnd(pszUri, iPos2, cbLen - iPos2, &iPos3))
            return NULL; /* no path! */
    }

    size_t iPos4;
    /* Find the start of the path */
    if (rtUriCheckPathStart(pszUri, iPos3, cbLen - iPos3, &iPos4))
    {
        uint32_t uFIntern = uFormat;
        /* Auto is based on the current OS. */
        if (uFormat == URI_FILE_FORMAT_AUTO)
#ifdef RT_OS_WINDOWS
            uFIntern = URI_FILE_FORMAT_WIN;
#else /* RT_OS_WINDOWS */
            uFIntern = URI_FILE_FORMAT_UNIX;
#endif /* !RT_OS_WINDOWS */

        /* For DOS-style results drop the leading '/' before the drive letter
         * (file:///C:/... -> C:/...). */
        if (   uFIntern != URI_FILE_FORMAT_UNIX
            && pszUri[iPos4] == '/')
            ++iPos4;

        /* Search for the end of the path. */
        size_t iPos5 = cbLen;
        rtUriFindPathEnd(pszUri, iPos4, cbLen - iPos4, &iPos5);
        if (iPos5 > iPos4) /* Length check */
        {
            /* Percent-decode the path and normalize the slashes.
             * NOTE(review): pszPath may be NULL on allocation failure here -
             * presumably the RTPathChangeTo* helpers tolerate NULL; confirm. */
            char *pszPath = rtUriPercentDecodeN(&pszUri[iPos4], iPos5 - iPos4);
            if (uFIntern == URI_FILE_FORMAT_UNIX)
                return RTPathChangeToUnixSlashes(pszPath, true);
            else if (uFIntern == URI_FILE_FORMAT_WIN)
                return RTPathChangeToDosSlashes(pszPath, true);
            else
            {
                RTStrFree(pszPath);
                AssertMsgFailed(("Unknown uri file format %u", uFIntern));
                return NULL;
            }
        }
    }
    return NULL;
}
/**
 * Process all commands currently in the buffer.
 *
 * Commands are pulled one at a time out of the circular input buffer
 * (achInput, delimited by ';' or '\n'), copied into the linear scratch
 * buffer, and handed to dbgcEvalCommand.
 *
 * @returns VBox status code. Any error indicates the termination of the console session.
 * @param   pDbgc       Debugger console instance data.
 * @param   fNoExecute  Indicates that no commands should actually be executed.
 */
static int dbgcProcessCommands(PDBGC pDbgc, bool fNoExecute)
{
    /** @todo Replace this with a sh/ksh/csh/rexx like toplevel language that
     *        allows doing function, loops, if, cases, and such. */
    int rc = VINF_SUCCESS;
    while (pDbgc->cInputLines)
    {
        /*
         * Empty the log buffer if we're hooking the log.
         */
        if (pDbgc->fLog)
        {
            rc = dbgcProcessLog(pDbgc);
            if (RT_FAILURE(rc))
                break;
        }

        /* Sanity: cInputLines promised data, but the ring buffer is empty. */
        if (pDbgc->iRead == pDbgc->iWrite)
        {
            AssertMsgFailed(("The input buffer is empty while cInputLines=%d!\n", pDbgc->cInputLines));
            pDbgc->cInputLines = 0;
            return 0;
        }

        /*
         * Copy the command to the parse buffer.
         * Copies byte-wise from the ring buffer until a ';' or '\n'
         * terminator, wrapping at the end of achInput.
         */
        char ch;
        char *psz = &pDbgc->achInput[pDbgc->iRead];
        char *pszTrg = &pDbgc->achScratch[0];
        while ((*pszTrg = ch = *psz++) != ';' && ch != '\n' )
        {
            /* Wrap the read cursor at the physical end of the ring buffer. */
            if (psz == &pDbgc->achInput[sizeof(pDbgc->achInput)])
                psz = &pDbgc->achInput[0];

            /* Ran into the write cursor without finding a terminator. */
            if (psz == &pDbgc->achInput[pDbgc->iWrite])
            {
                AssertMsgFailed(("The buffer contains no commands while cInputLines=%d!\n", pDbgc->cInputLines));
                pDbgc->cInputLines = 0;
                return 0;
            }

            pszTrg++;
        }
        *pszTrg = '\0'; /* The terminator char is overwritten with '\0'. */

        /*
         * Advance the buffer.
         */
        pDbgc->iRead = psz - &pDbgc->achInput[0];
        if (ch == '\n')
            pDbgc->cInputLines--;

        /*
         * Parse and execute this command.
         * The rest of the scratch buffer is made available to the evaluator.
         */
        pDbgc->pszScratch = pszTrg + 1;
        pDbgc->iArg = 0;
        rc = dbgcEvalCommand(pDbgc, &pDbgc->achScratch[0], pszTrg - &pDbgc->achScratch[0] - 1, fNoExecute);
        if (   rc == VERR_DBGC_QUIT
            || rc == VWRN_DBGC_CMD_PENDING)
            break;
        rc = VINF_SUCCESS; /* ignore other statuses */
    }

    return rc;
}
/**
 * Resolves a symbol (or tries to do so at least).
 *
 * Resolution order: built-in symbols, guest CPU registers, (eventually PDM,)
 * then the debug info manager.
 *
 * @returns 0 on success.
 * @returns VBox status on failure.
 * @param   pDbgc       The debug console instance.
 * @param   pszSymbol   The symbol name.
 * @param   enmType     The result type.  Specifying DBGCVAR_TYPE_GC_FAR may
 *                      cause failure, avoid it.
 * @param   pResult     Where to store the result.
 */
int dbgcSymbolGet(PDBGC pDbgc, const char *pszSymbol, DBGCVARTYPE enmType, PDBGCVAR pResult)
{
    int rc;

    /*
     * Builtin?
     */
    PCDBGCSYM pSymDesc = dbgcLookupRegisterSymbol(pDbgc, pszSymbol);
    if (pSymDesc)
    {
        if (!pSymDesc->pfnGet)
            return VERR_DBGC_PARSE_WRITEONLY_SYMBOL;
        return pSymDesc->pfnGet(pSymDesc, &pDbgc->CmdHlp, enmType, pResult);
    }

    /*
     * A typical register? (Guest only)
     *
     * Membership is tested with strstr against ';'-delimited tables, relying
     * on the exact-length pre-check below so e.g. "ax" can't match inside
     * "eax" (the 2-letter table is only consulted for 2-char symbols).
     */
    static const char s_szSixLetterRegisters[] =
        "rflags;eflags;"
    ;
    static const char s_szThreeLetterRegisters[] =
        "eax;rax;"     "r10;"  "r8d;r8w;r8b;"  "cr0;"  "dr0;"
        "ebx;rbx;"     "r11;"  "r9d;r9w;r9b;"          "dr1;" /* fixed: was a duplicated "r8b", so "r9b" never resolved */
        "ecx;rcx;"     "r12;"  "cr2;"  "dr2;"
        "edx;rdx;"     "r13;"  "cr3;"  "dr3;"
        "edi;rdi;dil;" "r14;"  "cr4;"  "dr4;"
        "esi;rsi;sil;" "r15;"  "cr8;"
        "ebp;rbp;"
        "esp;rsp;"             "dr6;"
        "rip;eip;"             "dr7;"
        "efl;"
    ;
    static const char s_szTwoLetterRegisters[] =
        "ax;al;ah;"    "r8;"
        "bx;bl;bh;"    "r9;"
        "cx;cl;ch;"    "cs;"
        "dx;dl;dh;"    "ds;"
        "di;"          "es;"
        "si;"          "fs;"
        "bp;"          "gs;"
        "sp;"          "ss;"
        "ip;"
    ;
    size_t const cchSymbol = strlen(pszSymbol);
    if (   (cchSymbol == 2 && strstr(s_szTwoLetterRegisters,   pszSymbol))
        || (cchSymbol == 3 && strstr(s_szThreeLetterRegisters, pszSymbol))
        || (cchSymbol == 6 && strstr(s_szSixLetterRegisters,   pszSymbol)))
    {
        /* Reject symbols containing the table delimiter themselves. */
        if (!strchr(pszSymbol, ';'))
        {
            DBGCVAR Var;
            DBGCVAR_INIT_SYMBOL(&Var, pszSymbol);

            rc = dbgcOpRegister(pDbgc, &Var, DBGCVAR_CAT_ANY, pResult);
            if (RT_SUCCESS(rc))
                return DBGCCmdHlpConvert(&pDbgc->CmdHlp, pResult, enmType, false /*fConvSyms*/, pResult);
        }
    }

    /*
     * Ask PDM.
     */
    /** @todo resolve symbols using PDM. */

    /*
     * Ask the debug info manager.
     */
    RTDBGSYMBOL Symbol;
    rc = DBGFR3AsSymbolByName(pDbgc->pVM, pDbgc->hDbgAs, pszSymbol, &Symbol, NULL);
    if (RT_SUCCESS(rc))
    {
        /*
         * Default return is a flat gc address.
         */
        DBGCVAR_INIT_GC_FLAT(pResult, Symbol.Value);
        if (Symbol.cb)
            DBGCVAR_SET_RANGE(pResult, DBGCVAR_RANGE_BYTES, Symbol.cb);

        switch (enmType)
        {
            /* nothing to do. */
            case DBGCVAR_TYPE_GC_FLAT:
            case DBGCVAR_TYPE_ANY:
                return VINF_SUCCESS;

            /* impossible at the moment. */
            case DBGCVAR_TYPE_GC_FAR:
                return VERR_DBGC_PARSE_CONVERSION_FAILED;

            /* simply make it numeric. */
            case DBGCVAR_TYPE_NUMBER:
                pResult->enmType = DBGCVAR_TYPE_NUMBER;
                pResult->u.u64Number = Symbol.Value;
                return VINF_SUCCESS;

            /* cast it. */
            case DBGCVAR_TYPE_GC_PHYS:
            case DBGCVAR_TYPE_HC_FLAT:
            case DBGCVAR_TYPE_HC_PHYS:
                return DBGCCmdHlpConvert(&pDbgc->CmdHlp, pResult, enmType, false /*fConvSyms*/, pResult);

            default:
                AssertMsgFailed(("Internal error enmType=%d\n", enmType));
                return VERR_INVALID_PARAMETER;
        }
    }

    return VERR_DBGC_PARSE_NOT_IMPLEMENTED;
}
/**
 * Queries file system object information for the given path.
 *
 * @returns IPRT status code.
 * @param   pszPath                 The path to query (must be non-empty).
 * @param   pObjInfo                Where to return the object info.
 * @param   enmAdditionalAttribs    Which additional attributes to fetch.
 * @param   fFlags                  RTPATH_F_FOLLOW_LINK / RTPATH_F_ON_LINK.
 */
RTR3DECL(int) RTPathQueryInfoEx(const char *pszPath, PRTFSOBJINFO pObjInfo, RTFSOBJATTRADD enmAdditionalAttribs, uint32_t fFlags)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pszPath, VERR_INVALID_POINTER);
    AssertReturn(*pszPath, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pObjInfo, VERR_INVALID_POINTER);
    AssertMsgReturn(    enmAdditionalAttribs >= RTFSOBJATTRADD_NOTHING
                    &&  enmAdditionalAttribs <= RTFSOBJATTRADD_LAST,
                    ("Invalid enmAdditionalAttribs=%p\n", enmAdditionalAttribs),
                    VERR_INVALID_PARAMETER);
    AssertMsgReturn(RTPATH_F_IS_VALID(fFlags, 0), ("%#x\n", fFlags), VERR_INVALID_PARAMETER);

    /*
     * Convert the filename.
     */
    char const *pszNativePath;
    int rc = rtPathToNative(&pszNativePath, pszPath, NULL);
    if (RT_SUCCESS(rc))
    {
        struct stat Stat;
        if (fFlags & RTPATH_F_FOLLOW_LINK)
            rc = stat(pszNativePath, &Stat);
        else
            rc = lstat(pszNativePath, &Stat); /** @todo who doesn't have lstat again? */
        if (!rc)
        {
            rtFsConvertStatToObjInfo(pObjInfo, &Stat, pszPath, 0);
            switch (enmAdditionalAttribs)
            {
                case RTFSOBJATTRADD_NOTHING:
                case RTFSOBJATTRADD_UNIX:
                    /* The convert call above already filled in the UNIX attribs. */
                    Assert(pObjInfo->Attr.enmAdditional == RTFSOBJATTRADD_UNIX);
                    break;

                case RTFSOBJATTRADD_UNIX_OWNER:
                    rtFsObjInfoAttrSetUnixOwner(pObjInfo, Stat.st_uid);
                    break;

                case RTFSOBJATTRADD_UNIX_GROUP:
                    rtFsObjInfoAttrSetUnixGroup(pObjInfo, Stat.st_gid);
                    break;

                case RTFSOBJATTRADD_EASIZE:
                    /** @todo Use SGI extended attribute interface to query EA info. */
                    pObjInfo->Attr.enmAdditional = RTFSOBJATTRADD_EASIZE;
                    pObjInfo->Attr.u.EASize.cb = 0;
                    break;

                default:
                    AssertMsgFailed(("Impossible!\n"));
                    /* Don't return here: pszNativePath must be freed below.
                       (The old code returned directly and leaked it.) */
                    rc = VERR_INTERNAL_ERROR;
                    break;
            }
        }
        else
            rc = RTErrConvertFromErrno(errno);
        rtPathFreeNative(pszNativePath, pszPath);
    }

    LogFlow(("RTPathQueryInfoEx(%p:{%s}, pObjInfo=%p, %d): returns %Rrc\n",
             pszPath, pszPath, pObjInfo, enmAdditionalAttribs, rc));
    return rc;
}
/**
 * Returns a human-readable name for the given native (platform) key code.
 * Platform-specific: Win32 uses GetKeyNameText, X11 uses XKeysymToString
 * (with an override table), Mac maps modifier masks to glyphs.
 */
QString UINativeHotKey::toString(int iKeyCode)
{
    QString strKeyName;

#ifdef Q_WS_WIN
    /* MapVirtualKey doesn't distinguish between right and left vkeys,
     * even under XP, despite that it stated in MSDN. Do it by hands.
     * Besides that it can't recognize such virtual keys as
     * VK_DIVIDE & VK_PAUSE, this is also known bug. */
    int iScan;
    switch (iKeyCode)
    {
        /* Processing special keys... (hard-coded scan codes; bit 24 marks the
         * extended-key flag GetKeyNameText expects) */
        case VK_PAUSE: iScan = 0x45 << 16; break;
        case VK_RSHIFT: iScan = 0x36 << 16; break;
        case VK_RCONTROL: iScan = (0x1D << 16) | (1 << 24); break;
        case VK_RMENU: iScan = (0x38 << 16) | (1 << 24); break;
        /* Processing extended keys... */
        case VK_APPS:
        case VK_LWIN:
        case VK_RWIN:
        case VK_NUMLOCK: iScan = (::MapVirtualKey(iKeyCode, 0) | 256) << 16; break;
        default: iScan = ::MapVirtualKey(iKeyCode, 0) << 16;
    }
    TCHAR *pKeyName = new TCHAR[256];
    if (::GetKeyNameText(iScan, pKeyName, 256))
    {
        strKeyName = QString::fromUtf16(pKeyName);
    }
    else
    {
        AssertMsgFailed(("That key have no name!\n"));
        strKeyName = UIHostComboEditor::tr("<key_%1>").arg(iKeyCode);
    }
    delete[] pKeyName;
#endif /* Q_WS_WIN */

#ifdef Q_WS_X11
    /* Prefer the override name from m_keyNames, fall back to the raw keysym name. */
    if (char *pNativeKeyName = ::XKeysymToString((KeySym)iKeyCode))
    {
        strKeyName = m_keyNames[pNativeKeyName].isEmpty() ?
                     QString(pNativeKeyName) : m_keyNames[pNativeKeyName];
    }
    else
    {
        AssertMsgFailed(("That key have no name!\n"));
        strKeyName = UIHostComboEditor::tr("<key_%1>").arg(iKeyCode);
    }
#endif /* Q_WS_X11 */

#ifdef Q_WS_MAC
    /* First switch: "Left "/"Right " prefix; unknown masks bail out with an
     * empty result, so the second switch needs no default. */
    UInt32 modMask = DarwinKeyCodeToDarwinModifierMask(iKeyCode);
    switch (modMask)
    {
        case shiftKey:
        case optionKey:
        case controlKey:
        case cmdKey:
            strKeyName = UIHostComboEditor::tr("Left ");
            break;
        case rightShiftKey:
        case rightOptionKey:
        case rightControlKey:
        case kEventKeyModifierRightCmdKeyMask:
            strKeyName = UIHostComboEditor::tr("Right ");
            break;
        default:
            AssertMsgFailedReturn(("modMask=%#x\n", modMask), QString());
    }
    /* Second switch: append the modifier glyph. */
    switch (modMask)
    {
        case shiftKey:
        case rightShiftKey:
            strKeyName += QChar(kShiftUnicode);
            break;
        case optionKey:
        case rightOptionKey:
            strKeyName += QChar(kOptionUnicode);
            break;
        case controlKey:
        case rightControlKey:
            strKeyName += QChar(kControlUnicode);
            break;
        case cmdKey:
        case kEventKeyModifierRightCmdKeyMask:
            strKeyName += QChar(kCommandUnicode);
            break;
    }
#endif /* Q_WS_MAC */

    return strKeyName;
}
/**
 * The slow case for SUPReadTsc where we need to apply deltas.
 *
 * Must only be called when deltas are applicable, so please do not call it
 * directly.
 *
 * @returns TSC with delta applied.
 * @param   pGip    Pointer to the GIP.
 *
 * @remarks May be called with interrupts disabled in ring-0!  This is why the
 *          ring-0 code doesn't attempt to figure the delta.
 *
 * @internal
 */
SUPDECL(uint64_t) SUPReadTscWithDelta(PSUPGLOBALINFOPAGE pGip)
{
    uint64_t uTsc;
    uint16_t iGipCpu;
    AssertCompile(RT_IS_POWER_OF_TWO(RTCPUSET_MAX_CPUS));
    AssertCompile(RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx) >= RTCPUSET_MAX_CPUS);
    Assert(pGip->enmUseTscDelta > SUPGIPUSETSCDELTA_PRACTICALLY_ZERO);

    /*
     * Read the TSC and get the corresponding aCPUs index.
     * The tricky part is reading the TSC and figuring which CPU it came from
     * ATOMICALLY (we may be preempted/migrated between instructions, hence
     * the verify-and-retry loops below).
     */
#ifdef IN_RING3
    if (pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS)
    {
        /* RDTSCP gives us all we need, no loops/cli. */
        uint32_t iCpuSet;
        uTsc    = ASMReadTscWithAux(&iCpuSet);
        iCpuSet &= RTCPUSET_MAX_CPUS - 1;
        iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    }
    else if (pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS)
    {
        /* Storing the IDTR is normally very quick, but we need to loop.
           NOTE(review): the IDT limit presumably encodes the CPU set index
           (limit = base + 256 descriptors * index) - confirm against the
           SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS setup code. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint16_t cbLim = ASMGetIdtrLimit();
            uTsc = ASMReadTSC();
            /* Same limit before and after => we weren't migrated mid-sequence. */
            if (RT_LIKELY(ASMGetIdtrLimit() == cbLim))
            {
                uint16_t iCpuSet = cbLim - 256 * (ARCH_BITS == 64 ? 16 : 8);
                iCpuSet &= RTCPUSET_MAX_CPUS - 1;
                iGipCpu  = pGip->aiCpuFromCpuSetIdx[iCpuSet];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
    else
    {
        /* Get APIC ID via the slow CPUID instruction, requires looping. */
        uint32_t cTries = 0;
        for (;;)
        {
            uint8_t idApic = ASMGetApicId();
            uTsc = ASMReadTSC();
            /* Same APIC ID before and after => no migration in between. */
            if (RT_LIKELY(ASMGetApicId() == idApic))
            {
                iGipCpu = pGip->aiCpuFromApicId[idApic];
                break;
            }
            if (cTries >= 16)
            {
                iGipCpu = UINT16_MAX;
                break;
            }
            cTries++;
        }
    }
#elif defined(IN_RING0)
    /* Ring-0: Use RTMpCpuId(), no loops (interrupts disabled prevent migration). */
    RTCCUINTREG uFlags = ASMIntDisableFlags();
    int iCpuSet = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (RT_LIKELY((unsigned)iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);

# elif defined(IN_RC)
    /* Raw-mode context: We can get the host CPU set index via VMCPU, no loops. */
    RTCCUINTREG uFlags = ASMIntDisableFlags(); /* Are already disable, but play safe. */
    uint32_t iCpuSet = VMMGetCpu(&g_VM)->iHostCpuSet;
    if (RT_LIKELY(iCpuSet < RT_ELEMENTS(pGip->aiCpuFromCpuSetIdx)))
        iGipCpu = pGip->aiCpuFromCpuSetIdx[iCpuSet];
    else
        iGipCpu = UINT16_MAX;
    uTsc = ASMReadTSC();
    ASMSetFlags(uFlags);
#else
# error "IN_RING3, IN_RC or IN_RING0 must be defined!"
#endif

    /*
     * If the delta is valid, apply it.
     */
    if (RT_LIKELY(iGipCpu < pGip->cCpus))
    {
        int64_t iTscDelta = pGip->aCPUs[iGipCpu].i64TSCDelta;
        if (RT_LIKELY(iTscDelta != INT64_MAX))
            return uTsc - iTscDelta;

# ifdef IN_RING3
        /*
         * The delta needs calculating, call supdrv to get the TSC.
         */
        int rc = SUPR3ReadTsc(&uTsc, NULL);
        if (RT_SUCCESS(rc))
            return uTsc;
        AssertMsgFailed(("SUPR3ReadTsc -> %Rrc\n", rc));
        uTsc = ASMReadTSC();
# endif /* IN_RING3 */
    }

    /*
     * This shouldn't happen, especially not in ring-3 and raw-mode context.
     * But if it does, return something that's half useful.
     */
    AssertMsgFailed(("iGipCpu=%d (%#x) cCpus=%d fGetGipCpu=%#x\n", iGipCpu, iGipCpu, pGip->cCpus, pGip->fGetGipCpu));
    return uTsc;
}
/**
 * Converts a Darwin error code (Mach kern_return_t, I/O Kit IOReturn, or
 * CoreFoundation COM-style HRESULT) to an IPRT status code.
 *
 * @returns IPRT status code.
 * @param   iNativeCode     The native Darwin error code.
 */
RTDECL(int) RTErrConvertFromDarwin(int iNativeCode)
{
    /*
     * 'optimized' success case.
     */
    if (iNativeCode == KERN_SUCCESS)
        return VINF_SUCCESS;

    switch (iNativeCode)
    {
        /*
         * Mach.
         */
        case KERN_INVALID_ADDRESS:      return VERR_INVALID_POINTER;
        //case KERN_PROTECTION_FAILURE:
        //case KERN_NO_SPACE:
        case KERN_INVALID_ARGUMENT:     return VERR_INVALID_PARAMETER;
        //case KERN_FAILURE:
        //case KERN_RESOURCE_SHORTAGE:
        //case KERN_NOT_RECEIVER:
        case KERN_NO_ACCESS:            return VERR_ACCESS_DENIED;
        //case KERN_MEMORY_FAILURE:
        //case KERN_MEMORY_ERROR:
        //case KERN_ALREADY_IN_SET:
        //case KERN_NOT_IN_SET:
        //case KERN_NAME_EXISTS:
        //case KERN_ABORTED:
        //case KERN_INVALID_NAME:
        //case KERN_INVALID_TASK:
        //case KERN_INVALID_RIGHT:
        //case KERN_INVALID_VALUE:
        //case KERN_UREFS_OVERFLOW:
        //case KERN_INVALID_CAPABILITY:
        //case KERN_RIGHT_EXISTS:
        //case KERN_INVALID_HOST:
        //case KERN_MEMORY_PRESENT:
        //case KERN_MEMORY_DATA_MOVED:
        //case KERN_MEMORY_RESTART_COPY:
        //case KERN_INVALID_PROCESSOR_SET:
        //case KERN_POLICY_LIMIT:
        //case KERN_INVALID_POLICY:
        //case KERN_INVALID_OBJECT:
        //case KERN_ALREADY_WAITING:
        //case KERN_DEFAULT_SET:
        //case KERN_EXCEPTION_PROTECTED:
        //case KERN_INVALID_LEDGER:
        //case KERN_INVALID_MEMORY_CONTROL:
        //case KERN_INVALID_SECURITY:
        //case KERN_NOT_DEPRESSED:
        //case KERN_TERMINATED:
        //case KERN_LOCK_SET_DESTROYED:
        //case KERN_LOCK_UNSTABLE:
        case KERN_LOCK_OWNED:           return VERR_SEM_BUSY;
        //case KERN_LOCK_OWNED_SELF:
        case KERN_SEMAPHORE_DESTROYED:  return VERR_SEM_DESTROYED;
        //case KERN_RPC_SERVER_TERMINATED:
        //case KERN_RPC_TERMINATE_ORPHAN:
        //case KERN_RPC_CONTINUE_ORPHAN:
        case KERN_NOT_SUPPORTED:        return VERR_NOT_SUPPORTED;
        //case KERN_NODE_DOWN:
        //case KERN_NOT_WAITING:
        case KERN_OPERATION_TIMED_OUT:  return VERR_TIMEOUT;

        /*
         * I/O Kit.
         */
        case kIOReturnNoDevice:         return VERR_IO_BAD_UNIT;
        case kIOReturnUnsupported:      return VERR_NOT_SUPPORTED;
        case kIOReturnInternalError:    return VERR_INTERNAL_ERROR;
        case kIOReturnNoResources:      return VERR_OUT_OF_RESOURCES;
        case kIOReturnBadArgument:      return VERR_INVALID_PARAMETER;
        case kIOReturnCannotWire:       return VERR_LOCK_FAILED;

#ifdef IN_RING3
        /*
         * CoreFoundation COM (may overlap with I/O Kit and Mach).
         */
        default:
            /* Bug fix: the upper bound used to read '>= 0x8000FFFFU', which
               inverted the range check - it forwarded arbitrary large codes
               and rejected almost the entire 0x8000xxxx COM range. */
            if (   (unsigned)iNativeCode >= 0x80000000U
                && (unsigned)iNativeCode <= 0x8000FFFFU)
                return RTErrConvertFromDarwinCOM(iNativeCode);
            break;
#endif /* IN_RING3 */
    }

    /* unknown error. */
    AssertMsgFailed(("Unhandled error %#x\n", iNativeCode));
    return VERR_UNRESOLVED_ERROR;
}
/**
 * Allocates a page from the page pool.
 *
 * @returns Pointer to allocated page(s).
 * @returns NULL on failure.
 * @param   pPool   Pointer to the page pool.
 * @thread  The Emulation Thread.
 */
DECLINLINE(void *) mmR3PagePoolAlloc(PMMPAGEPOOL pPool)
{
    VM_ASSERT_EMT(pPool->pVM);
    STAM_COUNTER_INC(&pPool->cAllocCalls);

    /*
     * Walk free list.
     */
    if (pPool->pHeadFree)
    {
        PMMPAGESUBPOOL pSub = pPool->pHeadFree;
        /* decrement free count and unlink if no more free entries. */
        if (!--pSub->cPagesFree)
            pPool->pHeadFree = pSub->pNextFree;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages--;
#endif
        /* find free spot in bitmap. */
#ifdef USE_INLINE_ASM_BIT_OPS
        const int iPage = ASMBitFirstClear(pSub->auBitmap, pSub->cPages);
        if (iPage >= 0)
        {
            Assert(!ASMBitTest(pSub->auBitmap, iPage));
            ASMBitSet(pSub->auBitmap, iPage);
            /* Clear bit = free page; page address is pvPages + iPage pages. */
            return (uint8_t *)pSub->pvPages + PAGE_SIZE * iPage;
        }
#else
        /* Portable fallback: scan the bitmap one word at a time for a zero bit. */
        /* NOTE(review): puEnd is computed with sizeof(pSub->auBitmap) (whole
           array) while the allocation below uses sizeof(pSub->auBitmap[0])
           (one element) - looks inconsistent; verify against upstream. */
        unsigned *pu = &pSub->auBitmap[0];
        unsigned *puEnd = &pSub->auBitmap[pSub->cPages / (sizeof(pSub->auBitmap) * 8)];
        while (pu < puEnd)
        {
            unsigned u;
            if ((u = *pu) != ~0U)
            {
                unsigned iBit = 0;
                unsigned uMask = 1;
                while (iBit < sizeof(pSub->auBitmap[0]) * 8)
                {
                    if (!(u & uMask))
                    {
                        *pu |= uMask;
                        /* Page index = bit position + 8 * byte offset into the bitmap. */
                        return (uint8_t *)pSub->pvPages
                            + PAGE_SIZE * (iBit + ((uint8_t *)pu - (uint8_t *)&pSub->auBitmap[0]) * 8);
                    }
                    iBit++;
                    uMask <<= 1;
                }
                STAM_COUNTER_INC(&pPool->cErrors);
                AssertMsgFailed(("how odd, expected to find a free bit in %#x, but didn't\n", u));
            }
            /* next */
            pu++;
        }
#endif
        /* Bitmap scan failed even though cPagesFree said there was room:
           count the error and undo the free-count bookkeeping above. */
        STAM_COUNTER_INC(&pPool->cErrors);
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages++;
#endif
        AssertMsgFailed(("how strange, expected to find a free bit in %p, but didn't (%d pages supposed to be free!)\n", pSub, pSub->cPagesFree + 1));
    }

    /*
     * Allocate new subpool.
     *
     * Layout of the hyper-heap block: MMPAGESUBPOOL header with inline bitmap,
     * then cPages SUPPAGE descriptors, then cPages physical lookup records,
     * then one virtual lookup record.
     */
    unsigned        cPages = !pPool->fLow ? 128 : 32;
    PMMPAGESUBPOOL  pSub;
    int rc = MMHyperAlloc(pPool->pVM,
                          RT_OFFSETOF(MMPAGESUBPOOL, auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)])
                          + (sizeof(SUPPAGE) + sizeof(MMPPLOOKUPHCPHYS)) * cPages
                          + sizeof(MMPPLOOKUPHCPTR),
                          0,
                          MM_TAG_MM_PAGE,
                          (void **)&pSub);
    if (RT_FAILURE(rc))
        return NULL;
    PSUPPAGE paPhysPages = (PSUPPAGE)&pSub->auBitmap[cPages / (sizeof(pSub->auBitmap[0]) * 8)];
    Assert((uintptr_t)paPhysPages >= (uintptr_t)&pSub->auBitmap[1]);
    if (!pPool->fLow)
    {
        rc = SUPR3PageAllocEx(cPages, 0 /* fFlags */, &pSub->pvPages, NULL, paPhysPages);
        if (RT_FAILURE(rc))
            rc = VMSetError(pPool->pVM, rc, RT_SRC_POS,
                            N_("Failed to lock host %zd bytes of memory (out of memory)"),
                            (size_t)cPages << PAGE_SHIFT);
    }
    else
        /* Low pool needs pages below 4GB for devices with 32-bit DMA. */
        rc = SUPR3LowAlloc(cPages, &pSub->pvPages, NULL, paPhysPages);
    if (RT_SUCCESS(rc))
    {
        /*
         * Setup the sub structure and allocate the requested page.
         */
        pSub->cPages    = cPages;
        pSub->cPagesFree= cPages - 1;           /* first page handed out below */
        pSub->paPhysPages = paPhysPages;
        memset(pSub->auBitmap, 0, cPages / 8);
        /* allocate first page. */
        pSub->auBitmap[0] |= 1;
        /* link into free chain. */
        pSub->pNextFree = pPool->pHeadFree;
        pPool->pHeadFree= pSub;
        /* link into main chain. */
        pSub->pNext     = pPool->pHead;
        pPool->pHead    = pSub;
        /* update pool statistics. */
        pPool->cSubPools++;
        pPool->cPages  += cPages;
#ifdef VBOX_WITH_STATISTICS
        pPool->cFreePages += cPages - 1;
#endif

        /*
         * Initialize the physical pages with backpointer to subpool.
         */
        unsigned i = cPages;
        while (i-- > 0)
        {
            AssertMsg(paPhysPages[i].Phys && !(paPhysPages[i].Phys & PAGE_OFFSET_MASK),
                      ("i=%d Phys=%d\n", i, paPhysPages[i].Phys));
            paPhysPages[i].uReserved = (RTHCUINTPTR)pSub;
        }

        /*
         * Initialize the physical lookup record with backpointers to the physical pages.
         */
        PMMPPLOOKUPHCPHYS paLookupPhys = (PMMPPLOOKUPHCPHYS)&paPhysPages[cPages];
        i = cPages;
        while (i-- > 0)
        {
            paLookupPhys[i].pPhysPage = &paPhysPages[i];
            paLookupPhys[i].Core.Key = paPhysPages[i].Phys;
            RTAvlHCPhysInsert(&pPool->pLookupPhys, &paLookupPhys[i].Core);
        }

        /*
         * And the one record for virtual memory lookup.
         */
        PMMPPLOOKUPHCPTR pLookupVirt = (PMMPPLOOKUPHCPTR)&paLookupPhys[cPages];
        pLookupVirt->pSubPool = pSub;
        pLookupVirt->Core.Key = pSub->pvPages;
        RTAvlPVInsert(&pPool->pLookupVirt, &pLookupVirt->Core);

        /* return allocated page (first). */
        return pSub->pvPages;
    }

    /* Page backing allocation failed: release the hyper-heap block again. */
    MMHyperFree(pPool->pVM, pSub);
    STAM_COUNTER_INC(&pPool->cErrors);
    if (pPool->fLow)
        VMSetError(pPool->pVM, rc, RT_SRC_POS,
                   N_("Failed to expand page pool for memory below 4GB. Current size: %d pages"),
                   pPool->cPages);
    AssertMsgFailed(("Failed to expand pool%s. rc=%Rrc poolsize=%d\n", pPool->fLow ? " (<4GB)" : "", rc, pPool->cPages));
    return NULL;
}
static int drvHostALSAAudioALSAToFmt(snd_pcm_format_t fmt, PDMAUDIOFMT *pFmt, PDMAUDIOENDIANNESS *pEndianness) { AssertPtrReturn(pFmt, VERR_INVALID_POINTER); /* pEndianness is optional. */ switch (fmt) { case SND_PCM_FORMAT_S8: *pFmt = AUD_FMT_S8; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_LITTLE; break; case SND_PCM_FORMAT_U8: *pFmt = AUD_FMT_U8; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_LITTLE; break; case SND_PCM_FORMAT_S16_LE: *pFmt = AUD_FMT_S16; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_LITTLE; break; case SND_PCM_FORMAT_U16_LE: *pFmt = AUD_FMT_U16; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_LITTLE; break; case SND_PCM_FORMAT_S16_BE: *pFmt = AUD_FMT_S16; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_BIG; break; case SND_PCM_FORMAT_U16_BE: *pFmt = AUD_FMT_U16; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_BIG; break; case SND_PCM_FORMAT_S32_LE: *pFmt = AUD_FMT_S32; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_LITTLE; break; case SND_PCM_FORMAT_U32_LE: *pFmt = AUD_FMT_U32; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_LITTLE; break; case SND_PCM_FORMAT_S32_BE: *pFmt = AUD_FMT_S32; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_BIG; break; case SND_PCM_FORMAT_U32_BE: *pFmt = AUD_FMT_U32; if (pEndianness) *pEndianness = PDMAUDIOENDIANNESS_BIG; break; default: AssertMsgFailed(("Format %ld not supported\n", fmt)); return VERR_NOT_SUPPORTED; } return VINF_SUCCESS; }
/**
 * @interface_method_impl{PDMDRVREG,pfnConstruct}
 *
 * Wires up the main/VMM-device glue driver: installs the connector callback
 * table, resolves the VMMDev (and optionally HGCM) port interfaces from the
 * device above, fetches the VMMDev object pointer from the CFGM "Object" key,
 * and - when HGCM is compiled in - loads the shared folders service and
 * registers the HGCM saved-state handlers.
 */
DECLCALLBACK(int) VMMDev::drvConstruct(PPDMDRVINS pDrvIns, PCFGMNODE pCfgHandle, uint32_t fFlags)
{
    PDMDRV_CHECK_VERSIONS_RETURN(pDrvIns);
    PDRVMAINVMMDEV pThis = PDMINS_2_DATA(pDrvIns, PDRVMAINVMMDEV);
    /* NOTE(review): the "Keyboard::" prefix below looks like a copy/paste
       leftover from another driver - runtime string, left untouched here. */
    LogFlow(("Keyboard::drvConstruct: iInstance=%d\n", pDrvIns->iInstance));

    /*
     * Validate configuration.
     */
    if (!CFGMR3AreValuesValid(pCfgHandle, "Object\0"))
        return VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES;
    /* This driver sits at the bottom of its chain; nothing may attach below it. */
    AssertMsgReturn(PDMDrvHlpNoAttach(pDrvIns) == VERR_PDM_NO_ATTACHED_DRIVER,
                    ("Configuration error: Not possible to attach anything to this driver!\n"),
                    VERR_PDM_DRVINS_NO_ATTACH);

    /*
     * IBase.
     */
    pDrvIns->IBase.pfnQueryInterface = VMMDev::drvQueryInterface;

    /* Connector callback table consumed by the VMMDev device below. */
    pThis->Connector.pfnUpdateGuestStatus = vmmdevUpdateGuestStatus;
    pThis->Connector.pfnUpdateGuestUserState = vmmdevUpdateGuestUserState;
    pThis->Connector.pfnUpdateGuestInfo = vmmdevUpdateGuestInfo;
    pThis->Connector.pfnUpdateGuestInfo2 = vmmdevUpdateGuestInfo2;
    pThis->Connector.pfnUpdateGuestCapabilities = vmmdevUpdateGuestCapabilities;
    pThis->Connector.pfnUpdateMouseCapabilities = vmmdevUpdateMouseCapabilities;
    pThis->Connector.pfnUpdatePointerShape = vmmdevUpdatePointerShape;
    pThis->Connector.pfnVideoAccelEnable = iface_VideoAccelEnable;
    pThis->Connector.pfnVideoAccelFlush = iface_VideoAccelFlush;
    pThis->Connector.pfnVideoModeSupported = vmmdevVideoModeSupported;
    pThis->Connector.pfnGetHeightReduction = vmmdevGetHeightReduction;
    pThis->Connector.pfnSetCredentialsJudgementResult = vmmdevSetCredentialsJudgementResult;
    pThis->Connector.pfnSetVisibleRegion = vmmdevSetVisibleRegion;
    pThis->Connector.pfnQueryVisibleRegion = vmmdevQueryVisibleRegion;
    pThis->Connector.pfnReportStatistics = vmmdevReportStatistics;
    pThis->Connector.pfnQueryStatisticsInterval = vmmdevQueryStatisticsInterval;
    pThis->Connector.pfnQueryBalloonSize = vmmdevQueryBalloonSize;
    pThis->Connector.pfnIsPageFusionEnabled = vmmdevIsPageFusionEnabled;
#ifdef VBOX_WITH_HGCM
    pThis->HGCMConnector.pfnConnect = iface_hgcmConnect;
    pThis->HGCMConnector.pfnDisconnect = iface_hgcmDisconnect;
    pThis->HGCMConnector.pfnCall = iface_hgcmCall;
#endif

    /*
     * Get the IVMMDevPort interface of the above driver/device.
     */
    pThis->pUpPort = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMIVMMDEVPORT);
    AssertMsgReturn(pThis->pUpPort, ("Configuration error: No VMMDev port interface above!\n"), VERR_PDM_MISSING_INTERFACE_ABOVE);
#ifdef VBOX_WITH_HGCM
    pThis->pHGCMPort = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMIHGCMPORT);
    AssertMsgReturn(pThis->pHGCMPort, ("Configuration error: No HGCM port interface above!\n"), VERR_PDM_MISSING_INTERFACE_ABOVE);
#endif

    /*
     * Get the Console object pointer and update the mpDrv member.
     */
    void *pv;
    int rc = CFGMR3QueryPtr(pCfgHandle, "Object", &pv);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("Configuration error: No/bad \"Object\" value! rc=%Rrc\n", rc));
        return rc;
    }
    pThis->pVMMDev = (VMMDev*)pv;        /** @todo Check this cast! */
    pThis->pVMMDev->mpDrv = pThis;       /* back-link so the VMMDev object can reach this driver */

#ifdef VBOX_WITH_HGCM
    rc = pThis->pVMMDev->hgcmLoadService(VBOXSHAREDFOLDERS_DLL, "VBoxSharedFolders");
    /* Shared folders failing to load is non-fatal; just remember the state. */
    pThis->pVMMDev->fSharedFolderActive = RT_SUCCESS(rc);
    if (RT_SUCCESS(rc))
    {
        PPDMLED       pLed;
        PPDMILEDPORTS pLedPort;

        LogRel(("Shared Folders service loaded.\n"));
        pLedPort = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMILEDPORTS);
        AssertMsgReturn(pLedPort, ("Configuration error: No LED port interface above!\n"), VERR_PDM_MISSING_INTERFACE_ABOVE);
        /* Hand the shared folders service the LED it should blink on activity. */
        rc = pLedPort->pfnQueryStatusLed(pLedPort, 0, &pLed);
        if (RT_SUCCESS(rc) && pLed)
        {
            VBOXHGCMSVCPARM  parm;

            parm.type = VBOX_HGCM_SVC_PARM_PTR;
            parm.u.pointer.addr = pLed;
            parm.u.pointer.size = sizeof(*pLed);

            rc = HGCMHostCall("VBoxSharedFolders", SHFL_FN_SET_STATUS_LED, 1, &parm);
        }
        else
            AssertMsgFailed(("pfnQueryStatusLed failed with %Rrc (pLed=%x)\n", rc, pLed));
    }
    else
        LogRel(("Failed to load Shared Folders service %Rrc\n", rc));

    /* Register the HGCM saved-state unit (save/load only, no prep/done). */
    rc = PDMDrvHlpSSMRegisterEx(pDrvIns, HGCM_SSM_VERSION, 4096 /* bad guess */,
                                NULL, NULL, NULL,
                                NULL, iface_hgcmSave, NULL,
                                NULL, iface_hgcmLoad, NULL);
    if (RT_FAILURE(rc))
        return rc;
#endif /* VBOX_WITH_HGCM */

    return VINF_SUCCESS;
}
/**
 * Frees the NT backing of a ring-0 memory object.
 *
 * Unmaps/unlocks/releases whatever resources the object type holds (MDLs,
 * contiguous memory, secured user memory, mappings) and clears the
 * corresponding members so double frees assert rather than corrupt.
 *
 * @returns IPRT status code (VERR_INTERNAL_ERROR for unimplemented/unknown types).
 * @param   pMem    The memory object to free the native backing of.
 */
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_LOW:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                /* Unmap first, then release the secured region (if any),
                   then give the pages back and free the MDL itself. */
                Assert(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                pMemNt->Core.pv = NULL;
                if (pMemNt->pvSecureMem)
                {
                    MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                    pMemNt->pvSecureMem = NULL;
                }
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            /* NT4 path (or not allocated via MmAllocatePagesForMdl): not implemented. */
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_PAGE:
            /* Pool allocation with a descriptive MDL; free both. */
            Assert(pMemNt->Core.pv);
            ExFreePool(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_CONT:
            Assert(pMemNt->Core.pv);
            MmFreeContiguousMemory(pMemNt->Core.pv);
            pMemNt->Core.pv = NULL;

            Assert(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            /* rtR0MemObjNativeEnterPhys? */
            if (!pMemNt->Core.u.Phys.fAllocated)
            {
#ifndef IPRT_TARGET_NT4
                Assert(!pMemNt->fAllocatedPagesForMdl);
#endif
                /* Nothing to do here. */
                break;
            }
            /* fall thru */

        case RTR0MEMOBJTYPE_PHYS_NC:
#ifndef IPRT_TARGET_NT4
            if (pMemNt->fAllocatedPagesForMdl)
            {
                MmFreePagesFromMdl(pMemNt->apMdls[0]);
                ExFreePool(pMemNt->apMdls[0]);
                pMemNt->apMdls[0] = NULL;
                pMemNt->cMdls = 0;
                break;
            }
#endif
            AssertFailed();
            break;

        case RTR0MEMOBJTYPE_LOCK:
            /* Release the secured region before unlocking the pages. */
            if (pMemNt->pvSecureMem)
            {
                MmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }
            /* Locked regions may span several MDLs (large ranges). */
            for (uint32_t i = 0; i < pMemNt->cMdls; i++)
            {
                MmUnlockPages(pMemNt->apMdls[i]);
                IoFreeMdl(pMemNt->apMdls[i]);
                pMemNt->apMdls[i] = NULL;
            }
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
            /* Reserved virtual address ranges are not implemented on NT. */
/*            if (pMemNt->Core.u.ResVirt.R0Process == NIL_RTR0PROCESS)
            {
            }
            else
            {
            }*/
            AssertMsgFailed(("RTR0MEMOBJTYPE_RES_VIRT\n"));
            return VERR_INTERNAL_ERROR;
            break;

        case RTR0MEMOBJTYPE_MAPPING:
        {
            Assert(pMemNt->cMdls == 0 && pMemNt->Core.pv);
            PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
            Assert(pMemNtParent);
            if (pMemNtParent->cMdls)
            {
                /* Mapping of an MDL-backed object; undo MmMapLockedPages*. */
                Assert(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);
                Assert(     pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS
                        ||  pMemNt->Core.u.Mapping.R0Process == RTR0ProcHandleSelf());
                MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
            }
            else
            {
                /* Mapping of a raw physical range; undo MmMapIoSpace. */
                Assert(     pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                        &&  !pMemNtParent->Core.u.Phys.fAllocated);
                Assert(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
                MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
            }
            pMemNt->Core.pv = NULL;
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemNt->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
/**
 * Internal worker for acquiring a futex-based mutex semaphore.
 *
 * Handles recursive requests by the owner, converts the millisecond timeout
 * to a timespec, and sleeps in sys_futex when contended.  iState protocol:
 * 0 = free, 1 = owned/uncontended, 2 = owned with (possible) waiters.
 *
 * @returns IPRT status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
 *          VERR_SEM_DESTROYED, VERR_INVALID_HANDLE, ...).
 * @param   hMutexSem       The mutex handle.
 * @param   cMillies        Timeout in milliseconds, RT_INDEFINITE_WAIT to wait forever.
 * @param   fAutoResume     Whether to resume the wait on EINTR or return VERR_INTERRUPTED.
 * @param   pSrcPos         Lock validator source position (strict builds only).
 */
DECL_FORCE_INLINE(int) rtSemMutexRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fAutoResume, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check if nested request.
     */
    pthread_t Self = pthread_self();
    if (    pThis->Owner == Self
        &&  pThis->cNestings > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

#ifdef RTSEMMUTEX_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies)
    {
        int rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorRec, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif

    /*
     * Convert timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64End = 0; /* shut up gcc */
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        /* FUTEX_WAIT takes a relative timeout; keep the absolute deadline
           in u64End so it can be re-derived after each wakeup. */
        ts.tv_sec  = cMillies / 1000;
        ts.tv_nsec = (cMillies % 1000) * UINT32_C(1000000);
        u64End = RTTimeSystemNanoTS() + cMillies * UINT64_C(1000000);
        pTimeout = &ts;
    }

    /*
     * Lock the mutex.
     * Optimize for the uncontended case (makes 1-2 ns difference).
     */
    if (RT_UNLIKELY(!ASMAtomicCmpXchgS32(&pThis->iState, 1, 0)))
    {
        for (;;)
        {
            /* Mark contended (2) and look at what was there before. */
            int32_t iOld = ASMAtomicXchgS32(&pThis->iState, 2);

            /*
             * Was the lock released in the meantime? This is unlikely (but possible)
             */
            if (RT_UNLIKELY(iOld == 0))
                break;

            /*
             * Go to sleep.
             */
            if (pTimeout && ( pTimeout->tv_sec || pTimeout->tv_nsec ))
            {
#ifdef RTSEMMUTEX_STRICT
                int rc9 = RTLockValidatorRecExclCheckBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
                if (RT_FAILURE(rc9))
                    return rc9;
#else
                RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
            }

            /* Sleeps only while iState is still 2; wakes on change or signal. */
            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 2, pTimeout, NULL, 0);

            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);

            /* Semaphore may have been destroyed while we slept. */
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMMUTEX_MAGIC))
                return VERR_SEM_DESTROYED;

            /*
             * Act on the wakup code.
             */
            if (rc == -ETIMEDOUT)
            {
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == 0)
                /* we'll leave the loop now unless another thread is faster */;
            else if (rc == -EWOULDBLOCK)
                /* retry with new value. */;
            else if (rc == -EINTR)
            {
                if (!fAutoResume)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(rc);
            }

            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64End - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                {
                    /* NOTE(review): this assigns VERR_TIMEOUT to the futex
                       return variable and then breaks out of the loop, after
                       which the function claims ownership and returns
                       VINF_SUCCESS - looks like a swallowed timeout; verify
                       against upstream before relying on near-deadline
                       behaviour. */
                    rc = VERR_TIMEOUT;
                    break;
                }
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
        }

        /*
         * When leaving this loop, iState is set to 2. This means that we gained the
         * lock and there are _possibly_ some waiters. We don't know exactly as another
         * thread might entered this loop at nearly the same time. Therefore we will
         * call futex_wakeup once too often (if _no_ other thread entered this loop).
         * The key problem is the simple futex_wait test for x != y (iState != 2) in
         * our case).
         */
    }

    /*
     * Set the owner and nesting.
     */
    pThis->Owner = Self;
    ASMAtomicWriteU32(&pThis->cNestings, 1);
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
    return VINF_SUCCESS;
}
/**
 * Create a new thread.
 *
 * @returns iprt status code.
 * @param   pThread     Where to store the thread handle to the new thread. (optional)
 * @param   pfnThread   The thread function.
 * @param   pvUser      User argument.
 * @param   cbStack     The size of the stack for the new thread.
 *                      Use 0 for the default stack size.
 * @param   enmType     The thread type. Used for deciding scheduling attributes
 *                      of the thread.
 * @param   fFlags      Flags of the RTTHREADFLAGS type (ORed together).
 * @param   pszName     Thread name.
 */
RTDECL(int) RTThreadCreate(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
                           RTTHREADTYPE enmType, unsigned fFlags, const char *pszName)
{
    LogFlow(("RTThreadCreate: pThread=%p pfnThread=%p pvUser=%p cbStack=%#x enmType=%d fFlags=%#x pszName=%p:{%s}\n",
             pThread, pfnThread, pvUser, cbStack, enmType, fFlags, pszName, pszName));

    /*
     * Validate input.  A NULL pThread is fine (caller doesn't want the handle),
     * but a non-NULL one must be a valid pointer.
     */
    if (pThread && !VALID_PTR(pThread))
    {
        Assert(VALID_PTR(pThread));
        return VERR_INVALID_PARAMETER;
    }
    if (!VALID_PTR(pfnThread))
    {
        Assert(VALID_PTR(pfnThread));
        return VERR_INVALID_PARAMETER;
    }
    if (!pszName || !*pszName || strlen(pszName) >= RTTHREAD_NAME_LEN)
    {
        AssertMsgFailed(("pszName=%s (max len is %d because of logging)\n", pszName, RTTHREAD_NAME_LEN - 1));
        return VERR_INVALID_PARAMETER;
    }
    if (fFlags & ~RTTHREADFLAGS_MASK)
    {
        AssertMsgFailed(("fFlags=%#x\n", fFlags));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Allocate the tracking structure, then spawn the native thread.
     */
    int           rc;
    PRTTHREADINT  pInt = rtThreadAlloc(enmType, fFlags, 0, pszName);
    if (pInt)
    {
        pInt->pfnThread = pfnThread;
        pInt->pvUser    = pvUser;
        pInt->cbStack   = cbStack;

        RTNATIVETHREAD hNative;
        rc = rtThreadNativeCreate(pInt, &hNative);
        if (RT_SUCCESS(rc))
        {
            /* Register the thread and drop our creation reference. */
            rtThreadInsert(pInt, hNative);
            rtThreadRelease(pInt);
            Log(("RTThreadCreate: Created thread %p (%p) %s\n", pInt, hNative, pszName));
            if (pThread)
                *pThread = pInt;
            return VINF_SUCCESS;
        }

        /* Native creation failed: force the refcount to 1 and destroy. */
        pInt->cRefs = 1;
        rtThreadRelease(pInt);
    }
    else
        rc = VERR_NO_TMP_MEMORY;

    LogFlow(("RTThreadCreate: Failed to create thread, rc=%Rrc\n", rc));
    /* NOTE(review): this release-asserts on a failure rc, i.e. thread creation
       failure is treated as fatal here - confirm that is intentional. */
    AssertReleaseRC(rc);
    return rc;
}
/** * @copydoc USBPROXYBACK::pfnUrbQueue */ static DECLCALLBACK(int) usbProxyWinUrbQueue(PUSBPROXYDEV pProxyDev, PVUSBURB pUrb) { PPRIV_USBW32 pPriv = USBPROXYDEV_2_DATA(pProxyDev, PPRIV_USBW32); Assert(pPriv); /* * Allocate and initialize a URB queue structure. */ /** @todo pool these */ PQUEUED_URB pQUrbWin = (PQUEUED_URB)RTMemAllocZ(sizeof(QUEUED_URB)); if (!pQUrbWin) return VERR_NO_MEMORY; switch (pUrb->enmType) { case VUSBXFERTYPE_CTRL: pQUrbWin->urbwin.type = USBSUP_TRANSFER_TYPE_CTRL; break; /* you won't ever see these */ case VUSBXFERTYPE_ISOC: pQUrbWin->urbwin.type = USBSUP_TRANSFER_TYPE_ISOC; pQUrbWin->urbwin.numIsoPkts = pUrb->cIsocPkts; for (unsigned i = 0; i < pUrb->cIsocPkts; ++i) { pQUrbWin->urbwin.aIsoPkts[i].cb = pUrb->aIsocPkts[i].cb; pQUrbWin->urbwin.aIsoPkts[i].off = pUrb->aIsocPkts[i].off; pQUrbWin->urbwin.aIsoPkts[i].stat = USBSUP_XFER_OK; } break; case VUSBXFERTYPE_BULK: pQUrbWin->urbwin.type = USBSUP_TRANSFER_TYPE_BULK; break; case VUSBXFERTYPE_INTR: pQUrbWin->urbwin.type = USBSUP_TRANSFER_TYPE_INTR; break; case VUSBXFERTYPE_MSG: pQUrbWin->urbwin.type = USBSUP_TRANSFER_TYPE_MSG; break; default: AssertMsgFailed(("Invalid type %d\n", pUrb->enmType)); return VERR_INVALID_PARAMETER; } switch (pUrb->enmDir) { case VUSBDIRECTION_SETUP: AssertFailed(); pQUrbWin->urbwin.dir = USBSUP_DIRECTION_SETUP; break; case VUSBDIRECTION_IN: pQUrbWin->urbwin.dir = USBSUP_DIRECTION_IN; break; case VUSBDIRECTION_OUT: pQUrbWin->urbwin.dir = USBSUP_DIRECTION_OUT; break; default: AssertMsgFailed(("Invalid direction %d\n", pUrb->enmDir)); return VERR_INVALID_PARAMETER; } Log(("usbproxy: Queue URB %p ep=%d cbData=%d abData=%p cIsocPkts=%d\n", pUrb, pUrb->EndPt, pUrb->cbData, pUrb->abData, pUrb->cIsocPkts)); pQUrbWin->urb = pUrb; pQUrbWin->urbwin.ep = pUrb->EndPt; pQUrbWin->urbwin.len = pUrb->cbData; pQUrbWin->urbwin.buf = pUrb->abData; pQUrbWin->urbwin.error = USBSUP_XFER_OK; pQUrbWin->urbwin.flags = USBSUP_FLAG_NONE; if (pUrb->enmDir == VUSBDIRECTION_IN && 
!pUrb->fShortNotOk) pQUrbWin->urbwin.flags = USBSUP_FLAG_SHORT_OK; int rc = VINF_SUCCESS; pQUrbWin->overlapped.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL); if (pQUrbWin->overlapped.hEvent != INVALID_HANDLE_VALUE) { pUrb->Dev.pvPrivate = pQUrbWin; if ( DeviceIoControl(pPriv->hDev, SUPUSB_IOCTL_SEND_URB, &pQUrbWin->urbwin, sizeof(pQUrbWin->urbwin), &pQUrbWin->urbwin, sizeof(pQUrbWin->urbwin), &pQUrbWin->cbReturned, &pQUrbWin->overlapped) || GetLastError() == ERROR_IO_PENDING) { /* insert into the queue */ RTCritSectEnter(&pPriv->CritSect); unsigned j = pPriv->cPendingUrbs; pPriv->aPendingUrbs[j] = pQUrbWin; pPriv->cPendingUrbs++; RTCritSectLeave(&pPriv->CritSect); SetEvent(pPriv->hEventWakeup); return VINF_SUCCESS; } else { DWORD dwErr = GetLastError(); if ( dwErr == ERROR_INVALID_HANDLE_STATE || dwErr == ERROR_BAD_COMMAND) { Log(("usbproxy: device %p unplugged!!\n", pPriv->hDev)); pProxyDev->fDetached = true; } else AssertMsgFailed(("dwErr=%X urbwin.error=%d (submit urb)\n", dwErr, pQUrbWin->urbwin.error)); rc = RTErrConvertFromWin32(dwErr); CloseHandle(pQUrbWin->overlapped.hEvent); pQUrbWin->overlapped.hEvent = INVALID_HANDLE_VALUE; } } #ifdef DEBUG_misha else { AssertMsgFailed(("FAILED!!, hEvent(0x%p)\n", pQUrbWin->overlapped.hEvent)); rc = VERR_NO_MEMORY; } #endif Assert(pQUrbWin->overlapped.hEvent == INVALID_HANDLE_VALUE); RTMemFree(pQUrbWin); return rc; }
/**
 * Applies the scheduling attributes for a thread type to a native (Mach)
 * thread using thread_policy_set.
 *
 * @returns VINF_SUCCESS (policy-set failures are only asserted on, not
 *          propagated).
 * @param   pThread     The thread; Core.Key holds the Mach thread port.
 * @param   enmType     The IPRT thread type to derive the policies from.
 */
DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
{
    /*
     * Convert the priority type to scheduling policies.
     * (This is really just guess work.)
     */
    bool                            fSetExtended = false;
    thread_extended_policy          Extended = { true };
    bool                            fSetTimeContstraint = false;
    thread_time_constraint_policy   TimeConstraint = { 0, 0, 0, true };
    thread_precedence_policy        Precedence = { 0 };
    switch (enmType)
    {
        /* Precedence.importance: higher value = more important, per Mach. */
        case RTTHREADTYPE_INFREQUENT_POLLER:
            Precedence.importance = 1;
            break;

        case RTTHREADTYPE_EMULATION:
            Precedence.importance = 30;
            break;

        case RTTHREADTYPE_DEFAULT:
            Precedence.importance = 31;
            break;

        case RTTHREADTYPE_MSG_PUMP:
            Precedence.importance = 34;
            break;

        case RTTHREADTYPE_IO:
            Precedence.importance = 98;
            break;

        case RTTHREADTYPE_TIMER:
            /* Timers additionally get non-timeshare scheduling and a
               real-time (time constraint) policy. */
            Precedence.importance = 0x7fffffff;

            fSetExtended = true;
            Extended.timeshare = FALSE;

            fSetTimeContstraint = true;
            TimeConstraint.period = 0; /* not really true for a real timer thread, but we've really no idea. */
            TimeConstraint.computation = rtDarwinAbsTimeFromNano(100000); /* 100 us*/
            TimeConstraint.constraint = rtDarwinAbsTimeFromNano(500000);  /* 500 us */
            TimeConstraint.preemptible = FALSE;
            break;

        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            return VERR_INVALID_PARAMETER;
    }
    RT_ASSERT_INTS_ON();

    /*
     * Do the actual modification.
     */
    kern_return_t kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_PRECEDENCE_POLICY,
                                         (thread_policy_t)&Precedence, THREAD_PRECEDENCE_POLICY_COUNT);
    AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr)); NOREF(kr);

    if (fSetExtended)
    {
        kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_EXTENDED_POLICY,
                               (thread_policy_t)&Extended, THREAD_EXTENDED_POLICY_COUNT);
        AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr));
    }

    if (fSetTimeContstraint)
    {
        kr = thread_policy_set((thread_t)pThread->Core.Key, THREAD_TIME_CONSTRAINT_POLICY,
                               (thread_policy_t)&TimeConstraint, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
        AssertMsg(kr == KERN_SUCCESS, ("%rc\n", kr));
    }

    return VINF_SUCCESS; /* ignore any errors for now */
}
RTR3DECL(int) RTErrConvertFromWin32(unsigned uNativeCode) { /* very fast check for no error. */ if (uNativeCode == ERROR_SUCCESS) return(VINF_SUCCESS); /* process error codes. */ switch (uNativeCode) { case ERROR_INVALID_FUNCTION: return VERR_INVALID_FUNCTION; case ERROR_FILE_NOT_FOUND: return VERR_FILE_NOT_FOUND; case ERROR_PATH_NOT_FOUND: return VERR_PATH_NOT_FOUND; case ERROR_TOO_MANY_OPEN_FILES: return VERR_TOO_MANY_OPEN_FILES; case ERROR_ACCESS_DENIED: return VERR_ACCESS_DENIED; case ERROR_INVALID_HANDLE: case ERROR_DIRECT_ACCESS_HANDLE: return VERR_INVALID_HANDLE; case ERROR_NO_SYSTEM_RESOURCES: /** @todo better translation */ case ERROR_NOT_ENOUGH_MEMORY: case ERROR_OUTOFMEMORY: return VERR_NO_MEMORY; case ERROR_INVALID_DRIVE: return VERR_INVALID_DRIVE; case ERROR_CURRENT_DIRECTORY: return VERR_CANT_DELETE_DIRECTORY; case ERROR_NOT_SAME_DEVICE: return VERR_NOT_SAME_DEVICE; case ERROR_NO_MORE_FILES: return VERR_NO_MORE_FILES; case ERROR_WRITE_PROTECT: return VERR_WRITE_PROTECT; case ERROR_BAD_UNIT: return VERR_IO_BAD_UNIT; case ERROR_NOT_READY: return VERR_IO_NOT_READY; case ERROR_BAD_COMMAND: return VERR_IO_BAD_COMMAND; case ERROR_CRC: return VERR_IO_CRC; case ERROR_BAD_LENGTH: return VERR_IO_BAD_LENGTH; case ERROR_SEEK: return VERR_SEEK; case ERROR_NOT_DOS_DISK: return VERR_DISK_INVALID_FORMAT; case ERROR_SECTOR_NOT_FOUND: return VERR_IO_SECTOR_NOT_FOUND; case ERROR_WRITE_FAULT: return VERR_WRITE_ERROR; case ERROR_READ_FAULT: return VERR_READ_ERROR; case ERROR_GEN_FAILURE: return VERR_IO_GEN_FAILURE; case ERROR_SHARING_VIOLATION: return VERR_SHARING_VIOLATION; case ERROR_LOCK_VIOLATION: return VERR_FILE_LOCK_VIOLATION; case ERROR_HANDLE_EOF: return VERR_EOF; case ERROR_NOT_LOCKED: return VERR_FILE_NOT_LOCKED; case ERROR_DIR_NOT_EMPTY: return VERR_DIR_NOT_EMPTY; case ERROR_HANDLE_DISK_FULL: case ERROR_DISK_FULL: return VERR_DISK_FULL; case ERROR_NOT_SUPPORTED: return VERR_NOT_SUPPORTED; case ERROR_INVALID_PARAMETER: case ERROR_BAD_ARGUMENTS: case 
ERROR_INVALID_FLAGS: return VERR_INVALID_PARAMETER; case ERROR_REM_NOT_LIST: return VERR_NET_IO_ERROR; case ERROR_BAD_NETPATH: case ERROR_NETNAME_DELETED: return VERR_NET_HOST_NOT_FOUND; case ERROR_BAD_NET_NAME: case ERROR_DEV_NOT_EXIST: return VERR_NET_PATH_NOT_FOUND; case ERROR_NETWORK_BUSY: case ERROR_TOO_MANY_CMDS: case ERROR_TOO_MANY_NAMES: case ERROR_TOO_MANY_SESS: case ERROR_OUT_OF_STRUCTURES: return VERR_NET_OUT_OF_RESOURCES; case ERROR_PRINTQ_FULL: case ERROR_NO_SPOOL_SPACE: case ERROR_PRINT_CANCELLED: return VERR_NET_PRINT_ERROR; case ERROR_DUP_NAME: case ERROR_ADAP_HDW_ERR: case ERROR_BAD_NET_RESP: case ERROR_UNEXP_NET_ERR: case ERROR_BAD_REM_ADAP: case ERROR_NETWORK_ACCESS_DENIED: case ERROR_BAD_DEV_TYPE: case ERROR_SHARING_PAUSED: case ERROR_REQ_NOT_ACCEP: case ERROR_REDIR_PAUSED: case ERROR_ALREADY_ASSIGNED: case ERROR_INVALID_PASSWORD: case ERROR_NET_WRITE_FAULT: return VERR_NET_IO_ERROR; case ERROR_FILE_EXISTS: case ERROR_ALREADY_EXISTS: return VERR_ALREADY_EXISTS; case ERROR_CANNOT_MAKE: return VERR_CANT_CREATE; case ERROR_NO_PROC_SLOTS: return VERR_MAX_PROCS_REACHED; case ERROR_TOO_MANY_SEMAPHORES: return VERR_TOO_MANY_SEMAPHORES; case ERROR_EXCL_SEM_ALREADY_OWNED: return VERR_EXCL_SEM_ALREADY_OWNED; case ERROR_SEM_IS_SET: return VERR_SEM_IS_SET; case ERROR_TOO_MANY_SEM_REQUESTS: return VERR_TOO_MANY_SEM_REQUESTS; case ERROR_SEM_OWNER_DIED: return VERR_SEM_OWNER_DIED; case ERROR_DRIVE_LOCKED: return VERR_DRIVE_LOCKED; case ERROR_BROKEN_PIPE: return VERR_BROKEN_PIPE; case ERROR_OPEN_FAILED: return VERR_OPEN_FAILED; case ERROR_BUFFER_OVERFLOW: case ERROR_INSUFFICIENT_BUFFER: return VERR_BUFFER_OVERFLOW; case ERROR_NO_MORE_SEARCH_HANDLES: return VERR_NO_MORE_SEARCH_HANDLES; case ERROR_SEM_TIMEOUT: case WAIT_TIMEOUT: case ERROR_SERVICE_REQUEST_TIMEOUT: case ERROR_COUNTER_TIMEOUT: case ERROR_TIMEOUT: return VERR_TIMEOUT; case ERROR_INVALID_NAME: case ERROR_BAD_PATHNAME: return VERR_INVALID_NAME; case ERROR_NEGATIVE_SEEK: return VERR_NEGATIVE_SEEK; case 
ERROR_SEEK_ON_DEVICE: return VERR_SEEK_ON_DEVICE; case ERROR_SIGNAL_REFUSED: case ERROR_NO_SIGNAL_SENT: return VERR_SIGNAL_REFUSED; case ERROR_SIGNAL_PENDING: return VERR_SIGNAL_PENDING; case ERROR_MAX_THRDS_REACHED: return VERR_MAX_THRDS_REACHED; case ERROR_LOCK_FAILED: return VERR_FILE_LOCK_FAILED; case ERROR_SEM_NOT_FOUND: return VERR_SEM_NOT_FOUND; case ERROR_FILENAME_EXCED_RANGE: return VERR_FILENAME_TOO_LONG; case ERROR_INVALID_SIGNAL_NUMBER: return VERR_SIGNAL_INVALID; case ERROR_BAD_PIPE: return VERR_BAD_PIPE; case ERROR_PIPE_BUSY: return VERR_PIPE_BUSY; case ERROR_NO_DATA: return VERR_NO_DATA; case ERROR_PIPE_NOT_CONNECTED: return VERR_PIPE_NOT_CONNECTED; case ERROR_MORE_DATA: return VERR_MORE_DATA; case ERROR_NOT_OWNER: return VERR_NOT_OWNER; case ERROR_TOO_MANY_POSTS: return VERR_TOO_MANY_POSTS; case ERROR_PIPE_CONNECTED: case ERROR_PIPE_LISTENING: return VERR_PIPE_IO_ERROR; case ERROR_OPERATION_ABORTED: return VERR_INTERRUPTED; case ERROR_NO_UNICODE_TRANSLATION: return VERR_NO_TRANSLATION; case RPC_S_INVALID_STRING_UUID: return VERR_INVALID_UUID_FORMAT; case ERROR_PROC_NOT_FOUND: return VERR_SYMBOL_NOT_FOUND; case ERROR_MOD_NOT_FOUND: return VERR_MODULE_NOT_FOUND; case ERROR_INVALID_EXE_SIGNATURE: return VERR_INVALID_EXE_SIGNATURE; case ERROR_BAD_EXE_FORMAT: return VERR_BAD_EXE_FORMAT; case ERROR_RESOURCE_DATA_NOT_FOUND: return VERR_NO_DATA; ///@todo fix ERROR_RESOURCE_DATA_NOT_FOUND translation case ERROR_INVALID_ADDRESS: return VERR_INVALID_POINTER; ///@todo fix ERROR_INVALID_ADDRESS translation - dbghelp returns it on some line number queries. case ERROR_CANCELLED: return VERR_CANCELLED; case ERROR_DIRECTORY: return VERR_NOT_A_DIRECTORY; case ERROR_LOGON_FAILURE: return VERR_AUTHENTICATION_FAILURE; /* * Winsocket errors are mostly BSD errno.h wrappers. * This is copied from RTErrConvertFromErrno() and checked against winsock.h. * Please, keep things in sync! 
*/ #ifdef WSAEPERM case WSAEPERM: return VERR_ACCESS_DENIED; /* 1 */ #endif #ifdef WSAENOENT case WSAENOENT: return VERR_FILE_NOT_FOUND; #endif #ifdef WSAESRCH case WSAESRCH: return VERR_PROCESS_NOT_FOUND; #endif case WSAEINTR: return VERR_INTERRUPTED; #ifdef WSAEIO case WSAEIO: return VERR_DEV_IO_ERROR; #endif #ifdef WSAE2BIG case WSAE2BIG: return VERR_TOO_MUCH_DATA; #endif #ifdef WSAENOEXEC case WSAENOEXEC: return VERR_BAD_EXE_FORMAT; #endif case WSAEBADF: return VERR_INVALID_HANDLE; #ifdef WSAECHILD case WSAECHILD: return VERR_PROCESS_NOT_FOUND; //... /* 10 */ #endif case WSAEWOULDBLOCK: return VERR_TRY_AGAIN; /* EAGAIN */ #ifdef WSAENOMEM case WSAENOMEM: return VERR_NO_MEMORY; #endif case WSAEACCES: return VERR_ACCESS_DENIED; case WSAEFAULT: return VERR_INVALID_POINTER; //case WSAENOTBLK: return VERR_; #ifdef WSAEBUSY case WSAEBUSY: return VERR_DEV_IO_ERROR; #endif #ifdef WSAEEXIST case WSAEEXIST: return VERR_ALREADY_EXISTS; #endif //case WSAEXDEV: #ifdef WSAENODEV case WSAENODEV: return VERR_NOT_SUPPORTED; #endif #ifdef WSAENOTDIR case WSAENOTDIR: return VERR_PATH_NOT_FOUND; /* 20 */ #endif #ifdef WSAEISDIR case WSAEISDIR: return VERR_FILE_NOT_FOUND; #endif case WSAEINVAL: return VERR_INVALID_PARAMETER; #ifdef WSAENFILE case WSAENFILE: return VERR_TOO_MANY_OPEN_FILES; #endif case WSAEMFILE: return VERR_TOO_MANY_OPEN_FILES; #ifdef WSAENOTTY case WSAENOTTY: return VERR_INVALID_FUNCTION; #endif #ifdef WSAETXTBSY case WSAETXTBSY: return VERR_SHARING_VIOLATION; #endif //case WSAEFBIG: #ifdef WSAENOSPC case WSAENOSPC: return VERR_DISK_FULL; #endif #ifdef WSAESPIPE case WSAESPIPE: return VERR_SEEK_ON_DEVICE; #endif #ifdef WSAEROFS case WSAEROFS: return VERR_WRITE_PROTECT; /* 30 */ #endif //case WSAEMLINK: #ifdef WSAEPIPE case WSAEPIPE: return VERR_BROKEN_PIPE; #endif #ifdef WSAEDOM case WSAEDOM: return VERR_INVALID_PARAMETER; #endif #ifdef WSAERANGE case WSAERANGE: return VERR_INVALID_PARAMETER; #endif #ifdef WSAEDEADLK case WSAEDEADLK: return VERR_DEADLOCK; #endif 
case WSAENAMETOOLONG: return VERR_FILENAME_TOO_LONG; #ifdef WSAENOLCK case WSAENOLCK: return VERR_FILE_LOCK_FAILED; #endif #ifdef WSAENOSYS case WSAENOSYS: return VERR_NOT_SUPPORTED; #endif case WSAENOTEMPTY: return VERR_CANT_DELETE_DIRECTORY; case WSAELOOP: return VERR_TOO_MANY_SYMLINKS; /* 40 */ //case WSAENOMSG 42 /* No message of desired type */ //case WSAEIDRM 43 /* Identifier removed */ //case WSAECHRNG 44 /* Channel number out of range */ //case WSAEL2NSYNC 45 /* Level 2 not synchronized */ //case WSAEL3HLT 46 /* Level 3 halted */ //case WSAEL3RST 47 /* Level 3 reset */ //case WSAELNRNG 48 /* Link number out of range */ //case WSAEUNATCH 49 /* Protocol driver not attached */ //case WSAENOCSI 50 /* No CSI structure available */ //case WSAEL2HLT 51 /* Level 2 halted */ //case WSAEBADE 52 /* Invalid exchange */ //case WSAEBADR 53 /* Invalid request descriptor */ //case WSAEXFULL 54 /* Exchange full */ //case WSAENOANO 55 /* No anode */ //case WSAEBADRQC 56 /* Invalid request code */ //case WSAEBADSLT 57 /* Invalid slot */ //case 58: //case WSAEBFONT 59 /* Bad font file format */ //case WSAENOSTR 60 /* Device not a stream */ #ifdef WSAENODATA case WSAENODATA: return VERR_NO_DATA; #endif //case WSAETIME 62 /* Timer expired */ //case WSAENOSR 63 /* Out of streams resources */ #ifdef WSAENONET case WSAENONET: return VERR_NET_NO_NETWORK; #endif //case WSAENOPKG 65 /* Package not installed */ //case WSAEREMOTE 66 /* Object is remote */ //case WSAENOLINK 67 /* Link has been severed */ //case WSAEADV 68 /* Advertise error */ //case WSAESRMNT 69 /* Srmount error */ //case WSAECOMM 70 /* Communication error on send */ //case WSAEPROTO 71 /* Protocol error */ //case WSAEMULTIHOP 72 /* Multihop attempted */ //case WSAEDOTDOT 73 /* RFS specific error */ //case WSAEBADMSG 74 /* Not a data message */ #ifdef WSAEOVERFLOW case WSAEOVERFLOW: return VERR_TOO_MUCH_DATA; #endif #ifdef WSAENOTUNIQ case WSAENOTUNIQ: return VERR_NET_NOT_UNIQUE_NAME; #endif #ifdef WSAEBADFD case 
WSAEBADFD: return VERR_INVALID_HANDLE; #endif //case WSAEREMCHG 78 /* Remote address changed */ //case WSAELIBACC 79 /* Can not access a needed shared library */ //case WSAELIBBAD 80 /* Accessing a corrupted shared library */ //case WSAELIBSCN 81 /* .lib section in a.out corrupted */ //case WSAELIBMAX 82 /* Attempting to link in too many shared libraries */ //case WSAELIBEXEC 83 /* Cannot exec a shared library directly */ #ifdef WSAEILSEQ case WSAEILSEQ: return VERR_NO_TRANSLATION; #endif #ifdef WSAERESTART case WSAERESTART: return VERR_INTERRUPTED; #endif //case WSAESTRPIPE 86 /* Streams pipe error */ //case WSAEUSERS 87 /* Too many users */ case WSAENOTSOCK: return VERR_NET_NOT_SOCKET; case WSAEDESTADDRREQ: return VERR_NET_DEST_ADDRESS_REQUIRED; case WSAEMSGSIZE: return VERR_NET_MSG_SIZE; case WSAEPROTOTYPE: return VERR_NET_PROTOCOL_TYPE; case WSAENOPROTOOPT: return VERR_NET_PROTOCOL_NOT_AVAILABLE; case WSAEPROTONOSUPPORT: return VERR_NET_PROTOCOL_NOT_SUPPORTED; case WSAESOCKTNOSUPPORT: return VERR_NET_SOCKET_TYPE_NOT_SUPPORTED; case WSAEOPNOTSUPP: return VERR_NET_OPERATION_NOT_SUPPORTED; case WSAEPFNOSUPPORT: return VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED; case WSAEAFNOSUPPORT: return VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED; case WSAEADDRINUSE: return VERR_NET_ADDRESS_IN_USE; case WSAEADDRNOTAVAIL: return VERR_NET_ADDRESS_NOT_AVAILABLE; case WSAENETDOWN: return VERR_NET_DOWN; case WSAENETUNREACH: return VERR_NET_UNREACHABLE; case WSAENETRESET: return VERR_NET_CONNECTION_RESET; case WSAECONNABORTED: return VERR_NET_CONNECTION_ABORTED; case WSAECONNRESET: return VERR_NET_CONNECTION_RESET_BY_PEER; case WSAENOBUFS: return VERR_NET_NO_BUFFER_SPACE; case WSAEISCONN: return VERR_NET_ALREADY_CONNECTED; case WSAENOTCONN: return VERR_NET_NOT_CONNECTED; case WSAESHUTDOWN: return VERR_NET_SHUTDOWN; case WSAETOOMANYREFS: return VERR_NET_TOO_MANY_REFERENCES; case WSAETIMEDOUT: return VERR_TIMEOUT; case WSAECONNREFUSED: return VERR_NET_CONNECTION_REFUSED; case WSAEHOSTDOWN: 
return VERR_NET_HOST_DOWN; case WSAEHOSTUNREACH: return VERR_NET_HOST_UNREACHABLE; case WSAEALREADY: return VERR_NET_ALREADY_IN_PROGRESS; case WSAEINPROGRESS: return VERR_NET_IN_PROGRESS; //case WSAESTALE 116 /* Stale NFS file handle */ //case WSAEUCLEAN 117 /* Structure needs cleaning */ //case WSAENOTNAM 118 /* Not a XENIX named type file */ //case WSAENAVAIL 119 /* No XENIX semaphores available */ //case WSAEISNAM 120 /* Is a named type file */ //case WSAEREMOTEIO 121 /* Remote I/O error */ case WSAEDQUOT: return VERR_DISK_FULL; #ifdef WSAENOMEDIUM case WSAENOMEDIUM: return VERR_MEDIA_NOT_PRESENT; #endif #ifdef WSAEMEDIUMTYPE case WSAEMEDIUMTYPE: return VERR_MEDIA_NOT_RECOGNIZED; #endif case WSAEPROCLIM: return VERR_MAX_PROCS_REACHED; //case WSAEDISCON: (WSABASEERR+101) //case WSASYSNOTREADY (WSABASEERR+91) //case WSAVERNOTSUPPORTED (WSABASEERR+92) //case WSANOTINITIALISED (WSABASEERR+93) #ifdef WSAHOST_NOT_FOUND case WSAHOST_NOT_FOUND: return VERR_NET_HOST_NOT_FOUND; #endif #ifdef WSATRY_AGAIN case WSATRY_AGAIN: return VERR_TRY_AGAIN; #endif #ifndef WSANO_RECOVERY case WSANO_RECOVERY: return VERR_IO_GEN_FAILURE; #endif #ifdef WSANO_DATA case WSANO_DATA: return VERR_NET_ADDRESS_NOT_AVAILABLE; #endif #ifndef ERROR_NOT_A_REPARSE_POINT # define ERROR_NOT_A_REPARSE_POINT 0x1126 #endif case ERROR_NOT_A_REPARSE_POINT: return VERR_NOT_SYMLINK; } /* unknown error. */ #ifndef DEBUG_dmik AssertMsgFailed(("Unhandled error %u\n", uNativeCode)); #endif return VERR_UNRESOLVED_ERROR; }
/**
 * Allocates memory in the Hypervisor (RC VMM) area.
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32,64 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   ppv         Where to store the address to the allocated
 *                      memory.
 */
static int mmHyperAllocInternal(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
{
    AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));

    /*
     * Validate input and adjust it to reasonable values.
     */
    if (!uAlignment || uAlignment < MMHYPER_HEAP_ALIGN_MIN)
        uAlignment = MMHYPER_HEAP_ALIGN_MIN;
    uint32_t cbAligned;
    switch (uAlignment)
    {
        case 8:
        case 16:
        case 32:
        case 64:
            cbAligned = RT_ALIGN_32(cb, MMHYPER_HEAP_ALIGN_MIN);
            if (!cbAligned || cbAligned < cb) /* zero size or 32-bit overflow of the aligned size. */
            {
                Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
                AssertMsgFailed(("Nice try.\n"));
                return VERR_INVALID_PARAMETER;
            }
            break;

        case PAGE_SIZE:
            AssertMsg(RT_ALIGN_32(cb, PAGE_SIZE) == cb, ("The size isn't page aligned. (cb=%#x)\n", cb));
            cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
            if (!cbAligned)
            {
                Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
                AssertMsgFailed(("Nice try.\n"));
                return VERR_INVALID_PARAMETER;
            }
            break;

        default:
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
            AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * Get heap and statistics.
     */
    PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
#ifdef VBOX_WITH_STATISTICS
    PMMHYPERSTAT pStat = mmHyperStat(pHeap, enmTag);
    if (!pStat)
    {
        Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
        AssertMsgFailed(("Failed to allocate statistics!\n"));
        return VERR_MM_HYPER_NO_MEMORY;
    }
#endif
    if (uAlignment < PAGE_SIZE)
    {
        /*
         * Allocate a chunk.
         */
        PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, cbAligned, uAlignment);
        if (pChunk)
        {
#ifdef VBOX_WITH_STATISTICS
            /* Size of the chunk: either the distance to the next chunk, or for the
               last chunk, the distance to the end of the sub-page-aligned area. */
            const uint32_t cbChunk = pChunk->offNext
                ? pChunk->offNext
                : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
            pStat->cbAllocated += (uint32_t)cbChunk;
            pStat->cbCurAllocated += (uint32_t)cbChunk;
            if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
                pStat->cbMaxAllocated = pStat->cbCurAllocated;
            pStat->cAllocations++;
            pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
#else
            pChunk->offStat = 0;
#endif
            /* The usable memory starts right after the chunk header. */
            void *pv = pChunk + 1;
            *ppv = pv;
            ASMMemZero32(pv, cbAligned);
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate page aligned memory.
         */
        void *pv = mmHyperAllocPages(pHeap, cbAligned);
        if (pv)
        {
#ifdef VBOX_WITH_STATISTICS
            pStat->cbAllocated += cbAligned;
            pStat->cbCurAllocated += cbAligned;
            if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
                pStat->cbMaxAllocated = pStat->cbCurAllocated;
            pStat->cAllocations++;
#endif
            *ppv = pv;
            /* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPR3PageAlloc zeros it. */
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv)); /* was logging ppv instead of pv */
            return VINF_SUCCESS;
        }
    }

    /* Allocation failed: record it and bail out. */
#ifdef VBOX_WITH_STATISTICS
    pStat->cAllocations++;
    pStat->cFailures++;
#endif
    Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
    AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
    return VERR_MM_HYPER_NO_MEMORY;
}
/**
 * Runs all the filters on the specified device.
 *
 * All filters mean global and active VM, with the exception of those
 * belonging to \a aMachine.  If a global ignore filter matched or if
 * none of the filters matched, the device will be released back to
 * the host.
 *
 * The device calling us here will be in the HeldByProxy, Unused, or
 * Capturable state.  The caller is aware that locks held might have
 * to be abandoned because of IPC and that the device might be in
 * almost any state upon return.
 *
 * @returns COM status code (only parameter & state checks will fail).
 * @param   aDevice             The USB device to apply filters to.
 * @param   llOpenedMachines    List of running session machines to run the
 *                              per-machine filters against.
 * @param   aIgnoreMachine      The machine to ignore filters from (we've just
 *                              detached the device from this machine).
 *
 * @note    The caller is expected to own no locks.
 */
HRESULT USBProxyService::runAllFiltersOnDevice(ComObjPtr<HostUSBDevice> &aDevice,
                                               SessionMachinesList &llOpenedMachines,
                                               SessionMachine *aIgnoreMachine)
{
    LogFlowThisFunc(("{%s} ignoring=%p\n", aDevice->getName().c_str(), aIgnoreMachine));

    /*
     * Verify preconditions.
     */
    /* We must not already hold either lock: both are (re)taken below and the
       lock-order/IPC rules require entering with none held. */
    AssertReturn(!isWriteLockOnCurrentThread(), E_FAIL);
    AssertReturn(!aDevice->isWriteLockOnCurrentThread(), E_FAIL);
    AutoWriteLock alock(this COMMA_LOCKVAL_SRC_POS);
    AutoWriteLock devLock(aDevice COMMA_LOCKVAL_SRC_POS);
    AssertMsgReturn(aDevice->isCapturableOrHeld(), ("{%s} %s\n", aDevice->getName().c_str(), aDevice->getStateName()), E_FAIL);

    /*
     * Get the lists we'll iterate.
     */
    Host::USBDeviceFilterList globalFilters;
    mHost->getUSBFilters(&globalFilters);

    /*
     * Run global filters filters first.
     */
    bool fHoldIt = false;
    for (Host::USBDeviceFilterList::const_iterator it = globalFilters.begin();
         it != globalFilters.end();
         ++it)
    {
        AutoWriteLock filterLock(*it COMMA_LOCKVAL_SRC_POS);
        const HostUSBDeviceFilter::Data &data = (*it)->getData();
        if (aDevice->isMatch(data))
        {
            USBDeviceFilterAction_T action = USBDeviceFilterAction_Null;
            (*it)->COMGETTER(Action)(&action);
            if (action == USBDeviceFilterAction_Ignore)
            {
                /*
                 * Release the device to the host and we're done.
                 */
                /* Drop all locks before the IPC-bound request call. */
                filterLock.release();
                devLock.release();
                alock.release();
                aDevice->requestReleaseToHost();
                return S_OK;
            }
            if (action == USBDeviceFilterAction_Hold)
            {
                /*
                 * A device held by the proxy needs to be subjected
                 * to the machine filters.
                 */
                fHoldIt = true;
                break;
            }
            AssertMsgFailed(("action=%d\n", action));
        }
    }
    globalFilters.clear();

    /*
     * Run the per-machine filters.
     */
    for (SessionMachinesList::const_iterator it = llOpenedMachines.begin();
         it != llOpenedMachines.end();
         ++it)
    {
        ComObjPtr<SessionMachine> pMachine = *it;

        /* Skip the machine the device was just detached from. */
        if (   aIgnoreMachine
            && pMachine == aIgnoreMachine)
            continue;

        /* runMachineFilters takes care of checking the machine state. */
        /* Locks are dropped around the call (IPC); re-acquired afterwards,
           so the device state may have changed in between. */
        devLock.release();
        alock.release();
        if (runMachineFilters(pMachine, aDevice))
        {
            LogFlowThisFunc(("{%s} attached to %p\n", aDevice->getName().c_str(), (void *)pMachine));
            return S_OK;
        }
        alock.acquire();
        devLock.acquire();
    }

    /*
     * No matching machine, so request hold or release depending
     * on global filter match.
     */
    devLock.release();
    alock.release();
    if (fHoldIt)
        aDevice->requestHold();
    else
        aDevice->requestReleaseToHost();
    return S_OK;
}
/**
 * Free memory a memory chunk.
 *
 * Marks the chunk free, links it into the address-sorted free list and
 * coalesces it with adjacent free neighbours (left and/or right).
 *
 * @returns VBox status code.
 * @param   pHeap   The heap.
 * @param   pChunk  The memory chunk to free.
 */
static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk)
{
    Log3(("mmHyperFree: Enter pHeap=%p pChunk=%p\n", pHeap, pChunk));
    PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pChunk;

    /*
     * Insert into the free list (which is sorted on address).
     *
     * We'll search towards the end of the heap to locate the
     * closest FREE chunk.
     */
    PMMHYPERCHUNKFREE pLeft = NULL;
    PMMHYPERCHUNKFREE pRight = NULL;
    if (pHeap->offFreeTail != NIL_OFFSET)
    {
        if (pFree->core.offNext)
        {
            /* Walk the chunk chain forward until we hit a FREE chunk (or run
               off the end) - that will be our right-hand free neighbour. */
            pRight = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->core.offNext);
            ASSERT_CHUNK(pHeap, &pRight->core);
            while (!MMHYPERCHUNK_ISFREE(&pRight->core))
            {
                if (!pRight->core.offNext)
                {
                    pRight = NULL;
                    break;
                }
                pRight = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->core.offNext);
                ASSERT_CHUNK(pHeap, &pRight->core);
            }
        }
        if (!pRight)
            pRight = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail); /** @todo this can't be correct! 'pLeft = .. ; else' I think */
        if (pRight)
        {
            ASSERT_CHUNK_FREE(pHeap, pRight);
            if (pRight->offPrev)
            {
                /* The free chunk preceding pRight in the free list is our left
                   free neighbour (offPrev is a negative self-relative offset). */
                pLeft = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->offPrev);
                ASSERT_CHUNK_FREE(pHeap, pLeft);
            }
        }
    }
    if (pLeft == pFree)
    {
        /* Finding ourselves already in the free list means a double free. */
        AssertMsgFailed(("Freed twice! pv=%p (pChunk=%p)\n", pChunk + 1, pChunk));
        return VERR_INVALID_POINTER;
    }
    pChunk->offStat = 0;

    /*
     * Head free chunk list?
     */
    if (!pLeft)
    {
        MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
        pFree->offPrev = 0;
        pHeap->offFreeHead = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
        if (pRight)
        {
            pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
            pRight->offPrev = -(int32_t)pFree->offNext;
        }
        else
        {
            /* Only free chunk in the heap: it is both head and tail. */
            pFree->offNext = 0;
            pHeap->offFreeTail = pHeap->offFreeHead;
        }
        Log3(("mmHyperFree: Inserted %p at head of free chain.\n", pFree));
    }
    else
    {
        /*
         * Can we merge with left hand free chunk?
         */
        if ((char *)pLeft + pLeft->core.offNext == (char *)pFree)
        {
            /* pLeft is physically adjacent: absorb pFree into pLeft. */
            if (pFree->core.offNext)
            {
                pLeft->core.offNext = pLeft->core.offNext + pFree->core.offNext;
                MMHYPERCHUNK_SET_OFFPREV(((PMMHYPERCHUNK)((char *)pLeft + pLeft->core.offNext)), -(int32_t)pLeft->core.offNext);
            }
            else
                pLeft->core.offNext = 0;
            pFree = pLeft;
            /* pLeft's byte count is subtracted here; the merged size is
               re-added at the end after recalculation. */
            Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pLeft->cb, -(int32_t)pLeft->cb));
            pHeap->cbFree -= pLeft->cb;
            Log3(("mmHyperFree: Merging %p into %p (cb=%d).\n", pFree, pLeft, pLeft->cb));
        }
        /*
         * No, just link it into the free list then.
         */
        else
        {
            MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
            pFree->offPrev = (uintptr_t)pLeft - (uintptr_t)pFree;
            pLeft->offNext = -pFree->offPrev;
            if (pRight)
            {
                pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
                pRight->offPrev = -(int32_t)pFree->offNext;
            }
            else
            {
                /* No right neighbour: pFree becomes the new free-list tail. */
                pFree->offNext = 0;
                pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
            }
            Log3(("mmHyperFree: Inserted %p after %p in free list.\n", pFree, pLeft));
        }
    }

    /*
     * Can we merge with right hand free chunk?
     */
    if (pRight && (char *)pRight == (char *)pFree + pFree->core.offNext)
    {
        /* core */
        if (pRight->core.offNext)
        {
            pFree->core.offNext += pRight->core.offNext;
            PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
            MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
            ASSERT_CHUNK(pHeap, pNext);
        }
        else
            pFree->core.offNext = 0;

        /* free */
        if (pRight->offNext)
        {
            pFree->offNext += pRight->offNext;
            ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext))->offPrev = -(int32_t)pFree->offNext;
        }
        else
        {
            /* pRight was the tail; pFree takes its place. */
            pFree->offNext = 0;
            pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
        }
        Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pRight->cb, -(int32_t)pRight->cb));
        pHeap->cbFree -= pRight->cb;
        Log3(("mmHyperFree: Merged %p (cb=%d) into %p.\n", pRight, pRight->cb, pFree));
    }

    /*
     * Calculate the size of the resulting free chunk and update the
     * heap free-byte accounting.
     */
    if (pFree->core.offNext)
        pFree->cb = pFree->core.offNext - sizeof(MMHYPERCHUNK);
    else
        /* Last chunk: size runs to the start of the page-aligned area. */
        pFree->cb = pHeap->offPageAligned - ((uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap)) - sizeof(MMHYPERCHUNK);
    Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree + pFree->cb, pFree->cb));
    pHeap->cbFree += pFree->cb;
    ASSERT_CHUNK_FREE(pHeap, pFree);

#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif
    return VINF_SUCCESS;
}
/**
 * Internal worker for RTSemMutexRequestNoResume and its debug companion.
 *
 * @returns Same as RTSemMutexRequestNoResume.
 * @param   hMutexSem   The mutex handle.
 * @param   cMillies    The number of milliseconds to wait.
 * @param   pSrcPos     The source position of the caller.
 */
DECL_FORCE_INLINE(int) rtSemMutexRequestNoResume(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check for recursive entry.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeOwner;
    ASMAtomicReadHandle(&pThis->hNativeOwner, &hNativeOwner);
    if (hNativeOwner == hNativeSelf)
    {
        /* Already the owner: just bump the recursion count. */
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorRec, pSrcPos);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicIncU32(&pThis->cRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Lock mutex semaphore.
     */
    /* Only announce blocking to the thread/lock-validator machinery when we
       may actually block (cMillies > 0); a 0ms poll skips it. */
    RTTHREAD hThreadSelf = NIL_RTTHREAD;
    if (cMillies > 0)
    {
#ifdef RTSEMMUTEX_STRICT
        hThreadSelf = RTThreadSelfAutoAdopt();
        int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true,
                                                              cMillies, RTTHREADSTATE_MUTEX, true);
        if (RT_FAILURE(rc9))
            return rc9;
#else
        hThreadSelf = RTThreadSelf();
        RTThreadBlocking(hThreadSelf, RTTHREADSTATE_MUTEX, true);
#endif
    }
    /* Alertable wait so APCs / IPRT interruption can break us out
       (WAIT_IO_COMPLETION below). */
    DWORD rc = WaitForSingleObjectEx(pThis->hMtx,
                                     cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies,
                                     TRUE /*fAlertable*/);
    RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_MUTEX);
    switch (rc)
    {
        case WAIT_OBJECT_0:
            /* Got it: record ownership and start the recursion count at 1. */
#ifdef RTSEMMUTEX_STRICT
            RTLockValidatorRecExclSetOwner(&pThis->ValidatorRec, hThreadSelf, pSrcPos, true);
#endif
            ASMAtomicWriteHandle(&pThis->hNativeOwner, hNativeSelf);
            ASMAtomicWriteU32(&pThis->cRecursions, 1);
            return VINF_SUCCESS;

        case WAIT_TIMEOUT:
            return VERR_TIMEOUT;
        case WAIT_IO_COMPLETION:
            return VERR_INTERRUPTED;
        case WAIT_ABANDONED:
            return VERR_SEM_OWNER_DIED;
        default:
            AssertMsgFailed(("%u\n", rc));
            /* deliberate fallthrough into the failure handling */
        case WAIT_FAILED:
        {
            int rc2 = RTErrConvertFromWin32(GetLastError());
            AssertMsgFailed(("Wait on hMutexSem %p failed, rc=%d lasterr=%d\n", hMutexSem, rc, GetLastError()));
            if (rc2 != VINF_SUCCESS)
                return rc2;

            AssertMsgFailed(("WaitForSingleObject(event) -> rc=%d while converted lasterr=%d\n", rc, rc2));
            return VERR_INTERNAL_ERROR;
        }
    }
}
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 * @note Only here we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code.
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);
    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    /* Merge the virtualized flag bits kept in the GC state back into the
       guest eflags, then reset the virtualized IF for the next entry. */
    register uint32_t efl = pCtxCore->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC),
                     ("Inconsistent state at %RRv rc=%Rrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC),
                     ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
    /* Try to switch eip back from patch code to the original guest
       instruction, but only when interrupts are enabled in patch code. */
    if (   (efl & X86_EFL_IF)
        && fPatchCode)
    {
        if (   rawRC < VINF_PATM_LEAVE_RC_FIRST
            || rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0; /* reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else  /* !IN_RING3 */
    /* NOTE(review): this path appears to be ring-3 only by design; confirm
       callers never reach here in other contexts. */
    AssertMsgFailed(("!IN_RING3"));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        /* Propagate a pending interrupt-inhibit (sti/mov ss shadow) to EM if
           it applies to the instruction we're returning to. */
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
/* QString <= StorageSlot: human readable name for a storage bus/port/device triple. */
template<> QString toString(const StorageSlot &storageSlot)
{
    QString strText;
    switch (storageSlot.bus)
    {
        case KStorageBus_IDE:
        {
            /* IDE slots are addressed by both port (channel) and device (master/slave). */
            int cMaxPorts = vboxGlobal().virtualBox().GetSystemProperties().GetMaxPortCountForStorageBus(storageSlot.bus);
            int cMaxDevices = vboxGlobal().virtualBox().GetSystemProperties().GetMaxDevicesPerPortForStorageBus(storageSlot.bus);
            if (storageSlot.port < 0 || storageSlot.port > cMaxPorts)
            {
                AssertMsgFailed(("No text for bus=%d & port=%d", storageSlot.bus, storageSlot.port));
                break;
            }
            if (storageSlot.device < 0 || storageSlot.device > cMaxDevices)
            {
                AssertMsgFailed(("No text for bus=%d & port=%d & device=%d", storageSlot.bus, storageSlot.port, storageSlot.device));
                break;
            }
            if (storageSlot.port == 0)
            {
                if (storageSlot.device == 0)
                    strText = QApplication::translate("VBoxGlobal", "IDE Primary Master", "StorageSlot");
                else if (storageSlot.device == 1)
                    strText = QApplication::translate("VBoxGlobal", "IDE Primary Slave", "StorageSlot");
            }
            else if (storageSlot.port == 1)
            {
                if (storageSlot.device == 0)
                    strText = QApplication::translate("VBoxGlobal", "IDE Secondary Master", "StorageSlot");
                else if (storageSlot.device == 1)
                    strText = QApplication::translate("VBoxGlobal", "IDE Secondary Slave", "StorageSlot");
            }
            break;
        }
        case KStorageBus_SATA:
        case KStorageBus_SCSI:
        case KStorageBus_SAS:
        {
            /* These buses are addressed by port only; device must be 0. */
            int cMaxPorts = vboxGlobal().virtualBox().GetSystemProperties().GetMaxPortCountForStorageBus(storageSlot.bus);
            if (storageSlot.port < 0 || storageSlot.port > cMaxPorts)
            {
                AssertMsgFailed(("No text for bus=%d & port=%d", storageSlot.bus, storageSlot.port));
                break;
            }
            if (storageSlot.device != 0)
            {
                AssertMsgFailed(("No text for bus=%d & port=%d & device=%d", storageSlot.bus, storageSlot.port, storageSlot.device));
                break;
            }
            if (storageSlot.bus == KStorageBus_SATA)
                strText = QApplication::translate("VBoxGlobal", "SATA Port %1", "StorageSlot").arg(storageSlot.port);
            else if (storageSlot.bus == KStorageBus_SCSI)
                strText = QApplication::translate("VBoxGlobal", "SCSI Port %1", "StorageSlot").arg(storageSlot.port);
            else
                strText = QApplication::translate("VBoxGlobal", "SAS Port %1", "StorageSlot").arg(storageSlot.port);
            break;
        }
        case KStorageBus_Floppy:
        {
            /* Floppy slots are addressed by device only; port must be 0. */
            int cMaxDevices = vboxGlobal().virtualBox().GetSystemProperties().GetMaxDevicesPerPortForStorageBus(storageSlot.bus);
            if (storageSlot.port != 0)
            {
                AssertMsgFailed(("No text for bus=%d & port=%d", storageSlot.bus, storageSlot.port));
                break;
            }
            if (storageSlot.device < 0 || storageSlot.device > cMaxDevices)
            {
                AssertMsgFailed(("No text for bus=%d & port=%d & device=%d", storageSlot.bus, storageSlot.port, storageSlot.device));
                break;
            }
            strText = QApplication::translate("VBoxGlobal", "Floppy Device %1", "StorageSlot").arg(storageSlot.device);
            break;
        }
        default:
        {
            AssertMsgFailed(("No text for bus=%d & port=%d & device=%d", storageSlot.bus, storageSlot.port, storageSlot.device));
            break;
        }
    }
    return strText;
}
DECL_FORCE_INLINE(int) rtSemRWRequestWrite(RTSEMRW hRWSem, RTMSINTERVAL cMillies, PCRTLOCKVALSRCPOS pSrcPos) { /* * Validate input. */ struct RTSEMRWINTERNAL *pThis = hRWSem; AssertPtrReturn(pThis, VERR_INVALID_HANDLE); AssertMsgReturn(pThis->u32Magic == RTSEMRW_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE); /* * Recursion? */ pthread_t Self = pthread_self(); pthread_t Writer; ATOMIC_GET_PTHREAD_T(&pThis->Writer, &Writer); if (Writer == Self) { #ifdef RTSEMRW_STRICT int rc9 = RTLockValidatorRecExclRecursion(&pThis->ValidatorWrite, pSrcPos); if (RT_FAILURE(rc9)) return rc9; #endif Assert(pThis->cWrites < INT32_MAX); pThis->cWrites++; return VINF_SUCCESS; } /* * Try lock it. */ RTTHREAD hThreadSelf = NIL_RTTHREAD; if (cMillies) { #ifdef RTSEMRW_STRICT hThreadSelf = RTThreadSelfAutoAdopt(); int rc9 = RTLockValidatorRecExclCheckOrderAndBlocking(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true, cMillies, RTTHREADSTATE_RW_WRITE, true); if (RT_FAILURE(rc9)) return rc9; #else hThreadSelf = RTThreadSelf(); RTThreadBlocking(hThreadSelf, RTTHREADSTATE_RW_WRITE, true); #endif } if (cMillies == RT_INDEFINITE_WAIT) { /* take rwlock */ int rc = pthread_rwlock_wrlock(&pThis->RWLock); RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE); if (rc) { AssertMsgFailed(("Failed write lock read-write sem %p, rc=%d.\n", hRWSem, rc)); return RTErrConvertFromErrno(rc); } } else { #ifdef RT_OS_DARWIN AssertMsgFailed(("Not implemented on Darwin yet because of incomplete pthreads API.")); return VERR_NOT_IMPLEMENTED; #else /* !RT_OS_DARWIN */ /* * Get current time and calc end of wait time. 
*/ struct timespec ts = {0,0}; clock_gettime(CLOCK_REALTIME, &ts); if (cMillies != 0) { ts.tv_nsec += (cMillies % 1000) * 1000000; ts.tv_sec += cMillies / 1000; if (ts.tv_nsec >= 1000000000) { ts.tv_nsec -= 1000000000; ts.tv_sec++; } } /* take rwlock */ int rc = pthread_rwlock_timedwrlock(&pThis->RWLock, &ts); RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_RW_WRITE); if (rc) { AssertMsg(rc == ETIMEDOUT, ("Failed read lock read-write sem %p, rc=%d.\n", hRWSem, rc)); return RTErrConvertFromErrno(rc); } #endif /* !RT_OS_DARWIN */ } ATOMIC_SET_PTHREAD_T(&pThis->Writer, Self); pThis->cWrites = 1; Assert(!pThis->cReaders); #ifdef RTSEMRW_STRICT RTLockValidatorRecExclSetOwner(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, true); #endif return VINF_SUCCESS; }