/**
 * Creates a new USB device and adds it to the list.
 *
 * @returns VBox status code.
 * @param   pDev    Pointer to the USB/IP exported device structure to take
 *                  the information for the new device from.
 */
int USBProxyBackendUsbIp::addDeviceToList(PUsbIpExportedDevice pDev)
{
    PUSBDEVICE pNew = (PUSBDEVICE)RTMemAllocZ(sizeof(USBDEVICE));
    if (!pNew)
        return VERR_NO_MEMORY;

    /* NOTE(review): the RTStrDup()/RTStrAPrintf() results below are not
     * checked; on allocation failure the members stay NULL — confirm all
     * consumers of this structure tolerate NULL string members. */
    pNew->pszManufacturer = RTStrDup("");
    pNew->pszProduct = RTStrDup("");
    pNew->pszSerialNumber = NULL;
    pNew->pszBackend = RTStrDup("usbip");

    /* Make sure the Bus id is 0 terminated. */
    pDev->szBusId[31] = '\0';
    /* The address embeds host, port and bus id so the device can be
     * identified/attached later via a single string. */
    RTStrAPrintf((char **)&pNew->pszAddress, "usbip://%s:%u:%s", m->pszHost, m->uPort, &pDev->szBusId[0]);

    /* Copy descriptor-level data verbatim from the exported device. */
    pNew->idVendor = pDev->u16VendorId;
    pNew->idProduct = pDev->u16ProductId;
    pNew->bcdDevice = pDev->u16BcdDevice;
    pNew->bDeviceClass = pDev->bDeviceClass;
    pNew->bDeviceSubClass = pDev->bDeviceSubClass;
    pNew->bDeviceProtocol = pDev->bDeviceProtocol;
    pNew->bNumConfigurations = pDev->bNumConfigurations;
    pNew->enmState = USBDEVICESTATE_USED_BY_HOST_CAPTURABLE;
    pNew->u64SerialHash = 0;
    pNew->bBus = (uint8_t)pDev->u32BusNum;
    pNew->bPort = (uint8_t)pDev->u32DevNum;

    /* Map the USB/IP speed constant to our enum and a plausible bcdUSB
     * (1.x for low/full/wireless, 2.0 for high, 3.0 for super). */
    switch (pDev->u32Speed)
    {
        case USBIP_SPEED_LOW:
            pNew->enmSpeed = USBDEVICESPEED_LOW;
            pNew->bcdUSB = 1 << 8;
            break;
        case USBIP_SPEED_FULL:
            pNew->enmSpeed = USBDEVICESPEED_FULL;
            pNew->bcdUSB = 1 << 8;
            break;
        case USBIP_SPEED_HIGH:
            pNew->enmSpeed = USBDEVICESPEED_HIGH;
            pNew->bcdUSB = 2 << 8;
            break;
        case USBIP_SPEED_WIRELESS:
            pNew->enmSpeed = USBDEVICESPEED_VARIABLE;
            pNew->bcdUSB = 1 << 8;
            break;
        case USBIP_SPEED_SUPER:
            pNew->enmSpeed = USBDEVICESPEED_SUPER;
            pNew->bcdUSB = 3 << 8;
            break;
        case USBIP_SPEED_UNKNOWN:
        default:
            pNew->bcdUSB = 1 << 8;
            pNew->enmSpeed = USBDEVICESPEED_UNKNOWN;
    }

    /* link it */
    pNew->pNext = NULL;
    /* NOTE(review): m->ppNext points at the tail's pNext slot, so *m->ppNext
     * is expected to be NULL here — which makes pPrev NULL for every entry.
     * Verify the back-link is really meant to be set this way. */
    pNew->pPrev = *m->ppNext;
    *m->ppNext = pNew;
    m->ppNext = &pNew->pNext;
    m->cDevicesCur++;

    return VINF_SUCCESS;
}
/**
 * Enumerates the host's network interfaces via the PF_ROUTE sysctl interface
 * list and appends a HostNetworkInterface object for each ethernet/VLAN
 * interface found. The interface carrying the default IPv4 route (if any)
 * is pushed to the front of the list.
 *
 * @returns VBox status code.
 * @param   list    Where to store the created interface objects.
 */
int NetIfList(std::list <ComObjPtr<HostNetworkInterface> > &list)
{
    int rc = VINF_SUCCESS;
    size_t cbNeeded;
    char *pBuf, *pNext;
    int aiMib[6];
    unsigned short u16DefaultIface = 0; /* shut up gcc. */
    bool fDefaultIfaceExistent = true;

    /* Get the index of the interface associated with default route. */
    rc = getDefaultIfaceIndex(&u16DefaultIface, PF_INET);
    if (RT_FAILURE(rc))
    {
        /* Not fatal — we simply won't reorder the list. */
        fDefaultIfaceExistent = false;
        rc = VINF_SUCCESS;
    }

    /* Query the interface table via sysctl: first the size, then the data. */
    aiMib[0] = CTL_NET;
    aiMib[1] = PF_ROUTE;
    aiMib[2] = 0;
    aiMib[3] = 0; /* address family */
    aiMib[4] = NET_RT_IFLIST;
    aiMib[5] = 0;

    if (sysctl(aiMib, 6, NULL, &cbNeeded, NULL, 0) < 0)
    {
        Log(("NetIfList: Failed to get estimate for list size (errno=%d).\n", errno));
        return RTErrConvertFromErrno(errno);
    }
    if ((pBuf = (char*)malloc(cbNeeded)) == NULL)
        return VERR_NO_MEMORY;
    if (sysctl(aiMib, 6, pBuf, &cbNeeded, NULL, 0) < 0)
    {
        free(pBuf);
        Log(("NetIfList: Failed to retrieve interface table (errno=%d).\n", errno));
        return RTErrConvertFromErrno(errno);
    }

    /* Dummy datagram socket, used only for the SIOCGIFFLAGS ioctl below. */
    int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
    if (sock < 0)
    {
        free(pBuf);
        Log(("NetIfList: socket() -> %d\n", errno));
        return RTErrConvertFromErrno(errno);
    }

    /* Walk the packed message buffer: each RTM_IFINFO entry is followed by
     * zero or more RTM_NEWADDR entries for that interface. */
    char *pEnd = pBuf + cbNeeded;
    for (pNext = pBuf; pNext < pEnd;)
    {
        struct if_msghdr *pIfMsg = (struct if_msghdr *)pNext;
        if (pIfMsg->ifm_type != RTM_IFINFO)
        {
            Log(("NetIfList: Got message %u while expecting %u.\n", pIfMsg->ifm_type, RTM_IFINFO));
            rc = VERR_INTERNAL_ERROR;
            break;
        }
        /* The link-level sockaddr (name + MAC) follows the message header. */
        struct sockaddr_dl *pSdl = (struct sockaddr_dl *)(pIfMsg + 1);
        size_t cbNameLen = pSdl->sdl_nlen + 1;
        PNETIFINFO pNew = (PNETIFINFO)RTMemAllocZ(RT_OFFSETOF(NETIFINFO, szName[cbNameLen]));
        if (!pNew)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        memcpy(pNew->MACAddress.au8, LLADDR(pSdl), sizeof(pNew->MACAddress.au8));
        pNew->enmMediumType = NETIF_T_ETHERNET;
        Assert(sizeof(pNew->szShortName) >= cbNameLen);
        strlcpy(pNew->szShortName, pSdl->sdl_data, cbNameLen);
        strlcpy(pNew->szName, pSdl->sdl_data, cbNameLen);

        /* Generate UUID from name and MAC address. */
        RTUUID uuid;
        RTUuidClear(&uuid);
        memcpy(&uuid, pNew->szShortName, RT_MIN(cbNameLen, sizeof(uuid)));
        /* Force the RFC 4122 variant and version-4 bits so the result is a
         * well-formed UUID regardless of the name bytes. */
        uuid.Gen.u8ClockSeqHiAndReserved = (uuid.Gen.u8ClockSeqHiAndReserved & 0x3f) | 0x80;
        uuid.Gen.u16TimeHiAndVersion = (uuid.Gen.u16TimeHiAndVersion & 0x0fff) | 0x4000;
        memcpy(uuid.Gen.au8Node, pNew->MACAddress.au8, sizeof(uuid.Gen.au8Node));
        pNew->Uuid = uuid;

        pNext += pIfMsg->ifm_msglen;
        /* Consume the RTM_NEWADDR messages belonging to this interface. */
        while (pNext < pEnd)
        {
            struct ifa_msghdr *pIfAddrMsg = (struct ifa_msghdr *)pNext;
            if (pIfAddrMsg->ifam_type != RTM_NEWADDR)
                break;
            extractAddressesToNetInfo(pIfAddrMsg->ifam_addrs, (char *)(pIfAddrMsg + 1), pIfAddrMsg->ifam_msglen + (char *)pIfAddrMsg, pNew);
            pNext += pIfAddrMsg->ifam_msglen;
        }

        if (pSdl->sdl_type == IFT_ETHER || pSdl->sdl_type == IFT_L2VLAN)
        {
            struct ifreq IfReq;
            RTStrCopy(IfReq.ifr_name, sizeof(IfReq.ifr_name), pNew->szShortName);
            if (ioctl(sock, SIOCGIFFLAGS, &IfReq) < 0)
            {
                Log(("NetIfList: ioctl(SIOCGIFFLAGS) -> %d\n", errno));
                pNew->enmStatus = NETIF_S_UNKNOWN;
            }
            else
                pNew->enmStatus = (IfReq.ifr_flags & IFF_UP) ? NETIF_S_UP : NETIF_S_DOWN;

            HostNetworkInterfaceType_T enmType;
            /* strncmp() != 0 means the name does NOT start with "vboxnet",
             * i.e. it is a regular (bridgeable) host interface. */
            if (strncmp(pNew->szName, RT_STR_TUPLE("vboxnet")))
                enmType = HostNetworkInterfaceType_Bridged;
            else
                enmType = HostNetworkInterfaceType_HostOnly;

            ComObjPtr<HostNetworkInterface> IfObj;
            IfObj.createObject();
            if (SUCCEEDED(IfObj->init(Bstr(pNew->szName), enmType, pNew)))
            {
                /* Make sure the default interface gets to the beginning. */
                if (   fDefaultIfaceExistent
                    && pIfMsg->ifm_index == u16DefaultIface)
                    list.push_front(IfObj);
                else
                    list.push_back(IfObj);
            }
        }
        /* IfObj->init() copies what it needs; the temporary info block can go. */
        RTMemFree(pNew);
    }
    close(sock);
    free(pBuf);
    return rc;
}
/**
 * Allocate a new symbol structure.
 *
 * The returned structure is zero-initialized.
 *
 * @returns Pointer to a new structure on success, NULL on failure.
 */
RTDECL(PRTDBGSYMBOL) RTDbgSymbolAlloc(void)
{
    PRTDBGSYMBOL pSymbol = (PRTDBGSYMBOL)RTMemAllocZ(sizeof(RTDBGSYMBOL));
    return pSymbol;
}
/** @copydoc VBOXHDDBACKEND::pfnCreate */
static int rawCreate(const char *pszFilename, uint64_t cbSize, unsigned uImageFlags, const char *pszComment, PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry, PCRTUUID pUuid, unsigned uOpenFlags, unsigned uPercentStart, unsigned uPercentSpan, PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage, PVDINTERFACE pVDIfsOperation, VDTYPE enmType, void **ppBackendData)
{
    LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p enmType=%u ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, enmType, ppBackendData));
    int rc;
    PRAWIMAGE pImage;

    /* Pick up the optional progress-reporting callback from the operation
     * interface chain; passed through to rawCreateImage(). */
    PFNVDPROGRESS pfnProgress = NULL;
    void *pvUser = NULL;
    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
    if (pIfProgress)
    {
        pfnProgress = pIfProgress->pfnProgress;
        pvUser = pIfProgress->Core.pvUser;
    }

    /* Check the VD container type. Yes, hard disk must be allowed, otherwise
     * various tools using this backend for hard disk images will fail. */
    if (enmType != VDTYPE_HDD && enmType != VDTYPE_DVD && enmType != VDTYPE_FLOPPY)
    {
        rc = VERR_VD_INVALID_TYPE;
        goto out;
    }

    /* Check open flags. All valid flags are supported. */
    if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Check remaining arguments. */
    if (   !VALID_PTR(pszFilename)
        || !*pszFilename
        || !VALID_PTR(pPCHSGeometry)
        || !VALID_PTR(pLCHSGeometry))
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    pImage = (PRAWIMAGE)RTMemAllocZ(sizeof(RAWIMAGE));
    if (!pImage)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }
    /* NOTE(review): pszFilename is stored by pointer, not copied — the caller
     * must keep the string alive for the lifetime of the backend data. */
    pImage->pszFilename = pszFilename;
    pImage->pStorage = NULL;
    pImage->pVDIfsDisk = pVDIfsDisk;
    pImage->pVDIfsImage = pVDIfsImage;

    rc = rawCreateImage(pImage, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, uOpenFlags, pfnProgress, pvUser, uPercentStart, uPercentSpan);
    if (RT_SUCCESS(rc))
    {
        /* So far the image is opened in read/write mode. Make sure the
         * image is opened in read-only mode if the caller requested that. */
        if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
        {
            rawFreeImage(pImage, false);
            rc = rawOpenImage(pImage, uOpenFlags);
            if (RT_FAILURE(rc))
            {
                RTMemFree(pImage);
                goto out;
            }
        }
        *ppBackendData = pImage;
    }
    else
        RTMemFree(pImage);

out:
    /* NOTE(review): *ppBackendData is logged here even on failure paths where
     * it was never written — reads whatever the caller had in that slot. */
    LogFlowFunc(("returns %Rrc (pBackendData=%#p)\n", rc, *ppBackendData));
    return rc;
}
/**
 * libdevinfo tree-walk callback: inspects one device node and, if it is a
 * USB device node (not an interface node), gathers its descriptor data and
 * appends a freshly allocated USBDEVICE to the caller's list.
 *
 * @returns DI_WALK_CONTINUE to keep walking, DI_WALK_TERMINATE on fatal
 *          (allocation) failure.
 * @param   Node    The devinfo node being visited.
 * @param   pvArg   Pointer to the USBDEVICELIST to append to.
 */
static int solarisWalkDeviceNode(di_node_t Node, void *pvArg)
{
    PUSBDEVICELIST pList = (PUSBDEVICELIST)pvArg;
    AssertPtrReturn(pList, DI_WALK_TERMINATE);

    /*
     * Check if it's a USB device in the first place.
     */
    bool fUSBDevice = false;
    char *pszCompatNames = NULL;
    int cCompatNames = di_compatible_names(Node, &pszCompatNames);
    /* The compatible-names property is a packed list of NUL-terminated strings. */
    for (int i = 0; i < cCompatNames; i++, pszCompatNames += strlen(pszCompatNames) + 1)
        if (!strncmp(pszCompatNames, RT_STR_TUPLE("usb")))
        {
            fUSBDevice = true;
            break;
        }

    if (!fUSBDevice)
        return DI_WALK_CONTINUE;

    /*
     * Check if it's a device node or interface.
     */
    int *pInt = NULL;
    char *pStr = NULL;
    int rc = DI_WALK_CONTINUE;
    if (di_prop_lookup_ints(DDI_DEV_T_ANY, Node, "interface", &pInt) < 0)
    {
        /* It's a device node. */
        char *pszDevicePath = di_devfs_path(Node);
        PUSBDEVICE pCur = (PUSBDEVICE)RTMemAllocZ(sizeof(*pCur));
        if (!pCur)
        {
            LogRel(("USBService: failed to allocate %d bytes for PUSBDEVICE.\n", sizeof(*pCur)));
            return DI_WALK_TERMINATE;
        }

        /* do/while(0) + AssertBreak gives single-exit error handling; the
         * cleanup below frees the half-built device unless it was linked in. */
        bool fValidDevice = false;
        do
        {
            AssertBreak(pszDevicePath);

            char *pszDriverName = di_driver_name(Node);

            /*
             * Skip hubs
             */
            if (   pszDriverName
                && !strcmp(pszDriverName, "hubd"))
            {
                break;
            }

            /*
             * Mandatory.
             * snv_85 and above have usb-dev-descriptor node properties, but older one's do not.
             * So if we cannot obtain the entire device descriptor, we try falling back to the
             * individual properties (those must not fail, if it does we drop the device).
             */
            uchar_t *pDevData = NULL;
            int cbProp = di_prop_lookup_bytes(DDI_DEV_T_ANY, Node, "usb-dev-descriptor", &pDevData);
            if (   cbProp > 0
                && pDevData)
            {
                usb_dev_descr_t *pDeviceDescriptor = (usb_dev_descr_t *)pDevData;
                pCur->bDeviceClass = pDeviceDescriptor->bDeviceClass;
                pCur->bDeviceSubClass = pDeviceDescriptor->bDeviceSubClass;
                pCur->bDeviceProtocol = pDeviceDescriptor->bDeviceProtocol;
                pCur->idVendor = pDeviceDescriptor->idVendor;
                pCur->idProduct = pDeviceDescriptor->idProduct;
                pCur->bcdDevice = pDeviceDescriptor->bcdDevice;
                pCur->bcdUSB = pDeviceDescriptor->bcdUSB;
                pCur->bNumConfigurations = pDeviceDescriptor->bNumConfigurations;
                pCur->fPartialDescriptor = false;
            }
            else
            {
                /* Fallback: individual properties; any miss drops the device. */
                AssertBreak(di_prop_lookup_ints(DDI_DEV_T_ANY, Node, "usb-vendor-id", &pInt) > 0);
                pCur->idVendor = (uint16_t)*pInt;
                AssertBreak(di_prop_lookup_ints(DDI_DEV_T_ANY, Node, "usb-product-id", &pInt) > 0);
                pCur->idProduct = (uint16_t)*pInt;
                AssertBreak(di_prop_lookup_ints(DDI_DEV_T_ANY, Node, "usb-revision-id", &pInt) > 0);
                pCur->bcdDevice = (uint16_t)*pInt;
                AssertBreak(di_prop_lookup_ints(DDI_DEV_T_ANY, Node, "usb-release", &pInt) > 0);
                pCur->bcdUSB = (uint16_t)*pInt;
                pCur->fPartialDescriptor = true;
            }

            char *pszPortAddr = di_bus_addr(Node);
            if (pszPortAddr)
                pCur->bPort = RTStrToUInt8(pszPortAddr); /* Bus & Port are mixed up (kernel driver/userland) */
            else
                pCur->bPort = 0;

            /* Synthesize a unique address string from IDs + devfs path. */
            char szBuf[PATH_MAX + 48];
            RTStrPrintf(szBuf, sizeof(szBuf), "%#x:%#x:%d:%s", pCur->idVendor, pCur->idProduct, pCur->bcdDevice, pszDevicePath);
            pCur->pszAddress = RTStrDup(szBuf);
            AssertBreak(pCur->pszAddress);

            pCur->pszDevicePath = RTStrDup(pszDevicePath);
            AssertBreak(pCur->pszDevicePath);

            pCur->pszBackend = RTStrDup("host");
            AssertBreak(pCur->pszBackend);

            /*
             * Optional (some devices don't have all these)
             */
            char *pszCopy;
            if (di_prop_lookup_strings(DDI_DEV_T_ANY, Node, "usb-product-name", &pStr) > 0)
            {
                pCur->pszProduct = pszCopy = RTStrDup(pStr);
                USBLibPurgeEncoding(pszCopy);
            }
            if (di_prop_lookup_strings(DDI_DEV_T_ANY, Node, "usb-vendor-name", &pStr) > 0)
            {
                pCur->pszManufacturer = pszCopy = RTStrDup(pStr);
                USBLibPurgeEncoding(pszCopy);
            }
            if (di_prop_lookup_strings(DDI_DEV_T_ANY, Node, "usb-serialno", &pStr) > 0)
            {
                pCur->pszSerialNumber = pszCopy = RTStrDup(pStr);
                USBLibPurgeEncoding(pszCopy);
            }

            /* Speed: bcdUSB 3.00 => super; otherwise presence of the
             * low-speed/high-speed boolean properties decides; default full. */
            if (pCur->bcdUSB == 0x300)
                pCur->enmSpeed = USBDEVICESPEED_SUPER;
            else if (di_prop_lookup_ints(DDI_DEV_T_ANY, Node, "low-speed", &pInt) >= 0)
                pCur->enmSpeed = USBDEVICESPEED_LOW;
            else if (di_prop_lookup_ints(DDI_DEV_T_ANY, Node, "high-speed", &pInt) >= 0)
                pCur->enmSpeed = USBDEVICESPEED_HIGH;
            else
                pCur->enmSpeed = USBDEVICESPEED_FULL;

            /* Determine state of the USB device. */
            pCur->enmState = solarisDetermineUSBDeviceState(pCur, Node);

            /*
             * Valid device, add it to the list.
             */
            fValidDevice = true;
            pCur->pPrev = pList->pTail;
            if (pList->pTail)
                pList->pTail = pList->pTail->pNext = pCur;
            else
                pList->pTail = pList->pHead = pCur;

            rc = DI_WALK_CONTINUE;
        } while (0);

        di_devfs_path_free(pszDevicePath);
        if (!fValidDevice)
            solarisFreeUSBDevice(pCur);
    }
    return rc;
}
/**
 * Initializes the adapter's 2D format-operation and surface-description
 * tables, merging in the overlay formats reported by each VHWA-enabled head.
 *
 * Both tables live in a single allocation owned by pAdapter->Formats; merge
 * failures for individual overlay formats are logged and skipped (best
 * effort), only allocation failure is fatal.
 *
 * @returns S_OK on success, E_FAIL if the table buffer cannot be allocated.
 * @param   pAdapter    The display adapter to initialize.
 */
HRESULT VBoxDispD3DGlobal2DFormatsInit(PVBOXWDDMDISP_ADAPTER pAdapter)
{
    HRESULT hr = S_OK;
    memset(&pAdapter->D3D, 0, sizeof (pAdapter->D3D));
    memset(&pAdapter->Formats, 0, sizeof (pAdapter->Formats));

    /* just calc the max number of formats */
    uint32_t cFormats = RT_ELEMENTS(gVBoxFormatOpsBase);
    uint32_t cSurfDescs = RT_ELEMENTS(gVBoxSurfDescsBase);
    uint32_t cOverlayFormats = 0;
    for (uint32_t i = 0; i < pAdapter->cHeads; ++i)
    {
        VBOXDISPVHWA_INFO *pVhwa = &pAdapter->aHeads[i].Vhwa;
        if (pVhwa->Settings.fFlags & VBOXVHWA_F_ENABLED)
        {
            cOverlayFormats += pVhwa->Settings.cFormats;
        }
    }

    cFormats += cOverlayFormats;
    cSurfDescs += cOverlayFormats;

    uint32_t cbFormatOps = cFormats * sizeof (FORMATOP);
    /* Bug fix: the rounding masks were '& ~3' (4 byte alignment) although the
     * intent stated below is 8 byte alignment for the surface descriptors;
     * use '& ~7' so (x + 7) & ~7 correctly rounds up to a multiple of 8. */
    cbFormatOps = (cbFormatOps + 7) & ~7U;
    /* ensure the surf descs are 8 byte aligned */
    uint32_t offSurfDescs = (cbFormatOps + 7) & ~7U;
    uint32_t cbSurfDescs = cSurfDescs * sizeof (DDSURFACEDESC);
    uint32_t cbBuf = offSurfDescs + cbSurfDescs;
    uint8_t* pvBuf = (uint8_t*)RTMemAllocZ(cbBuf);
    if (pvBuf)
    {
        /* Seed the format-ops table with the static base list. */
        pAdapter->Formats.paFormstOps = (FORMATOP*)pvBuf;
        memcpy ((void*)pAdapter->Formats.paFormstOps , gVBoxFormatOpsBase, sizeof (gVBoxFormatOpsBase));
        pAdapter->Formats.cFormstOps = RT_ELEMENTS(gVBoxFormatOpsBase);

        /* Merge in one FORMATOP entry per overlay format of each enabled head. */
        FORMATOP fo = {D3DDDIFMT_UNKNOWN, 0, 0, 0, 0};
        for (uint32_t i = 0; i < pAdapter->cHeads; ++i)
        {
            VBOXDISPVHWA_INFO *pVhwa = &pAdapter->aHeads[i].Vhwa;
            if (pVhwa->Settings.fFlags & VBOXVHWA_F_ENABLED)
            {
                for (uint32_t j = 0; j < pVhwa->Settings.cFormats; ++j)
                {
                    fo.Format = pVhwa->Settings.aFormats[j];
                    fo.Operations = FORMATOP_OVERLAY;
                    hr = vboxFormatOpsMerge((FORMATOP *)pAdapter->Formats.paFormstOps, &pAdapter->Formats.cFormstOps, cFormats, &fo);
                    if (FAILED(hr))
                    {
                        WARN(("vboxFormatOpsMerge failed, hr 0x%x", hr));
                    }
                }
            }
        }

        /* Seed the surface-description table with the static base list. */
        pAdapter->Formats.paSurfDescs = (DDSURFACEDESC*)(pvBuf + offSurfDescs);
        memcpy ((void*)pAdapter->Formats.paSurfDescs , gVBoxSurfDescsBase, sizeof (gVBoxSurfDescsBase));
        pAdapter->Formats.cSurfDescs = RT_ELEMENTS(gVBoxSurfDescsBase);

        /* Merge in a surf desc per overlay format that maps to a fourcc. */
        DDSURFACEDESC sd;
        for (uint32_t i = 0; i < pAdapter->cHeads; ++i)
        {
            VBOXDISPVHWA_INFO *pVhwa = &pAdapter->aHeads[i].Vhwa;
            if (pVhwa->Settings.fFlags & VBOXVHWA_F_ENABLED)
            {
                for (uint32_t j = 0; j < pVhwa->Settings.cFormats; ++j)
                {
                    uint32_t fourcc = vboxWddmFormatToFourcc(pVhwa->Settings.aFormats[j]);
                    if (fourcc)
                    {
                        vboxVhwaPopulateOverlayFourccSurfDesc(&sd, fourcc);
                        hr = vboxSurfDescMerge((DDSURFACEDESC *)pAdapter->Formats.paSurfDescs, &pAdapter->Formats.cSurfDescs, cSurfDescs, &sd);
                        if (FAILED(hr))
                        {
                            /* Bug fix: the message used to name vboxFormatOpsMerge. */
                            WARN(("vboxSurfDescMerge failed, hr 0x%x", hr));
                        }
                    }
                }
            }
        }
    }
    else
    {
        WARN(("RTMemAllocZ failed"));
        return E_FAIL;
    }
    /* Merge failures above are intentionally non-fatal. */
    return S_OK;
}
/** * Helper function to create XNU VFS vnode object. * * @param mp Mount data structure * @param type vnode type (directory, regular file, etc) * @param pParent Parent vnode object (NULL for VBoxVFS root vnode) * @param fIsRoot Flag that indicates if created vnode object is * VBoxVFS root vnode (TRUE for VBoxVFS root vnode, FALSE * for all aother vnodes) * @param Path within Shared Folder * @param ret Returned newly created vnode * * @return 0 on success, error code otherwise */ int vboxvfs_create_vnode_internal(struct mount *mp, enum vtype type, vnode_t pParent, int fIsRoot, PSHFLSTRING Path, vnode_t *ret) { int rc; vnode_t vnode; vboxvfs_vnode_t *pVnodeData; vboxvfs_mount_t *pMount; AssertReturn(mp, EINVAL); pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL); AssertReturn(pMount->pLockGroup, EINVAL); AssertReturn(Path, EINVAL); pVnodeData = (vboxvfs_vnode_t *)RTMemAllocZ(sizeof(vboxvfs_vnode_t)); AssertReturn(pVnodeData, ENOMEM); /* Initialize private data */ pVnodeData->pHandle = SHFL_HANDLE_NIL; pVnodeData->pPath = Path; pVnodeData->pLockAttr = lck_attr_alloc_init(); if (pVnodeData->pLockAttr) { pVnodeData->pLock = lck_rw_alloc_init(pMount->pLockGroup, pVnodeData->pLockAttr); if (pVnodeData->pLock) { struct vnode_fsparam vnode_params; vnode_params.vnfs_mp = mp; vnode_params.vnfs_vtype = type; vnode_params.vnfs_str = NULL; vnode_params.vnfs_dvp = pParent; vnode_params.vnfs_fsnode = pVnodeData; /** Private data attached per xnu's vnode object */ vnode_params.vnfs_vops = g_VBoxVFSVnodeDirOpsVector; vnode_params.vnfs_markroot = fIsRoot; vnode_params.vnfs_marksystem = FALSE; vnode_params.vnfs_rdev = 0; vnode_params.vnfs_filesize = 0; vnode_params.vnfs_cnp = NULL; vnode_params.vnfs_flags = VNFS_ADDFSREF | VNFS_NOCACHE; rc = vnode_create(VNCREATE_FLAVOR, sizeof(vnode_params), &vnode_params, &vnode); if (rc == 0) *ret = vnode; return 0; } else { PDEBUG("Unable to allocate lock"); rc = ENOMEM; } lck_attr_free(pVnodeData->pLockAttr); } else { 
PDEBUG("Unable to allocate lock attr"); rc = ENOMEM; } return rc; }
/**
 * Virtio Pci get queue routine. Allocates a PCI queue and DMA resources.
 *
 * @param pDevice           Pointer to the Virtio device instance.
 * @param pQueue            Where to store the queue.
 *
 * @return An allocated Virtio Pci queue, or NULL in case of errors.
 *
 * NOTE(review): this function appears truncated in this chunk — the error
 * cleanup branches following the final 'else' below are not visible here.
 */
static void *VirtioPciGetQueue(PVIRTIODEVICE pDevice, PVIRTIOQUEUE pQueue)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciGetQueue pDevice=%p pQueue=%p\n", pDevice, pQueue));
    AssertReturn(pDevice, NULL);

    virtio_pci_t *pPci = pDevice->pvHyper;
    AssertReturn(pPci, NULL);

    /*
     * Select a Queue.
     */
    ddi_put16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_SEL), pQueue->QueueIndex);

    /*
     * Get the currently selected Queue's size.
     */
    pQueue->Ring.cDesc = ddi_get16(pPci->hIO, (uint16_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_NUM));
    if (RT_UNLIKELY(!pQueue->Ring.cDesc))
    {
        LogRel((VIRTIOLOGNAME ": VirtioPciGetQueue: Queue[%d] has no descriptors.\n", pQueue->QueueIndex));
        return NULL;
    }

    /*
     * Check if it's already active.
     */
    uint32_t QueuePFN = ddi_get32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN));
    if (QueuePFN != 0)
    {
        LogRel((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d] is already used.\n", pQueue->QueueIndex));
        return NULL;
    }

    LogFlow(("Queue[%d] has %d slots.\n", pQueue->QueueIndex, pQueue->Ring.cDesc));

    /*
     * Allocate and initialize Pci queue data.
     */
    virtio_pci_queue_t *pPciQueue = RTMemAllocZ(sizeof(virtio_pci_queue_t));
    if (pPciQueue)
    {
        /*
         * Setup DMA.
         */
        size_t cbQueue = VirtioRingSize(pQueue->Ring.cDesc, VIRTIO_PCI_RING_ALIGN);
        int rc = ddi_dma_alloc_handle(pDevice->pDip, &g_VirtioPciDmaAttrRing, DDI_DMA_SLEEP, 0 /* addr */, &pPciQueue->hDMA);
        if (rc == DDI_SUCCESS)
        {
            rc = ddi_dma_mem_alloc(pPciQueue->hDMA, cbQueue, &g_VirtioPciAccAttrRing, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0 /* addr */, &pQueue->pQueue, &pPciQueue->cbBuf, &pPciQueue->hIO);
            if (rc == DDI_SUCCESS)
            {
                AssertRelease(pPciQueue->cbBuf >= cbQueue);
                ddi_dma_cookie_t DmaCookie;
                uint_t cCookies;
                rc = ddi_dma_addr_bind_handle(pPciQueue->hDMA, NULL /* addrspace */, pQueue->pQueue, pPciQueue->cbBuf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0 /* addr */, &DmaCookie, &cCookies);
                if (rc == DDI_SUCCESS)
                {
                    /* Physical address and its page frame number for the device. */
                    pPciQueue->physBuf = DmaCookie.dmac_laddress;
                    pPciQueue->pageBuf = pPciQueue->physBuf >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

                    LogFlow((VIRTIOLOGNAME ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %#x\n", pQueue->QueueIndex, pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf));
                    cmn_err(CE_NOTE, ":VirtioPciGetQueue: Queue[%d]%p physBuf=%x pfn of Buf %x\n", pQueue->QueueIndex, pQueue->pQueue, pPciQueue->physBuf, pPciQueue->pageBuf);

                    /*
                     * Activate the queue and initialize a ring for the queue.
                     */
                    memset(pQueue->pQueue, 0, pPciQueue->cbBuf);
                    ddi_put32(pPci->hIO, (uint32_t *)(pPci->addrIOBase + VIRTIO_PCI_QUEUE_PFN), pPciQueue->pageBuf);
                    VirtioRingInit(pQueue, pQueue->Ring.cDesc, pQueue->pQueue, VIRTIO_PCI_RING_ALIGN);
                    return pPciQueue;
                }
                else
/**
 * Opens a Parallels disk image, or — when the header magic does not match
 * but the file name carries the ".hdd" suffix — treats the file as a
 * fixed-size raw image.
 *
 * @returns VBox status code.
 * @param   pImage      The image state; pszFilename and the VD interface
 *                      chains must already be set by the caller.
 * @param   uOpenFlags  VD_OPEN_FLAGS_* controlling how the file is opened.
 */
static int parallelsOpenImage(PPARALLELSIMAGE pImage, unsigned uOpenFlags)
{
    int rc = VINF_SUCCESS;
    ParallelsHeader parallelsHeader;

    pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk);
    pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage);
    AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER);

    rc = vdIfIoIntFileOpen(pImage->pIfIo, pImage->pszFilename,
                           VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */),
                           &pImage->pStorage);
    if (RT_FAILURE(rc))
        goto out;

    rc = vdIfIoIntFileGetSize(pImage->pIfIo, pImage->pStorage,
                              &pImage->cbFileCurrent);
    if (RT_FAILURE(rc))
        goto out;
    AssertMsg(pImage->cbFileCurrent % 512 == 0, ("File size is not a multiple of 512\n"));

    /* Bug fix: '&parallelsHeader' had been mangled to '¶llelsHeader'. */
    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pImage->pStorage, 0,
                               &parallelsHeader, sizeof(parallelsHeader), NULL);
    if (RT_FAILURE(rc))
        goto out;

    if (memcmp(parallelsHeader.HeaderIdentifier, PARALLELS_HEADER_MAGIC, 16))
    {
        /* Check if the file has hdd as extension. It is a fixed size raw image then. */
        char *pszExtension = RTPathExt(pImage->pszFilename);
        /* Bug fix: RTPathExt() returns NULL for names without an extension;
         * the old code passed that straight into strcmp(). */
        if (!pszExtension || strcmp(pszExtension, ".hdd"))
        {
            rc = VERR_VD_PARALLELS_INVALID_HEADER;
            goto out;
        }

        /* This is a fixed size image. */
        pImage->uImageFlags |= VD_IMAGE_FLAGS_FIXED;
        pImage->cbSize = pImage->cbFileCurrent;

        pImage->PCHSGeometry.cHeads = 16;
        pImage->PCHSGeometry.cSectors = 63;
        uint64_t cCylinders = pImage->cbSize / (512 * pImage->PCHSGeometry.cSectors * pImage->PCHSGeometry.cHeads);
        pImage->PCHSGeometry.cCylinders = (uint32_t)cCylinders;
    }
    else
    {
        if (parallelsHeader.uVersion != PARALLELS_DISK_VERSION)
        {
            rc = VERR_NOT_SUPPORTED;
            goto out;
        }

        /* Guard the bitmap allocation below against absurd entry counts. */
        if (parallelsHeader.cEntriesInAllocationBitmap > (1 << 30))
        {
            rc = VERR_NOT_SUPPORTED;
            goto out;
        }

        Log(("cSectors=%u\n", parallelsHeader.cSectors));
        pImage->cbSize = ((uint64_t)parallelsHeader.cSectors) * 512;
        pImage->uImageFlags = VD_IMAGE_FLAGS_NONE;
        pImage->cAllocationBitmapEntries = parallelsHeader.cEntriesInAllocationBitmap;
        pImage->pAllocationBitmap = (uint32_t *)RTMemAllocZ((uint32_t)pImage->cAllocationBitmapEntries * sizeof(uint32_t));
        if (!pImage->pAllocationBitmap)
        {
            rc = VERR_NO_MEMORY;
            goto out;
        }

        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pImage->pStorage,
                                   sizeof(ParallelsHeader), pImage->pAllocationBitmap,
                                   pImage->cAllocationBitmapEntries * sizeof(uint32_t), NULL);
        if (RT_FAILURE(rc))
            goto out;

        pImage->PCHSGeometry.cCylinders = parallelsHeader.cCylinders;
        pImage->PCHSGeometry.cHeads = parallelsHeader.cHeads;
        pImage->PCHSGeometry.cSectors = parallelsHeader.cSectorsPerTrack;
    }

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
/** * Creates a new instance. * * @returns VBox status code. * @param pGlobals The globals. * @param pszName The instance name. * @param ppDevPort Where to store the pointer to our port interface. */ static int vboxPciNewInstance(PVBOXRAWPCIGLOBALS pGlobals, uint32_t u32HostAddress, uint32_t fFlags, PRAWPCIPERVM pVmCtx, PRAWPCIDEVPORT *ppDevPort, uint32_t *pfDevFlags) { int rc; PVBOXRAWPCIINS pNew = (PVBOXRAWPCIINS)RTMemAllocZ(sizeof(*pNew)); if (!pNew) return VERR_NO_MEMORY; pNew->pGlobals = pGlobals; pNew->hSpinlock = NIL_RTSPINLOCK; pNew->cRefs = 1; pNew->pNext = NULL; pNew->HostPciAddress = u32HostAddress; pNew->pVmCtx = pVmCtx; pNew->DevPort.u32Version = RAWPCIDEVPORT_VERSION; pNew->DevPort.pfnInit = vboxPciDevInit; pNew->DevPort.pfnDeinit = vboxPciDevDeinit; pNew->DevPort.pfnDestroy = vboxPciDevDestroy; pNew->DevPort.pfnGetRegionInfo = vboxPciDevGetRegionInfo; pNew->DevPort.pfnMapRegion = vboxPciDevMapRegion; pNew->DevPort.pfnUnmapRegion = vboxPciDevUnmapRegion; pNew->DevPort.pfnPciCfgRead = vboxPciDevPciCfgRead; pNew->DevPort.pfnPciCfgWrite = vboxPciDevPciCfgWrite; pNew->DevPort.pfnPciCfgRead = vboxPciDevPciCfgRead; pNew->DevPort.pfnPciCfgWrite = vboxPciDevPciCfgWrite; pNew->DevPort.pfnRegisterIrqHandler = vboxPciDevRegisterIrqHandler; pNew->DevPort.pfnUnregisterIrqHandler = vboxPciDevUnregisterIrqHandler; pNew->DevPort.pfnPowerStateChange = vboxPciDevPowerStateChange; pNew->DevPort.u32VersionEnd = RAWPCIDEVPORT_VERSION; rc = RTSpinlockCreate(&pNew->hSpinlock); if (RT_SUCCESS(rc)) { rc = RTSemFastMutexCreate(&pNew->hFastMtx); if (RT_SUCCESS(rc)) { rc = pNew->DevPort.pfnInit(&pNew->DevPort, fFlags); if (RT_SUCCESS(rc)) { *ppDevPort = &pNew->DevPort; pNew->pNext = pGlobals->pInstanceHead; pGlobals->pInstanceHead = pNew; } else { RTSemFastMutexDestroy(pNew->hFastMtx); RTSpinlockDestroy(pNew->hSpinlock); RTMemFree(pNew); } } } return rc; }
/**
 * Virtio Pci private data allocation routine.
 *
 * @param pDevice  Pointer to the Virtio device instance.
 * @return Allocated, zero-initialized private data structure which must only
 *         be freed by calling VirtioPciFree(); NULL on allocation failure.
 */
static void *VirtioPciAlloc(PVIRTIODEVICE pDevice)
{
    LogFlowFunc((VIRTIOLOGNAME ":VirtioPciAlloc pDevice=%p\n", pDevice));
    return RTMemAllocZ(sizeof(virtio_pci_t));
}
/**
 * Initialize the network shaper.
 *
 * Allocates the shaper state, creates the bandwidth groups described under
 * CFGM "PDM/NetworkShaper/BwGroups", spins up the TX thread and publishes
 * the shaper in the user-mode VM structure. Everything is rolled back on
 * failure.
 *
 * @returns VBox status code
 * @param   pVM Pointer to the VM.
 */
int pdmR3NetShaperInit(PVM pVM)
{
    LogFlowFunc((": pVM=%p\n", pVM));

    VM_ASSERT_EMT(pVM);

    PPDMNETSHAPER pNetShaper = NULL;

    int rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_NET_SHAPER, sizeof(PDMNETSHAPER), (void **)&pNetShaper);
    if (RT_SUCCESS(rc))
    {
        PCFGMNODE pCfgRoot      = CFGMR3GetRoot(pVM);
        PCFGMNODE pCfgNetShaper = CFGMR3GetChild(CFGMR3GetChild(pCfgRoot, "PDM"), "NetworkShaper");

        pNetShaper->pVM = pVM;
        rc = RTCritSectInit(&pNetShaper->cs);
        if (RT_SUCCESS(rc))
        {
            /* Create all bandwidth groups. */
            PCFGMNODE pCfgBwGrp = CFGMR3GetChild(pCfgNetShaper, "BwGroups");
            if (pCfgBwGrp)
            {
                for (PCFGMNODE pCur = CFGMR3GetFirstChild(pCfgBwGrp); pCur; pCur = CFGMR3GetNextChild(pCur))
                {
                    uint64_t cbMax;
                    /* The child node's name is the group id; +1 for the terminator. */
                    size_t cchName = CFGMR3GetNameLen(pCur) + 1;
                    char *pszBwGrpId = (char *)RTMemAllocZ(cchName);

                    if (!pszBwGrpId)
                    {
                        rc = VERR_NO_MEMORY;
                        break;
                    }

                    rc = CFGMR3GetName(pCur, pszBwGrpId, cchName);
                    AssertRC(rc);

                    if (RT_SUCCESS(rc))
                        rc = CFGMR3QueryU64(pCur, "Max", &cbMax);
                    if (RT_SUCCESS(rc))
                        rc = pdmNsBwGroupCreate(pNetShaper, pszBwGrpId, cbMax);

                    RTMemFree(pszBwGrpId);

                    if (RT_FAILURE(rc))
                        break;
                }
            }

            if (RT_SUCCESS(rc))
            {
                PUVM pUVM = pVM->pUVM;
                AssertMsg(!pUVM->pdm.s.pNetShaper, ("Network shaper was already initialized\n"));

                char szDesc[256];
                static unsigned iThread;

                /* iThread is a process-global counter so each shaper TX thread
                 * gets a unique name. */
                RTStrPrintf(szDesc, sizeof(szDesc), "PDMNSTXThread-%d", ++iThread);
                rc = PDMR3ThreadCreate(pVM, &pNetShaper->hTxThread, pNetShaper, pdmR3NsTxThread, pdmR3NsTxWakeUp, 0, RTTHREADTYPE_IO, szDesc);
                if (RT_SUCCESS(rc))
                {
                    pUVM->pdm.s.pNetShaper = pNetShaper;
                    return VINF_SUCCESS;
                }
            }

            /* Failure: undo in reverse order of construction. */
            RTCritSectDelete(&pNetShaper->cs);
        }

        MMR3HeapFree(pNetShaper);
    }

    LogFlowFunc((": pVM=%p rc=%Rrc\n", pVM, rc));
    return rc;
}
/**
 * Creates the default logger instance for a VBox process.
 *
 * @returns Pointer to the logger instance.
 */
RTDECL(PRTLOGGER) RTLogDefaultInit(void)
{
    /*
     * Initialize the default logger instance.
     * Take care to do this once and not recursively.
     */
    static volatile uint32_t fInitializing = 0;
    PRTLOGGER pLogger;
    int rc;

    /* Only the first caller proceeds; concurrent or recursive callers get
     * the current (possibly still NULL) instance back immediately. */
    if (g_pLogger || !ASMAtomicCmpXchgU32(&fInitializing, 1, 0))
        return g_pLogger;

#ifdef IN_RING3
    /*
     * Assert the group definitions.
     * (Sanity check that the LOG_GROUP_* enum and g_apszGroups stay in sync.)
     */
#define ASSERT_LOG_GROUP(grp) ASSERT_LOG_GROUP2(LOG_GROUP_##grp, #grp)
#define ASSERT_LOG_GROUP2(def, str) \
    do { if (strcmp(g_apszGroups[def], str)) {printf("%s='%s' expects '%s'\n", #def, g_apszGroups[def], str); RTAssertDoPanic(); } } while (0)
    ASSERT_LOG_GROUP(DEFAULT);
    ASSERT_LOG_GROUP(CFGM);
    ASSERT_LOG_GROUP(CPUM);
    ASSERT_LOG_GROUP(CSAM);
    ASSERT_LOG_GROUP(DBGC);
    ASSERT_LOG_GROUP(DBGF);
    ASSERT_LOG_GROUP(DBGF_INFO);
    ASSERT_LOG_GROUP(DEV);
    ASSERT_LOG_GROUP(DEV_ACPI);
    ASSERT_LOG_GROUP(DEV_APIC);
    ASSERT_LOG_GROUP(DEV_AUDIO);
    ASSERT_LOG_GROUP(DEV_FDC);
    ASSERT_LOG_GROUP(DEV_HPET);
    ASSERT_LOG_GROUP(DEV_IDE);
    ASSERT_LOG_GROUP(DEV_KBD);
    ASSERT_LOG_GROUP(DEV_LPC);
    ASSERT_LOG_GROUP(DEV_NE2000);
    ASSERT_LOG_GROUP(DEV_PC);
    ASSERT_LOG_GROUP(DEV_PC_ARCH);
    ASSERT_LOG_GROUP(DEV_PC_BIOS);
    ASSERT_LOG_GROUP(DEV_PCI);
    ASSERT_LOG_GROUP(DEV_PCNET);
    ASSERT_LOG_GROUP(DEV_PIC);
    ASSERT_LOG_GROUP(DEV_PIT);
    ASSERT_LOG_GROUP(DEV_RTC);
    ASSERT_LOG_GROUP(DEV_SERIAL);
    ASSERT_LOG_GROUP(DEV_SMC);
    ASSERT_LOG_GROUP(DEV_USB);
    ASSERT_LOG_GROUP(DEV_VGA);
    ASSERT_LOG_GROUP(DEV_VMM);
    ASSERT_LOG_GROUP(DEV_VMM_STDERR);
    ASSERT_LOG_GROUP(DIS);
    ASSERT_LOG_GROUP(DRV);
    ASSERT_LOG_GROUP(DRV_ACPI);
    ASSERT_LOG_GROUP(DRV_BLOCK);
    ASSERT_LOG_GROUP(DRV_FLOPPY);
    ASSERT_LOG_GROUP(DRV_HOST_DVD);
    ASSERT_LOG_GROUP(DRV_HOST_FLOPPY);
    ASSERT_LOG_GROUP(DRV_ISO);
    ASSERT_LOG_GROUP(DRV_KBD_QUEUE);
    ASSERT_LOG_GROUP(DRV_MOUSE_QUEUE);
    ASSERT_LOG_GROUP(DRV_NAT);
    ASSERT_LOG_GROUP(DRV_RAW_IMAGE);
    ASSERT_LOG_GROUP(DRV_TUN);
    ASSERT_LOG_GROUP(DRV_USBPROXY);
    ASSERT_LOG_GROUP(DRV_VBOXHDD);
    ASSERT_LOG_GROUP(DRV_VSWITCH);
    ASSERT_LOG_GROUP(DRV_VUSB);
    ASSERT_LOG_GROUP(EM);
    ASSERT_LOG_GROUP(GUI);
    ASSERT_LOG_GROUP(HGCM);
    ASSERT_LOG_GROUP(HWACCM);
    ASSERT_LOG_GROUP(IOM);
    ASSERT_LOG_GROUP(MAIN);
    ASSERT_LOG_GROUP(MM);
    ASSERT_LOG_GROUP(MM_HEAP);
    ASSERT_LOG_GROUP(MM_HYPER);
    ASSERT_LOG_GROUP(MM_HYPER_HEAP);
    ASSERT_LOG_GROUP(MM_PHYS);
    ASSERT_LOG_GROUP(MM_POOL);
    ASSERT_LOG_GROUP(NAT_SERVICE);
    ASSERT_LOG_GROUP(NET_SERVICE);
    ASSERT_LOG_GROUP(PATM);
    ASSERT_LOG_GROUP(PDM);
    ASSERT_LOG_GROUP(PDM_DEVICE);
    ASSERT_LOG_GROUP(PDM_DRIVER);
    ASSERT_LOG_GROUP(PDM_LDR);
    ASSERT_LOG_GROUP(PDM_QUEUE);
    ASSERT_LOG_GROUP(PGM);
    ASSERT_LOG_GROUP(PGM_POOL);
    ASSERT_LOG_GROUP(REM);
    ASSERT_LOG_GROUP(REM_DISAS);
    ASSERT_LOG_GROUP(REM_HANDLER);
    ASSERT_LOG_GROUP(REM_IOPORT);
    ASSERT_LOG_GROUP(REM_MMIO);
    ASSERT_LOG_GROUP(REM_PRINTF);
    ASSERT_LOG_GROUP(REM_RUN);
    ASSERT_LOG_GROUP(SELM);
    ASSERT_LOG_GROUP(SSM);
    ASSERT_LOG_GROUP(STAM);
    ASSERT_LOG_GROUP(SUP);
    ASSERT_LOG_GROUP(TM);
    ASSERT_LOG_GROUP(TRPM);
    ASSERT_LOG_GROUP(VM);
    ASSERT_LOG_GROUP(VMM);
    ASSERT_LOG_GROUP(VRDP);
#undef ASSERT_LOG_GROUP
#undef ASSERT_LOG_GROUP2
#endif /* IN_RING3 */

    /*
     * Create the default logging instance.
     */
#ifdef IN_RING3
# ifndef IN_GUEST
    /* Host ring-3: log to a timestamped file named after the executable. */
    char szExecName[RTPATH_MAX];
    if (!RTProcGetExecutablePath(szExecName, sizeof(szExecName)))
        strcpy(szExecName, "VBox");
    RTTIMESPEC TimeSpec;
    RTTIME Time;
    RTTimeExplode(&Time, RTTimeNow(&TimeSpec));
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_FILE, "./%04d-%02d-%02d-%02d-%02d-%02d.%03d-%s-%d.log", Time.i32Year, Time.u8Month, Time.u8MonthDay, Time.u8Hour, Time.u8Minute, Time.u8Second, Time.u32Nanosecond / 10000000, RTPathFilename(szExecName), RTProcSelf());
    if (RT_SUCCESS(rc))
    {
        /*
         * Write a log header.
         */
        char szBuf[RTPATH_MAX];
        RTTimeSpecToString(&TimeSpec, szBuf, sizeof(szBuf));
        RTLogLoggerEx(pLogger, 0, ~0U, "Log created: %s\n", szBuf);
        RTLogLoggerEx(pLogger, 0, ~0U, "Executable: %s\n", szExecName);

        /* executable and arguments - tricky and all platform specific. */
# if defined(RT_OS_WINDOWS)
        RTLogLoggerEx(pLogger, 0, ~0U, "Commandline: %ls\n", GetCommandLineW());
# elif defined(RT_OS_SOLARIS)
        /* Read our own psinfo to recover the argument vector. */
        psinfo_t psi;
        char szArgFileBuf[80];
        RTStrPrintf(szArgFileBuf, sizeof(szArgFileBuf), "/proc/%ld/psinfo", (long)getpid());
        FILE* pFile = fopen(szArgFileBuf, "rb");
        if (pFile)
        {
            if (fread(&psi, sizeof(psi), 1, pFile) == 1)
            {
# if 0 /* 100% safe:*/
                RTLogLoggerEx(pLogger, 0, ~0U, "Args: %s\n", psi.pr_psargs);
# else /* probably safe: */
                const char * const *argv = (const char * const *)psi.pr_argv;
                for (int iArg = 0; iArg < psi.pr_argc; iArg++)
                    RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%d]: %s\n", iArg, argv[iArg]);
# endif
            }
            fclose(pFile);
        }
# elif defined(RT_OS_LINUX)
        /* /proc/self/cmdline is NUL-separated; print one line per argument. */
        FILE *pFile = fopen("/proc/self/cmdline", "r");
        if (pFile)
        {
            /* braindead */
            unsigned iArg = 0;
            int ch;
            bool fNew = true;
            while (!feof(pFile) && (ch = fgetc(pFile)) != EOF)
            {
                if (fNew)
                {
                    RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%u]: ", iArg++);
                    fNew = false;
                }
                if (ch)
                    RTLogLoggerEx(pLogger, 0, ~0U, "%c", ch);
                else
                {
                    RTLogLoggerEx(pLogger, 0, ~0U, "\n");
                    fNew = true;
                }
            }
            if (!fNew)
                RTLogLoggerEx(pLogger, 0, ~0U, "\n");
            fclose(pFile);
        }
# elif defined(RT_OS_FREEBSD)
        /* Retrieve the required length first */
        int aiName[4];
        aiName[0] = CTL_KERN;
        aiName[1] = KERN_PROC;
        aiName[2] = KERN_PROC_ARGS; /* Introduced in FreeBSD 4.0 */
        aiName[3] = getpid();
        size_t cchArgs = 0;
        int rcBSD = sysctl(aiName, RT_ELEMENTS(aiName), NULL, &cchArgs, NULL, 0);
        if (cchArgs > 0)
        {
            char *pszArgFileBuf = (char *)RTMemAllocZ(cchArgs + 1 /* Safety */);
            if (pszArgFileBuf)
            {
                /* Retrieve the argument list */
                rcBSD = sysctl(aiName, RT_ELEMENTS(aiName), pszArgFileBuf, &cchArgs, NULL, 0);
                if (!rcBSD)
                {
                    /* The buffer holds NUL-separated arguments back to back. */
                    unsigned iArg = 0;
                    size_t off = 0;
                    while (off < cchArgs)
                    {
                        size_t cchArg = strlen(&pszArgFileBuf[off]);
                        RTLogLoggerEx(pLogger, 0, ~0U, "Arg[%u]: %s\n", iArg, &pszArgFileBuf[off]);

                        /* advance */
                        off += cchArg + 1;
                        iArg++;
                    }
                }
                RTMemFree(pszArgFileBuf);
            }
        }
# elif defined(RT_OS_L4) || defined(RT_OS_OS2) || defined(RT_OS_DARWIN)
        /* commandline? */
# else
# error needs porting.
# endif
    }

# else /* IN_GUEST */
    /* The user destination is backdoor logging. */
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_USER, "VBox.log");
# endif /* IN_GUEST */

#else /* IN_RING0 */
# ifndef IN_GUEST
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_FILE, "VBox-ring0.log");
# else /* IN_GUEST */
    rc = RTLogCreate(&pLogger, 0, NULL, "VBOX_LOG", RT_ELEMENTS(g_apszGroups), &g_apszGroups[0], RTLOGDEST_USER, "VBox-ring0.log");
# endif /* IN_GUEST */
    if (RT_SUCCESS(rc))
    {
        /*
         * This is where you set your ring-0 logging preferences.
         *
         * On platforms which don't differ between debugger and kernel
         * log printing, STDOUT is gonna be a stub and the DEBUGGER
         * destination is the one doing all the work. On platforms
         * that do differ (like Darwin), STDOUT is the kernel log.
         */
# if defined(DEBUG_bird)
        /*RTLogGroupSettings(pLogger, "all=~0 -default.l6.l5.l4.l3");*/
        RTLogFlags(pLogger, "enabled unbuffered pid tid");
# ifndef IN_GUEST
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# endif
# if defined(DEBUG_sandervl) && !defined(IN_GUEST)
        RTLogGroupSettings(pLogger, "+all");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_ramshankar) /* Guest ring-0 as well */
        RTLogGroupSettings(pLogger, "+all.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_aleksey) /* Guest ring-0 as well */
        RTLogGroupSettings(pLogger, "+net_adp_drv.e.l.f+net_flt_drv.e.l.l2.l3.l4.l5.f");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# if defined(DEBUG_andy) /* Guest ring-0 as well */
        RTLogGroupSettings(pLogger, "+all.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered pid tid");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
# if defined(DEBUG_misha) /* Guest ring-0 as well */
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if defined(DEBUG_leo) /* Guest ring-0 as well */
        RTLogGroupSettings(pLogger, "+drv_mouse.e.l.f+drv_miniport.e.l.f+drv_display.e.l.f");
        RTLogFlags(pLogger, "enabled unbuffered");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER;
# endif
# if 0 /* vboxdrv logging - ATTENTION: this is what we're referring to guys! Change to '# if 1'. */
        RTLogGroupSettings(pLogger, "all=~0 -default.l6.l5.l4.l3");
        RTLogFlags(pLogger, "enabled unbuffered tid");
        pLogger->fDestFlags |= RTLOGDEST_DEBUGGER | RTLOGDEST_STDOUT;
# endif
    }
#endif /* IN_RING0 */
    /* Publish the instance (NULL on failure) and return it. */
    return g_pLogger = RT_SUCCESS(rc) ? pLogger : NULL;
}
/**
 * Inspect all running processes for executables and dlls that might be worth sharing
 * with other VMs.
 *
 * Runs two passes: first the modules of the current (user-mode) process, then
 * the loaded kernel modules (when ZwQuerySystemInformation is resolved).
 * After each pass the global page-classification counters are printed and
 * reset so the second pass starts from zero.
 */
void VBoxServicePageSharingInspectGuest()
{
    /* Pass 1: user-mode modules of the current process. */
    VBoxServicePageSharingInspectModules(GetCurrentProcessId());

    printf("\n\nUSER RESULTS\n");
    printf("cNotPresentPages = %d\n", cNotPresentPages);
    printf("cWritablePages = %d\n", cWritablePages);
    printf("cPrivatePages = %d\n", cPrivatePages);
    printf("cSharedPages = %d\n", cSharedPages);

    /* Reset the counters so the kernel pass below reports its own numbers. */
    cNotPresentPages = 0;
    cWritablePages = 0;
    cPrivatePages = 0;
    cSharedPages = 0;

    /* Check all loaded kernel modules. */
    if (ZwQuerySystemInformation)
    {
        ULONG                cbBuffer = 0;
        PVOID                pBuffer = NULL;
        PRTL_PROCESS_MODULES pSystemModules;

        /* First call is only expected to report the required buffer size in cbBuffer. */
        NTSTATUS ret = ZwQuerySystemInformation(SystemModuleInformation, (PVOID)&cbBuffer, 0, &cbBuffer);
        if (!cbBuffer)
        {
            printf("ZwQuerySystemInformation returned length 0\n");
            goto skipkernelmodules;
        }

        pBuffer = RTMemAllocZ(cbBuffer);
        if (!pBuffer)
            goto skipkernelmodules;

        /* Second call fetches the actual module list. */
        ret = ZwQuerySystemInformation(SystemModuleInformation, pBuffer, cbBuffer, &cbBuffer);
        if (ret != 0)
        {
            printf("ZwQuerySystemInformation returned %x (1)\n", ret);
            goto skipkernelmodules;
        }

        pSystemModules = (PRTL_PROCESS_MODULES)pBuffer;
        for (unsigned i = 0; i < pSystemModules->NumberOfModules; i++)
        {
            /* User-mode modules seem to have no flags set; skip them as we detected them above. */
            if (pSystemModules->Modules[i].Flags == 0)
                continue;

            /* New module; register it. Fake up a MODULEENTRY32 so the common
               checker routine can be reused for kernel modules. */
            char          szFullFilePath[512];
            MODULEENTRY32 ModuleInfo;

            strcpy(ModuleInfo.szModule, &pSystemModules->Modules[i].FullPathName[pSystemModules->Modules[i].OffsetToFileName]);
            GetSystemDirectoryA(szFullFilePath, sizeof(szFullFilePath));

            /* skip \Systemroot\system32 */
            char *lpPath = strchr(&pSystemModules->Modules[i].FullPathName[1], '\\');
            if (!lpPath)
            {
                printf("Unexpected kernel module name %s\n", pSystemModules->Modules[i].FullPathName);
                break;
            }

            lpPath = strchr(lpPath+1, '\\');
            if (!lpPath)
            {
                printf("Unexpected kernel module name %s\n", pSystemModules->Modules[i].FullPathName);
                break;
            }

            /* Rebase \SystemRoot\System32\<file> onto the real system directory. */
            strcat(szFullFilePath, lpPath);
            strcpy(ModuleInfo.szExePath, szFullFilePath);
            ModuleInfo.modBaseAddr = (BYTE *)pSystemModules->Modules[i].ImageBase;
            ModuleInfo.modBaseSize = pSystemModules->Modules[i].ImageSize;

            VBoxServicePageSharingCheckModule(&ModuleInfo);
        }

skipkernelmodules:
        if (pBuffer)
            RTMemFree(pBuffer);
    }

    printf("\n\nKERNEL RESULTS\n");
    printf("cNotPresentPages = %d\n", cNotPresentPages);
    printf("cWritablePages = %d\n", cWritablePages);
    printf("cPrivatePages = %d\n", cPrivatePages);
    printf("cSharedPages = %d\n", cSharedPages);
}
static int vboxvfs_mount(struct mount *mp, struct thread *td) { int rc; char *pszShare; int cbShare, cbOption; int uid = 0, gid = 0; struct sf_glob_info *pShFlGlobalInfo; SHFLSTRING *pShFlShareName = NULL; int cbShFlShareName; printf("%s: Enter\n", __FUNCTION__); if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS)) return EOPNOTSUPP; if (vfs_filteropt(mp->mnt_optnew, vboxvfs_opts)) { vfs_mount_error(mp, "%s", "Invalid option"); return EINVAL; } rc = vfs_getopt(mp->mnt_optnew, "from", (void **)&pszShare, &cbShare); if (rc || pszShare[cbShare-1] != '\0' || cbShare > 0xfffe) return EINVAL; rc = vfs_getopt(mp->mnt_optnew, "gid", (void **)&gid, &cbOption); if ((rc != ENOENT) && (rc || cbOption != sizeof(gid))) return EINVAL; rc = vfs_getopt(mp->mnt_optnew, "uid", (void **)&uid, &cbOption); if ((rc != ENOENT) && (rc || cbOption != sizeof(uid))) return EINVAL; pShFlGlobalInfo = RTMemAllocZ(sizeof(struct sf_glob_info)); if (!pShFlGlobalInfo) return ENOMEM; cbShFlShareName = offsetof (SHFLSTRING, String.utf8) + cbShare + 1; pShFlShareName = RTMemAllocZ(cbShFlShareName); if (!pShFlShareName) return VERR_NO_MEMORY; pShFlShareName->u16Length = cbShare; pShFlShareName->u16Size = cbShare + 1; memcpy (pShFlShareName->String.utf8, pszShare, cbShare + 1); rc = vboxCallMapFolder (&g_vboxSFClient, pShFlShareName, &pShFlGlobalInfo->map); RTMemFree(pShFlShareName); if (RT_FAILURE (rc)) { RTMemFree(pShFlGlobalInfo); printf("vboxCallMapFolder failed rc=%d\n", rc); return EPROTO; } pShFlGlobalInfo->uid = uid; pShFlGlobalInfo->gid = gid; mp->mnt_data = pShFlGlobalInfo; /* @todo root vnode. */ vfs_getnewfsid(mp); vfs_mountedfrom(mp, pszShare); printf("%s: Leave rc=0\n", __FUNCTION__); return 0; }
/** * Internal: Create a parallels image. */ static int parallelsCreateImage(PPARALLELSIMAGE pImage, uint64_t cbSize, unsigned uImageFlags, const char *pszComment, PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry, unsigned uOpenFlags, PFNVDPROGRESS pfnProgress, void *pvUser, unsigned uPercentStart, unsigned uPercentSpan) { int rc = VINF_SUCCESS; int32_t fOpen; if (uImageFlags & VD_IMAGE_FLAGS_FIXED) { rc = vdIfError(pImage->pIfError, VERR_VD_INVALID_TYPE, RT_SRC_POS, N_("Parallels: cannot create fixed image '%s'. Create a raw image"), pImage->pszFilename); goto out; } pImage->uOpenFlags = uOpenFlags & ~VD_OPEN_FLAGS_READONLY; pImage->uImageFlags = uImageFlags; pImage->PCHSGeometry = *pPCHSGeometry; pImage->LCHSGeometry = *pLCHSGeometry; if (!pImage->PCHSGeometry.cCylinders) { /* Set defaults. */ pImage->PCHSGeometry.cSectors = 63; pImage->PCHSGeometry.cHeads = 16; pImage->PCHSGeometry.cCylinders = pImage->cbSize / (512 * pImage->PCHSGeometry.cSectors * pImage->PCHSGeometry.cHeads); } pImage->pIfError = VDIfErrorGet(pImage->pVDIfsDisk); pImage->pIfIo = VDIfIoIntGet(pImage->pVDIfsImage); AssertPtrReturn(pImage->pIfIo, VERR_INVALID_PARAMETER); /* Create image file. */ fOpen = VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags, true /* fCreate */); rc = vdIfIoIntFileOpen(pImage->pIfIo, pImage->pszFilename, fOpen, &pImage->pStorage); if (RT_FAILURE(rc)) { rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("Parallels: cannot create image '%s'"), pImage->pszFilename); goto out; } if (RT_SUCCESS(rc) && pfnProgress) pfnProgress(pvUser, uPercentStart + uPercentSpan * 98 / 100); /* Setup image state. 
*/ pImage->cbSize = cbSize; pImage->cAllocationBitmapEntries = cbSize / 512 / pImage->PCHSGeometry.cSectors; if (pImage->cAllocationBitmapEntries * pImage->PCHSGeometry.cSectors * 512 < cbSize) pImage->cAllocationBitmapEntries++; pImage->fAllocationBitmapChanged = true; pImage->cbFileCurrent = sizeof(ParallelsHeader) + pImage->cAllocationBitmapEntries * sizeof(uint32_t); /* Round to next sector boundary. */ pImage->cbFileCurrent += 512 - pImage->cbFileCurrent % 512; Assert(!(pImage->cbFileCurrent % 512)); pImage->pAllocationBitmap = (uint32_t *)RTMemAllocZ(pImage->cAllocationBitmapEntries * sizeof(uint32_t)); if (!pImage->pAllocationBitmap) rc = VERR_NO_MEMORY; if (RT_SUCCESS(rc)) { ParallelsHeader Header; memcpy(Header.HeaderIdentifier, PARALLELS_HEADER_MAGIC, sizeof(Header.HeaderIdentifier)); Header.uVersion = RT_H2LE_U32(PARALLELS_DISK_VERSION); Header.cHeads = RT_H2LE_U32(pImage->PCHSGeometry.cHeads); Header.cCylinders = RT_H2LE_U32(pImage->PCHSGeometry.cCylinders); Header.cSectorsPerTrack = RT_H2LE_U32(pImage->PCHSGeometry.cSectors); Header.cEntriesInAllocationBitmap = RT_H2LE_U32(pImage->cAllocationBitmapEntries); Header.cSectors = RT_H2LE_U32(pImage->cbSize / 512); memset(Header.Padding, 0, sizeof(Header.Padding)); /* Write header and allocation bitmap. */ rc = vdIfIoIntFileSetSize(pImage->pIfIo, pImage->pStorage, pImage->cbFileCurrent); if (RT_SUCCESS(rc)) rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pImage->pStorage, 0, &Header, sizeof(Header), NULL); if (RT_SUCCESS(rc)) rc = parallelsFlushImage(pImage); /* Writes the allocation bitmap. */ } out: if (RT_SUCCESS(rc) && pfnProgress) pfnProgress(pvUser, uPercentStart + uPercentSpan); if (RT_FAILURE(rc)) parallelsFreeImage(pImage, rc != VERR_ALREADY_EXISTS); return rc; }
/**
 * Create a UHGSMI buffer backed by a D3D (WDDM) allocation.
 *
 * @returns VBox status code.
 * @param   pHgsmi       The UHGSMI instance (D3D flavor).
 * @param   cbBuf        Requested buffer size; rounded up to whole 4K pages.
 * @param   enmSynchType Type of the synchronization object.
 * @param   hSynch       Synchronization object handle; created here when the
 *                       caller passes none (see vboxUhgsmiBaseEventChkCreate).
 * @param   ppBuf        Where to store the created buffer on success.
 */
DECLCALLBACK(int) vboxUhgsmiD3DBufferCreate(PVBOXUHGSMI pHgsmi, uint32_t cbBuf,
                                            VBOXUHGSMI_SYNCHOBJECT_TYPE enmSynchType,
                                            HVBOXUHGSMI_SYNCHOBJECT hSynch,
                                            PVBOXUHGSMI_BUFFER* ppBuf)
{
    bool bSynchCreated = false;
    if (!cbBuf)
        return VERR_INVALID_PARAMETER;

    /* Create the synch event if the caller did not supply one. */
    int rc = vboxUhgsmiBaseEventChkCreate(enmSynchType, &hSynch, &bSynchCreated);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        return rc;

    /* Round up to whole 4K pages; cPages sizes the trailing lock-page-index array. */
    cbBuf = VBOXWDDM_ROUNDBOUND(cbBuf, 0x1000);
    Assert(cbBuf);
    uint32_t cPages = cbBuf >> 12;
    Assert(cPages);
    PVBOXUHGSMI_PRIVATE_D3D pPrivate = VBOXUHGSMID3D_GET(pHgsmi);
    PVBOXUHGSMI_BUFFER_PRIVATE_D3D pBuf = (PVBOXUHGSMI_BUFFER_PRIVATE_D3D)RTMemAllocZ(RT_OFFSETOF(VBOXUHGSMI_BUFFER_PRIVATE_D3D, aLockPageIndices[cPages]));
    Assert(pBuf);
    if (pBuf)
    {
        struct
        {
            D3DDDICB_ALLOCATE     DdiAlloc;
            D3DDDI_ALLOCATIONINFO DdiAllocInfo;
            VBOXWDDM_ALLOCINFO    AllocInfo;
        } Buf;
        memset(&Buf, 0, sizeof (Buf));
        Buf.DdiAlloc.hResource = NULL;
        Buf.DdiAlloc.hKMResource = NULL;
        Buf.DdiAlloc.NumAllocations = 1;
        Buf.DdiAlloc.pAllocationInfo = &Buf.DdiAllocInfo;
        Buf.DdiAllocInfo.pPrivateDriverData = &Buf.AllocInfo;
        Buf.DdiAllocInfo.PrivateDriverDataSize = sizeof (Buf.AllocInfo);
        Buf.AllocInfo.enmType = VBOXWDDM_ALLOC_TYPE_UMD_HGSMI_BUFFER;
        Buf.AllocInfo.cbBuffer = cbBuf;
        Buf.AllocInfo.hSynch = hSynch;
        Buf.AllocInfo.enmSynchType = enmSynchType;

        HRESULT hr = pPrivate->pDevice->RtCallbacks.pfnAllocateCb(pPrivate->pDevice->hDevice, &Buf.DdiAlloc);
        Assert(hr == S_OK);
        if (hr == S_OK)
        {
            Assert(Buf.DdiAllocInfo.hAllocation);
            pBuf->BasePrivate.Base.pfnLock = vboxUhgsmiD3DBufferLock;
            pBuf->BasePrivate.Base.pfnUnlock = vboxUhgsmiD3DBufferUnlock;
//            pBuf->Base.pfnAdjustValidDataRange = vboxUhgsmiD3DBufferAdjustValidDataRange;
            pBuf->BasePrivate.Base.pfnDestroy = vboxUhgsmiD3DBufferDestroy;

            pBuf->BasePrivate.Base.hSynch = hSynch;
            pBuf->BasePrivate.Base.enmSynchType = enmSynchType;
            pBuf->BasePrivate.Base.cbBuffer = cbBuf;
            pBuf->BasePrivate.Base.bSynchCreated = bSynchCreated;

            pBuf->pDevice = pPrivate->pDevice;
            pBuf->BasePrivate.hAllocation = Buf.DdiAllocInfo.hAllocation;

            *ppBuf = &pBuf->BasePrivate.Base;
            return VINF_SUCCESS;
        }

        /* Fixed: this failure path used to fall through with rc still
         * VINF_SUCCESS, reporting success without ever storing *ppBuf. */
        rc = VERR_GENERAL_FAILURE;
        RTMemFree(pBuf);
    }
    else
        rc = VERR_NO_MEMORY;

    /* Undo the synch object we created above, if any. */
    if (bSynchCreated)
        CloseHandle(hSynch);

    return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnCreate */
static int parallelsCreate(const char *pszFilename, uint64_t cbSize,
                           unsigned uImageFlags, const char *pszComment,
                           PCVDGEOMETRY pPCHSGeometry, PCVDGEOMETRY pLCHSGeometry,
                           PCRTUUID pUuid, unsigned uOpenFlags,
                           unsigned uPercentStart, unsigned uPercentSpan,
                           PVDINTERFACE pVDIfsDisk, PVDINTERFACE pVDIfsImage,
                           PVDINTERFACE pVDIfsOperation, void **ppBackendData)
{
    LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
    int rc = VINF_SUCCESS;
    PPARALLELSIMAGE pImage;

    /* Resolve the optional progress interface for reporting below. */
    PFNVDPROGRESS pfnProgress = NULL;
    void *pvUser = NULL;
    PVDINTERFACEPROGRESS pIfProgress = VDIfProgressGet(pVDIfsOperation);
    if (pIfProgress)
    {
        pfnProgress = pIfProgress->pfnProgress;
        pvUser = pIfProgress->Core.pvUser;
    }

    /* Check open flags. All valid flags are supported. */
    if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    /* Check remaining arguments. */
    if (   !VALID_PTR(pszFilename)
        || !*pszFilename
        || !VALID_PTR(pPCHSGeometry)
        || !VALID_PTR(pLCHSGeometry))
    {
        rc = VERR_INVALID_PARAMETER;
        goto out;
    }

    pImage = (PPARALLELSIMAGE)RTMemAllocZ(sizeof(PARALLELSIMAGE));
    if (!pImage)
    {
        rc = VERR_NO_MEMORY;
        goto out;
    }
    /* NOTE(review): the caller's filename pointer is stored without copying —
       presumably the VD core guarantees it outlives the backend instance. */
    pImage->pszFilename = pszFilename;
    pImage->pStorage = NULL;
    pImage->pVDIfsDisk = pVDIfsDisk;
    pImage->pVDIfsImage = pVDIfsImage;

    rc = parallelsCreateImage(pImage, cbSize, uImageFlags, pszComment,
                              pPCHSGeometry, pLCHSGeometry, uOpenFlags,
                              pfnProgress, pvUser, uPercentStart, uPercentSpan);
    if (RT_SUCCESS(rc))
    {
        /* So far the image is opened in read/write mode. Make sure the
         * image is opened in read-only mode if the caller requested that. */
        if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
        {
            /* Close the writable handle and reopen with the caller's flags. */
            parallelsFreeImage(pImage, false);
            rc = parallelsOpenImage(pImage, uOpenFlags);
            if (RT_FAILURE(rc))
            {
                RTMemFree(pImage);
                goto out;
            }
        }
        *ppBackendData = pImage;
    }
    else
        RTMemFree(pImage);

out:
    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
/** * Convert guest absolute VFS path (starting from VFS root) to a host path * within mounted shared folder (returning it as a char *). * * @param mp Mount data structure * @param pszGuestPath Guest absolute VFS path (starting from VFS root) * @param cbGuestPath Size of pszGuestPath * @param pszHostPath Returned char * wich contains host path * @param cbHostPath Returned pszHostPath size * * @return 0 on success, error code otherwise */ int vboxvfs_guest_path_to_char_path_internal(mount_t mp, char *pszGuestPath, int cbGuestPath, char **pszHostPath, int *cbHostPath) { vboxvfs_mount_t *pMount; /* Guest side: mount point path buffer and its size */ char *pszMntPointPath; int cbMntPointPath = MAXPATHLEN; /* Host side: path within mounted shared folder and its size */ char *pszHostPathInternal; size_t cbHostPathInternal; int rc; AssertReturn(mp, EINVAL); AssertReturn(pszGuestPath, EINVAL); AssertReturn(cbGuestPath >= 0, EINVAL); AssertReturn(pszHostPath, EINVAL); AssertReturn(cbHostPath, EINVAL); pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL); AssertReturn(pMount->pRootVnode, EINVAL); /* Get mount point path */ pszMntPointPath = (char *)RTMemAllocZ(cbMntPointPath); if (pszMntPointPath) { rc = vn_getpath(pMount->pRootVnode, pszMntPointPath, &cbMntPointPath); if (rc == 0 && cbGuestPath >= cbMntPointPath) { cbHostPathInternal = cbGuestPath - cbMntPointPath + 1; pszHostPathInternal = (char *)RTMemAllocZ(cbHostPathInternal); if (pszHostPathInternal) { memcpy(pszHostPathInternal, pszGuestPath + cbMntPointPath, cbGuestPath - cbMntPointPath); PDEBUG("guest<->host path converion result: '%s' mounted to '%s'", pszHostPathInternal, pszMntPointPath); RTMemFree(pszMntPointPath); *pszHostPath = pszHostPathInternal; *cbHostPath = cbGuestPath - cbMntPointPath; return 0; } else { PDEBUG("No memory to allocate buffer for guest<->host path conversion (cbHostPathInternal)"); rc = ENOMEM; } } else { PDEBUG("Unable to get guest vnode path: %d", rc); } 
RTMemFree(pszMntPointPath); } else { PDEBUG("No memory to allocate buffer for guest<->host path conversion (pszMntPointPath)"); rc = ENOMEM; } return rc; }
/**
 * Create a host-only network interface by running the VBoxNetAdpCtl helper.
 *
 * Builds and executes "<exec dir>/VBoxNetAdpCtl [name] add", parses the
 * interface name the helper prints, queries its configuration and wraps it
 * into a HostNetworkInterface object.  Progress is reported through the
 * returned IProgress object.
 *
 * @returns COM status code (or, on some early failures, an IPRT status —
 *          NOTE(review): the mixed rc/hrc returns look inconsistent; confirm
 *          callers expect this).
 * @param   pVBox                   The VirtualBox object.
 * @param   aHostNetworkInterface   Where to return the new interface object.
 * @param   aProgress               Where to return the progress object.
 * @param   pcszName                Optional interface name to pass to the helper.
 */
int NetIfCreateHostOnlyNetworkInterface(VirtualBox *pVBox,
                                        IHostNetworkInterface **aHostNetworkInterface,
                                        IProgress **aProgress,
                                        const char *pcszName)
{
#if defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
    /* create a progress object */
    ComObjPtr<Progress> progress;
    progress.createObject();

    ComPtr<IHost> host;
    HRESULT hrc = pVBox->COMGETTER(Host)(host.asOutParam());
    if (SUCCEEDED(hrc))
    {
        hrc = progress->init(pVBox, host,
                             Bstr("Creating host only network interface").raw(),
                             FALSE /* aCancelable */);
        if (SUCCEEDED(hrc))
        {
            progress.queryInterfaceTo(aProgress);

            /* Build the helper command line; the size reservation below keeps
               room for the longest suffix appended later. */
            char szAdpCtl[RTPATH_MAX];
            int rc = RTPathExecDir(szAdpCtl, sizeof(szAdpCtl) - sizeof("/" VBOXNETADPCTL_NAME " add"));
            if (RT_FAILURE(rc))
            {
                progress->notifyComplete(E_FAIL,
                                         COM_IIDOF(IHostNetworkInterface),
                                         HostNetworkInterface::getStaticComponentName(),
                                         "Failed to get program path, rc=%Rrc\n", rc);
                return rc;
            }
            strcat(szAdpCtl, "/" VBOXNETADPCTL_NAME " ");
            if (pcszName && strlen(pcszName) <= RTPATH_MAX - strlen(szAdpCtl) - sizeof(" add"))
            {
                strcat(szAdpCtl, pcszName);
                strcat(szAdpCtl, " add");
            }
            else
                strcat(szAdpCtl, "add");
            /* Capture stderr too so error text can be reported below. */
            if (strlen(szAdpCtl) < RTPATH_MAX - sizeof(" 2>&1"))
                strcat(szAdpCtl, " 2>&1");
            FILE *fp = popen(szAdpCtl, "r");

            if (fp)
            {
                char szBuf[128]; /* We are not interested in long error messages. */
                if (fgets(szBuf, sizeof(szBuf), fp))
                {
                    /* A line starting with "VBoxNetAdpCtl:" is an error report. */
                    if (!strncmp(VBOXNETADPCTL_NAME ":", szBuf, sizeof(VBOXNETADPCTL_NAME)))
                    {
                        progress->notifyComplete(E_FAIL,
                                                 COM_IIDOF(IHostNetworkInterface),
                                                 HostNetworkInterface::getStaticComponentName(),
                                                 "%s", szBuf);
                        pclose(fp);
                        return E_FAIL;
                    }

                    /* Strip the trailing newline from the reported interface name. */
                    char *pLast = szBuf + strlen(szBuf) - 1;
                    if (pLast >= szBuf && *pLast == '\n')
                        *pLast = 0;

                    size_t cbNameLen = strlen(szBuf) + 1;
                    PNETIFINFO pInfo = (PNETIFINFO)RTMemAllocZ(RT_OFFSETOF(NETIFINFO, szName[cbNameLen]));
                    if (!pInfo)
                        rc = VERR_NO_MEMORY;
                    else
                    {
                        strcpy(pInfo->szShortName, szBuf);
                        strcpy(pInfo->szName, szBuf);
                        rc = NetIfGetConfigByName(pInfo);
                        if (RT_FAILURE(rc))
                        {
                            progress->notifyComplete(E_FAIL,
                                                     COM_IIDOF(IHostNetworkInterface),
                                                     HostNetworkInterface::getStaticComponentName(),
                                                     "Failed to get config info for %s (as reported by '" VBOXNETADPCTL_NAME " add')\n", szBuf);
                        }
                        else
                        {
                            Bstr IfName(szBuf);
                            /* create a new uninitialized host interface object */
                            ComObjPtr<HostNetworkInterface> iface;
                            iface.createObject();
                            iface->init(IfName, HostNetworkInterfaceType_HostOnly, pInfo);
                            iface->setVirtualBox(pVBox);
                            iface.queryInterfaceTo(aHostNetworkInterface);
                        }
                        RTMemFree(pInfo);
                    }
                    /* pclose() returns the helper's exit status. */
                    if ((rc = pclose(fp)) != 0)
                    {
                        progress->notifyComplete(E_FAIL,
                                                 COM_IIDOF(IHostNetworkInterface),
                                                 HostNetworkInterface::getStaticComponentName(),
                                                 "Failed to execute '"VBOXNETADPCTL_NAME " add' (exit status: %d)", rc);
                        rc = VERR_INTERNAL_ERROR;
                    }
                }
                else
                {
                    /* Failed to add an interface */
                    rc = VERR_PERMISSION_DENIED;
                    progress->notifyComplete(E_FAIL,
                                             COM_IIDOF(IHostNetworkInterface),
                                             HostNetworkInterface::getStaticComponentName(),
                                             "Failed to execute '"VBOXNETADPCTL_NAME " add' (exit status: %d). Check permissions!", rc);
                    pclose(fp);
                }
            }
            /* NOTE(review): when popen() itself returns NULL, rc is still the
               success value from RTPathExecDir, so completion is reported as
               success — confirm whether that is intended. */
            if (RT_SUCCESS(rc))
                progress->notifyComplete(rc);
            else
                hrc = E_FAIL;
        }
    }

    return hrc;
#else
    NOREF(pVBox);
    NOREF(aHostNetworkInterface);
    NOREF(aProgress);
    NOREF(pcszName);
    return VERR_NOT_IMPLEMENTED;
#endif
}
/**
 * Read the next discard-start event from the I/O log.
 *
 * Only valid when the next pending event is a START event of type DISCARD;
 * otherwise VERR_INVALID_STATE is returned.  On success the read position is
 * advanced past the event and its range records; on failure it is left where
 * it was so the caller can retry.
 *
 * @returns VBox status code.
 * @param   hIoLogger   The I/O logger handle.
 * @param   pidEvent    Where to store the event id.
 * @param   pfAsync     Where to store whether the request was asynchronous.
 * @param   ppaRanges   Where to store the allocated range array (caller frees).
 * @param   pcRanges    Where to store the number of ranges.
 */
VBOXDDU_DECL(int) VDDbgIoLogEventGetStartDiscard(VDIOLOGGER hIoLogger, uint64_t *pidEvent,
                                                 bool *pfAsync, PRTRANGE *ppaRanges,
                                                 unsigned *pcRanges)
{
    int rc = VINF_SUCCESS;
    PVDIOLOGGERINT pIoLogger = hIoLogger;

    AssertPtrReturn(pIoLogger, VERR_INVALID_HANDLE);
    AssertPtrReturn(pidEvent, VERR_INVALID_POINTER);
    AssertPtrReturn(pfAsync, VERR_INVALID_POINTER);
    /* Fixed: the two output pointers below were previously not validated. */
    AssertPtrReturn(ppaRanges, VERR_INVALID_POINTER);
    AssertPtrReturn(pcRanges, VERR_INVALID_POINTER);

    rc = RTSemFastMutexRequest(pIoLogger->hMtx);
    AssertRCReturn(rc, rc);

    if (   pIoLogger->u32EventTypeNext == VDIOLOG_EVENT_START
        && pIoLogger->enmReqTypeNext == VDDBGIOLOGREQ_DISCARD)
    {
        IoLogEntryStart Entry;

        rc = RTFileReadAt(pIoLogger->hFile, pIoLogger->offReadNext, &Entry,
                          sizeof(Entry), NULL);
        if (RT_SUCCESS(rc))
        {
            PRTRANGE paRanges = NULL;
            IoLogEntryDiscard DiscardRange;

            pIoLogger->offReadNext += sizeof(Entry);
            *pfAsync  = RT_BOOL(Entry.u8AsyncIo);
            *pidEvent = RT_LE2H_U64(Entry.u64Id);
            *pcRanges = RT_LE2H_U32(Entry.Discard.cRanges);

            paRanges = (PRTRANGE)RTMemAllocZ(*pcRanges * sizeof(RTRANGE));
            if (paRanges)
            {
                /* Read the range records following the start entry. */
                for (unsigned i = 0; i < *pcRanges; i++)
                {
                    rc = RTFileReadAt(pIoLogger->hFile, pIoLogger->offReadNext + i*sizeof(DiscardRange),
                                      &DiscardRange, sizeof(DiscardRange), NULL);
                    if (RT_FAILURE(rc))
                        break;

                    paRanges[i].offStart = RT_LE2H_U64(DiscardRange.u64Off);
                    paRanges[i].cbRange  = RT_LE2H_U32(DiscardRange.u32Discard);
                }

                if (RT_SUCCESS(rc))
                {
                    pIoLogger->offReadNext += *pcRanges * sizeof(DiscardRange);
                    *ppaRanges = paRanges;
                }
                else
                {
                    /* Roll back to the start of the event so a retry is possible. */
                    pIoLogger->offReadNext -= sizeof(Entry);
                    RTMemFree(paRanges);
                }
            }
            else
            {
                /* Fixed: the out-of-memory path used to leave offReadNext
                 * advanced past the start entry, unlike the read-failure path
                 * above, leaving the log position inconsistent. */
                pIoLogger->offReadNext -= sizeof(Entry);
                rc = VERR_NO_MEMORY;
            }
        }
    }
    else
        rc = VERR_INVALID_STATE;

    if (RT_SUCCESS(rc))
        pIoLogger->u32EventTypeNext = 0;

    RTSemFastMutexRelease(pIoLogger->hMtx);
    return rc;
}
/**
 * Run once function that initializes the kstats we need here.
 *
 * Opens the kstat chain, allocates the per-CPU cpu_info pointer array and the
 * core id array, initializes the protecting critical section and collects the
 * core ids.  On any failure everything allocated so far is torn down again.
 *
 * @returns IPRT status code.
 * @param   pvUser      Unused.
 */
static DECLCALLBACK(int) rtMpSolarisOnce(void *pvUser)
{
    int rc = VINF_SUCCESS;
    NOREF(pvUser);

    /*
     * Open kstat and find the cpu_info entries for each of the CPUs.
     */
    g_pKsCtl = kstat_open();
    if (g_pKsCtl)
    {
        g_capCpuInfo = RTMpGetCount();
        g_papCpuInfo = (kstat_t **)RTMemAllocZ(g_capCpuInfo * sizeof(kstat_t *));
        if (g_papCpuInfo)
        {
            g_cu64CoreIds = g_capCpuInfo;
            g_pu64CoreIds = (uint64_t *)RTMemAllocZ(g_cu64CoreIds * sizeof(uint64_t));
            if (g_pu64CoreIds)
            {
                rc = RTCritSectInit(&g_MpSolarisCritSect);
                if (RT_SUCCESS(rc))
                {
                    RTCPUID i = 0;
                    /* Walk the kstat chain collecting one cpu_info entry per CPU. */
                    for (kstat_t *pKsp = g_pKsCtl->kc_chain; pKsp != NULL; pKsp = pKsp->ks_next)
                    {
                        if (!RTStrCmp(pKsp->ks_module, "cpu_info"))
                        {
                            AssertBreak(i < g_capCpuInfo);
                            g_papCpuInfo[i++] = pKsp;
                            /** @todo ks_instance == cpu_id (/usr/src/uts/common/os/cpu.c)? Check this and fix it ASAP. */
                        }
                    }

                    rc = rtMpSolarisGetCoreIds();
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;
                    else
                        Log(("rtMpSolarisGetCoreIds failed. rc=%Rrc\n", rc));
                    /* NOTE(review): g_MpSolarisCritSect is not deleted on this
                       failure path before the buffers below are freed — confirm
                       whether the critical section leak is intentional. */
                }

                RTMemFree(g_pu64CoreIds);
                g_pu64CoreIds = NULL;
            }
            else
                rc = VERR_NO_MEMORY;

            /* bail out, we failed. */
            RTMemFree(g_papCpuInfo);
            g_papCpuInfo = NULL;
        }
        else
            rc = VERR_NO_MEMORY;
        kstat_close(g_pKsCtl);
        g_pKsCtl = NULL;
    }
    else
    {
        rc = RTErrConvertFromErrno(errno);
        if (RT_SUCCESS(rc))
            rc = VERR_INTERNAL_ERROR;
        Log(("kstat_open() -> %d (%Rrc)\n", errno, rc));
    }

    return rc;
}
int main(int argc, char **argv) { int rcRet = 0; int i; int rc; int cIterations = argc > 1 ? RTStrToUInt32(argv[1]) : 32; if (cIterations == 0) cIterations = 64; /* * Init. */ RTR3InitExe(argc, &argv, 0); PSUPDRVSESSION pSession; rc = SUPR3Init(&pSession); rcRet += rc != 0; RTPrintf("tstInt: SUPR3Init -> rc=%Rrc\n", rc); char szFile[RTPATH_MAX]; if (!rc) { rc = RTPathExecDir(szFile, sizeof(szFile) - sizeof("/VMMR0.r0")); } char szAbsFile[RTPATH_MAX]; if (RT_SUCCESS(rc)) { strcat(szFile, "/VMMR0.r0"); rc = RTPathAbs(szFile, szAbsFile, sizeof(szAbsFile)); } if (RT_SUCCESS(rc)) { /* * Load VMM code. */ rc = SUPR3LoadVMM(szAbsFile); if (RT_SUCCESS(rc)) { /* * Create a fake 'VM'. */ PVMR0 pVMR0 = NIL_RTR0PTR; PVM pVM = NULL; const unsigned cPages = RT_ALIGN_Z(sizeof(*pVM), PAGE_SIZE) >> PAGE_SHIFT; PSUPPAGE paPages = (PSUPPAGE)RTMemAllocZ(cPages * sizeof(SUPPAGE)); if (paPages) rc = SUPR3LowAlloc(cPages, (void **)&pVM, &pVMR0, &paPages[0]); else rc = VERR_NO_MEMORY; if (RT_SUCCESS(rc)) { pVM->pVMRC = 0; pVM->pVMR3 = pVM; pVM->pVMR0 = pVMR0; pVM->paVMPagesR3 = paPages; pVM->pSession = pSession; pVM->enmVMState = VMSTATE_CREATED; rc = SUPR3SetVMForFastIOCtl(pVMR0); if (!rc) { /* * Call VMM code with invalid function. */ for (i = cIterations; i > 0; i--) { rc = SUPR3CallVMMR0(pVMR0, NIL_VMCPUID, VMMR0_DO_SLOW_NOP, NULL); if (rc != VINF_SUCCESS) { RTPrintf("tstInt: SUPR3CallVMMR0 -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i); rcRet++; break; } } RTPrintf("tstInt: Performed SUPR3CallVMMR0 %d times (rc=%Rrc)\n", cIterations, rc); /* * The fast path. 
*/ if (rc == VINF_SUCCESS) { RTTimeNanoTS(); uint64_t StartTS = RTTimeNanoTS(); uint64_t StartTick = ASMReadTSC(); uint64_t MinTicks = UINT64_MAX; for (i = 0; i < 1000000; i++) { uint64_t OneStartTick = ASMReadTSC(); rc = SUPR3CallVMMR0Fast(pVMR0, VMMR0_DO_NOP, 0); uint64_t Ticks = ASMReadTSC() - OneStartTick; if (Ticks < MinTicks) MinTicks = Ticks; if (RT_UNLIKELY(rc != VINF_SUCCESS)) { RTPrintf("tstInt: SUPR3CallVMMR0Fast -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i); rcRet++; break; } } uint64_t Ticks = ASMReadTSC() - StartTick; uint64_t NanoSecs = RTTimeNanoTS() - StartTS; RTPrintf("tstInt: SUPR3CallVMMR0Fast - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n", i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks); /* * The ordinary path. */ RTTimeNanoTS(); StartTS = RTTimeNanoTS(); StartTick = ASMReadTSC(); MinTicks = UINT64_MAX; for (i = 0; i < 1000000; i++) { uint64_t OneStartTick = ASMReadTSC(); rc = SUPR3CallVMMR0Ex(pVMR0, NIL_VMCPUID, VMMR0_DO_SLOW_NOP, 0, NULL); uint64_t OneTicks = ASMReadTSC() - OneStartTick; if (OneTicks < MinTicks) MinTicks = OneTicks; if (RT_UNLIKELY(rc != VINF_SUCCESS)) { RTPrintf("tstInt: SUPR3CallVMMR0Ex -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i); rcRet++; break; } } Ticks = ASMReadTSC() - StartTick; NanoSecs = RTTimeNanoTS() - StartTS; RTPrintf("tstInt: SUPR3CallVMMR0Ex - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n", i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks); } } else { RTPrintf("tstInt: SUPR3SetVMForFastIOCtl failed: %Rrc\n", rc); rcRet++; } } else { RTPrintf("tstInt: SUPR3ContAlloc(%#zx,,) failed\n", sizeof(*pVM)); rcRet++; } /* * Unload VMM. */ rc = SUPR3UnloadVMM(); if (rc) { RTPrintf("tstInt: SUPR3UnloadVMM failed with rc=%Rrc\n", rc); rcRet++; } } else { RTPrintf("tstInt: SUPR3LoadVMM failed with rc=%Rrc\n", rc); rcRet++; } /* * Terminate. 
*/ rc = SUPR3Term(false /*fForced*/); rcRet += rc != 0; RTPrintf("tstInt: SUPR3Term -> rc=%Rrc\n", rc); }
/**
 * Construct a disk integrity driver instance.
 *
 * Reads the configuration, wires up the media/async-media interfaces that
 * pass requests through to the driver below, and optionally sets up the
 * consistency-check AVL tree, the request-trace expiry thread, the
 * double-completion history and the I/O logger.
 *
 * @copydoc FNPDMDRVCONSTRUCT
 */
static DECLCALLBACK(int) drvdiskintConstruct(PPDMDRVINS pDrvIns, PCFGMNODE pCfg, uint32_t fFlags)
{
    int rc = VINF_SUCCESS;
    PDRVDISKINTEGRITY pThis = PDMINS_2_DATA(pDrvIns, PDRVDISKINTEGRITY);
    LogFlow(("drvdiskintConstruct: iInstance=%d\n", pDrvIns->iInstance));
    PDMDRV_CHECK_VERSIONS_RETURN(pDrvIns);

    /*
     * Validate configuration.
     */
    if (!CFGMR3AreValuesValid(pCfg, "CheckConsistency\0"
                                    "TraceRequests\0"
                                    "CheckIntervalMs\0"
                                    "ExpireIntervalMs\0"
                                    "CheckDoubleCompletions\0"
                                    "HistorySize\0"
                                    "IoLog\0"))
        return VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES;

    rc = CFGMR3QueryBoolDef(pCfg, "CheckConsistency", &pThis->fCheckConsistency, false);
    AssertRC(rc);
    rc = CFGMR3QueryBoolDef(pCfg, "TraceRequests", &pThis->fTraceRequests, false);
    AssertRC(rc);
    rc = CFGMR3QueryU32Def(pCfg, "CheckIntervalMs", &pThis->uCheckIntervalMs, 5000); /* 5 seconds */
    AssertRC(rc);
    rc = CFGMR3QueryU32Def(pCfg, "ExpireIntervalMs", &pThis->uExpireIntervalMs, 20000); /* 20 seconds */
    AssertRC(rc);
    rc = CFGMR3QueryBoolDef(pCfg, "CheckDoubleCompletions", &pThis->fCheckDoubleCompletion, false);
    AssertRC(rc);
    rc = CFGMR3QueryU32Def(pCfg, "HistorySize", &pThis->cEntries, 512);
    AssertRC(rc);

    /* Optional path for the I/O log; freed again after VDDbgIoLogCreate below. */
    char *pszIoLogFilename = NULL;
    rc = CFGMR3QueryStringAlloc(pCfg, "IoLog", &pszIoLogFilename);
    Assert(RT_SUCCESS(rc) || rc == VERR_CFGM_VALUE_NOT_FOUND);

    /*
     * Initialize most of the data members.
     */
    pThis->pDrvIns = pDrvIns;

    /* IBase. */
    pDrvIns->IBase.pfnQueryInterface = drvdiskintQueryInterface;

    /* IMedia */
    pThis->IMedia.pfnRead                = drvdiskintRead;
    pThis->IMedia.pfnWrite               = drvdiskintWrite;
    pThis->IMedia.pfnFlush               = drvdiskintFlush;
    pThis->IMedia.pfnGetSize             = drvdiskintGetSize;
    pThis->IMedia.pfnIsReadOnly          = drvdiskintIsReadOnly;
    pThis->IMedia.pfnBiosGetPCHSGeometry = drvdiskintBiosGetPCHSGeometry;
    pThis->IMedia.pfnBiosSetPCHSGeometry = drvdiskintBiosSetPCHSGeometry;
    pThis->IMedia.pfnBiosGetLCHSGeometry = drvdiskintBiosGetLCHSGeometry;
    pThis->IMedia.pfnBiosSetLCHSGeometry = drvdiskintBiosSetLCHSGeometry;
    pThis->IMedia.pfnGetUuid             = drvdiskintGetUuid;

    /* IMediaAsync */
    pThis->IMediaAsync.pfnStartRead  = drvdiskintStartRead;
    pThis->IMediaAsync.pfnStartWrite = drvdiskintStartWrite;
    pThis->IMediaAsync.pfnStartFlush = drvdiskintStartFlush;

    /* IMediaAsyncPort. */
    pThis->IMediaAsyncPort.pfnTransferCompleteNotify = drvdiskintAsyncTransferCompleteNotify;

    /* IMediaPort. */
    pThis->IMediaPort.pfnQueryDeviceLocation = drvdiskintQueryDeviceLocation;

    /* Query the media port interface above us. */
    pThis->pDrvMediaPort = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMIMEDIAPORT);
    if (!pThis->pDrvMediaPort)
        return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_BELOW,
                                N_("No media port inrerface above"));

    /* Try to attach async media port interface above.*/
    pThis->pDrvMediaAsyncPort = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMIMEDIAASYNCPORT);

    /*
     * Try attach driver below and query it's media interface.
     */
    PPDMIBASE pBase;
    rc = PDMDrvHlpAttach(pDrvIns, fFlags, &pBase);
    if (RT_FAILURE(rc))
        return PDMDrvHlpVMSetError(pDrvIns, rc, RT_SRC_POS,
                                   N_("Failed to attach driver below us! %Rrc"), rc);

    pThis->pDrvMedia = PDMIBASE_QUERY_INTERFACE(pBase, PDMIMEDIA);
    if (!pThis->pDrvMedia)
        return PDMDRV_SET_ERROR(pDrvIns, VERR_PDM_MISSING_INTERFACE_BELOW,
                                N_("No media or async media interface below"));

    pThis->pDrvMediaAsync = PDMIBASE_QUERY_INTERFACE(pBase, PDMIMEDIAASYNC);

    /* Only forward discard if the driver below implements it. */
    if (pThis->pDrvMedia->pfnDiscard)
        pThis->IMedia.pfnDiscard = drvdiskintDiscard;

    if (pThis->fCheckConsistency)
    {
        /* Create the AVL tree. */
        pThis->pTreeSegments = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
        if (!pThis->pTreeSegments)
            rc = VERR_NO_MEMORY;
    }

    if (pThis->fTraceRequests)
    {
        for (unsigned i = 0; i < RT_ELEMENTS(pThis->apReqActive); i++)
        {
            pThis->apReqActive[i].pIoReq  = NULL;
            pThis->apReqActive[i].tsStart = 0;
        }

        pThis->iNextFreeSlot = 0;

        /* Init event semaphore. */
        rc = RTSemEventCreate(&pThis->SemEvent);
        AssertRC(rc);
        pThis->fRunning = true;
        /* Background thread that flags requests which never completed. */
        rc = RTThreadCreate(&pThis->hThread, drvdiskIntIoReqExpiredCheck, pThis,
                            0, RTTHREADTYPE_INFREQUENT_POLLER, 0, "DiskIntegrity");
        AssertRC(rc);
    }

    if (pThis->fCheckDoubleCompletion)
    {
        pThis->iEntry = 0;
        pThis->papIoReq = (PDRVDISKAIOREQ *)RTMemAllocZ(pThis->cEntries * sizeof(PDRVDISKAIOREQ));
        AssertPtr(pThis->papIoReq);
    }

    if (pszIoLogFilename)
    {
        rc = VDDbgIoLogCreate(&pThis->hIoLogger, pszIoLogFilename, VDDBG_IOLOG_LOG_DATA);
        MMR3HeapFree(pszIoLogFilename);
    }

    return rc;
}
/**
 * Enumerates the host's ethernet network interfaces (Darwin).
 *
 * Walks the list returned by DarwinGetEthernetControllers(), fills in a
 * NETIFINFO per NIC (MAC, UUID, names, link status via SIOCGIFFLAGS, and the
 * first IPv4/IPv6 address pair found via getifaddrs()) and appends an
 * initialized HostNetworkInterface object to @a list for each one.
 *
 * @returns VBox status code.
 * @param   list    Where to append the HostNetworkInterface objects.
 */
int NetIfList(std::list <ComObjPtr<HostNetworkInterface> > &list)
{
    int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
    if (sock < 0)
    {
        Log(("NetIfList: socket() -> %d\n", errno));
        /* Bug fix: used to 'return NULL' which is 0 == VINF_SUCCESS,
           silently reporting success on failure. */
        return RTErrConvertFromErrno(errno);
    }
    struct ifaddrs *IfAddrs, *pAddr;
    int rc = getifaddrs(&IfAddrs);
    if (rc)
    {
        close(sock);
        Log(("NetIfList: getifaddrs() -> %d\n", rc));
        return VERR_INTERNAL_ERROR;
    }

    PDARWINETHERNIC pEtherNICs = DarwinGetEthernetControllers();
    while (pEtherNICs)
    {
        size_t cbNameLen = strlen(pEtherNICs->szName) + 1;
        PNETIFINFO pNew = (PNETIFINFO)RTMemAllocZ(RT_OFFSETOF(NETIFINFO, szName[cbNameLen]));
        if (!pNew)
        {
            /* Bug fix: the allocation result was dereferenced unchecked.
               Free the remainder of the NIC list and bail out cleanly. */
            while (pEtherNICs)
            {
                void *pvFree = pEtherNICs;
                pEtherNICs = pEtherNICs->pNext;
                RTMemFree(pvFree);
            }
            freeifaddrs(IfAddrs);
            close(sock);
            return VERR_NO_MEMORY;
        }

        pNew->MACAddress    = pEtherNICs->Mac;
        pNew->enmMediumType = NETIF_T_ETHERNET;
        pNew->Uuid          = pEtherNICs->Uuid;
        Assert(sizeof(pNew->szShortName) > sizeof(pEtherNICs->szBSDName));
        memcpy(pNew->szShortName, pEtherNICs->szBSDName, sizeof(pEtherNICs->szBSDName));
        pNew->szShortName[sizeof(pEtherNICs->szBSDName)] = '\0';
        memcpy(pNew->szName, pEtherNICs->szName, cbNameLen);

        /* Query the up/down state via the BSD interface name. */
        struct ifreq IfReq;
        RTStrCopy(IfReq.ifr_name, sizeof(IfReq.ifr_name), pNew->szShortName);
        if (ioctl(sock, SIOCGIFFLAGS, &IfReq) < 0)
        {
            Log(("NetIfList: ioctl(SIOCGIFFLAGS) -> %d\n", errno));
            pNew->enmStatus = NETIF_S_UNKNOWN;
        }
        else
            pNew->enmStatus = (IfReq.ifr_flags & IFF_UP) ? NETIF_S_UP : NETIF_S_DOWN;

        /* Pick up the first IPv4 and first IPv6 address for this interface. */
        for (pAddr = IfAddrs; pAddr != NULL; pAddr = pAddr->ifa_next)
        {
            if (strcmp(pNew->szShortName, pAddr->ifa_name))
                continue;

            struct sockaddr_in *pIPAddr, *pIPNetMask;
            struct sockaddr_in6 *pIPv6Addr, *pIPv6NetMask;
            switch (pAddr->ifa_addr->sa_family)
            {
                case AF_INET:
                    if (pNew->IPAddress.u)
                        break; /* keep the first IPv4 address found */
                    pIPAddr = (struct sockaddr_in *)pAddr->ifa_addr;
                    Assert(sizeof(pNew->IPAddress) == sizeof(pIPAddr->sin_addr));
                    pNew->IPAddress.u = pIPAddr->sin_addr.s_addr;
                    pIPNetMask = (struct sockaddr_in *)pAddr->ifa_netmask;
                    Assert(pIPNetMask->sin_family == AF_INET);
                    Assert(sizeof(pNew->IPNetMask) == sizeof(pIPNetMask->sin_addr));
                    pNew->IPNetMask.u = pIPNetMask->sin_addr.s_addr;
                    break;
                case AF_INET6:
                    if (pNew->IPv6Address.s.Lo || pNew->IPv6Address.s.Hi)
                        break; /* keep the first IPv6 address found */
                    pIPv6Addr = (struct sockaddr_in6 *)pAddr->ifa_addr;
                    Assert(sizeof(pNew->IPv6Address) == sizeof(pIPv6Addr->sin6_addr));
                    memcpy(pNew->IPv6Address.au8,
                           pIPv6Addr->sin6_addr.__u6_addr.__u6_addr8,
                           sizeof(pNew->IPv6Address));
                    pIPv6NetMask = (struct sockaddr_in6 *)pAddr->ifa_netmask;
                    Assert(pIPv6NetMask->sin6_family == AF_INET6);
                    Assert(sizeof(pNew->IPv6NetMask) == sizeof(pIPv6NetMask->sin6_addr));
                    memcpy(pNew->IPv6NetMask.au8,
                           pIPv6NetMask->sin6_addr.__u6_addr.__u6_addr8,
                           sizeof(pNew->IPv6NetMask));
                    break;
            }
        }

        ComObjPtr<HostNetworkInterface> IfObj;
        IfObj.createObject();
        if (SUCCEEDED(IfObj->init(Bstr(pEtherNICs->szName), HostNetworkInterfaceType_Bridged, pNew)))
            list.push_back(IfObj);
        RTMemFree(pNew);

        /* next, free current */
        void *pvFree = pEtherNICs;
        pEtherNICs = pEtherNICs->pNext;
        RTMemFree(pvFree);
    }

    freeifaddrs(IfAddrs);
    close(sock);
    return VINF_SUCCESS;
}
/** * Record a successful write to the virtual disk. * * @returns VBox status code. * @param pThis Disk integrity driver instance data. * @param paSeg Segment array of the write to record. * @param cSeg Number of segments. * @param off Start offset. * @param cbWrite Number of bytes to record. */ static int drvdiskintWriteRecord(PDRVDISKINTEGRITY pThis, PCRTSGSEG paSeg, unsigned cSeg, uint64_t off, size_t cbWrite) { int rc = VINF_SUCCESS; LogFlowFunc(("pThis=%#p paSeg=%#p cSeg=%u off=%llx cbWrite=%u\n", pThis, paSeg, cSeg, off, cbWrite)); /* Update the segments */ size_t cbLeft = cbWrite; RTFOFF offCurr = (RTFOFF)off; RTSGBUF SgBuf; PIOLOGENT pIoLogEnt = (PIOLOGENT)RTMemAllocZ(sizeof(IOLOGENT)); if (!pIoLogEnt) return VERR_NO_MEMORY; pIoLogEnt->off = off; pIoLogEnt->cbWrite = cbWrite; pIoLogEnt->cRefs = 0; RTSgBufInit(&SgBuf, paSeg, cSeg); while (cbLeft) { PDRVDISKSEGMENT pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetRangeGet(pThis->pTreeSegments, offCurr); size_t cbRange = 0; bool fSet = false; unsigned offSeg = 0; if (!pSeg) { /* Get next segment */ pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetGetBestFit(pThis->pTreeSegments, offCurr, true); if ( !pSeg || offCurr + (RTFOFF)cbLeft <= pSeg->Core.Key) cbRange = cbLeft; else cbRange = pSeg->Core.Key - offCurr; Assert(cbRange % 512 == 0); /* Create new segment */ pSeg = (PDRVDISKSEGMENT)RTMemAllocZ(RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbRange / 512])); if (pSeg) { pSeg->Core.Key = offCurr; pSeg->Core.KeyLast = offCurr + (RTFOFF)cbRange - 1; pSeg->cbSeg = cbRange; pSeg->pbSeg = (uint8_t *)RTMemAllocZ(cbRange); pSeg->cIoLogEntries = cbRange / 512; if (!pSeg->pbSeg) RTMemFree(pSeg); else { bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSeg->Core); AssertMsg(fInserted, ("Bug!\n")); fSet = true; } } } else { fSet = true; offSeg = offCurr - pSeg->Core.Key; cbRange = RT_MIN(cbLeft, (size_t)(pSeg->Core.KeyLast + 1 - offCurr)); } if (fSet) { AssertPtr(pSeg); size_t cbCopied = RTSgBufCopyToBuf(&SgBuf, pSeg->pbSeg + offSeg, 
cbRange); Assert(cbCopied == cbRange); /* Update the I/O log pointers */ Assert(offSeg % 512 == 0); Assert(cbRange % 512 == 0); while (offSeg < cbRange) { uint32_t uSector = offSeg / 512; PIOLOGENT pIoLogOld = NULL; AssertMsg(uSector < pSeg->cIoLogEntries, ("Internal bug!\n")); pIoLogOld = pSeg->apIoLog[uSector]; if (pIoLogOld) { pIoLogOld->cRefs--; if (!pIoLogOld->cRefs) RTMemFree(pIoLogOld); } pSeg->apIoLog[uSector] = pIoLogEnt; pIoLogEnt->cRefs++; offSeg += 512; } } else RTSgBufAdvance(&SgBuf, cbRange); offCurr += cbRange; cbLeft -= cbRange; } return rc; }
static DECLCALLBACK(int) scriptRun(PVM pVM, RTFILE File) { RTPrintf("info: running script...\n"); uint64_t cb; int rc = RTFileGetSize(File, &cb); if (RT_SUCCESS(rc)) { if (cb == 0) return VINF_SUCCESS; if (cb < _1M) { char *pszBuf = (char *)RTMemAllocZ(cb + 1); if (pszBuf) { rc = RTFileRead(File, pszBuf, cb, NULL); if (RT_SUCCESS(rc)) { pszBuf[cb] = '\0'; /* * Now process what's in the buffer. */ char *psz = pszBuf; while (psz && *psz) { /* skip blanks. */ while (RT_C_IS_SPACE(*psz)) psz++; if (!*psz) break; /* end of line */ char *pszNext; char *pszEnd = strchr(psz, '\n'); if (!pszEnd) pszEnd = strchr(psz, '\r'); if (!pszEnd) pszNext = pszEnd = strchr(psz, '\0'); else pszNext = pszEnd + 1; if (*psz != ';' && *psz != '#' && *psz != '/') { /* strip end */ *pszEnd = '\0'; while (pszEnd > psz && RT_C_IS_SPACE(pszEnd[-1])) *--pszEnd = '\0'; /* process the line */ RTPrintf("debug: executing script line '%s'\n", psz); rc = scriptCommand(pVM, psz, pszEnd - psz); if (RT_FAILURE(rc)) { RTPrintf("error: '%s' failed: %Rrc\n", psz, rc); break; } } /* else comment line */ /* next */ psz = pszNext; } } else RTPrintf("error: failed to read script file: %Rrc\n", rc); RTMemFree(pszBuf); } else { RTPrintf("error: Out of memory. (%d bytes)\n", cb + 1); rc = VERR_NO_MEMORY; } } else RTPrintf("error: script file is too large (0x%llx bytes)\n", cb); } else RTPrintf("error: couldn't get size of script file: %Rrc\n", rc); return rc; }
/**
 * Discards the given ranges from the disk.
 *
 * For every range the shadow segments in the AVL tree are updated: segments
 * fully covered by a discard are freed, segments partially covered are
 * shrunk (data kept at the front or moved forward) or split into two, and
 * the per-sector I/O log references of the discarded sectors are released.
 *
 * @returns VBox status code.
 * @param   pThis     Disk integrity driver instance data.
 * @param   paRanges  Array of ranges to discard.
 * @param   cRanges   Number of ranges in the array.
 */
static int drvdiskintDiscardRecords(PDRVDISKINTEGRITY pThis, PPDMRANGE paRanges, unsigned cRanges)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pThis=%#p paRanges=%#p cRanges=%u\n", pThis, paRanges, cRanges));

    for (unsigned i = 0; i < cRanges; i++)
    {
        uint64_t offStart = paRanges[i].offStart;
        size_t cbLeft = paRanges[i].cbRange;

        LogFlowFunc(("Discarding off=%llu cbRange=%zu\n", offStart, cbLeft));

        while (cbLeft)
        {
            size_t cbRange;
            PDRVDISKSEGMENT pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetRangeGet(pThis->pTreeSegments, offStart);

            if (!pSeg)
            {
                /* No segment covers offStart - skip ahead to the next one (or finish). */
                pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetGetBestFit(pThis->pTreeSegments, offStart, true);

                if (   !pSeg
                    || (RTFOFF)offStart + (RTFOFF)cbLeft <= pSeg->Core.Key)
                    cbRange = cbLeft;
                else
                    cbRange = pSeg->Core.Key - offStart;

                Assert(!(cbRange % 512));
            }
            else
            {
                size_t cbPreLeft, cbPostLeft;

                /* Bug fix: cbRange was read uninitialized here
                 * (RT_MIN(cbRange, ...)); clamp against cbLeft instead,
                 * mirroring drvdiskintWriteRecord(). */
                cbRange    = RT_MIN(cbLeft, (size_t)(pSeg->Core.KeyLast + 1 - offStart));
                cbPreLeft  = offStart - pSeg->Core.Key;   /* bytes kept before the discard */
                cbPostLeft = pSeg->cbSeg - cbRange - cbPreLeft; /* bytes kept after it */

                Assert(!(cbRange % 512));
                Assert(!(cbPreLeft % 512));
                Assert(!(cbPostLeft % 512));

                LogFlowFunc(("cbRange=%zu cbPreLeft=%zu cbPostLeft=%zu\n",
                             cbRange, cbPreLeft, cbPostLeft));

                /* The segment is removed and (unless fully discarded) re-inserted
                 * below because its key range changes. */
                RTAvlrFileOffsetRemove(pThis->pTreeSegments, pSeg->Core.Key);

                if (!cbPreLeft && !cbPostLeft)
                {
                    /* Just free the whole segment. */
                    LogFlowFunc(("Freeing whole segment pSeg=%#p\n", pSeg));
                    RTMemFree(pSeg->pbSeg);
                    for (unsigned idx = 0; idx < pSeg->cIoLogEntries; idx++)
                        drvdiskintIoLogEntryRelease(pSeg->apIoLog[idx]);
                    RTMemFree(pSeg);
                }
                else if (cbPreLeft && !cbPostLeft)
                {
                    /* Discard hits the tail: realloc to the remaining front part and insert. */
                    LogFlowFunc(("Realloc segment pSeg=%#p\n", pSeg));

                    pSeg->pbSeg = (uint8_t *)RTMemRealloc(pSeg->pbSeg, cbPreLeft);
                    for (unsigned idx = cbPreLeft / 512; idx < pSeg->cIoLogEntries; idx++)
                        drvdiskintIoLogEntryRelease(pSeg->apIoLog[idx]);
                    pSeg = (PDRVDISKSEGMENT)RTMemRealloc(pSeg, RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbPreLeft / 512]));
                    pSeg->Core.KeyLast  = pSeg->Core.Key + cbPreLeft - 1;
                    pSeg->cbSeg         = cbPreLeft;
                    pSeg->cIoLogEntries = cbPreLeft / 512;
                    bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSeg->Core);
                    Assert(fInserted);
                }
                else if (!cbPreLeft && cbPostLeft)
                {
                    /* Discard hits the head: move the kept data to the front and realloc. */
                    LogFlowFunc(("Move data and realloc segment pSeg=%#p\n", pSeg));

                    memmove(pSeg->pbSeg, pSeg->pbSeg + cbRange, cbPostLeft);
                    for (unsigned idx = 0; idx < cbRange / 512; idx++)
                        drvdiskintIoLogEntryRelease(pSeg->apIoLog[idx]);
                    for (unsigned idx = 0; idx < cbPostLeft / 512; idx++)
                        pSeg->apIoLog[idx] = pSeg->apIoLog[(cbRange / 512) + idx];
                    pSeg = (PDRVDISKSEGMENT)RTMemRealloc(pSeg, RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbPostLeft / 512]));
                    pSeg->pbSeg         = (uint8_t *)RTMemRealloc(pSeg->pbSeg, cbPostLeft);
                    pSeg->Core.Key     += cbRange;
                    pSeg->cbSeg         = cbPostLeft;
                    pSeg->cIoLogEntries = cbPostLeft / 512;
                    bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSeg->Core);
                    Assert(fInserted);
                }
                else
                {
                    /* Discard hits the middle: split the segment into 2 new segments. */
                    LogFlowFunc(("Split segment pSeg=%#p\n", pSeg));

                    /* Create the segment for the data behind the discarded range first. */
                    PDRVDISKSEGMENT pSegPost = (PDRVDISKSEGMENT)RTMemAllocZ(RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbPostLeft / 512]));
                    if (pSegPost)
                    {
                        pSegPost->Core.Key      = pSeg->Core.Key + cbPreLeft + cbRange;
                        pSegPost->Core.KeyLast  = pSeg->Core.KeyLast;
                        pSegPost->cbSeg         = cbPostLeft;
                        pSegPost->pbSeg         = (uint8_t *)RTMemAllocZ(cbPostLeft);
                        pSegPost->cIoLogEntries = cbPostLeft / 512;
                        if (!pSegPost->pbSeg)
                            RTMemFree(pSegPost); /* OOM: drop the tail part (best effort) */
                        else
                        {
                            memcpy(pSegPost->pbSeg, pSeg->pbSeg + cbPreLeft + cbRange, cbPostLeft);
                            for (unsigned idx = 0; idx < cbPostLeft / 512; idx++)
                                pSegPost->apIoLog[idx] = pSeg->apIoLog[((cbPreLeft + cbRange) / 512) + idx];

                            bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSegPost->Core);
                            Assert(fInserted);
                        }
                    }

                    /* Shrink the current segment to the part before the discard. */
                    pSeg->pbSeg = (uint8_t *)RTMemRealloc(pSeg->pbSeg, cbPreLeft);
                    for (unsigned idx = cbPreLeft / 512; idx < (cbPreLeft + cbRange) / 512; idx++)
                        drvdiskintIoLogEntryRelease(pSeg->apIoLog[idx]);
                    pSeg = (PDRVDISKSEGMENT)RTMemRealloc(pSeg, RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbPreLeft / 512]));
                    pSeg->Core.KeyLast  = pSeg->Core.Key + cbPreLeft - 1;
                    pSeg->cbSeg         = cbPreLeft;
                    pSeg->cIoLogEntries = cbPreLeft / 512;
                    bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSeg->Core);
                    Assert(fInserted);
                } /* if (cbPreLeft && cbPostLeft) */
            }

            offStart += cbRange;
            cbLeft   -= cbRange;
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
/** * Allocate a new line number structure. * * @returns Pointer to a new structure on success, NULL on failure. */ RTDECL(PRTDBGLINE) RTDbgLineAlloc(void) { return (PRTDBGLINE)RTMemAllocZ(sizeof(RTDBGLINE)); }
/**
 * Processes one SCSI request.
 *
 * Requests for LUNs other than 0 are answered locally (only INQUIRY is
 * implemented, reporting "not connected"); requests for LUN 0 are passed
 * through to the host device via the Linux SG_IO ioctl (other platforms do
 * nothing in that branch). The request is always completed on the device
 * port with SCSI_STATUS_OK.
 *
 * @returns VBox status code (the value returned by the completion callback).
 * @param   pThis     The SCSI host driver instance data.
 * @param   pRequest  The request to process.
 */
static int drvscsihostProcessRequestOne(PDRVSCSIHOST pThis, PPDMSCSIREQUEST pRequest)
{
    int rc = VINF_SUCCESS;
    unsigned uTxDir;

    LogFlowFunc(("Entered\n"));

#ifdef DEBUG
    drvscsihostDumpScsiRequest(pRequest);
#endif

    /* We implement only one device. */
    if (pRequest->uLogicalUnit != 0)
    {
        /* Answer locally: report the LUN as not connected / unsupported. */
        switch (pRequest->pbCDB[0])
        {
            case SCSI_INQUIRY:
            {
                SCSIINQUIRYDATA ScsiInquiryReply;

                memset(&ScsiInquiryReply, 0, sizeof(ScsiInquiryReply));

                ScsiInquiryReply.u5PeripheralDeviceType = SCSI_INQUIRY_DATA_PERIPHERAL_DEVICE_TYPE_UNKNOWN;
                ScsiInquiryReply.u3PeripheralQualifier = SCSI_INQUIRY_DATA_PERIPHERAL_QUALIFIER_NOT_CONNECTED_NOT_SUPPORTED;
                drvscsihostScatterGatherListCopyFromBuffer(pRequest, &ScsiInquiryReply, sizeof(SCSIINQUIRYDATA));
                drvscsihostCmdOk(pRequest);
                break;
            }
            default:
                AssertMsgFailed(("Command not implemented for attached device\n"));
                drvscsiCmdError(pRequest, SCSI_SENSE_ILLEGAL_REQUEST, SCSI_ASC_NONE);
        }
    }
    else
    {
#if defined(RT_OS_LINUX)
        sg_io_hdr_t ScsiIoReq;
        sg_iovec_t *paSG = NULL;

        /* Setup SCSI request. */
        memset(&ScsiIoReq, 0, sizeof(sg_io_hdr_t));
        ScsiIoReq.interface_id = 'S'; /* mandatory marker for the SG v3 interface */

        /* Derive the transfer direction from the CDB opcode if the caller
         * did not specify one. */
        if (pRequest->uDataDirection == PDMSCSIREQUESTTXDIR_UNKNOWN)
            uTxDir = drvscsihostGetTransferDirectionFromCommand(pRequest->pbCDB[0]);
        else
            uTxDir = pRequest->uDataDirection;

        if (uTxDir == PDMSCSIREQUESTTXDIR_NONE)
            ScsiIoReq.dxfer_direction = SG_DXFER_NONE;
        else if (uTxDir == PDMSCSIREQUESTTXDIR_TO_DEVICE)
            ScsiIoReq.dxfer_direction = SG_DXFER_TO_DEV;
        else if (uTxDir == PDMSCSIREQUESTTXDIR_FROM_DEVICE)
            ScsiIoReq.dxfer_direction = SG_DXFER_FROM_DEV;
        else
            AssertMsgFailed(("Invalid transfer direction %u\n", uTxDir));

        ScsiIoReq.cmd_len   = pRequest->cbCDB;
        ScsiIoReq.mx_sb_len = pRequest->cbSenseBuffer;
        ScsiIoReq.dxfer_len = pRequest->cbScatterGather;

        if (pRequest->cScatterGatherEntries > 0)
        {
            if (pRequest->cScatterGatherEntries == 1)
            {
                /* Single segment: pass the buffer directly, no iovec array. */
                ScsiIoReq.iovec_count = 0;
                ScsiIoReq.dxferp      = pRequest->paScatterGatherHead[0].pvSeg;
            }
            else
            {
                /* Multiple segments: build a sg_iovec_t array mirroring the
                 * request's scatter/gather list. */
                ScsiIoReq.iovec_count = pRequest->cScatterGatherEntries;

                paSG = (sg_iovec_t *)RTMemAllocZ(pRequest->cScatterGatherEntries * sizeof(sg_iovec_t));
                AssertReturn(paSG, VERR_NO_MEMORY);

                for (unsigned i = 0; i < pRequest->cScatterGatherEntries; i++)
                {
                    paSG[i].iov_base = pRequest->paScatterGatherHead[i].pvSeg;
                    paSG[i].iov_len  = pRequest->paScatterGatherHead[i].cbSeg;
                }
                ScsiIoReq.dxferp = paSG;
            }
        }

        ScsiIoReq.cmdp    = pRequest->pbCDB;
        ScsiIoReq.sbp     = pRequest->pbSenseBuffer;
        ScsiIoReq.timeout = UINT_MAX;              /* no timeout - wait forever */
        ScsiIoReq.flags  |= SG_FLAG_DIRECT_IO;

        /* Issue command. */
        rc = ioctl(RTFileToNative(pThis->hDeviceFile), SG_IO, &ScsiIoReq);
        if (rc < 0)
        {
            /* NOTE(review): an ioctl failure only asserts in debug builds;
             * the request is still completed with SCSI_STATUS_OK below and
             * "successfully processed" is logged - confirm this is intended. */
            AssertMsgFailed(("Ioctl failed with rc=%d\n", rc));
        }

        /* Request processed successfully. */
        Log(("Command successfully processed\n"));

        if (ScsiIoReq.iovec_count > 0)
            RTMemFree(paSG);
#endif
    }
    /* Notify device that request finished. */
    rc = pThis->pDevScsiPort->pfnSCSIRequestCompleted(pThis->pDevScsiPort, pRequest, SCSI_STATUS_OK, false, VINF_SUCCESS);
    AssertMsgRC(rc, ("Notifying device above failed rc=%Rrc\n", rc));

    return rc;
}