Example no. 1
/** @copydoc VBOXHDDBACKEND::pfnWrite */
static int parallelsWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
                          size_t cbToWrite, size_t *pcbWriteProcess,
                          size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p\n",
                 pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess));
    PPARALLELSIMAGE pImage = (PPARALLELSIMAGE)pBackendData;
    int rc = VINF_SUCCESS;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pImage->pStorage, uOffset,
                                    pvBuf, cbToWrite, NULL);
    else
    {
        uint64_t uSector;
        uint64_t uOffsetInFile;
        uint32_t iIndexInAllocationTable;

        /* Calculate offset in the real file. */
        uSector = uOffset / 512;
        /* One chunk in the file is always one track big. */
        iIndexInAllocationTable = (uint32_t)(uSector / pImage->PCHSGeometry.cSectors);
        uSector = uSector % pImage->PCHSGeometry.cSectors;

        Assert(iIndexInAllocationTable < pImage->cAllocationBitmapEntries);

        cbToWrite = RT_MIN(cbToWrite, (pImage->PCHSGeometry.cSectors - uSector)*512);

        LogFlowFunc(("AllocationBitmap[%u]=%u uSector=%u cbToWrite=%zu cAllocationBitmapEntries=%u\n",
                     iIndexInAllocationTable, pImage->pAllocationBitmap[iIndexInAllocationTable],
                     uSector, cbToWrite, pImage->cAllocationBitmapEntries));

        if (pImage->pAllocationBitmap[iIndexInAllocationTable] == 0)
        {
            if (   cbToWrite == pImage->PCHSGeometry.cSectors * 512
                && !(fWrite & VD_WRITE_NO_ALLOC))
            {
                /* Stay on the safe side. Do not run the risk of confusing the higher
                 * level, as that can be pretty lethal to image consistency. */
                *pcbPreRead = 0;
                *pcbPostRead = 0;

                /* Allocate new chunk in the file. */
                AssertMsg(pImage->cbFileCurrent % 512 == 0, ("File size is not a multiple of 512\n"));
                pImage->pAllocationBitmap[iIndexInAllocationTable] = (uint32_t)(pImage->cbFileCurrent / 512);
                pImage->cbFileCurrent += pImage->PCHSGeometry.cSectors * 512;
                pImage->fAllocationBitmapChanged = true;

                uOffsetInFile = (uint64_t)pImage->pAllocationBitmap[iIndexInAllocationTable] * 512;

                LogFlowFunc(("uOffsetInFile=%llu\n", uOffsetInFile));

                /*
                 * Write the new block at the current end of the file.
                 */
                rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pImage->pStorage,
                                            uOffsetInFile, pvBuf, cbToWrite, NULL);
            }
            else
            {
                /* Trying to do a partial write to an unallocated cluster. Don't do
                 * anything except letting the upper layer know what to do. */
                *pcbPreRead  = uSector * 512;
                *pcbPostRead = (pImage->PCHSGeometry.cSectors * 512) - cbToWrite - *pcbPreRead;
                rc = VERR_VD_BLOCK_FREE;
            }
        }
        else
        {
            uOffsetInFile = ((uint64_t)pImage->pAllocationBitmap[iIndexInAllocationTable] + uSector) * 512;

            LogFlowFunc(("uOffsetInFile=%llu\n", uOffsetInFile));
            rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pImage->pStorage, uOffsetInFile,
                                        pvBuf, cbToWrite, NULL);
        }
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
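The interesting part of the non-fixed path above is the offset arithmetic: one allocation-table entry covers one track (PCHSGeometry.cSectors sectors), so a byte offset is first turned into an absolute sector number and then split into a table index plus a sector within that chunk. Below is a minimal standalone sketch of that mapping, assuming 512-byte sectors and an arbitrary 63-sector track; it is an illustration only, not backend code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint32_t cSectorsPerTrack = 63;                /* assumed PCHS geometry */
    uint64_t       uOffset          = 5u * 1024 * 1024;  /* byte offset into the virtual disk */

    uint64_t uSector = uOffset / 512;                           /* absolute sector number */
    uint32_t iTrack  = (uint32_t)(uSector / cSectorsPerTrack);  /* allocation table index */
    uint64_t uInTrk  = uSector % cSectorsPerTrack;              /* sector within the chunk */

    printf("sector %llu -> table entry %u, sector %llu within the chunk\n",
           (unsigned long long)uSector, iTrack, (unsigned long long)uInTrk);
    return 0;
}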
RTR3DECL(int)  RTFileRead(RTFILE hFile, void *pvBuf, size_t cbToRead, size_t *pcbRead)
{
    if (cbToRead <= 0)
        return VINF_SUCCESS;
    ULONG cbToReadAdj = (ULONG)cbToRead;
    AssertReturn(cbToReadAdj == cbToRead, VERR_NUMBER_TOO_BIG);

    ULONG cbRead = 0;
    if (ReadFile((HANDLE)RTFileToNative(hFile), pvBuf, cbToReadAdj, &cbRead, NULL))
    {
        if (pcbRead)
            /* Caller can handle partial reads. */
            *pcbRead = cbRead;
        else
        {
            /* Caller expects everything to be read. */
            while (cbToReadAdj > cbRead)
            {
                ULONG cbReadPart = 0;
                if (!ReadFile((HANDLE)RTFileToNative(hFile), (char*)pvBuf + cbRead, cbToReadAdj - cbRead, &cbReadPart, NULL))
                    return RTErrConvertFromWin32(GetLastError());
                if (cbReadPart == 0)
                    return VERR_EOF;
                cbRead += cbReadPart;
            }
        }
        return VINF_SUCCESS;
    }

    /*
     * If it's a console, we might bump into out of memory conditions in the
     * ReadConsole call.
     */
    DWORD dwErr = GetLastError();
    if (dwErr == ERROR_NOT_ENOUGH_MEMORY)
    {
        ULONG cbChunk = cbToReadAdj / 2;
        if (cbChunk > 16*_1K)
            cbChunk = 16*_1K;
        else
            cbChunk = RT_ALIGN_32(cbChunk, 256);

        cbRead = 0;
        while (cbToReadAdj > cbRead)
        {
            ULONG cbToRead   = RT_MIN(cbChunk, cbToReadAdj - cbRead);
            ULONG cbReadPart = 0;
            if (!ReadFile((HANDLE)RTFileToNative(hFile), (char *)pvBuf + cbRead, cbToRead, &cbReadPart, NULL))
            {
                /* If we failed because the buffer is too big, shrink it and
                   try again. */
                dwErr = GetLastError();
                if (   dwErr == ERROR_NOT_ENOUGH_MEMORY
                    && cbChunk > 8)
                {
                    cbChunk /= 2;
                    continue;
                }
                return RTErrConvertFromWin32(dwErr);
            }
            cbRead += cbReadPart;

            /* Return if the caller can handle partial reads, otherwise try
               fill the buffer all the way up. */
            if (pcbRead)
            {
                *pcbRead = cbRead;
                break;
            }
            if (cbReadPart == 0)
                return VERR_EOF;
        }
        return VINF_SUCCESS;
    }

    return RTErrConvertFromWin32(dwErr);
}
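Note how pcbRead doubles as a mode switch in RTFileRead: a non-NULL pointer means the caller accepts a short read, while a NULL pointer makes the function keep reading until the buffer is full or EOF turns into VERR_EOF. A minimal usage sketch under that contract, with an assumed file name and trimmed error handling:

#include <iprt/file.h>
#include <iprt/stream.h>

static int readHeaderSketch(void)
{
    RTFILE hFile;
    int rc = RTFileOpen(&hFile, "/tmp/example.bin",      /* assumed path */
                        RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
    if (RT_SUCCESS(rc))
    {
        uint8_t abHdr[512];
        size_t  cbRead = 0;
        /* Passing &cbRead: a short read near the end of the file is fine here. */
        rc = RTFileRead(hFile, abHdr, sizeof(abHdr), &cbRead);
        if (RT_SUCCESS(rc))
            RTPrintf("read %zu bytes\n", cbRead);
        RTFileClose(hFile);
    }
    return rc;
}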
Example no. 3
/**
 * Gets the length in code points of a UTF-16 encoded string, validating the
 * string while doing so.
 *
 * @returns IPRT status code.
 * @param   pwsz            Pointer to the UTF-16 string.
 * @param   cwc             The max length of the string in UTF-16 units.  Use
 *                          RTSTR_MAX if all of the string is to be examined.
 * @param   pcuc            Where to store the length in Unicode code points.
 * @param   pcwcActual      Where to store the actual size of the UTF-16 string
 *                          on success. Optional.
 */
static int rtUtf16Length(PCRTUTF16 pwsz, size_t cwc, size_t *pcuc, size_t *pcwcActual)
{
    PCRTUTF16 pwszStart   = pwsz;
    size_t    cCodePoints = 0;
    while (cwc > 0)
    {
        RTUTF16 wc = *pwsz;
        if (!wc)
            break;
        if (wc < 0xd800 || wc > 0xdfff)
        {
            cCodePoints++;
            pwsz++;
            cwc--;
        }
        /* Surrogate handling (pair or error): */
        else if (wc >= 0xdc00)
        {
            RTStrAssertMsgFailed(("Lone UTF-16 trail surrogate: %#x (%.*Rhxs)\n", wc, RT_MIN(cwc * 2, 10), pwsz));
            return VERR_INVALID_UTF16_ENCODING;
        }
        else if (cwc < 2)
        {
            RTStrAssertMsgFailed(("Lone UTF-16 lead surrogate: %#x\n", wc));
            return VERR_INVALID_UTF16_ENCODING;
        }
        else
        {
            RTUTF16 wcTrail = pwsz[1];
            if (wcTrail < 0xdc00 || wcTrail > 0xdfff)
            {
                RTStrAssertMsgFailed(("Invalid UTF-16 trail surrogate: %#x (lead %#x)\n", wcTrail, wc));
                return VERR_INVALID_UTF16_ENCODING;
            }

            cCodePoints++;
            pwsz += 2;
            cwc -= 2;
        }
    }

    /* done */
    *pcuc = cCodePoints;
    if (pcwcActual)
        *pcwcActual = pwsz - pwszStart;
    return VINF_SUCCESS;
}
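rtUtf16Length only validates and counts surrogate pairs, it never combines them. For reference, the standard combination a decoder would apply to a pair that passed the checks above looks like this (a generic sketch, not IPRT code):

#include <stdint.h>
#include <assert.h>

/* Combine a validated UTF-16 lead/trail surrogate pair into a code point
 * in the U+10000..U+10FFFF range. */
static uint32_t DecodeSurrogatePair(uint16_t wcLead, uint16_t wcTrail)
{
    assert(wcLead  >= 0xd800 && wcLead  <= 0xdbff);
    assert(wcTrail >= 0xdc00 && wcTrail <= 0xdfff);
    return 0x10000 + (((uint32_t)(wcLead - 0xd800) << 10) | (uint32_t)(wcTrail - 0xdc00));
}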
Example no. 4
/**
 * Discards the given ranges from the disk.
 *
 * @returns VBox status code.
 * @param   pThis    Disk integrity driver instance data.
 * @param   paRanges Array of ranges to discard.
 * @param   cRanges  Number of ranges in the array.
 */
static int drvdiskintDiscardRecords(PDRVDISKINTEGRITY pThis, PCRTRANGE paRanges, unsigned cRanges)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pThis=%#p paRanges=%#p cRanges=%u\n", pThis, paRanges, cRanges));

    for (unsigned i = 0; i < cRanges; i++)
    {
        uint64_t offStart = paRanges[i].offStart;
        size_t cbLeft = paRanges[i].cbRange;

        LogFlowFunc(("Discarding off=%llu cbRange=%zu\n", offStart, cbLeft));

        while (cbLeft)
        {
            size_t cbRange;
            PDRVDISKSEGMENT pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetRangeGet(pThis->pTreeSegments, offStart);

            if (!pSeg)
            {
                /* Get next segment */
                pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetGetBestFit(pThis->pTreeSegments, offStart, true);
                if (   !pSeg
                    || (RTFOFF)offStart + (RTFOFF)cbLeft <= pSeg->Core.Key)
                    cbRange = cbLeft;
                else
                    cbRange = pSeg->Core.Key - offStart;

                Assert(!(cbRange % 512));
            }
            else
            {
                size_t cbPreLeft, cbPostLeft;

                cbRange    = RT_MIN(cbLeft, pSeg->Core.KeyLast - offStart + 1);
                cbPreLeft  = offStart - pSeg->Core.Key;
                cbPostLeft = pSeg->cbSeg - cbRange - cbPreLeft;

                Assert(!(cbRange % 512));
                Assert(!(cbPreLeft % 512));
                Assert(!(cbPostLeft % 512));

                LogFlowFunc(("cbRange=%zu cbPreLeft=%zu cbPostLeft=%zu\n",
                             cbRange, cbPreLeft, cbPostLeft));

                RTAvlrFileOffsetRemove(pThis->pTreeSegments, pSeg->Core.Key);

                if (!cbPreLeft && !cbPostLeft)
                {
                    /* Just free the whole segment. */
                    LogFlowFunc(("Freeing whole segment pSeg=%#p\n", pSeg));
                    RTMemFree(pSeg->pbSeg);
                    for (unsigned idx = 0; idx < pSeg->cIoLogEntries; idx++)
                        drvdiskintIoLogEntryRelease(pSeg->apIoLog[idx]);
                    RTMemFree(pSeg);
                }
                else if (cbPreLeft && !cbPostLeft)
                {
                    /* Realloc to new size and insert. */
                    LogFlowFunc(("Realloc segment pSeg=%#p\n", pSeg));
                    pSeg->pbSeg = (uint8_t *)RTMemRealloc(pSeg->pbSeg, cbPreLeft);
                    for (unsigned idx = cbPreLeft / 512; idx < pSeg->cIoLogEntries; idx++)
                        drvdiskintIoLogEntryRelease(pSeg->apIoLog[idx]);
                    pSeg = (PDRVDISKSEGMENT)RTMemRealloc(pSeg, RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbPreLeft / 512]));
                    pSeg->Core.KeyLast = pSeg->Core.Key + cbPreLeft - 1;
                    pSeg->cbSeg = cbPreLeft;
                    pSeg->cIoLogEntries = cbPreLeft / 512;
                    bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSeg->Core);
                    Assert(fInserted);
                }
                else if (!cbPreLeft && cbPostLeft)
                {
                    /* Move data to the front and realloc. */
                    LogFlowFunc(("Move data and realloc segment pSeg=%#p\n", pSeg));
                    memmove(pSeg->pbSeg, pSeg->pbSeg + cbRange, cbPostLeft);
                    for (unsigned idx = 0; idx < cbRange / 512; idx++)
                        drvdiskintIoLogEntryRelease(pSeg->apIoLog[idx]);
                    for (unsigned idx = 0; idx < cbPostLeft / 512; idx++)
                        pSeg->apIoLog[idx] = pSeg->apIoLog[(cbRange / 512) + idx];
                    pSeg = (PDRVDISKSEGMENT)RTMemRealloc(pSeg, RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbPostLeft / 512]));
                    pSeg->pbSeg = (uint8_t *)RTMemRealloc(pSeg->pbSeg, cbPostLeft);
                    pSeg->Core.Key += cbRange;
                    pSeg->cbSeg = cbPostLeft;
                    pSeg->cIoLogEntries = cbPostLeft / 512;
                    bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSeg->Core);
                    Assert(fInserted);
                }
                else
                {
                    /* Split the segment into 2 new segments. */
                    LogFlowFunc(("Split segment pSeg=%#p\n", pSeg));
                    PDRVDISKSEGMENT pSegPost = (PDRVDISKSEGMENT)RTMemAllocZ(RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbPostLeft / 512]));
                    if (pSegPost)
                    {
                        pSegPost->Core.Key      = pSeg->Core.Key + cbPreLeft + cbRange;
                        pSegPost->Core.KeyLast  = pSeg->Core.KeyLast;
                        pSegPost->cbSeg         = cbPostLeft;
                        pSegPost->pbSeg         = (uint8_t *)RTMemAllocZ(cbPostLeft);
                        pSegPost->cIoLogEntries = cbPostLeft / 512;
                        if (!pSegPost->pbSeg)
                            RTMemFree(pSegPost);
                        else
                        {
                            memcpy(pSegPost->pbSeg, pSeg->pbSeg + cbPreLeft + cbRange, cbPostLeft);
                            for (unsigned idx = 0; idx < cbPostLeft / 512; idx++)
                                pSegPost->apIoLog[idx] = pSeg->apIoLog[((cbPreLeft + cbRange) / 512) + idx];

                            bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSegPost->Core);
                            Assert(fInserted);
                        }
                    }

                    /* Shrink the current segment. */
                    pSeg->pbSeg = (uint8_t *)RTMemRealloc(pSeg->pbSeg, cbPreLeft);
                    for (unsigned idx = cbPreLeft / 512; idx < (cbPreLeft + cbRange) / 512; idx++)
                        drvdiskintIoLogEntryRelease(pSeg->apIoLog[idx]);
                    pSeg = (PDRVDISKSEGMENT)RTMemRealloc(pSeg, RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbPreLeft / 512]));
                    pSeg->Core.KeyLast = pSeg->Core.Key + cbPreLeft - 1;
                    pSeg->cbSeg = cbPreLeft;
                    pSeg->cIoLogEntries = cbPreLeft / 512;
                    bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSeg->Core);
                    Assert(fInserted);
                } /* if (cbPreLeft && cbPostLeft) */
            }

            offStart += cbRange;
            cbLeft   -= cbRange;
        }
    }

    LogFlowFunc(("returns rc=%Rrc\n", rc));
    return rc;
}
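All three realloc branches above come from the same split arithmetic: the discarded range carves a segment into an optional head (cbPreLeft), the hole itself (cbRange) and an optional tail (cbPostLeft). A worked example with made-up numbers, just to illustrate the bookkeeping:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t offSegKey = 8192;   /* hypothetical pSeg->Core.Key   */
    size_t   cbSeg     = 4096;   /* hypothetical pSeg->cbSeg      */
    uint64_t offStart  = 9216;   /* start of the discarded range  */
    size_t   cbRange   = 1024;   /* length of the discarded range */

    size_t cbPreLeft  = (size_t)(offStart - offSegKey);  /* 1024 bytes kept in front */
    size_t cbPostLeft = cbSeg - cbRange - cbPreLeft;     /* 2048 bytes kept behind   */

    printf("pre=%zu discard=%zu post=%zu -> both non-zero, so the segment is split\n",
           cbPreLeft, cbRange, cbPostLeft);
    return 0;
}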
Example no. 5
RTDECL(int) RTSymlinkCreate(const char *pszSymlink, const char *pszTarget, RTSYMLINKTYPE enmType, uint32_t fCreate)
{
    /*
     * Validate the input.
     */
    AssertReturn(enmType > RTSYMLINKTYPE_INVALID && enmType < RTSYMLINKTYPE_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszSymlink, VERR_INVALID_POINTER);
    AssertPtrReturn(pszTarget, VERR_INVALID_POINTER);

    /*
     * Resolve the API.
     */
    typedef BOOLEAN (WINAPI *PFNCREATESYMBOLICLINKW)(LPCWSTR, LPCWSTR, DWORD);
    static PFNCREATESYMBOLICLINKW   s_pfnCreateSymbolicLinkW = NULL;
    static bool                     s_fTried = false;
    if (!s_fTried)
    {
        HMODULE hmod = LoadLibrary("KERNEL32.DLL");
        if (hmod)
        {
            PFNCREATESYMBOLICLINKW pfn = (PFNCREATESYMBOLICLINKW)GetProcAddress(hmod, "CreateSymbolicLinkW");
            if (pfn)
                s_pfnCreateSymbolicLinkW = pfn;
        }
        s_fTried = true;
    }
    if (!s_pfnCreateSymbolicLinkW)
    {
        LogFlow(("RTSymlinkCreate(%p={%s}, %p={%s}, %d, %#x): returns VERR_NOT_SUPPORTED - Windows API not found\n",
                 pszSymlink, pszSymlink, pszTarget, pszTarget, enmType, fCreate));
        return VERR_NOT_SUPPORTED;
    }

    /*
     * Convert the paths.
     */
    PRTUTF16 pwszNativeSymlink;
    int rc = RTStrToUtf16(pszSymlink, &pwszNativeSymlink);
    if (RT_SUCCESS(rc))
    {
        PRTUTF16 pwszNativeTarget;
        rc = RTStrToUtf16(pszTarget, &pwszNativeTarget);
        if (RT_SUCCESS(rc))
        {
            /*
             * Massage the target path and determine the link type.
             */
            size_t cchTarget        = strlen(pszTarget);
            size_t cchVolSpecTarget = rtPathVolumeSpecLen(pszTarget);
#if 0 /* looks like this isn't needed after all. That makes everything much simpler :-) */
            if (   cchTarget > RT_MIN(cchVolSpecTarget, 1)
                && RTPATH_IS_SLASH(pszTarget[cchTarget - 1]))
            {
                size_t cwcNativeTarget = RTUtf16Len(pwszNativeTarget);
                size_t offFromEnd = 1;
                while (   offFromEnd < cchTarget
                       && cchTarget - offFromEnd >= cchVolSpecTarget
                       && RTPATH_IS_SLASH(pszTarget[cchTarget - offFromEnd]))
                {
                    Assert(offFromEnd < cwcNativeTarget);
                    pwszNativeTarget[cwcNativeTarget - offFromEnd] = 0;
                    offFromEnd++;
                }
            }
#endif

            if (enmType == RTSYMLINKTYPE_UNKNOWN)
            {
                if (   cchTarget > cchVolSpecTarget
                    && RTPATH_IS_SLASH(pszTarget[cchTarget - 1]))
                    enmType = RTSYMLINKTYPE_DIR;
                else if (cchVolSpecTarget)
                {
                    /** @todo this is subject to sharing violations. */
                    DWORD dwAttr = GetFileAttributesW(pwszNativeTarget);
                    if (   dwAttr != INVALID_FILE_ATTRIBUTES
                        && (dwAttr & FILE_ATTRIBUTE_DIRECTORY))
                        enmType = RTSYMLINKTYPE_DIR;
                }
                else
                {
                    /** @todo Join the symlink directory with the target and
                     *        look up the attributes on that. -lazy bird. */
                }
            }

            /*
             * Create the link.
             */
            if (s_pfnCreateSymbolicLinkW(pwszNativeSymlink, pwszNativeTarget, enmType == RTSYMLINKTYPE_DIR))
                rc = VINF_SUCCESS;
            else
                rc = RTErrConvertFromWin32(GetLastError());

            RTUtf16Free(pwszNativeTarget);
        }
        RTUtf16Free(pwszNativeSymlink);
    }

    LogFlow(("RTSymlinkCreate(%p={%s}, %p={%s}, %d, %#x): returns %Rrc\n", pszSymlink, pszSymlink, pszTarget, pszTarget, enmType, fCreate, rc));
    return rc;
}
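A minimal usage sketch of RTSymlinkCreate follows; both paths are assumptions made for the example. RTSYMLINKTYPE_UNKNOWN lets the code above probe the target attributes, and VERR_NOT_SUPPORTED is what hosts without CreateSymbolicLinkW return.

#include <iprt/symlink.h>
#include <iprt/stream.h>
#include <iprt/err.h>

static int makeLinkSketch(void)
{
    int rc = RTSymlinkCreate("C:\\tmp\\tempdir-link", "C:\\Windows\\Temp",
                             RTSYMLINKTYPE_UNKNOWN, 0 /*fCreate*/);
    if (rc == VERR_NOT_SUPPORTED)
        RTPrintf("symlinks are not supported on this host\n");
    else if (RT_FAILURE(rc))
        RTPrintf("RTSymlinkCreate failed: %Rrc\n", rc);
    return rc;
}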
DECLHIDDEN(int) drvHostBaseScsiCmdOs(PDRVHOSTBASE pThis, const uint8_t *pbCmd, size_t cbCmd, PDMMEDIATXDIR enmTxDir,
                                     void *pvBuf, uint32_t *pcbBuf, uint8_t *pbSense, size_t cbSense, uint32_t cTimeoutMillies)
{
    /*
     * Minimal input validation.
     */
    Assert(enmTxDir == PDMMEDIATXDIR_NONE || enmTxDir == PDMMEDIATXDIR_FROM_DEVICE || enmTxDir == PDMMEDIATXDIR_TO_DEVICE);
    Assert(!pvBuf || pcbBuf);
    Assert(pvBuf || enmTxDir == PDMMEDIATXDIR_NONE);
    Assert(pbSense || !cbSense);
    RT_NOREF(cbSense);
    AssertPtr(pbCmd);
    Assert(cbCmd <= 16 && cbCmd >= 1);

    /* Allocate the temporary buffer lazily. */
    if (RT_UNLIKELY(!pThis->Os.pbDoubleBuffer))
    {
        pThis->Os.pbDoubleBuffer = (uint8_t *)RTMemAlloc(SCSI_MAX_BUFFER_SIZE);
        if (!pThis->Os.pbDoubleBuffer)
            return VERR_NO_MEMORY;
    }

    int rc = VERR_GENERAL_FAILURE;
    int direction;
    struct cdrom_generic_command cgc;

    switch (enmTxDir)
    {
    case PDMMEDIATXDIR_NONE:
        Assert(*pcbBuf == 0);
        direction = CGC_DATA_NONE;
        break;
    case PDMMEDIATXDIR_FROM_DEVICE:
        Assert(*pcbBuf != 0);
        Assert(*pcbBuf <= SCSI_MAX_BUFFER_SIZE);
        /* Make sure that the buffer is clear for commands reading
         * data. The actually received data may be shorter than what
         * we expect, and due to the unreliable feedback about how much
         * data the ioctl actually transferred, it's impossible to
         * prevent that. Returning previous buffer contents may cause
         * security problems inside the guest OS, if users can issue
         * commands to the CDROM device. */
        memset(pThis->Os.pbDoubleBuffer, '\0', *pcbBuf);
        direction = CGC_DATA_READ;
        break;
    case PDMMEDIATXDIR_TO_DEVICE:
        Assert(*pcbBuf != 0);
        Assert(*pcbBuf <= SCSI_MAX_BUFFER_SIZE);
        memcpy(pThis->Os.pbDoubleBuffer, pvBuf, *pcbBuf);
        direction = CGC_DATA_WRITE;
        break;
    default:
        AssertMsgFailed(("enmTxDir invalid!\n"));
        direction = CGC_DATA_NONE;
    }
    memset(&cgc, '\0', sizeof(cgc));
    memcpy(cgc.cmd, pbCmd, RT_MIN(CDROM_PACKET_SIZE, cbCmd));
    cgc.buffer = (unsigned char *)pThis->Os.pbDoubleBuffer;
    cgc.buflen = *pcbBuf;
    cgc.stat = 0;
    Assert(cbSense >= sizeof(struct request_sense));
    cgc.sense = (struct request_sense *)pbSense;
    cgc.data_direction = direction;
    cgc.quiet = false;
    cgc.timeout = cTimeoutMillies;
    rc = ioctl(RTFileToNative(pThis->Os.hFileDevice), CDROM_SEND_PACKET, &cgc);
    if (rc < 0)
    {
        if (errno == EBUSY)
            rc = VERR_PDM_MEDIA_LOCKED;
        else if (errno == ENOSYS)
            rc = VERR_NOT_SUPPORTED;
        else
        {
            rc = RTErrConvertFromErrno(errno);
            if (rc == VERR_ACCESS_DENIED && cgc.sense->sense_key == SCSI_SENSE_NONE)
                cgc.sense->sense_key = SCSI_SENSE_ILLEGAL_REQUEST;
            Log2(("%s: error status %d, rc=%Rrc\n", __FUNCTION__, cgc.stat, rc));
        }
    }
    switch (enmTxDir)
    {
    case PDMMEDIATXDIR_FROM_DEVICE:
        memcpy(pvBuf, pThis->Os.pbDoubleBuffer, *pcbBuf);
        break;
    default:
        ;
    }
    Log2(("%s: after ioctl: cgc.buflen=%d txlen=%d\n", __FUNCTION__, cgc.buflen, *pcbBuf));
    /* The value of cgc.buflen does not reliably reflect the actual amount
     * of data transferred (for packet commands with little data transfer
     * it's 0). So just assume that everything worked ok. */

    return rc;
}
Example no. 7
/**
 * Record a successful write to the virtual disk.
 *
 * @returns VBox status code.
 * @param   pThis    Disk integrity driver instance data.
 * @param   paSeg    Segment array of the write to record.
 * @param   cSeg     Number of segments.
 * @param   off      Start offset.
 * @param   cbWrite  Number of bytes to record.
 */
static int drvdiskintWriteRecord(PDRVDISKINTEGRITY pThis, PCRTSGSEG paSeg, unsigned cSeg,
                                 uint64_t off, size_t cbWrite)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pThis=%#p paSeg=%#p cSeg=%u off=%llx cbWrite=%u\n",
                 pThis, paSeg, cSeg, off, cbWrite));

    /* Update the segments */
    size_t cbLeft   = cbWrite;
    RTFOFF offCurr  = (RTFOFF)off;
    RTSGBUF SgBuf;
    PIOLOGENT pIoLogEnt = (PIOLOGENT)RTMemAllocZ(sizeof(IOLOGENT));
    if (!pIoLogEnt)
        return VERR_NO_MEMORY;

    pIoLogEnt->off     = off;
    pIoLogEnt->cbWrite = cbWrite;
    pIoLogEnt->cRefs   = 0;

    RTSgBufInit(&SgBuf, paSeg, cSeg);

    while (cbLeft)
    {
        PDRVDISKSEGMENT pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetRangeGet(pThis->pTreeSegments, offCurr);
        size_t cbRange  = 0;
        bool fSet       = false;
        unsigned offSeg = 0;

        if (!pSeg)
        {
            /* Get next segment */
            pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetGetBestFit(pThis->pTreeSegments, offCurr, true);
            if (   !pSeg
                || offCurr + (RTFOFF)cbLeft <= pSeg->Core.Key)
                cbRange = cbLeft;
            else
                cbRange = pSeg->Core.Key - offCurr;

            Assert(cbRange % 512 == 0);

            /* Create new segment */
            pSeg = (PDRVDISKSEGMENT)RTMemAllocZ(RT_OFFSETOF(DRVDISKSEGMENT, apIoLog[cbRange / 512]));
            if (pSeg)
            {
                pSeg->Core.Key      = offCurr;
                pSeg->Core.KeyLast  = offCurr + (RTFOFF)cbRange - 1;
                pSeg->cbSeg         = cbRange;
                pSeg->pbSeg         = (uint8_t *)RTMemAllocZ(cbRange);
                pSeg->cIoLogEntries = cbRange / 512;
                if (!pSeg->pbSeg)
                    RTMemFree(pSeg);
                else
                {
                    bool fInserted = RTAvlrFileOffsetInsert(pThis->pTreeSegments, &pSeg->Core);
                    AssertMsg(fInserted, ("Bug!\n"));
                    fSet = true;
                }
            }
        }
        else
        {
            fSet    = true;
            offSeg  = offCurr - pSeg->Core.Key;
            cbRange = RT_MIN(cbLeft, (size_t)(pSeg->Core.KeyLast + 1 - offCurr));
        }

        if (fSet)
        {
            AssertPtr(pSeg);
            size_t cbCopied = RTSgBufCopyToBuf(&SgBuf, pSeg->pbSeg + offSeg, cbRange);
            Assert(cbCopied == cbRange);

            /* Update the I/O log pointers */
            Assert(offSeg % 512 == 0);
            Assert(cbRange % 512 == 0);
            while (offSeg < cbRange)
            {
                uint32_t uSector = offSeg / 512;
                PIOLOGENT pIoLogOld = NULL;

                AssertMsg(uSector < pSeg->cIoLogEntries, ("Internal bug!\n"));

                pIoLogOld = pSeg->apIoLog[uSector];
                if (pIoLogOld)
                {
                    pIoLogOld->cRefs--;
                    if (!pIoLogOld->cRefs)
                        RTMemFree(pIoLogOld);
                }

                pSeg->apIoLog[uSector] = pIoLogEnt;
                pIoLogEnt->cRefs++;

                offSeg += 512;
            }
        }
        else
            RTSgBufAdvance(&SgBuf, cbRange);

        offCurr += cbRange;
        cbLeft  -= cbRange;
    }

    return rc;
}
static HRESULT WINAPI IDirect3D9Impl_GetDeviceCaps(LPDIRECT3D9EX iface, UINT Adapter, D3DDEVTYPE DeviceType, D3DCAPS9* pCaps) {
    IDirect3D9Impl *This = (IDirect3D9Impl *)iface;
    HRESULT hrc = D3D_OK;
    WINED3DCAPS *pWineCaps;

    TRACE("iface %p, adapter %u, device_type %#x, caps %p.\n", iface, Adapter, DeviceType, pCaps);

    if(NULL == pCaps){
        return D3DERR_INVALIDCALL;
    }
    pWineCaps = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(WINED3DCAPS));
    if(pWineCaps == NULL){
        return D3DERR_INVALIDCALL; /*well this is what MSDN says to return*/
    }
    memset(pCaps, 0, sizeof(*pCaps));

    wined3d_mutex_lock();
    hrc = IWineD3D_GetDeviceCaps(This->WineD3D, Adapter, DeviceType, pWineCaps);
    wined3d_mutex_unlock();

    WINECAPSTOD3D9CAPS(pCaps, pWineCaps)
    HeapFree(GetProcessHeap(), 0, pWineCaps);

    /* Some functionality is implemented in d3d9.dll, not wined3d.dll. Add the needed caps */
    pCaps->DevCaps2 |= D3DDEVCAPS2_CAN_STRETCHRECT_FROM_TEXTURES;

    filter_caps(pCaps);

    /* fixup caps  */
#ifdef VBOX_WITH_WDDM
    /* needed for Windows Media Player to work properly */
    pCaps->Caps |= D3DCAPS_READ_SCANLINE;
    pCaps->Caps2 |= 0x00080000 /*D3DCAPS2_CANRENDERWINDOWED*/;
    pCaps->Caps2 |= D3DCAPS2_CANSHARERESOURCE;
    pCaps->DevCaps |= D3DDEVCAPS_FLOATTLVERTEX /* <- must be set according to the docs */
            /*| D3DDEVCAPS_HWVERTEXBUFFER | D3DDEVCAPS_HWINDEXBUFFER |  D3DDEVCAPS_SUBVOLUMELOCK */;
    pCaps->PrimitiveMiscCaps |= D3DPMISCCAPS_INDEPENDENTWRITEMASKS
            | D3DPMISCCAPS_FOGINFVF
            | D3DPMISCCAPS_SEPARATEALPHABLEND | D3DPMISCCAPS_MRTINDEPENDENTBITDEPTHS;
    pCaps->RasterCaps |= D3DPRASTERCAPS_SUBPIXEL | D3DPRASTERCAPS_STIPPLE | D3DPRASTERCAPS_ZBIAS | D3DPRASTERCAPS_COLORPERSPECTIVE /* keep */;
    pCaps->TextureCaps |= D3DPTEXTURECAPS_TRANSPARENCY | D3DPTEXTURECAPS_TEXREPEATNOTSCALEDBYSIZE;
    pCaps->TextureAddressCaps |= D3DPTADDRESSCAPS_MIRRORONCE;
    pCaps->VolumeTextureAddressCaps |= D3DPTADDRESSCAPS_MIRRORONCE;
    pCaps->StencilCaps |= D3DSTENCILCAPS_TWOSIDED;
    pCaps->DeclTypes |= D3DDTCAPS_FLOAT16_2 | D3DDTCAPS_FLOAT16_4;
    pCaps->VertexTextureFilterCaps |= D3DPTFILTERCAPS_MINFPOINT | D3DPTFILTERCAPS_MAGFPOINT;
    pCaps->GuardBandLeft = -8192.;
    pCaps->GuardBandTop = -8192.;
    pCaps->GuardBandRight = 8192.;
    pCaps->GuardBandBottom = 8192.;
    pCaps->VS20Caps.DynamicFlowControlDepth = 24;
    pCaps->VS20Caps.NumTemps = D3DVS20_MAX_NUMTEMPS;
    pCaps->PS20Caps.DynamicFlowControlDepth = 24;
    pCaps->PS20Caps.NumTemps = D3DVS20_MAX_NUMTEMPS;
#endif
    /* workaround for wine not returning InstructionSlots correctly for shaders v3.0 */
    if ((pCaps->VertexShaderVersion & 0xff00) == 0x0300)
    {
        pCaps->MaxVertexShader30InstructionSlots = RT_MIN(32768, pCaps->MaxVertexShader30InstructionSlots);
        pCaps->MaxPixelShader30InstructionSlots = RT_MIN(32768, pCaps->MaxPixelShader30InstructionSlots);
    }
#if defined(DEBUG)
    if ((pCaps->VertexShaderVersion & 0xff00) == 0x0300)
    {
        ASSERT_D3D(pCaps->MaxVertexShader30InstructionSlots >= 512);
        ASSERT_D3D(pCaps->MaxVertexShader30InstructionSlots <= 32768);
        ASSERT_D3D(pCaps->MaxPixelShader30InstructionSlots >= 512);
        ASSERT_D3D(pCaps->MaxPixelShader30InstructionSlots <= 32768);
    }
    else if ((pCaps->VertexShaderVersion & 0xff00) == 0x0200)
    {
        ASSERT_D3D(pCaps->MaxVertexShader30InstructionSlots == 0);
        ASSERT_D3D(pCaps->MaxPixelShader30InstructionSlots == 0);
    }
    else
    {
        ERR_D3D();
    }
#endif

    TRACE("(%p) returning %p\n", This, pCaps);

    ASSERT_D3D(hrc == S_OK);
    return hrc;
}
Example no. 9
/* Synchronously obtain a standard USB descriptor for a device, used in order
 * to grab configuration descriptors when we first add the device
 */
static void *GetStdDescSync(PUSBPROXYDEV pProxyDev, uint8_t iDescType, uint8_t iIdx, uint16_t LangId, uint16_t cbHint)
{
#define GET_DESC_RETRIES 6
    int cRetries = 0;
    uint16_t cbInitialHint = cbHint;

    LogFlow(("GetStdDescSync: pProxyDev=%s\n", pProxyDev->pUsbIns->pszName));
    for (;;)
    {
        /*
         * Setup a MSG URB, queue and reap it.
         */
        int rc = VINF_SUCCESS;
        VUSBURB Urb;
        AssertCompile(RT_SIZEOFMEMB(VUSBURB, abData) >= _4K);
        Urb.u32Magic      = VUSBURB_MAGIC;
        Urb.enmState      = VUSBURBSTATE_IN_FLIGHT;
        Urb.pszDesc       = (char*)"URB sync";
        Urb.pHci          = NULL;
        Urb.paTds         = NULL;
        Urb.Dev.pvPrivate = NULL;
        Urb.Dev.pNext     = NULL;
        Urb.DstAddress    = 0;
        Urb.EndPt         = 0;
        Urb.enmType       = VUSBXFERTYPE_MSG;
        Urb.enmDir        = VUSBDIRECTION_IN;
        Urb.fShortNotOk   = false;
        Urb.enmStatus     = VUSBSTATUS_INVALID;
        Urb.pVUsb         = NULL;
        cbHint = RT_MIN(cbHint, sizeof(Urb.abData) - sizeof(VUSBSETUP));
        Urb.cbData = cbHint + sizeof(VUSBSETUP);

        PVUSBSETUP pSetup = (PVUSBSETUP)Urb.abData;
        pSetup->bmRequestType = VUSB_DIR_TO_HOST | VUSB_REQ_STANDARD | VUSB_TO_DEVICE;
        pSetup->bRequest = VUSB_REQ_GET_DESCRIPTOR;
        pSetup->wValue = (iDescType << 8) | iIdx;
        pSetup->wIndex = LangId;
        pSetup->wLength = cbHint;

        uint8_t *pbDesc = (uint8_t *)(pSetup + 1);
        uint32_t cbDesc = 0;
        PVUSBURB pUrbReaped = NULL;

        rc = pProxyDev->pOps->pfnUrbQueue(pProxyDev, &Urb);
        if (RT_FAILURE(rc))
        {
            Log(("GetStdDescSync: pfnUrbReap failed, rc=%d\n", rc));
            goto err;
        }

        /* Don't wait forever, it's just a simple request that should
           return immediately. Since we're executing in the EMT thread
           it's important not to get stuck here. (Some of the builtin
           iMac devices may refuse to respond for instance.) */
        pUrbReaped = pProxyDev->pOps->pfnUrbReap(pProxyDev, 5000 /* ms */);
        if (!pUrbReaped)
        {
            rc = pProxyDev->pOps->pfnUrbCancel(pProxyDev, &Urb);
            AssertRC(rc);
            /** @todo: This breaks the comment above... */
            pUrbReaped = pProxyDev->pOps->pfnUrbReap(pProxyDev, RT_INDEFINITE_WAIT);
        }
        if (pUrbReaped != &Urb)
        {
            Log(("GetStdDescSync: pfnUrbReap failed, pUrbReaped=%p\n", pUrbReaped));
            goto err;
        }

        if (Urb.enmStatus != VUSBSTATUS_OK)
        {
            Log(("GetStdDescSync: Urb.enmStatus=%d\n", Urb.enmStatus));
            goto err;
        }

        /*
         * Check the length; config descriptors have a total_length field.
         */
        if (iDescType == VUSB_DT_CONFIG)
        {
            if (Urb.cbData < sizeof(VUSBSETUP) + 4)
            {
                Log(("GetStdDescSync: Urb.cbData=%#x (min 4)\n", Urb.cbData));
                goto err;
            }
            cbDesc = RT_LE2H_U16(((uint16_t *)pbDesc)[1]);
        }
        else
        {
            if (Urb.cbData < sizeof(VUSBSETUP) + 1)
            {
                Log(("GetStdDescSync: Urb.cbData=%#x (min 1)\n", Urb.cbData));
                goto err;
            }
            cbDesc = ((uint8_t *)pbDesc)[0];
        }

        Log(("GetStdDescSync: got Urb.cbData=%u, cbDesc=%u cbHint=%u\n", Urb.cbData, cbDesc, cbHint));

        if (    Urb.cbData == cbHint + sizeof(VUSBSETUP)
            &&  cbDesc > Urb.cbData - sizeof(VUSBSETUP))
        {
            cbHint = cbDesc;
            Log(("GetStdDescSync: Part descriptor, Urb.cbData=%u, cbDesc=%u cbHint=%u\n", Urb.cbData, cbDesc, cbHint));

            if (cbHint > sizeof(Urb.abData))
                Log(("GetStdDescSync: cbHint=%u, Urb.abData=%u\n", cbHint, sizeof(Urb.abData)));

            goto err;
        }

        if ((cbDesc > (Urb.cbData - sizeof(VUSBSETUP))))
        {
            Log(("GetStdDescSync: Descriptor length too short, cbDesc=%u, Urb.cbData=%u\n", cbDesc, Urb.cbData));
            goto err;
        }

        if (   cbInitialHint != cbHint
            && (cbDesc != cbHint || Urb.cbData < cbInitialHint))
        {
            Log(("GetStdDescSync: Descriptor length incorrect, cbDesc=%u, Urb.cbData=%u, cbHint=%u\n", cbDesc, Urb.cbData, cbHint));
            goto err;
        }

#ifdef LOG_ENABLED
        vusbUrbTrace(&Urb, "GetStdDescSync", true);
#endif

        /*
         * Fine, we got everything; return a heap duplicate of the descriptor.
         */
        return RTMemDup(pbDesc, cbDesc);

err:
        cRetries++;
        if (cRetries < GET_DESC_RETRIES)
        {
            Log(("GetStdDescSync: Retrying %u/%u\n", cRetries, GET_DESC_RETRIES));
            RTThreadSleep(100);
            continue;
        }
        else
        {
            Log(("GetStdDescSync: Retries exceeded %u/%u. Giving up.\n", cRetries, GET_DESC_RETRIES));
            break;
        }
    }

    return NULL;
}
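For configuration descriptors the loop learns the real size from the first bytes of the reply: bLength and bDescriptorType are followed by a little-endian wTotalLength covering the whole configuration bundle, which is what the retried, full-sized request then asks for. A tiny sketch of that probe, based on the generic USB descriptor layout rather than VBox code:

#include <stdint.h>

/* Extract wTotalLength (bytes 2..3, little endian) from the start of a
 * USB configuration descriptor. */
static uint16_t ConfigDescTotalLength(const uint8_t *pbDesc)
{
    return (uint16_t)(pbDesc[2] | ((uint16_t)pbDesc[3] << 8));
}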
Example no. 10
static HRESULT vboxWddmGetD3D9Caps(PVBOXWDDMDISP_D3D pD3D, D3DCAPS9 *pCaps)
{
    HRESULT hr = pD3D->pD3D9If->GetDeviceCaps(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, pCaps);
    if (FAILED(hr))
    {
        WARN(("GetDeviceCaps failed hr(0x%x)",hr));
        return hr;
    }

    /* needed for Windows Media Player to work properly */
    pCaps->Caps |= D3DCAPS_READ_SCANLINE;
    pCaps->Caps2 |= 0x00080000 /*D3DCAPS2_CANRENDERWINDOWED*/;
    pCaps->Caps2 |= D3DCAPS2_CANSHARERESOURCE;
    pCaps->DevCaps |= D3DDEVCAPS_FLOATTLVERTEX /* <- must be set according to the docs */
            /*| D3DDEVCAPS_HWVERTEXBUFFER | D3DDEVCAPS_HWINDEXBUFFER |  D3DDEVCAPS_SUBVOLUMELOCK */;
    pCaps->PrimitiveMiscCaps |= D3DPMISCCAPS_INDEPENDENTWRITEMASKS
            | D3DPMISCCAPS_FOGINFVF
            | D3DPMISCCAPS_SEPARATEALPHABLEND | D3DPMISCCAPS_MRTINDEPENDENTBITDEPTHS;
    pCaps->RasterCaps |= D3DPRASTERCAPS_SUBPIXEL | D3DPRASTERCAPS_STIPPLE | D3DPRASTERCAPS_ZBIAS | D3DPRASTERCAPS_COLORPERSPECTIVE /* keep */;
    pCaps->TextureCaps |= D3DPTEXTURECAPS_TRANSPARENCY | D3DPTEXTURECAPS_TEXREPEATNOTSCALEDBYSIZE;
    pCaps->TextureAddressCaps |= D3DPTADDRESSCAPS_MIRRORONCE;
    pCaps->VolumeTextureAddressCaps |= D3DPTADDRESSCAPS_MIRRORONCE;
    pCaps->StencilCaps |= D3DSTENCILCAPS_TWOSIDED;
    pCaps->DeclTypes |= D3DDTCAPS_FLOAT16_2 | D3DDTCAPS_FLOAT16_4;
    pCaps->VertexTextureFilterCaps |= D3DPTFILTERCAPS_MINFPOINT | D3DPTFILTERCAPS_MAGFPOINT;
    pCaps->GuardBandLeft = -8192.;
    pCaps->GuardBandTop = -8192.;
    pCaps->GuardBandRight = 8192.;
    pCaps->GuardBandBottom = 8192.;
    pCaps->VS20Caps.DynamicFlowControlDepth = 24;
    pCaps->VS20Caps.NumTemps = D3DVS20_MAX_NUMTEMPS;
    pCaps->PS20Caps.DynamicFlowControlDepth = 24;
    pCaps->PS20Caps.NumTemps = D3DVS20_MAX_NUMTEMPS;

    /* workaround for wine not returning InstructionSlots correctly for shaders v3.0 */
    if ((pCaps->VertexShaderVersion & 0xff00) == 0x0300)
    {
        pCaps->MaxVertexShader30InstructionSlots = RT_MIN(32768, pCaps->MaxVertexShader30InstructionSlots);
        pCaps->MaxPixelShader30InstructionSlots = RT_MIN(32768, pCaps->MaxPixelShader30InstructionSlots);
    }
#if defined(DEBUG)
    if ((pCaps->VertexShaderVersion & 0xff00) == 0x0300)
    {
        Assert(pCaps->MaxVertexShader30InstructionSlots >= 512);
        Assert(pCaps->MaxVertexShader30InstructionSlots <= 32768);
        Assert(pCaps->MaxPixelShader30InstructionSlots >= 512);
        Assert(pCaps->MaxPixelShader30InstructionSlots <= 32768);
    }
    else if ((pCaps->VertexShaderVersion & 0xff00) == 0x0200)
    {
        Assert(pCaps->MaxVertexShader30InstructionSlots == 0);
        Assert(pCaps->MaxPixelShader30InstructionSlots == 0);
    }
    else
    {
        WARN(("incorect shader caps!"));
    }
#endif

    vboxDispDumpD3DCAPS9(pCaps);

    return S_OK;
}
RTDECL(int) RTVfsIoStrmReadAll(RTVFSIOSTREAM hVfsIos, void **ppvBuf, size_t *pcbBuf)
{
    /*
     * Try query the object information and in case the stream has a known
     * size we could use for guidance.
     */
    RTFSOBJINFO ObjInfo;
    int    rc = RTVfsIoStrmQueryInfo(hVfsIos, &ObjInfo, RTFSOBJATTRADD_NOTHING);
    size_t cbAllocated = RT_SUCCESS(rc) && ObjInfo.cbObject > 0 && ObjInfo.cbObject < _1G
                       ? (size_t)ObjInfo.cbObject + 1 : _16K;
    cbAllocated += READ_ALL_HEADER_SIZE;
    void *pvBuf = RTMemAlloc(cbAllocated);
    if (pvBuf)
    {
        memset(pvBuf, 0xfe, READ_ALL_HEADER_SIZE);
        size_t off = 0;
        for (;;)
        {
            /*
             * Handle buffer growing and detecting the end of it all.
             */
            size_t cbToRead = cbAllocated - off - READ_ALL_HEADER_SIZE - 1;
            if (!cbToRead)
            {
                /* The end? */
                uint8_t bIgn;
                size_t cbIgn;
                rc = RTVfsIoStrmRead(hVfsIos, &bIgn, 0, true /*fBlocking*/, &cbIgn);
                if (rc == VINF_EOF)
                    break;

                /* Grow the buffer. */
                cbAllocated -= READ_ALL_HEADER_SIZE - 1;
                cbAllocated += RT_MAX(RT_MIN(cbAllocated, _32M), _1K);
                cbAllocated  = RT_ALIGN_Z(cbAllocated, _4K);
                cbAllocated += READ_ALL_HEADER_SIZE + 1;

                void *pvNew = RTMemRealloc(pvBuf, cbAllocated);
                AssertBreakStmt(pvNew, rc = VERR_NO_MEMORY);
                pvBuf = pvNew;

                cbToRead = cbAllocated - off - READ_ALL_HEADER_SIZE - 1;
            }
            Assert(cbToRead < cbAllocated);

            /*
             * Read.
             */
            size_t cbActual;
            rc = RTVfsIoStrmRead(hVfsIos, (uint8_t *)pvBuf + READ_ALL_HEADER_SIZE + off, cbToRead,
                                 true /*fBlocking*/, &cbActual);
            if (RT_FAILURE(rc))
                break;
            Assert(cbActual > 0);
            Assert(cbActual <= cbToRead);
            off += cbActual;
            if (rc == VINF_EOF)
                break;
        }
        Assert(rc != VERR_EOF);
        if (RT_SUCCESS(rc))
        {
            ((size_t *)pvBuf)[0] = READ_ALL_HEADER_MAGIC;
            ((size_t *)pvBuf)[1] = off;
            ((uint8_t *)pvBuf)[READ_ALL_HEADER_SIZE + off] = 0;

            *ppvBuf = (uint8_t *)pvBuf + READ_ALL_HEADER_SIZE;
            *pcbBuf = off;
            return VINF_SUCCESS;
        }

        RTMemFree(pvBuf);
    }
    else
        rc = VERR_NO_MEMORY;
    *ppvBuf = NULL;
    *pcbBuf = 0;
    return rc;
}
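Because of the hidden READ_ALL_HEADER_SIZE bytes in front of the returned buffer, the result is not meant to be handed straight to RTMemFree(); IPRT pairs this function with RTVfsIoStrmReadAllFree() for that purpose (stated here as an assumption based on the header handling above). A minimal usage sketch, assuming hVfsIos is an already opened stream:

#include <iprt/vfs.h>
#include <iprt/stream.h>

static int dumpStreamSketch(RTVFSIOSTREAM hVfsIos)
{
    void  *pvBuf = NULL;
    size_t cbBuf = 0;
    int rc = RTVfsIoStrmReadAll(hVfsIos, &pvBuf, &cbBuf);
    if (RT_SUCCESS(rc))
    {
        RTPrintf("stream holds %zu bytes (zero terminated)\n", cbBuf);
        RTVfsIoStrmReadAllFree(pvBuf, cbBuf); /* not RTMemFree(), see the hidden header */
    }
    return rc;
}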
Example no. 12
/**
 * Sets the given guest video mode, reconfigures the SDL framebuffer
 * accordingly and runs the benchmark.
 */
static void bench(unsigned long w, unsigned long h, unsigned long bpp)
{
    Uint32 Rmask,  Gmask,  Bmask, Amask = 0;
    Uint32 Rsize,  Gsize,  Bsize;
    Uint32 newWidth, newHeight;

    guGuestXRes = w;
    guGuestYRes = h;
    guGuestBpp  = bpp;

    RTPrintf("\n");

    /* a different format we support directly? */
    switch (guGuestBpp)
    {
        case 16:
        {
            Rmask = 0xF800;
            Gmask = 0x07E0;
            Bmask = 0x001F;
            Amask = 0x0000;
            Rsize  = 5;
            Gsize  = 6;
            Bsize  = 5;
            break;
        }

        case 24:
        {
            Rmask = 0x00FF0000;
            Gmask = 0x0000FF00;
            Bmask = 0x000000FF;
            Amask = 0x00000000;
            Rsize  = 8;
            Gsize  = 8;
            Bsize  = 8;
            break;
        }

        default:
            Rmask = 0x00FF0000;
            Gmask = 0x0000FF00;
            Bmask = 0x000000FF;
            Amask = 0x00000000;
            Rsize  = 8;
            Gsize  = 8;
            Bsize  = 8;
            break;
    }

    int sdlFlags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
#ifdef VBOX_OPENGL
    if (gfOpenGL)
        sdlFlags |= SDL_OPENGL;
#endif
    if (gfResizable)
        sdlFlags |= SDL_RESIZABLE;
    if (gfFullscreen)
        sdlFlags |= SDL_FULLSCREEN;

    /*
     * Now we have to check whether there are video mode restrictions
     */
    SDL_Rect **modes;
    /* Get available fullscreen/hardware modes */
    modes = SDL_ListModes(NULL, sdlFlags);
    if (modes == NULL)
    {
        RTPrintf("Error: SDL_ListModes failed with message '%s'\n", SDL_GetError());
        return;
    }

    /* -1 means that any mode is possible (usually non fullscreen) */
    if (modes != (SDL_Rect **)-1)
    {
        /*
         * according to the SDL documentation, the API guarantees that
         * the modes are sorted from larger to smaller, so we just
         * take the first entry as the maximum.
         */
        guMaxScreenWidth  = modes[0]->w;
        guMaxScreenHeight = modes[0]->h;
    }
    else
    {
        /* no restriction */
        guMaxScreenWidth  = ~0U;
        guMaxScreenHeight = ~0U;
    }

    newWidth  = RT_MIN(guMaxScreenWidth,  guGuestXRes);
    newHeight = RT_MIN(guMaxScreenHeight, guGuestYRes);

    /*
     * Now set the screen resolution and get the surface pointer
     * @todo BPP is not supported!
     */
#ifdef VBOX_OPENGL
    if (gfOpenGL)
    {
        checkSDL("SDL_GL_SetAttribute", SDL_GL_SetAttribute(SDL_GL_RED_SIZE,   Rsize));
        checkSDL("SDL_GL_SetAttribute", SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, Gsize));
        checkSDL("SDL_GL_SetAttribute", SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE,  Bsize));
        checkSDL("SDL_GL_SetAttribute", SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 0));
    }
#else
    NOREF(Rsize); NOREF(Gsize); NOREF(Bsize);
#endif

    RTPrintf("Testing " ESC_BOLD "%ldx%ld@%ld" ESC_NORM "\n", guGuestXRes, guGuestYRes, guGuestBpp);

    gScreen = SDL_SetVideoMode(newWidth, newHeight, 0, sdlFlags);
    if (!gScreen)
    {
        RTPrintf("SDL_SetVideoMode failed (%s)\n", SDL_GetError());
        return;
    }

    /* first free the current surface */
    if (gSurfVRAM)
    {
        SDL_FreeSurface(gSurfVRAM);
        gSurfVRAM = NULL;
    }
    if (gPtrVRAM)
    {
        free(gPtrVRAM);
        gPtrVRAM = NULL;
    }

    if (gScreen->format->BitsPerPixel != guGuestBpp)
    {
        /* Create a source surface from guest VRAM. */
        int bytes_per_pixel = (guGuestBpp + 7) / 8;
        gPtrVRAM  = malloc(guGuestXRes * guGuestYRes * bytes_per_pixel);
        gSurfVRAM = SDL_CreateRGBSurfaceFrom(gPtrVRAM, guGuestXRes, guGuestYRes, guGuestBpp,
                                             bytes_per_pixel * guGuestXRes,
                                             Rmask, Gmask, Bmask, Amask);
    }
    else
    {
        /* Create a software surface for which SDL allocates the RAM */
        gSurfVRAM = SDL_CreateRGBSurface(SDL_SWSURFACE, guGuestXRes, guGuestYRes, guGuestBpp,
                                         Rmask, Gmask, Bmask, Amask);
    }

    if (!gSurfVRAM)
    {
        RTPrintf("Failed to allocate surface %ldx%ld@%ld\n",
                guGuestXRes, guGuestYRes, guGuestBpp);
        return;
    }

    RTPrintf("  gScreen=%dx%d@%d (surface: %s)\n",
            gScreen->w, gScreen->h, gScreen->format->BitsPerPixel,
             (gScreen->flags & SDL_HWSURFACE) == 0 ? "software" : "hardware");

    SDL_Rect rect = { 0, 0, (Uint16)guGuestXRes, (Uint16)guGuestYRes };
    checkSDL("SDL_FillRect",
              SDL_FillRect(gSurfVRAM, &rect,
                           SDL_MapRGB(gSurfVRAM->format, 0x5F, 0x6F, 0x1F)));

#ifdef VBOX_OPENGL
    if (gfOpenGL)
    {
        int r, g, b, d, o;
        SDL_GL_GetAttribute(SDL_GL_RED_SIZE,     &r);
        SDL_GL_GetAttribute(SDL_GL_GREEN_SIZE,   &g);
        SDL_GL_GetAttribute(SDL_GL_BLUE_SIZE,    &b);
        SDL_GL_GetAttribute(SDL_GL_DEPTH_SIZE,   &d);
        SDL_GL_GetAttribute(SDL_GL_DOUBLEBUFFER, &o);
        RTPrintf("  OpenGL ctxt red=%d, green=%d, blue=%d, depth=%d, dbl=%d", r, g, b, d, o);

        glEnable(GL_TEXTURE_2D);
        glDisable(GL_BLEND);
        glDisable(GL_DEPTH_TEST);
        glDepthMask(GL_FALSE);
        glGenTextures(1, &gTexture);
        glBindTexture(GL_TEXTURE_2D, gTexture);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);

        for (guTextureWidth  = 32; guTextureWidth  < newWidth;  guTextureWidth  <<= 1)
            ;
        for (guTextureHeight = 32; guTextureHeight < newHeight; guTextureHeight <<= 1)
            ;
        RTPrintf(", tex %ldx%ld\n", guTextureWidth, guTextureHeight);

        switch (guGuestBpp)
        {
            case 16: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB5, guTextureWidth, guTextureHeight, 0,
                                  GL_RGB,  GL_UNSIGNED_SHORT_5_6_5, 0);
                     break;
            case 24: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,  guTextureWidth, guTextureHeight, 0,
                                  GL_BGR,  GL_UNSIGNED_BYTE, 0);
                     break;
            case 32: glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, guTextureWidth, guTextureHeight, 0,
                                  GL_BGRA, GL_UNSIGNED_BYTE, 0);
                     break;
            default: RTPrintf("guGuestBpp=%d?\n", guGuestBpp);
                     return;
        }

        glViewport(0, 0, newWidth, newHeight);
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity();
        glOrtho(0.0, newWidth, newHeight, 0.0, -1.0, 1.0);
    }
#endif

    checkEvents();
    benchExecute();

#ifdef VBOX_OPENGL
    if (gfOpenGL)
    {
        glDeleteTextures(1, &gTexture);
    }
#endif
}
void VBoxGuestRAMSlider::init()
{
    ulong fullSize = vboxGlobal().host().GetMemorySize();
    CSystemProperties sys = vboxGlobal().virtualBox().GetSystemProperties();
    mMinRAM = sys.GetMinGuestRAM();
    mMaxRAM = RT_MIN (RT_ALIGN (fullSize, _1G / _1M), sys.GetMaxGuestRAM());

    /* Come up with some nice round percent boundaries relative to
     * the system memory. A max of 75% on a 256GB config is ridiculous,
     * even on an 8GB rig reserving 2GB for the OS is way too conservative.
     * The max numbers can be estimated using the following program:
     *
     *      double calcMaxPct(uint64_t cbRam)
     *      {
     *          double cbRamOverhead = cbRam * 0.0390625; // 160 bytes per page.
     *          double cbRamForTheOS = RT_MAX(RT_MIN(_512M, cbRam * 0.25), _64M);
     *          double OSPct  = (cbRamOverhead + cbRamForTheOS) * 100.0 / cbRam;
     *          double MaxPct = 100 - OSPct;
     *          return MaxPct;
     *      }
     *
     *      int main()
     *      {
     *          uint64_t cbRam = _1G;
     *          for (; !(cbRam >> 33); cbRam += _1G)
     *              printf("%8lluGB %.1f%% %8lluKB\n", cbRam >> 30, calcMaxPct(cbRam),
     *                     (uint64_t)(cbRam * calcMaxPct(cbRam) / 100.0) >> 20);
     *          for (; !(cbRam >> 51); cbRam <<= 1)
     *              printf("%8lluGB %.1f%% %8lluKB\n", cbRam >> 30, calcMaxPct(cbRam),
     *                     (uint64_t)(cbRam * calcMaxPct(cbRam) / 100.0) >> 20);
     *          return 0;
     *      }
     *
     * Note. We might wanna put these calculations somewhere global later. */

    /* System RAM amount test */
    mMaxRAMAlw  = (uint)(0.75 * fullSize);
    mMaxRAMOpt  = (uint)(0.50 * fullSize);
    if (fullSize < 3072)
        /* done */;
    else if (fullSize < 4096)   /* 3GB */
        mMaxRAMAlw = (uint)(0.80 * fullSize);
    else if (fullSize < 6144)   /* 4-5GB */
    {
        mMaxRAMAlw = (uint)(0.84 * fullSize);
        mMaxRAMOpt = (uint)(0.60 * fullSize);
    }
    else if (fullSize < 8192)   /* 6-7GB */
    {
        mMaxRAMAlw = (uint)(0.88 * fullSize);
        mMaxRAMOpt = (uint)(0.65 * fullSize);
    }
    else if (fullSize < 16384)  /* 8-15GB */
    {
        mMaxRAMAlw = (uint)(0.90 * fullSize);
        mMaxRAMOpt = (uint)(0.70 * fullSize);
    }
    else if (fullSize < 32768)  /* 16-31GB */
    {
        mMaxRAMAlw = (uint)(0.93 * fullSize);
        mMaxRAMOpt = (uint)(0.75 * fullSize);
    }
    else if (fullSize < 65536)  /* 32-63GB */
    {
        mMaxRAMAlw = (uint)(0.94 * fullSize);
        mMaxRAMOpt = (uint)(0.80 * fullSize);
    }
    else if (fullSize < 131072) /* 64-127GB */
    {
        mMaxRAMAlw = (uint)(0.95 * fullSize);
        mMaxRAMOpt = (uint)(0.85 * fullSize);
    }
    else                        /* 128GB- */
    {
        mMaxRAMAlw = (uint)(0.96 * fullSize);
        mMaxRAMOpt = (uint)(0.90 * fullSize);
    }
    /* Now check the calculated maximums are out of the range for the guest
     * RAM. If so change it accordingly. */
    mMaxRAMAlw  = RT_MIN (mMaxRAMAlw, mMaxRAM);
    mMaxRAMOpt  = RT_MIN (mMaxRAMOpt, mMaxRAM);

    setPageStep (calcPageStep (mMaxRAM));
    setSingleStep (pageStep() / 4);
    setTickInterval (pageStep());
    /* Setup the scale so that ticks are at page step boundaries */
    setMinimum ((mMinRAM / pageStep()) * pageStep());
    setMaximum (mMaxRAM);
    setSnappingEnabled (true);
    setOptimalHint (mMinRAM, mMaxRAMOpt);
    setWarningHint (mMaxRAMOpt, mMaxRAMAlw);
    setErrorHint (mMaxRAMAlw, mMaxRAM);
}
Example no. 14
/**
 * Initializes a SG list from an mbuf.
 *
 * @param   pThis               The instance.
 * @param   pMBuf               The mbuf.
 * @param   pSG                 The SG.
 * @param   pvFrame             The frame pointer, optional.
 * @param   cSegs               The number of segments allocated for the SG.
 *                              This should match the number in the mbuf exactly!
 * @param   fSrc                The source of the frame.
 */
DECLINLINE(void) vboxNetFltDarwinMBufToSG(PVBOXNETFLTINS pThis, mbuf_t pMBuf, void *pvFrame, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
{
    NOREF(pThis);

    /*
     * Walk the chain and convert the buffers to segments, keeping INTNETSG::cbTotal updated.
     */
    unsigned iSeg = 0;
    IntNetSgInitTempSegs(pSG, 0 /*cbTotal*/, cSegs, 0 /*cSegsUsed*/);
    for (mbuf_t pCur = pMBuf; pCur; pCur = mbuf_next(pCur))
    {
        size_t cbSeg = mbuf_len(pCur);
        if (cbSeg)
        {
            void *pvSeg = mbuf_data(pCur);

            /* deal with pvFrame */
            if (!iSeg && pvFrame && pvFrame != pvSeg)
            {
                void     *pvStart   = mbuf_datastart(pMBuf);
                uintptr_t offSeg    = (uintptr_t)pvSeg - (uintptr_t)pvStart;
                uintptr_t offSegEnd = offSeg + cbSeg;
                Assert(pvStart && pvSeg && offSeg < mbuf_maxlen(pMBuf) && offSegEnd <= mbuf_maxlen(pMBuf)); NOREF(offSegEnd);
                uintptr_t offFrame  = (uintptr_t)pvFrame - (uintptr_t)pvStart;
                if (RT_LIKELY(offFrame < offSeg))
                {
                    pvSeg = pvFrame;
                    cbSeg += offSeg - offFrame;
                }
                else
                    AssertMsgFailed(("pvFrame=%p pvStart=%p pvSeg=%p offSeg=%p cbSeg=%#zx offSegEnd=%p offFrame=%p maxlen=%#zx\n",
                                     pvFrame, pvStart, pvSeg, offSeg, cbSeg, offSegEnd, offFrame, mbuf_maxlen(pMBuf)));
                pvFrame = NULL;
            }

            AssertBreak(iSeg < cSegs);
            pSG->cbTotal += cbSeg;
            pSG->aSegs[iSeg].cb = cbSeg;
            pSG->aSegs[iSeg].pv = pvSeg;
            pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
            iSeg++;
        }
        /* The pvFrame might be in a now empty buffer. */
        else if (   !iSeg
                 && pvFrame
                 && (uintptr_t)pvFrame - (uintptr_t)mbuf_datastart(pMBuf) < mbuf_maxlen(pMBuf))
        {
            cbSeg = (uintptr_t)mbuf_datastart(pMBuf) + mbuf_maxlen(pMBuf) - (uintptr_t)pvFrame;
            pSG->cbTotal += cbSeg;
            pSG->aSegs[iSeg].cb = cbSeg;
            pSG->aSegs[iSeg].pv = pvFrame;
            pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
            iSeg++;
            pvFrame = NULL;
        }
    }

    Assert(iSeg && iSeg <= cSegs);
    pSG->cSegsUsed = iSeg;

#ifdef PADD_RUNT_FRAMES_FROM_HOST
    /*
     * Add a trailer if the frame is too small.
     *
     * Since we're getting to the packet before it is framed, it has not
     * yet been padded. The current solution is to add a segment pointing
     * to a buffer containing all zeros and pray that works for all frames...
     */
    if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
    {
        AssertReturnVoid(iSeg < cSegs);

        static uint8_t const s_abZero[128] = {0};
        pSG->aSegs[iSeg].Phys = NIL_RTHCPHYS;
        pSG->aSegs[iSeg].pv = (void *)&s_abZero[0];
        pSG->aSegs[iSeg].cb = 60 - pSG->cbTotal;
        pSG->cbTotal = 60;
        pSG->cSegsUsed++;
    }
#endif

#ifdef VBOXNETFLT_DARWIN_TEST_SEG_SIZE
    /*
     * Redistribute the segments.
     */
    if (pSG->cSegsUsed < pSG->cSegsAlloc)
    {
        /* copy the segments to the end. */
        int iSrc = pSG->cSegsUsed;
        int iDst = pSG->cSegsAlloc;
        while (iSrc > 0)
        {
            iDst--;
            iSrc--;
            pSG->aSegs[iDst] = pSG->aSegs[iSrc];
        }

        /* create small segments from the start. */
        pSG->cSegsUsed = pSG->cSegsAlloc;
        iSrc = iDst;
        iDst = 0;
        while (     iDst < iSrc
               &&   iDst < pSG->cSegsAlloc)
        {
            pSG->aSegs[iDst].Phys = NIL_RTHCPHYS;
            pSG->aSegs[iDst].pv = pSG->aSegs[iSrc].pv;
            pSG->aSegs[iDst].cb = RT_MIN(pSG->aSegs[iSrc].cb, VBOXNETFLT_DARWIN_TEST_SEG_SIZE);
            if (pSG->aSegs[iDst].cb != pSG->aSegs[iSrc].cb)
            {
                pSG->aSegs[iSrc].cb -= pSG->aSegs[iDst].cb;
                pSG->aSegs[iSrc].pv = (uint8_t *)pSG->aSegs[iSrc].pv + pSG->aSegs[iDst].cb;
            }
            else if (++iSrc >= pSG->cSegsAlloc)
            {
                pSG->cSegsUsed = iDst + 1;
                break;
            }
            iDst++;
        }
    }
#endif

    AssertMsg(!pvFrame, ("pvFrame=%p pMBuf=%p iSeg=%d\n", pvFrame, pMBuf, iSeg));
}
Example no. 15
static int tstPDMACStressTestFileWrite(PPDMACTESTFILE pTestFile, PPDMACTESTFILETASK pTestTask)
{
    int rc = VINF_SUCCESS;

    Assert(!pTestTask->fActive);

    pTestTask->fActive       = true;
    pTestTask->fWrite        = true;
    pTestTask->DataSeg.cbSeg = RTRandU32Ex(512, TASK_TRANSFER_SIZE_MAX) & ~511;

    uint64_t offMax;

    /* Did we reach the maximum file size? */
    if (pTestFile->cbFileCurr < pTestFile->cbFileMax)
    {
        offMax =   (pTestFile->cbFileMax - pTestFile->cbFileCurr) < pTestTask->DataSeg.cbSeg
                 ? pTestFile->cbFileMax - pTestTask->DataSeg.cbSeg
                 : pTestFile->cbFileCurr;
    }
    else
        offMax = pTestFile->cbFileMax - pTestTask->DataSeg.cbSeg;

    uint64_t offMin;

    /*
     * If we have reached the maximum file size, write anywhere in the file;
     * otherwise restrict the range of random offsets so that the file grows
     * more quickly.
     */
    if (pTestFile->cbFileCurr == pTestFile->cbFileMax)
        offMin = 0;
    else
        offMin = RT_MIN(pTestFile->cbFileCurr, offMax);


    pTestTask->off = RTRandU64Ex(offMin, offMax) & ~511;

    /* Set the new file size if required. */
    if ((uint64_t)pTestTask->off + pTestTask->DataSeg.cbSeg > pTestFile->cbFileCurr)
        pTestFile->cbFileCurr = pTestTask->off + pTestTask->DataSeg.cbSeg;

    AssertMsg(pTestFile->cbFileCurr <= pTestFile->cbFileMax,
              ("Current file size (%llu) exceeds final size (%llu)\n",
              pTestFile->cbFileCurr, pTestFile->cbFileMax));

    /* Allocate data buffer. */
    pTestTask->DataSeg.pvSeg = RTMemAlloc(pTestTask->DataSeg.cbSeg);
    if (!pTestTask->DataSeg.pvSeg)
        return VERR_NO_MEMORY;

    /* Fill data into buffer. */
    tstPDMACStressTestFileFillBuffer(pTestFile, pTestTask);

    /* Engage */
    rc = PDMR3AsyncCompletionEpWrite(pTestFile->hEndpoint, pTestTask->off,
                                     &pTestTask->DataSeg, 1,
                                     pTestTask->DataSeg.cbSeg,
                                     pTestTask,
                                     &pTestTask->hTask);

    return rc;
}
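The task above aligns both the transfer size and the random offset to 512-byte sectors with the & ~511 masks. A tiny sketch of that masking, spelled out (the helper name is invented):

#include <stdint.h>

/* Round an offset or byte count down to a 512-byte sector boundary; this is what
 * the & ~511 masks above do. */
static inline uint64_t alignDownToSector(uint64_t off)
{
    return off & ~(uint64_t)511;
}
/* Examples: alignDownToSector(1234) == 1024, alignDownToSector(512) == 512. */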
/**
 * The old halt loop.
 */
static DECLCALLBACK(int) vmR3HaltOldDoHalt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t /* u64Now*/)
{
    /*
     * Halt loop.
     */
    PVM    pVM   = pUVCpu->pVM;
    PVMCPU pVCpu = pUVCpu->pVCpu;

    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    //unsigned cLoops = 0;
    for (;;)
    {
        /*
         * Work the timers and check if we can exit.
         * The poll call gives us the ticks left to the next event in
         * addition to perhaps set an FF.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (    VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, fMask))
            break;
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (    VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, fMask))
            break;

        /*
         * Wait for a while. Someone will wake us up or interrupt the call if
         * anything needs our attention.
         */
        if (u64NanoTS < 50000)
        {
            //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d spin\n", u64NanoTS, cLoops++);
            /* spin */;
        }
        else
        {
            VMMR3YieldStop(pVM);
            //uint64_t u64Start = RTTimeNanoTS();
            if (u64NanoTS <  870000) /* this is a bit speculative... works fine on linux. */
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d yield", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedYield   = RTTimeNanoTS();
                RTThreadYield(); /* this is the best we can do here */
                uint64_t const cNsElapsedSchedYield = RTTimeNanoTS() - u64StartSchedYield;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltYield, cNsElapsedSchedYield);
            }
            else if (u64NanoTS < 2000000)
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep 1ms", u64NanoTS, cLoops++);
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, 1);
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            else
            {
                //RTLogPrintf("u64NanoTS=%RI64 cLoops=%d sleep %dms", u64NanoTS, cLoops++, (uint32_t)RT_MIN((u64NanoTS - 500000) / 1000000, 15));
                uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
                rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, RT_MIN((u64NanoTS - 1000000) / 1000000, 15));
                uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
                STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);
            }
            //uint64_t u64Slept = RTTimeNanoTS() - u64Start;
            //RTLogPrintf(" -> rc=%Rrc in %RU64 ns / %RI64 ns delta\n", rc, u64Slept, u64NanoTS - u64Slept);
        }
        if (rc == VERR_TIMEOUT)
            rc = VINF_SUCCESS;
        else if (RT_FAILURE(rc))
        {
            rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
            break;
        }
    }

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
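The comments above describe a tiered wait strategy keyed on the estimated time to the next timer event: busy-spin below roughly 50 microseconds, yield below roughly 870 microseconds, a 1 ms semaphore wait below 2 ms, and otherwise a wait capped at 15 ms. A compact sketch of just that decision ladder, with the enum and helper name invented for illustration and the thresholds copied from the code above:

#include <stdint.h>

typedef enum HALTACTION
{
    HALTACTION_SPIN,        /* event is imminent: just spin */
    HALTACTION_YIELD,       /* give up the timeslice once */
    HALTACTION_SLEEP_1MS,   /* short semaphore wait */
    HALTACTION_SLEEP_CAPPED /* RT_MIN((cNsLeft - 1000000) / 1000000, 15) ms wait */
} HALTACTION;

/* Threshold ladder mirroring vmR3HaltOldDoHalt above. */
static HALTACTION haltPickAction(uint64_t cNsLeft)
{
    if (cNsLeft <   50000)  return HALTACTION_SPIN;
    if (cNsLeft <  870000)  return HALTACTION_YIELD;
    if (cNsLeft < 2000000)  return HALTACTION_SLEEP_1MS;
    return HALTACTION_SLEEP_CAPPED;
}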
Esempio n. 17
0
/**
 * Sets up a test file and creates the I/O thread.
 *
 * @returns VBox status code.
 * @param   pVM          Pointer to the shared VM instance structure.
 * @param   pTestFile    Pointer to the uninitialized test file structure.
 * @param   iTestId      Unique test id.
 */
static int tstPDMACStressTestFileOpen(PVM pVM, PPDMACTESTFILE pTestFile, unsigned iTestId)
{
    int rc = VERR_NO_MEMORY;

    /* Size is a multiple of 512 */
    pTestFile->cbFileMax     = RTRandU64Ex(FILE_SIZE_MIN, FILE_SIZE_MAX) & ~(511UL);
    pTestFile->cbFileCurr    = 0;
    pTestFile->cbFileSegment = RTRandU32Ex(SEGMENT_SIZE_MIN, RT_MIN(pTestFile->cbFileMax, SEGMENT_SIZE_MAX)) & ~((size_t)511);

    Assert(pTestFile->cbFileMax >= pTestFile->cbFileSegment);

    /* Set up the segments array. */
    pTestFile->cSegments  = pTestFile->cbFileMax / pTestFile->cbFileSegment;
    pTestFile->cSegments += ((pTestFile->cbFileMax % pTestFile->cbFileSegment) > 0) ? 1 : 0;

    pTestFile->paSegs = (PPDMACTESTFILESEG)RTMemAllocZ(pTestFile->cSegments * sizeof(PDMACTESTFILESEG));
    if (pTestFile->paSegs)
    {
        /* Init the segments */
        for (unsigned i = 0; i < pTestFile->cSegments; i++)
        {
            PPDMACTESTFILESEG pSeg = &pTestFile->paSegs[i];

            pSeg->off       = (RTFOFF)i * pTestFile->cbFileSegment;
            pSeg->cbSegment = pTestFile->cbFileSegment;

            /* Let the buffer point to a random position in the test pattern. */
            uint32_t offTestPattern = RTRandU64Ex(0, g_cbTestPattern - pSeg->cbSegment);

            pSeg->pbData = g_pbTestPattern + offTestPattern;
        }

        /* Init task array. */
        pTestFile->cTasksActiveMax = RTRandU32Ex(1, TASK_ACTIVE_MAX);
        pTestFile->paTasks         = (PPDMACTESTFILETASK)RTMemAllocZ(pTestFile->cTasksActiveMax * sizeof(PDMACTESTFILETASK));
        if (pTestFile->paTasks)
        {
            /* Create the template */
            char szDesc[256];

            RTStrPrintf(szDesc, sizeof(szDesc), "Template-%d", iTestId);
            rc = PDMR3AsyncCompletionTemplateCreateInternal(pVM, &pTestFile->pTemplate, tstPDMACStressTestFileTaskCompleted, pTestFile, szDesc);
            if (RT_SUCCESS(rc))
            {
                /* Create the file first: async completion endpoints cannot create
                 * files themselves, so this must be done before opening the endpoint. */
                char szFile[RTPATH_MAX];

                RTStrPrintf(szFile, sizeof(szFile), "tstPDMAsyncCompletionStress-%d.tmp", iTestId);

                RTFILE FileTmp;
                rc = RTFileOpen(&FileTmp, szFile, RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_NONE);
                if (RT_SUCCESS(rc))
                {
                    RTFileClose(FileTmp);

                    rc = PDMR3AsyncCompletionEpCreateForFile(&pTestFile->hEndpoint, szFile, 0, pTestFile->pTemplate);
                    if (RT_SUCCESS(rc))
                    {
                        char szThreadDesc[256];

                        pTestFile->fRunning = true;

                        /* Create the thread creating the I/O for the given file. */
                        RTStrPrintf(szThreadDesc, sizeof(szThreadDesc), "PDMACThread-%d", iTestId);
                        rc = PDMR3ThreadCreate(pVM, &pTestFile->hThread, pTestFile, tstPDMACTestFileThread,
                                               NULL, 0, RTTHREADTYPE_IO, szThreadDesc);
                        if (RT_SUCCESS(rc))
                        {
                            rc = PDMR3ThreadResume(pTestFile->hThread);
                            AssertRC(rc);

                            RTPrintf(TESTCASE ": Created test file %s cbFileMax=%llu cbFileSegment=%u cSegments=%u cTasksActiveMax=%u\n",
                                     szFile, pTestFile->cbFileMax, pTestFile->cbFileSegment, pTestFile->cSegments, pTestFile->cTasksActiveMax);
                            return VINF_SUCCESS;
                        }

                        PDMR3AsyncCompletionEpClose(pTestFile->hEndpoint);
                    }

                    RTFileDelete(szFile);
                }

                PDMR3AsyncCompletionTemplateDestroy(pTestFile->pTemplate);
            }

            RTMemFree(pTestFile->paTasks);
        }
        else
            rc = VERR_NO_MEMORY;

        RTMemFree(pTestFile->paSegs);
    }
    else
        rc = VERR_NO_MEMORY;

    RTPrintf(TESTCASE ": Opening test file with id %d failed rc=%Rrc\n", iTestId, rc);

    return rc;
}
/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
 * the lag has been eliminated.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM    = pUVCpu->pUVM;
    PVMCPU  pVCpu   = pUVCpu->pVCpu;
    PVM     pVM     = pUVCpu->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (    VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (    VM_FF_IS_PENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
            uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);

            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVCpu->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
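The statistics block above keeps a running average of how long the thread overslept: the average is recomputed every 16th block, and every 64th block the history is rescaled so old samples gradually lose weight. The same bookkeeping pulled out into a standalone helper (struct and function names are invented; the constants match the code above):

#include <stdint.h>

typedef struct OVERSLEEPSTATS
{
    uint64_t cNsTooLong;    /* accumulated oversleep in nanoseconds */
    uint64_t cNsTooLongAvg; /* cached average */
    uint32_t cBlocks;       /* number of recorded blocks */
} OVERSLEEPSTATS;

static void overSleepRecord(OVERSLEEPSTATS *pStats, uint64_t cNsOverslept)
{
    pStats->cNsTooLong += cNsOverslept;
    pStats->cBlocks++;
    if (!(pStats->cBlocks & 0xf))               /* every 16th block: refresh the average */
    {
        pStats->cNsTooLongAvg = pStats->cNsTooLong / pStats->cBlocks;
        if (!(pStats->cBlocks & 0x3f))          /* every 64th block: shrink the history */
        {
            pStats->cNsTooLong = pStats->cNsTooLongAvg * 0x40;
            pStats->cBlocks    = 0x40;
        }
    }
}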
Esempio n. 19
0
/**
 * Internal helper.
 */
static void pcapUpdateHeader(struct pcaprec_hdr *pHdr, size_t cbFrame, size_t cbMax)
{
    pHdr->incl_len = (uint32_t)RT_MIN(cbFrame, cbMax);
    pHdr->orig_len = (uint32_t)cbFrame;
}
Esempio n. 20
0
/**
 * Grows the cache.
 *
 * @returns IPRT status code.
 * @param   pThis               The memory cache instance.
 */
static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
{
    /*
     * Enter the critical section here to avoid allocation races leading to
     * wasted memory (++) and make it easier to link in the new page.
     */
    RTCritSectEnter(&pThis->CritSect);
    int rc = VINF_SUCCESS;
    if (pThis->cFree < 0)
    {
        /*
         * Allocate and initialize the new page.
         *
         * We put the constructor bitmap at the lower end right after cFree.
         * We then push the object array to the end of the page and place the
         * allocation bitmap below it.  The hope is to increase the chance that
         * the allocation bitmap is in a different cache line than cFree since
         * this increases performance markedly when lots of threads are beating
         * on the cache.
         */
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAlloc(PAGE_SIZE);
        if (pPage)
        {
            uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);

            ASMMemZeroPage(pPage);
            pPage->pCache       = pThis;
            pPage->pNext        = NULL;
            pPage->cFree        = cObjects;
            pPage->cObjects     = cObjects;
            uint8_t *pb = (uint8_t *)(pPage + 1);
            pb = RT_ALIGN_PT(pb, 8, uint8_t *);
            pPage->pbmCtor      = pb;
            pb = (uint8_t *)pPage + PAGE_SIZE - pThis->cbObject * cObjects;
            pPage->pbObjects    = pb;   Assert(RT_ALIGN_P(pb, pThis->cbAlignment) == pb);
            pb -= pThis->cBits / 8;
            pb = (uint8_t *)((uintptr_t)pb & ~(uintptr_t)7);
            pPage->pbmAlloc     = pb;
            Assert((uintptr_t)pPage->pbmCtor + pThis->cBits / 8 <= (uintptr_t)pPage->pbmAlloc);

            /* Mark the bitmap padding and any unused objects as allocated. */
            for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
                ASMBitSet(pPage->pbmAlloc, iBit);

            /* Make it the hint. */
            ASMAtomicWritePtr(&pThis->pPageHint, pPage);

            /* Link the page. */
            PRTMEMCACHEPAGE pPrevPage = pThis->pPageHead;
            if (!pPrevPage)
                ASMAtomicWritePtr(&pThis->pPageHead, pPage);
            else
            {
                while (pPrevPage->pNext)
                    pPrevPage = pPrevPage->pNext;
                ASMAtomicWritePtr(&pPrevPage->pNext, pPage);
            }

            /* Add it to the page counts. */
            ASMAtomicAddS32(&pThis->cFree, cObjects);
            ASMAtomicAddU32(&pThis->cTotal, cObjects);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    RTCritSectLeave(&pThis->CritSect);
    return rc;
}
Esempio n. 21
0
/**
 * Verifies a read request.
 *
 * @returns VBox status code.
 * @param   pThis    Disk integrity driver instance data.
 * @param   paSeg    Segment array containing the data buffers to verify.
 * @param   cSeg     Number of segments.
 * @param   off      Start offset.
 * @param   cbRead   Number of bytes to verify.
 */
static int drvdiskintReadVerify(PDRVDISKINTEGRITY pThis, PCRTSGSEG paSeg, unsigned cSeg,
                                uint64_t off, size_t cbRead)
{
    int rc = VINF_SUCCESS;

    LogFlowFunc(("pThis=%#p paSeg=%#p cSeg=%u off=%llx cbRead=%u\n",
                 pThis, paSeg, cSeg, off, cbRead));

    Assert(off % 512 == 0);
    Assert(cbRead % 512 == 0);

    /* Compare read data */
    size_t cbLeft   = cbRead;
    RTFOFF offCurr  = (RTFOFF)off;
    RTSGBUF SgBuf;

    RTSgBufInit(&SgBuf, paSeg, cSeg);

    while (cbLeft)
    {
        PDRVDISKSEGMENT pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetRangeGet(pThis->pTreeSegments, offCurr);
        size_t cbRange  = 0;
        bool fCmp       = false;
        unsigned offSeg = 0;

        if (!pSeg)
        {
            /* Get next segment */
            pSeg = (PDRVDISKSEGMENT)RTAvlrFileOffsetGetBestFit(pThis->pTreeSegments, offCurr, true);
            if (!pSeg)
            {
                /* No data in the tree for this read. Assume everything is ok. */
                cbRange = cbLeft;
            }
            else if (offCurr + (RTFOFF)cbLeft <= pSeg->Core.Key)
                cbRange = cbLeft;
            else
                cbRange = pSeg->Core.Key - offCurr;
        }
        else
        {
            fCmp    = true;
            offSeg  = offCurr - pSeg->Core.Key;
            cbRange = RT_MIN(cbLeft, (size_t)(pSeg->Core.KeyLast + 1 - offCurr));
        }

        if (fCmp)
        {
            RTSGSEG Seg;
            RTSGBUF SgBufCmp;
            size_t cbOff = 0;

            Seg.cbSeg = cbRange;
            Seg.pvSeg = pSeg->pbSeg + offSeg;

            RTSgBufInit(&SgBufCmp, &Seg, 1);
            if (RTSgBufCmpEx(&SgBuf, &SgBufCmp, cbRange, &cbOff, true))
            {
                /* Corrupted disk, print I/O log entry of the last write which accessed this range. */
                uint32_t cSector = (offSeg + cbOff) / 512;
                AssertMsg(cSector < pSeg->cIoLogEntries, ("Internal bug!\n"));

                RTMsgError("Corrupted disk at offset %llu (%u bytes in the current read buffer)!\n",
                           offCurr + cbOff, cbOff);
                RTMsgError("Last write to this sector started at offset %llu with %u bytes (%u references to this log entry)\n",
                           pSeg->apIoLog[cSector]->off,
                           pSeg->apIoLog[cSector]->cbWrite,
                           pSeg->apIoLog[cSector]->cRefs);
                RTAssertDebugBreak();
            }
        }
        else
            RTSgBufAdvance(&SgBuf, cbRange);

        offCurr += cbRange;
        cbLeft  -= cbRange;
    }

    return rc;
}
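The heart of the loop above is working out how much of the remaining request overlaps the segment found in the AVL tree (offSeg and cbRange). A minimal sketch of that overlap arithmetic, using an inclusive [offSegStart, offSegLast] range like the tree nodes above (the helper name is invented):

#include <stdint.h>

/* Return how many of the cbLeft bytes starting at offCurr fall inside the inclusive
 * range [offSegStart, offSegLast]; 0 if offCurr lies outside it. On a hit, *poffInSeg
 * receives the offset into the segment, matching offSeg/cbRange above. */
static uint64_t rangeOverlap(uint64_t offCurr, uint64_t cbLeft,
                             uint64_t offSegStart, uint64_t offSegLast,
                             uint64_t *poffInSeg)
{
    if (offCurr < offSegStart || offCurr > offSegLast)
        return 0;
    *poffInSeg = offCurr - offSegStart;
    uint64_t cbInSeg = offSegLast + 1 - offCurr;
    return cbLeft < cbInSeg ? cbLeft : cbInSeg;
}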
Esempio n. 22
0
/**
 * Do the bi-directional transfer test.
 */
static void tstBidirectionalTransfer(PTSTSTATE pThis, uint32_t cbFrame)
{
    MYARGS Args0;
    RT_ZERO(Args0);
    Args0.hIf         = pThis->hIf0;
    Args0.pBuf        = pThis->pBuf0;
    Args0.Mac.au16[0] = 0x8086;
    Args0.Mac.au16[1] = 0;
    Args0.Mac.au16[2] = 0;
    Args0.cbFrame     = cbFrame;

    MYARGS Args1;
    RT_ZERO(Args1);
    Args1.hIf         = pThis->hIf1;
    Args1.pBuf        = pThis->pBuf1;
    Args1.Mac.au16[0] = 0x8086;
    Args1.Mac.au16[1] = 0;
    Args1.Mac.au16[2] = 1;
    Args1.cbFrame     = cbFrame;

    RTTHREAD ThreadRecv0 = NIL_RTTHREAD;
    RTTHREAD ThreadRecv1 = NIL_RTTHREAD;
    RTTHREAD ThreadSend0 = NIL_RTTHREAD;
    RTTHREAD ThreadSend1 = NIL_RTTHREAD;
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadRecv0, ReceiveThread, &Args0, 0, RTTHREADTYPE_IO,        RTTHREADFLAGS_WAITABLE, "RECV0"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadRecv1, ReceiveThread, &Args1, 0, RTTHREADTYPE_IO,        RTTHREADFLAGS_WAITABLE, "RECV1"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadSend0, SendThread,    &Args0, 0, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE, "SEND0"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadSend1, SendThread,    &Args1, 0, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE, "SEND1"));

    int rc2 = VINF_SUCCESS;
    int rc;
    RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadSend0, 5*60*1000, &rc2));
    if (RT_SUCCESS(rc))
    {
        RTTESTI_CHECK_RC_OK(rc2);
        ThreadSend0 = NIL_RTTHREAD;
        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadSend1, 5*60*1000, RT_SUCCESS(rc2) ? &rc2 : NULL));
        if (RT_SUCCESS(rc))
        {
            ThreadSend1 = NIL_RTTHREAD;
            RTTESTI_CHECK_RC_OK(rc2);
        }
    }
    if (RTTestErrorCount(g_hTest) == 0)
    {
        /*
         * Wait a bit for the receivers to finish up.
         */
        unsigned cYields = 100000;
        while (     (  IntNetRingHasMoreToRead(&pThis->pBuf0->Recv)
                    || IntNetRingHasMoreToRead(&pThis->pBuf1->Recv))
               &&   cYields-- > 0)
            RTThreadYield();

        uint64_t u64Elapsed = RT_MAX(Args0.u64End, Args1.u64End) - RT_MIN(Args0.u64Start, Args1.u64Start);
        uint64_t u64Speed = (uint64_t)((2 * g_cbTransfer / 1024) / (u64Elapsed / 1000000000.0));
        RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                     "transferred %u bytes in %'RU64 ns (%'RU64 KB/s)\n",
                     2 * g_cbTransfer, u64Elapsed, u64Speed);

        /*
         * Wait for the threads to finish up...
         */
        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadRecv0, 5000, &rc2));
        if (RT_SUCCESS(rc))
        {
            RTTESTI_CHECK_RC_OK(rc2);
            ThreadRecv0 = NIL_RTTHREAD;
        }

        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadRecv1, 5000, &rc2));
        if (RT_SUCCESS(rc))
        {
            RTTESTI_CHECK_RC_OK(rc2);
            ThreadRecv1 = NIL_RTTHREAD;
        }
    }

    /*
     * Give them a chance to complete...
     */
    RTThreadWait(ThreadRecv0, 5000, NULL);
    RTThreadWait(ThreadRecv1, 5000, NULL);
    RTThreadWait(ThreadSend0, 5000, NULL);
    RTThreadWait(ThreadSend1, 5000, NULL);


    /*
     * Display statistics.
     */
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0: Yields-OK=%llu Yields-NOK=%llu Lost=%llu Bad=%llu\n",
                 pThis->pBuf0->cStatYieldsOk.c,
                 pThis->pBuf0->cStatYieldsNok.c,
                 pThis->pBuf0->cStatLost.c,
                 pThis->pBuf0->cStatBadFrames.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0.Recv: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf0->Recv.cStatFrames,
                 pThis->pBuf0->Recv.cbStatWritten.c,
                 pThis->pBuf0->Recv.cOverflows.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0.Send: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf0->Send.cStatFrames,
                 pThis->pBuf0->Send.cbStatWritten.c,
                 pThis->pBuf0->Send.cOverflows.c);

    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1: Yields-OK=%llu Yields-NOK=%llu Lost=%llu Bad=%llu\n",
                 pThis->pBuf1->cStatYieldsOk.c,
                 pThis->pBuf1->cStatYieldsNok.c,
                 pThis->pBuf1->cStatLost.c,
                 pThis->pBuf1->cStatBadFrames.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1.Recv: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf1->Recv.cStatFrames,
                 pThis->pBuf1->Recv.cbStatWritten.c,
                 pThis->pBuf1->Recv.cOverflows.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1.Send: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf1->Send.cStatFrames,
                 pThis->pBuf1->Send.cbStatWritten.c,
                 pThis->pBuf1->Send.cOverflows.c);

}
/* XXX: Since we have fIPv6 we could improve address verification by comparing the
 * address length with INET[6]_ADDRLEN.
 */
int netPfStrToPf(const char *pcszStrPortForward, int fIPv6, PPORTFORWARDRULE pPfr)
{
    char *pszName;
    int  proto;
    char *pszHostAddr;
    char *pszGuestAddr;
    uint16_t u16HostPort;
    uint16_t u16GuestPort;
    bool fTcpProto = false;

    char *pszRawBegin = NULL;
    char *pszRaw = NULL;
    int idxRaw = 0;
    int cbToken = 0;
    int cbRaw = 0;
    int rc = VINF_SUCCESS;

    AssertPtrReturn(pcszStrPortForward, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pPfr, VERR_INVALID_PARAMETER);

    memset(pPfr, 0, sizeof(PORTFORWARDRULE));

    pszHostAddr = &pPfr->szPfrHostAddr[0];
    pszGuestAddr = &pPfr->szPfrGuestAddr[0];
    pszName = &pPfr->szPfrName[0];

    cbRaw = strlen(pcszStrPortForward);

    /* The minimal rule ":tcp:[]:0:[]:0" has length 14. */
    AssertReturn(cbRaw > 14, VERR_INVALID_PARAMETER);

    pszRaw = RTStrDup(pcszStrPortForward);

    AssertPtrReturn(pszRaw, VERR_NO_MEMORY);

    pszRawBegin = pszRaw;

    /* name */
    if (pszRaw[idxRaw] == PF_FIELD_SEPARATOR)
        idxRaw = 1; /* begin of the next segment */
    else
    {
        char *pszEndOfName = RTStrStr(pszRaw + 1, PF_STR_FIELD_SEPARATOR);
        if (!pszEndOfName)
            goto invalid_parameter;

        cbToken = (pszEndOfName) - pszRaw; /* don't take : into account */
        /* XXX it's unacceptable to have only the name entry in a PF rule. */
        AssertReturn(cbToken < cbRaw, VERR_INVALID_PARAMETER);

        if (   cbToken < 0
            || (size_t)cbToken >= PF_NAMELEN)
            goto invalid_parameter;

        RTStrCopy(pszName,
                  RT_MIN((size_t)cbToken + 1, PF_NAMELEN),
                  pszRaw);
        pszRaw += cbToken; /* move to separator */
    }

    AssertReturn(pszRaw[0] == PF_FIELD_SEPARATOR, VERR_INVALID_PARAMETER);
    /* protocol */

    pszRaw++; /* skip separator */
    idxRaw = 0;

    cbRaw--;

    if (  (  (fTcpProto = (RTStrNICmp(pszRaw, "tcp", 3) == 0))
           ||              RTStrNICmp(pszRaw, "udp", 3) == 0)
        && pszRaw[3] == PF_FIELD_SEPARATOR)
    {
        proto = (fTcpProto ? IPPROTO_TCP : IPPROTO_UDP);
        idxRaw = 3;
    }
    else
        goto invalid_parameter;

    pszRaw += idxRaw;
    cbRaw -= idxRaw;
    idxRaw = 0;

    idxRaw = netPfStrAddressPortPairParse(pszRaw, cbRaw,
                                         pszHostAddr, INET6_ADDRSTRLEN,
                                         true, &u16HostPort);
    if (idxRaw < 0)
        return VERR_INVALID_PARAMETER;

    pszRaw += idxRaw;
    cbRaw -= idxRaw;

    Assert(pszRaw[0] == PF_FIELD_SEPARATOR);

    idxRaw = 0;

    idxRaw = netPfStrAddressPortPairParse(pszRaw, cbRaw,
                                          pszGuestAddr,
                                          INET6_ADDRSTRLEN,
                                          false,
                                          &u16GuestPort);

    if (idxRaw < 0)
        goto invalid_parameter;

    /* XXX: fill the rule */
    pPfr->fPfrIPv6 = fIPv6;
    pPfr->iPfrProto = proto;

    pPfr->u16PfrHostPort = u16HostPort;

    if (*pszGuestAddr == '\0')
        goto invalid_parameter; /* guest address should be defined */

    pPfr->u16PfrGuestPort = u16GuestPort;

    Log(("name: %s\n"
         "proto: %d\n"
         "host address: %s\n"
         "host port: %d\n"
         "guest address: %s\n"
         "guest port:%d\n",
         pszName, proto,
         pszHostAddr, u16HostPort,
         pszGuestAddr, u16GuestPort));

    RTStrFree(pszRawBegin);
    return VINF_SUCCESS;

invalid_parameter:
    RTStrFree(pszRawBegin);
    if (pPfr)
        memset(pPfr, 0, sizeof(PORTFORWARDRULE));
    return VERR_INVALID_PARAMETER;
}
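Going by the parser above, a rule string has the shape name:proto:[host address]:host port:[guest address]:guest port, where the name and host address may be empty but the guest address must not be. A hypothetical usage sketch (the example rule string and wrapper function are invented; the struct fields come from the code above):

static int pfExampleParse(void)
{
    PORTFORWARDRULE Rule;
    int rc = netPfStrToPf("ssh:tcp:[127.0.0.1]:2222:[10.0.2.15]:22", /* fIPv6 */ 0, &Rule);
    if (RT_SUCCESS(rc))
        Log(("rule '%s': proto %d, host %s:%u -> guest %s:%u\n",
             Rule.szPfrName, Rule.iPfrProto,
             Rule.szPfrHostAddr, Rule.u16PfrHostPort,
             Rule.szPfrGuestAddr, Rule.u16PfrGuestPort));
    return rc;
}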
Esempio n. 24
0
int NetIfList(std::list <ComObjPtr<HostNetworkInterface> > &list)
{
    int rc = VINF_SUCCESS;
    size_t cbNeeded;
    char *pBuf, *pNext;
    int aiMib[6];
    unsigned short u16DefaultIface = 0; /* initialized to shut up gcc */

    /* Get the index of the interface associated with default route. */
    rc = getDefaultIfaceIndex(&u16DefaultIface);
    if (RT_FAILURE(rc))
        return rc;

    aiMib[0] = CTL_NET;
    aiMib[1] = PF_ROUTE;
    aiMib[2] = 0;
    aiMib[3] = 0;       /* address family */
    aiMib[4] = NET_RT_IFLIST;
    aiMib[5] = 0;

    if (sysctl(aiMib, 6, NULL, &cbNeeded, NULL, 0) < 0)
    {
        Log(("NetIfList: Failed to get estimate for list size (errno=%d).\n", errno));
        return RTErrConvertFromErrno(errno);
    }
    if ((pBuf = (char*)RTMemAlloc(cbNeeded)) == NULL)
        return VERR_NO_MEMORY;
    if (sysctl(aiMib, 6, pBuf, &cbNeeded, NULL, 0) < 0)
    {
        RTMemFree(pBuf);
        Log(("NetIfList: Failed to retrieve interface table (errno=%d).\n", errno));
        return RTErrConvertFromErrno(errno);
    }

    int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
    if (sock < 0)
    {
        RTMemFree(pBuf);
        Log(("NetIfList: socket() -> %d\n", errno));
        return RTErrConvertFromErrno(errno);
    }

    PDARWINETHERNIC pNIC;
    PDARWINETHERNIC pEtherNICs = DarwinGetEthernetControllers();

    char *pEnd = pBuf + cbNeeded;
    for (pNext = pBuf; pNext < pEnd;)
    {
        struct if_msghdr *pIfMsg = (struct if_msghdr *)pNext;

        if (pIfMsg->ifm_type != RTM_IFINFO)
        {
            Log(("NetIfList: Got message %u while expecting %u.\n",
                 pIfMsg->ifm_type, RTM_IFINFO));
            rc = VERR_INTERNAL_ERROR;
            break;
        }
        struct sockaddr_dl *pSdl = (struct sockaddr_dl *)(pIfMsg + 1);

        size_t cbNameLen = pSdl->sdl_nlen + 1;
        Assert(pSdl->sdl_nlen < sizeof(pNIC->szBSDName));
        for (pNIC = pEtherNICs; pNIC; pNIC = pNIC->pNext)
            if (   !strncmp(pSdl->sdl_data, pNIC->szBSDName, pSdl->sdl_nlen)
                && pNIC->szBSDName[pSdl->sdl_nlen] == '\0')
            {
                cbNameLen = strlen(pNIC->szName) + 1;
                break;
            }
        PNETIFINFO pNew = (PNETIFINFO)RTMemAllocZ(RT_OFFSETOF(NETIFINFO, szName[cbNameLen]));
        if (!pNew)
        {
            rc = VERR_NO_MEMORY;
            break;
        }
        memcpy(pNew->MACAddress.au8, LLADDR(pSdl), sizeof(pNew->MACAddress.au8));
        pNew->enmMediumType = NETIF_T_ETHERNET;
        Assert(sizeof(pNew->szShortName) > pSdl->sdl_nlen);
        memcpy(pNew->szShortName, pSdl->sdl_data, RT_MIN(pSdl->sdl_nlen, sizeof(pNew->szShortName) - 1));

        /*
         * If we found the adapter in the list returned by
         * DarwinGetEthernetControllers() copy the name and UUID from there.
         */
        if (pNIC)
        {
            memcpy(pNew->szName, pNIC->szName, cbNameLen);
            pNew->Uuid = pNIC->Uuid;
        }
        else
        {
            memcpy(pNew->szName, pSdl->sdl_data, pSdl->sdl_nlen);
            /* Generate UUID from name and MAC address. */
            RTUUID uuid;
            RTUuidClear(&uuid);
            memcpy(&uuid, pNew->szShortName, RT_MIN(cbNameLen, sizeof(uuid)));
            uuid.Gen.u8ClockSeqHiAndReserved = (uuid.Gen.u8ClockSeqHiAndReserved & 0x3f) | 0x80;
            uuid.Gen.u16TimeHiAndVersion = (uuid.Gen.u16TimeHiAndVersion & 0x0fff) | 0x4000;
            memcpy(uuid.Gen.au8Node, pNew->MACAddress.au8, sizeof(uuid.Gen.au8Node));
            pNew->Uuid = uuid;
        }

        pNext += pIfMsg->ifm_msglen;
        while (pNext < pEnd)
        {
            struct ifa_msghdr *pIfAddrMsg = (struct ifa_msghdr *)pNext;

            if (pIfAddrMsg->ifam_type != RTM_NEWADDR)
                break;
            extractAddressesToNetInfo(pIfAddrMsg->ifam_addrs,
                                      (char *)(pIfAddrMsg + 1),
                                      pIfAddrMsg->ifam_msglen + (char *)pIfAddrMsg,
                                      pNew);
            pNext += pIfAddrMsg->ifam_msglen;
        }

        if (pSdl->sdl_type == IFT_ETHER)
        {
            struct ifreq IfReq;
            RTStrCopy(IfReq.ifr_name, sizeof(IfReq.ifr_name), pNew->szShortName);
            if (ioctl(sock, SIOCGIFFLAGS, &IfReq) < 0)
            {
                Log(("NetIfList: ioctl(SIOCGIFFLAGS) -> %d\n", errno));
                pNew->enmStatus = NETIF_S_UNKNOWN;
            }
            else
                pNew->enmStatus = (IfReq.ifr_flags & IFF_UP) ? NETIF_S_UP : NETIF_S_DOWN;

            HostNetworkInterfaceType_T enmType;
            if (strncmp(pNew->szName, RT_STR_TUPLE("vboxnet")))
                enmType = HostNetworkInterfaceType_Bridged;
            else
                enmType = HostNetworkInterfaceType_HostOnly;

            ComObjPtr<HostNetworkInterface> IfObj;
            IfObj.createObject();
            if (SUCCEEDED(IfObj->init(Bstr(pNew->szName), enmType, pNew)))
            {
                /* Make sure the default interface gets to the beginning. */
                if (pIfMsg->ifm_index == u16DefaultIface)
                    list.push_front(IfObj);
                else
                    list.push_back(IfObj);
            }
        }
        RTMemFree(pNew);
    }
    for (pNIC = pEtherNICs; pNIC;)
    {
        void *pvFree = pNIC;
        pNIC = pNIC->pNext;
        RTMemFree(pvFree);
    }
    close(sock);
    RTMemFree(pBuf);
    return rc;
}
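When an interface is not known to DarwinGetEthernetControllers(), the code above fabricates a stable UUID from the interface name and MAC address, then stamps the RFC 4122 variant and version-4 bits on it so it looks well-formed. The same trick as a small helper (the function name is invented; RTUUID, RTUuidClear and RT_MIN are the IPRT facilities already used above):

#include <stdint.h>
#include <string.h>

static void netIfMakePseudoUuid(PRTUUID pUuid, const char *pszName, const uint8_t *pbMac)
{
    RTUuidClear(pUuid);
    /* Seed the UUID bytes with the (short) interface name... */
    memcpy(pUuid, pszName, RT_MIN(strlen(pszName) + 1, sizeof(*pUuid)));
    /* ...stamp the variant and version bits so it parses as a version 4 UUID... */
    pUuid->Gen.u8ClockSeqHiAndReserved = (pUuid->Gen.u8ClockSeqHiAndReserved & 0x3f) | 0x80;
    pUuid->Gen.u16TimeHiAndVersion     = (pUuid->Gen.u16TimeHiAndVersion & 0x0fff) | 0x4000;
    /* ...and put the MAC address into the node field. */
    memcpy(pUuid->Gen.au8Node, pbMac, sizeof(pUuid->Gen.au8Node));
}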
Esempio n. 25
0
/**
 * Thread method to wait for XPCOM events and notify the SDL thread.
 *
 * @returns Error code
 * @param   hThreadSelf  Thread handle of this thread (unused).
 * @param   pvUser  User specific parameter, the file descriptor
 *                  of the event queue socket
 */
DECLCALLBACK(int) xpcomEventThread(RTTHREAD hThreadSelf, void *pvUser)
{
    RT_NOREF(hThreadSelf);
    int eqFD = (intptr_t)pvUser;
    unsigned cErrors = 0;
    int rc;

    /* Hold off processing until the main thread needs it. */
    RTSemEventWait(g_EventSemXPCOMQueueThread, 2500);

    do
    {
        fd_set fdset;
        FD_ZERO(&fdset);
        FD_SET(eqFD, &fdset);
        int n = select(eqFD + 1, &fdset, NULL, NULL, NULL);

        /* are there any events to process? */
        if ((n > 0) && !g_fTerminateXPCOMQueueThread)
        {
            /*
             * Wait until all XPCOM events are processed. 1s just for sanity.
             */
            int iWait = 1000;
            /*
             * Don't post an event if there is a pending XPCOM event to prevent an
             * overflow of the SDL event queue.
             */
            if (g_s32XPCOMEventsPending < 1)
            {
                /*
                 * Post the event and wait for it to be processed. If we don't wait,
                 * we'll flood the queue on SMP systems and when the main thread is busy.
                 * In the event of a push error, we'll yield the timeslice and retry.
                 */
                SDL_Event event = {0};
                event.type = SDL_USEREVENT;
                event.user.type = SDL_USER_EVENT_XPCOM_EVENTQUEUE;
                rc = SDL_PushEvent(&event);
                if (!rc)
                {
                    /* success */
                    ASMAtomicIncS32(&g_s32XPCOMEventsPending);
                    cErrors = 0;
                }
                else
                {
                    /* failure */
                    cErrors++;
                    if (!RTThreadYield())
                        RTThreadSleep(2);
                    iWait = (cErrors >= 10) ? RT_MIN(cErrors - 8, 50) : 0;
                }
            }
            else
                Log2(("not enqueueing SDL XPCOM event (%d)\n", g_s32XPCOMEventsPending));

            if (iWait)
                RTSemEventWait(g_EventSemXPCOMQueueThread, iWait);
        }
    } while (!g_fTerminateXPCOMQueueThread);
    return VINF_SUCCESS;
}
Esempio n. 26
0
int NetIfGetConfigByName(PNETIFINFO pInfo)
{
    int rc = VINF_SUCCESS;
    size_t cbNeeded;
    char *pBuf, *pNext;
    int aiMib[6];

    aiMib[0] = CTL_NET;
    aiMib[1] = PF_ROUTE;
    aiMib[2] = 0;
    aiMib[3] = 0;       /* address family */
    aiMib[4] = NET_RT_IFLIST;
    aiMib[5] = 0;

    if (sysctl(aiMib, 6, NULL, &cbNeeded, NULL, 0) < 0)
    {
        Log(("NetIfList: Failed to get estimate for list size (errno=%d).\n", errno));
        return RTErrConvertFromErrno(errno);
    }
    if ((pBuf = (char*)RTMemAlloc(cbNeeded)) == NULL)
        return VERR_NO_MEMORY;
    if (sysctl(aiMib, 6, pBuf, &cbNeeded, NULL, 0) < 0)
    {
        RTMemFree(pBuf);
        Log(("NetIfList: Failed to retrieve interface table (errno=%d).\n", errno));
        return RTErrConvertFromErrno(errno);
    }

    int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
    if (sock < 0)
    {
        RTMemFree(pBuf);
        Log(("NetIfList: socket() -> %d\n", errno));
        return RTErrConvertFromErrno(errno);
    }

    char *pEnd = pBuf + cbNeeded;
    for (pNext = pBuf; pNext < pEnd;)
    {
        struct if_msghdr *pIfMsg = (struct if_msghdr *)pNext;

        if (pIfMsg->ifm_type != RTM_IFINFO)
        {
            Log(("NetIfList: Got message %u while expecting %u.\n",
                 pIfMsg->ifm_type, RTM_IFINFO));
            rc = VERR_INTERNAL_ERROR;
            break;
        }
        struct sockaddr_dl *pSdl = (struct sockaddr_dl *)(pIfMsg + 1);

        bool fSkip = !!strncmp(pInfo->szShortName, pSdl->sdl_data, pSdl->sdl_nlen)
            || pInfo->szShortName[pSdl->sdl_nlen] != '\0';

        pNext += pIfMsg->ifm_msglen;
        while (pNext < pEnd)
        {
            struct ifa_msghdr *pIfAddrMsg = (struct ifa_msghdr *)pNext;

            if (pIfAddrMsg->ifam_type != RTM_NEWADDR)
                break;
            if (!fSkip)
                extractAddressesToNetInfo(pIfAddrMsg->ifam_addrs,
                                          (char *)(pIfAddrMsg + 1),
                                          pIfAddrMsg->ifam_msglen + (char *)pIfAddrMsg,
                                          pInfo);
            pNext += pIfAddrMsg->ifam_msglen;
        }

        if (!fSkip && pSdl->sdl_type == IFT_ETHER)
        {
            size_t cbNameLen = pSdl->sdl_nlen + 1;
            memcpy(pInfo->MACAddress.au8, LLADDR(pSdl), sizeof(pInfo->MACAddress.au8));
            pInfo->enmMediumType = NETIF_T_ETHERNET;
            /* Generate UUID from name and MAC address. */
            RTUUID uuid;
            RTUuidClear(&uuid);
            memcpy(&uuid, pInfo->szShortName, RT_MIN(cbNameLen, sizeof(uuid)));
            uuid.Gen.u8ClockSeqHiAndReserved = (uuid.Gen.u8ClockSeqHiAndReserved & 0x3f) | 0x80;
            uuid.Gen.u16TimeHiAndVersion = (uuid.Gen.u16TimeHiAndVersion & 0x0fff) | 0x4000;
            memcpy(uuid.Gen.au8Node, pInfo->MACAddress.au8, sizeof(uuid.Gen.au8Node));
            pInfo->Uuid = uuid;

            struct ifreq IfReq;
            RTStrCopy(IfReq.ifr_name, sizeof(IfReq.ifr_name), pInfo->szShortName);
            if (ioctl(sock, SIOCGIFFLAGS, &IfReq) < 0)
            {
                Log(("NetIfList: ioctl(SIOCGIFFLAGS) -> %d\n", errno));
                pInfo->enmStatus = NETIF_S_UNKNOWN;
            }
            else
                pInfo->enmStatus = (IfReq.ifr_flags & IFF_UP) ? NETIF_S_UP : NETIF_S_DOWN;

            /* Found the interface; release resources before returning. */
            close(sock);
            RTMemFree(pBuf);
            return VINF_SUCCESS;
        }
    }
    close(sock);
    RTMemFree(pBuf);
    return rc;
}
Esempio n. 27
0
RTR3DECL(int)  RTFileWrite(RTFILE hFile, const void *pvBuf, size_t cbToWrite, size_t *pcbWritten)
{
    if (cbToWrite <= 0)
        return VINF_SUCCESS;
    ULONG cbToWriteAdj = (ULONG)cbToWrite;
    AssertReturn(cbToWriteAdj == cbToWrite, VERR_NUMBER_TOO_BIG);

    ULONG cbWritten = 0;
    if (WriteFile((HANDLE)RTFileToNative(hFile), pvBuf, cbToWriteAdj, &cbWritten, NULL))
    {
        if (pcbWritten)
            /* Caller can handle partial writes. */
            *pcbWritten = cbWritten;
        else
        {
            /* Caller expects everything to be written. */
            while (cbToWriteAdj > cbWritten)
            {
                ULONG cbWrittenPart = 0;
                if (!WriteFile((HANDLE)RTFileToNative(hFile), (char*)pvBuf + cbWritten,
                               cbToWriteAdj - cbWritten, &cbWrittenPart, NULL))
                {
                    int rc = RTErrConvertFromWin32(GetLastError());
                    if (   rc == VERR_DISK_FULL
                        && IsBeyondLimit(hFile, cbToWriteAdj - cbWritten, FILE_CURRENT)
                       )
                        rc = VERR_FILE_TOO_BIG;
                    return rc;
                }
                if (cbWrittenPart == 0)
                    return VERR_WRITE_ERROR;
                cbWritten += cbWrittenPart;
            }
        }
        return VINF_SUCCESS;
    }

    /*
     * If it's a console, we might bump into out of memory conditions in the
     * WriteConsole call.
     */
    DWORD dwErr = GetLastError();
    if (dwErr == ERROR_NOT_ENOUGH_MEMORY)
    {
        ULONG cbChunk = cbToWriteAdj / 2;
        if (cbChunk > _32K)
            cbChunk = _32K;
        else
            cbChunk = RT_ALIGN_32(cbChunk, 256);

        cbWritten = 0;
        while (cbToWriteAdj > cbWritten)
        {
            ULONG cbToWrite     = RT_MIN(cbChunk, cbToWriteAdj - cbWritten);
            ULONG cbWrittenPart = 0;
            if (!WriteFile((HANDLE)RTFileToNative(hFile), (const char *)pvBuf + cbWritten, cbToWrite, &cbWrittenPart, NULL))
            {
                /* If we failed because the buffer is too big, shrink it and
                   try again. */
                dwErr = GetLastError();
                if (   dwErr == ERROR_NOT_ENOUGH_MEMORY
                    && cbChunk > 8)
                {
                    cbChunk /= 2;
                    continue;
                }
                int rc = RTErrConvertFromWin32(dwErr);
                if (   rc == VERR_DISK_FULL
                    && IsBeyondLimit(hFile, cbToWriteAdj - cbWritten, FILE_CURRENT))
                    rc = VERR_FILE_TOO_BIG;
                return rc;
            }
            cbWritten += cbWrittenPart;

            /* Return if the caller can handle partial writes, otherwise try
               to write out everything. */
            if (pcbWritten)
            {
                *pcbWritten = cbWritten;
                break;
            }
            if (cbWrittenPart == 0)
                return VERR_WRITE_ERROR;
        }
        return VINF_SUCCESS;
    }

    int rc = RTErrConvertFromWin32(dwErr);
    if (   rc == VERR_DISK_FULL
        && IsBeyondLimit(hFile, cbToWriteAdj - cbWritten, FILE_CURRENT))
        rc = VERR_FILE_TOO_BIG;
    return rc;
}
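The ERROR_NOT_ENOUGH_MEMORY path above retries the write in smaller chunks and halves the chunk size whenever a chunk itself is rejected, stopping the shrinking once the chunk is down to 8 bytes. A stripped-down, backend-agnostic sketch of that fallback (the callback type and function are invented for illustration; the original additionally checks the specific Win32 error before shrinking):

#include <stddef.h>

typedef int (*PFNCHUNKWRITE)(void *pvUser, const void *pvBuf, size_t cb, size_t *pcbWritten);

static int writeChunked(PFNCHUNKWRITE pfnWrite, void *pvUser,
                        const void *pvBuf, size_t cbToWrite, size_t cbChunkInitial)
{
    size_t cbWritten = 0;
    size_t cbChunk   = cbChunkInitial;
    while (cbWritten < cbToWrite)
    {
        size_t cb = cbToWrite - cbWritten;
        if (cb > cbChunk)
            cb = cbChunk;
        size_t cbPart = 0;
        int rc = pfnWrite(pvUser, (const char *)pvBuf + cbWritten, cb, &cbPart);
        if (rc != 0)
        {
            if (cbChunk > 8)        /* shrink and retry, mirroring the loop above */
            {
                cbChunk /= 2;
                continue;
            }
            return rc;
        }
        if (cbPart == 0)
            return -1;              /* no progress: treat as a write error */
        cbWritten += cbPart;
    }
    return 0;
}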
Esempio n. 28
0
int main(int argc, char *argv[])
{
    int rc;

    RTR3InitExe(argc, &argv, 0);

    if (argc != 3)
    {
        RTPrintf("Usage: ./tstVDCopy <hdd1> <hdd2>\n");
        return 1;
    }

    RTPrintf("tstVDCopy: TESTING...\n");

    PVBOXHDD         pVD1 = NULL;
    PVBOXHDD         pVD2 = NULL;
    PVDINTERFACE     pVDIfs = NULL;
    VDINTERFACEERROR VDIfError;
    char *pszVD1 = NULL;
    char *pszVD2 = NULL;
    char *pbBuf1 = NULL;
    char *pbBuf2 = NULL;
    VDTYPE enmTypeVD1 = VDTYPE_INVALID;
    VDTYPE enmTypeVD2 = VDTYPE_INVALID;

#define CHECK(str) \
    do \
    { \
        if (RT_FAILURE(rc)) \
        { \
            RTPrintf("%s rc=%Rrc\n", str, rc); \
            if (pVD1) \
                VDCloseAll(pVD1); \
            if (pVD2) \
                VDCloseAll(pVD2); \
            return rc; \
        } \
    } while (0)

    pbBuf1 = (char *)RTMemAllocZ(VD_MERGE_BUFFER_SIZE);
    pbBuf2 = (char *)RTMemAllocZ(VD_MERGE_BUFFER_SIZE);

    /* Create error interface. */
    VDIfError.pfnError = tstVDError;

    rc = VDInterfaceAdd(&VDIfError.Core, "tstVD_Error", VDINTERFACETYPE_ERROR,
                        NULL, sizeof(VDINTERFACEERROR), &pVDIfs);
    AssertRC(rc);

    rc = VDGetFormat(NULL /* pVDIfsDisk */, NULL /* pVDIfsImage */,
                     argv[1], &pszVD1, &enmTypeVD1);
    CHECK("VDGetFormat() hdd1");

    rc = VDGetFormat(NULL /* pVDIfsDisk */, NULL /* pVDIfsImage */,
                     argv[2], &pszVD2, &enmTypeVD2);
    CHECK("VDGetFormat() hdd2");

    rc = VDCreate(pVDIfs, VDTYPE_HDD, &pVD1);
    CHECK("VDCreate() hdd1");

    rc = VDCreate(pVDIfs, VDTYPE_HDD, &pVD2);
    CHECK("VDCreate() hdd2");

    rc = VDOpen(pVD1, pszVD1, argv[1], VD_OPEN_FLAGS_NORMAL, NULL);
    CHECK("VDOpen() hdd1");

    rc = VDOpen(pVD2, pszVD2, argv[2], VD_OPEN_FLAGS_NORMAL, NULL);
    CHECK("VDOpen() hdd2");

    uint64_t cbSize1 = 0;
    uint64_t cbSize2 = 0;

    cbSize1 = VDGetSize(pVD1, 0);
    Assert(cbSize1 != 0);
    cbSize2 = VDGetSize(pVD2, 0);
    Assert(cbSize2 != 0);

    if (cbSize1 == cbSize2)
    {
        uint64_t uOffCurr = 0;

        /* Compare block by block. */
        while (uOffCurr < cbSize1)
        {
            size_t cbRead = RT_MIN((cbSize1 - uOffCurr), VD_MERGE_BUFFER_SIZE);

            rc = VDRead(pVD1, uOffCurr, pbBuf1, cbRead);
            CHECK("VDRead() hdd1");

            rc = VDRead(pVD2, uOffCurr, pbBuf2, cbRead);
            CHECK("VDRead() hdd2");

            if (memcmp(pbBuf1, pbBuf2, cbRead))
            {
                RTPrintf("tstVDCopy: Images differ uOffCurr=%llu\n", uOffCurr);
                /* Do byte by byte comparison. */
                for (size_t i = 0; i < cbRead; i++)
                {
                    if (pbBuf1[i] != pbBuf2[i])
                    {
                        RTPrintf("tstVDCopy: First different byte is at offset %llu\n", uOffCurr + i);
                        break;
                    }
                }
                break;
            }

            uOffCurr += cbRead;
        }
    }
    else
        RTPrintf("tstVDCopy: Images have different size hdd1=%llu hdd2=%llu\n", cbSize1, cbSize2);

    rc = VDClose(pVD1, false);
    CHECK("VDClose() hdd1");

    rc = VDClose(pVD2, false);
    CHECK("VDClose() hdd2");

    VDDestroy(pVD1);
    VDDestroy(pVD2);
    RTMemFree(pbBuf1);
    RTMemFree(pbBuf2);
#undef CHECK

    rc = VDShutdown();
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstVDCopy: unloading backends failed! rc=%Rrc\n", rc);
        g_cErrors++;
    }
    /*
     * Summary
     */
    if (!g_cErrors)
        RTPrintf("tstVDCopy: SUCCESS\n");
    else
        RTPrintf("tstVDCopy: FAILURE - %d errors\n", g_cErrors);

    return !!g_cErrors;
}
Esempio n. 29
0
/**
 * @interface_method_impl{DBGCCMDHLP,pfnMemRead}
 */
static DECLCALLBACK(int) dbgcHlpMemRead(PDBGCCMDHLP pCmdHlp, void *pvBuffer, size_t cbRead, PCDBGCVAR pVarPointer, size_t *pcbRead)
{
    PDBGC       pDbgc = DBGC_CMDHLP2DBGC(pCmdHlp);
    DBGFADDRESS Address;
    int         rc;

    /*
     * Dummy check.
     */
    if (cbRead == 0)
    {
        if (pcbRead)
            *pcbRead = 0;
        return VINF_SUCCESS;
    }

    /*
     * Convert Far addresses getting size and the correct base address.
     * Getting and checking the size is what makes this messy and slow.
     */
    DBGCVAR Var = *pVarPointer;
    switch (pVarPointer->enmType)
    {
        case DBGCVAR_TYPE_GC_FAR:
            /* Use DBGFR3AddrFromSelOff for the conversion. */
            Assert(pDbgc->pUVM);
            rc = DBGFR3AddrFromSelOff(pDbgc->pUVM, pDbgc->idCpu, &Address, Var.u.GCFar.sel, Var.u.GCFar.off);
            if (RT_FAILURE(rc))
                return rc;

            /* don't bother with flat selectors (for now). */
            if (!DBGFADDRESS_IS_FLAT(&Address))
            {
                DBGFSELINFO SelInfo;
                rc = DBGFR3SelQueryInfo(pDbgc->pUVM, pDbgc->idCpu, Address.Sel,
                                        DBGFSELQI_FLAGS_DT_GUEST | DBGFSELQI_FLAGS_DT_ADJ_64BIT_MODE, &SelInfo);
                if (RT_SUCCESS(rc))
                {
                    RTGCUINTPTR cb; /* -1 byte */
                    if (DBGFSelInfoIsExpandDown(&SelInfo))
                    {
                        if (    !SelInfo.u.Raw.Gen.u1Granularity
                            &&  Address.off > UINT16_C(0xffff))
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        if (Address.off <= SelInfo.cbLimit)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cb = (SelInfo.u.Raw.Gen.u1Granularity ? UINT32_C(0xffffffff) : UINT32_C(0xffff)) - Address.off;
                    }
                    else
                    {
                        if (Address.off > SelInfo.cbLimit)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cb = SelInfo.cbLimit - Address.off;
                    }
                    if (cbRead - 1 > cb)
                    {
                        if (!pcbRead)
                            return VERR_OUT_OF_SELECTOR_BOUNDS;
                        cbRead = cb + 1;
                    }
                }
            }
            Var.enmType = DBGCVAR_TYPE_GC_FLAT;
            Var.u.GCFlat = Address.FlatPtr;
            break;

        case DBGCVAR_TYPE_GC_FLAT:
        case DBGCVAR_TYPE_GC_PHYS:
        case DBGCVAR_TYPE_HC_FLAT:
        case DBGCVAR_TYPE_HC_PHYS:
            break;

        default:
            return VERR_NOT_IMPLEMENTED;
    }



    /*
     * Copy page by page.
     */
    size_t cbLeft = cbRead;
    for (;;)
    {
        /*
         * Calc read size.
         */
        size_t cb = RT_MIN(PAGE_SIZE, cbLeft);
        switch (pVarPointer->enmType)
        {
            case DBGCVAR_TYPE_GC_FLAT: cb = RT_MIN(cb, PAGE_SIZE - (Var.u.GCFlat & PAGE_OFFSET_MASK)); break;
            case DBGCVAR_TYPE_GC_PHYS: cb = RT_MIN(cb, PAGE_SIZE - (Var.u.GCPhys & PAGE_OFFSET_MASK)); break;
            case DBGCVAR_TYPE_HC_FLAT: cb = RT_MIN(cb, PAGE_SIZE - ((uintptr_t)Var.u.pvHCFlat & PAGE_OFFSET_MASK)); break;
            case DBGCVAR_TYPE_HC_PHYS: cb = RT_MIN(cb, PAGE_SIZE - ((size_t)Var.u.HCPhys & PAGE_OFFSET_MASK)); break; /* size_t: MSC has braindead loss of data warnings! */
            default: break;
        }

        /*
         * Perform read.
         */
        switch (Var.enmType)
        {
            case DBGCVAR_TYPE_GC_FLAT:
                rc = DBGFR3MemRead(pDbgc->pUVM, pDbgc->idCpu,
                                   DBGFR3AddrFromFlat(pDbgc->pUVM, &Address, Var.u.GCFlat),
                                   pvBuffer, cb);
                break;

            case DBGCVAR_TYPE_GC_PHYS:
                rc = DBGFR3MemRead(pDbgc->pUVM, pDbgc->idCpu,
                                   DBGFR3AddrFromPhys(pDbgc->pUVM, &Address, Var.u.GCPhys),
                                   pvBuffer, cb);
                break;

            case DBGCVAR_TYPE_HC_PHYS:
            case DBGCVAR_TYPE_HC_FLAT:
            {
                DBGCVAR Var2;
                rc = dbgcOpAddrFlat(pDbgc, &Var, DBGCVAR_CAT_ANY, &Var2);
                if (RT_SUCCESS(rc))
                {
                    /** @todo protect this!!! */
                    memcpy(pvBuffer, Var2.u.pvHCFlat, cb);
                    rc = 0;
                }
                else
                    rc = VERR_INVALID_POINTER;
                break;
            }

            default:
                rc = VERR_DBGC_PARSE_INCORRECT_ARG_TYPE;
        }

        /*
         * Check for failure.
         */
        if (RT_FAILURE(rc))
        {
            if (pcbRead && (*pcbRead = cbRead - cbLeft) > 0)
                return VINF_SUCCESS;
            return rc;
        }

        /*
         * Next.
         */
        cbLeft -= cb;
        if (!cbLeft)
            break;
        pvBuffer = (char *)pvBuffer + cb;
        rc = DBGCCmdHlpEval(pCmdHlp, &Var, "%DV + %d", &Var, cb);
        if (RT_FAILURE(rc))
        {
            if (pcbRead && (*pcbRead = cbRead - cbLeft) > 0)
                return VINF_SUCCESS;
            return rc;
        }
    }

    /*
     * Done
     */
    if (pcbRead)
        *pcbRead = cbRead;
    return 0;
}
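The read loop above never lets a single transfer cross a page boundary: it clamps each chunk to PAGE_SIZE and then to the distance from the current address to the next page boundary. The clamp in isolation, with the page size fixed at 4 KiB for illustration (names invented):

#include <stdint.h>
#include <stddef.h>

#define MY_PAGE_SIZE        0x1000u
#define MY_PAGE_OFFSET_MASK (MY_PAGE_SIZE - 1)

/* Clamp a transfer so it never crosses a page boundary, as done for every address
 * type in the read loop above. */
static size_t clampToPage(uint64_t uAddr, size_t cbLeft)
{
    size_t cb = cbLeft < MY_PAGE_SIZE ? cbLeft : MY_PAGE_SIZE;
    size_t cbToBoundary = MY_PAGE_SIZE - (size_t)(uAddr & MY_PAGE_OFFSET_MASK);
    return cb < cbToBoundary ? cb : cbToBoundary;
}
/* Example: clampToPage(0x1ff8, 64) == 8, so the next chunk starts page-aligned. */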
Esempio n. 30
0
/** @copydoc VBOXHDDBACKEND::pfnAsyncWrite */
static int parallelsAsyncWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
                               PVDIOCTX pIoCtx,
                               size_t *pcbWriteProcess, size_t *pcbPreRead,
                               size_t *pcbPostRead, unsigned fWrite)
{
    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p\n",
                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess));
    int rc = VINF_SUCCESS;
    PPARALLELSIMAGE pImage = (PPARALLELSIMAGE)pBackendData;
    uint64_t uSector;
    uint64_t uOffsetInFile;
    uint32_t iIndexInAllocationTable;

    AssertPtr(pImage);
    Assert(uOffset % 512 == 0);
    Assert(cbToWrite % 512 == 0);

    if (pImage->uImageFlags & VD_IMAGE_FLAGS_FIXED)
        rc = vdIfIoIntFileWriteUserAsync(pImage->pIfIo, pImage->pStorage, uOffset,
                                         pIoCtx, cbToWrite, NULL, NULL);
    else
    {
        /* Calculate offset in the real file. */
        uSector = uOffset / 512;
        /* One chunk in the file is always one track big. */
        iIndexInAllocationTable = (uint32_t)(uSector / pImage->PCHSGeometry.cSectors);
        uSector = uSector % pImage->PCHSGeometry.cSectors;

        cbToWrite = RT_MIN(cbToWrite, (pImage->PCHSGeometry.cSectors - uSector)*512);

        if (pImage->pAllocationBitmap[iIndexInAllocationTable] == 0)
        {
            if (fWrite & VD_WRITE_NO_ALLOC)
            {
                *pcbPreRead  = uSector * 512;
                *pcbPostRead = pImage->PCHSGeometry.cSectors * 512 - cbToWrite - *pcbPreRead;

                if (pcbWriteProcess)
                    *pcbWriteProcess = cbToWrite;
                return VERR_VD_BLOCK_FREE;
            }

            /* Allocate new chunk in the file. */
            Assert(uSector == 0);
            AssertMsg(pImage->cbFileCurrent % 512 == 0, ("File size is not a multiple of 512\n"));
            pImage->pAllocationBitmap[iIndexInAllocationTable] = (uint32_t)(pImage->cbFileCurrent / 512);
            pImage->cbFileCurrent += pImage->PCHSGeometry.cSectors * 512;
            pImage->fAllocationBitmapChanged = true;
            uOffsetInFile = (uint64_t)pImage->pAllocationBitmap[iIndexInAllocationTable] * 512;

            /*
             * Write the new block at the current end of the file.
             */
            rc = vdIfIoIntFileWriteUserAsync(pImage->pIfIo, pImage->pStorage,
                                             uOffsetInFile, pIoCtx, cbToWrite, NULL, NULL);
            if (RT_SUCCESS(rc) || (rc == VERR_VD_ASYNC_IO_IN_PROGRESS))
            {
                /* Write the changed allocation bitmap entry. */
                /** @todo: Error handling. */
                rc = vdIfIoIntFileWriteMetaAsync(pImage->pIfIo, pImage->pStorage,
                                                 sizeof(ParallelsHeader) + iIndexInAllocationTable * sizeof(uint32_t),
                                                 &pImage->pAllocationBitmap[iIndexInAllocationTable],
                                                 sizeof(uint32_t), pIoCtx,
                                                 NULL, NULL);
            }

            *pcbPreRead  = 0;
            *pcbPostRead = 0;
        }
        else
        {
            uOffsetInFile = (pImage->pAllocationBitmap[iIndexInAllocationTable] + uSector) * 512;
            rc = vdIfIoIntFileWriteUserAsync(pImage->pIfIo, pImage->pStorage,
                                             uOffsetInFile, pIoCtx, cbToWrite, NULL, NULL);
        }
    }

    if (pcbWriteProcess)
        *pcbWriteProcess = cbToWrite;

    LogFlowFunc(("returns %Rrc\n", rc));
    return rc;
}
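Both the synchronous and asynchronous write paths use the same translation: the virtual byte offset is split into a track index (one allocation-table entry per track of PCHSGeometry.cSectors sectors) and a sector within that track; a zero table entry means the track has no chunk in the file yet and one must be appended at the current end of the file. A condensed sketch of that translation (function and parameter names invented; the arithmetic matches the code above):

#include <stdint.h>

/* Translate a virtual byte offset into a file offset via the allocation table.
 * Returns 0 on success, -1 if the track is unallocated and a new chunk must be
 * appended at the end of the file first. */
static int parallelsSketchTranslate(const uint32_t *pauAllocTable, uint32_t cEntries,
                                    uint32_t cSectorsPerTrack, uint64_t uOffset,
                                    uint64_t *puOffsetInFile)
{
    uint64_t uSector = uOffset / 512;
    uint32_t iTrack  = (uint32_t)(uSector / cSectorsPerTrack);
    uint64_t uInTrk  = uSector % cSectorsPerTrack;

    if (iTrack >= cEntries || pauAllocTable[iTrack] == 0)
        return -1;
    *puOffsetInFile = ((uint64_t)pauAllocTable[iTrack] + uInTrk) * 512;
    return 0;
}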