/**
 * Queries information from a file or directory handle.
 *
 * This is shared between the RTPathQueryInfo, RTFileQueryInfo and
 * RTDirQueryInfo code.
 *
 * @returns IPRT status code.
 * @param   hFile               The handle to query information from.  Must have
 *                              the necessary privileges.
 * @param   pvBuf               Pointer to a scratch buffer.
 * @param   cbBuf               The size of the buffer.  This must be large
 *                              enough to hold a FILE_ALL_INFORMATION struct.
 * @param   pObjInfo            Where to return information about the handle.
 * @param   enmAddAttr          What extra info to return.
 * @param   pszPath             The path if this is a file (for exe detect).
 * @param   uReparseTag         The reparse tag number (0 if not applicable) for
 *                              symlink detection/whatnot.
 */
DECLHIDDEN(int) rtPathNtQueryInfoFromHandle(HANDLE hFile, void *pvBuf, size_t cbBuf, PRTFSOBJINFO pObjInfo,
                                            RTFSOBJATTRADD enmAddAttr, const char *pszPath, ULONG uReparseTag)
{
    Assert(cbBuf >= sizeof(FILE_ALL_INFORMATION));

    /** @todo Try to optimize this for when RTFSOBJATTRADD_UNIX isn't set? */
    IO_STATUS_BLOCK  Ios = RTNT_IO_STATUS_BLOCK_INITIALIZER;
    NTSTATUS rcNt = NtQueryInformationFile(hFile, &Ios, pvBuf, sizeof(FILE_ALL_INFORMATION), FileAllInformation);
    if (   NT_SUCCESS(rcNt)
        || rcNt == STATUS_BUFFER_OVERFLOW)
    {
        FILE_ALL_INFORMATION *pAllInfo = (FILE_ALL_INFORMATION *)pvBuf;
        pObjInfo->cbObject    = pAllInfo->StandardInformation.EndOfFile.QuadPart;
        pObjInfo->cbAllocated = pAllInfo->StandardInformation.AllocationSize.QuadPart;
        RTTimeSpecSetNtTime(&pObjInfo->BirthTime,         pAllInfo->BasicInformation.CreationTime.QuadPart);
        RTTimeSpecSetNtTime(&pObjInfo->AccessTime,        pAllInfo->BasicInformation.LastAccessTime.QuadPart);
        RTTimeSpecSetNtTime(&pObjInfo->ModificationTime,  pAllInfo->BasicInformation.LastWriteTime.QuadPart);
        RTTimeSpecSetNtTime(&pObjInfo->ChangeTime,        pAllInfo->BasicInformation.ChangeTime.QuadPart);
        pObjInfo->Attr.fMode = rtFsModeFromDos(  (pAllInfo->BasicInformation.FileAttributes << RTFS_DOS_SHIFT)
                                               & RTFS_DOS_MASK_NT,
                                               pszPath, pszPath ? strlen(pszPath) : 0, uReparseTag);
        pObjInfo->Attr.enmAdditional = enmAddAttr;
        if (enmAddAttr == RTFSOBJATTRADD_UNIX)
        {
            pObjInfo->Attr.u.Unix.uid             = ~0U;
            pObjInfo->Attr.u.Unix.gid             = ~0U;
            pObjInfo->Attr.u.Unix.cHardlinks      = RT_MAX(1, pAllInfo->StandardInformation.NumberOfLinks);
            pObjInfo->Attr.u.Unix.INodeIdDevice   = 0; /* below */
            pObjInfo->Attr.u.Unix.INodeId         = pAllInfo->InternalInformation.IndexNumber.QuadPart;
            pObjInfo->Attr.u.Unix.fFlags          = 0;
            pObjInfo->Attr.u.Unix.GenerationId    = 0;
            pObjInfo->Attr.u.Unix.Device          = 0;

            /* Get the serial number. */
            rcNt = NtQueryVolumeInformationFile(hFile, &Ios, pvBuf, (ULONG)RT_MIN(cbBuf, _2K), FileFsVolumeInformation);
            if (NT_SUCCESS(rcNt) || rcNt == STATUS_BUFFER_OVERFLOW)
            {
                FILE_FS_VOLUME_INFORMATION *pVolInfo = (FILE_FS_VOLUME_INFORMATION *)pvBuf;
                pObjInfo->Attr.u.Unix.INodeIdDevice = pVolInfo->VolumeSerialNumber;
            }
        }

        return rtPathNtQueryInfoFillInDummyData(VINF_SUCCESS, pObjInfo, enmAddAttr);
    }
    return RTErrConvertFromNtStatus(rcNt);
}
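/*
 * Hedged usage sketch (not from the original sources): how a caller with an
 * already opened NT handle might invoke the worker above.  The union sizing,
 * the helper name and the zero reparse tag are illustrative assumptions.
 */
static int exampleQueryObjInfoFromNtHandle(HANDLE hFile, PRTFSOBJINFO pObjInfo)
{
    union
    {
        FILE_ALL_INFORMATION All;
        uint8_t              ab[sizeof(FILE_ALL_INFORMATION) + RTPATH_MAX * sizeof(RTUTF16)];
    } uBuf;
    return rtPathNtQueryInfoFromHandle(hFile, &uBuf, sizeof(uBuf), pObjInfo,
                                       RTFSOBJATTRADD_UNIX, NULL /*pszPath*/, 0 /*uReparseTag*/);
}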
static void testFile(const char *pszFilename)
{
    size_t  cbSrcActually = 0;
    void   *pvSrc;
    size_t  cbSrc;
    int rc = RTFileReadAll(pszFilename, &pvSrc, &cbSrc);
    RTTESTI_CHECK_RC_OK_RETV(rc);

    size_t  cbDstActually = 0;
    size_t  cbDst = RT_MAX(cbSrc * 8, _1M);
    void   *pvDst = RTMemAllocZ(cbDst);

    rc = RTZipBlockDecompress(RTZIPTYPE_ZLIB, 0, pvSrc, cbSrc, &cbSrcActually, pvDst, cbDst, &cbDstActually);
    RTTestIPrintf(RTTESTLVL_ALWAYS, "cbSrc=%zu cbSrcActually=%zu cbDst=%zu cbDstActually=%zu rc=%Rrc\n",
                  cbSrc, cbSrcActually, cbDst, cbDstActually, rc);
    RTTESTI_CHECK_RC_OK(rc);

    RTMemFree(pvDst);
    RTFileReadAllFree(pvSrc, cbSrc);
}
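/*
 * Hedged sketch (not part of the original test): a minimal driver that feeds
 * every path given on the command line to testFile() above.  The test name is
 * made up for illustration.
 */
int main(int argc, char **argv)
{
    RTTEST hTest;
    int rc = RTTestInitAndCreate("tstExampleZlibFiles", &hTest);
    if (rc)
        return rc;
    RTTestBanner(hTest);

    for (int i = 1; i < argc; i++)
        testFile(argv[i]);

    return RTTestSummaryAndDestroy(hTest);
}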
/** @todo Mappings should be automatically removed by unloading the service,
 *        but unloading is currently a no-op! */
static void unmapAndRemoveMapping(RTTEST hTest, VBOXHGCMSVCFNTABLE *psvcTable,
                                  SHFLROOT root, const char *pcszFolderName)
{
    VBOXHGCMSVCPARM aParms[RT_MAX(SHFL_CPARMS_UNMAP_FOLDER,
                                  SHFL_CPARMS_REMOVE_MAPPING)];
    VBOXHGCMCALLHANDLE_TYPEDEF callHandle = { VINF_SUCCESS };
    struct TESTSHFLSTRING FolderName;
    int rc;

    aParms[0].setUInt32(root);
    psvcTable->pfnCall(psvcTable->pvService, &callHandle, 0,
                       psvcTable->pvService, SHFL_FN_UNMAP_FOLDER,
                       SHFL_CPARMS_UNMAP_FOLDER, aParms);
    AssertReleaseRC(callHandle.rc);
    fillTestShflString(&FolderName, pcszFolderName);
    aParms[0].setPointer(&FolderName,   RT_UOFFSETOF(SHFLSTRING, String)
                                      + FolderName.string.u16Size);
    rc = psvcTable->pfnHostCall(psvcTable->pvService, SHFL_FN_REMOVE_MAPPING,
                                SHFL_CPARMS_REMOVE_MAPPING, aParms);
    AssertReleaseRC(rc);
}
RTR3DECL(int) RTManifestWriteFilesBuf(void **ppvBuf, size_t *pcbSize, PRTMANIFESTTEST paFiles, size_t cFiles)
{
    /* Validate input */
    AssertPtrReturn(ppvBuf, VERR_INVALID_POINTER);
    AssertPtrReturn(pcbSize, VERR_INVALID_POINTER);
    AssertPtrReturn(paFiles, VERR_INVALID_POINTER);
    AssertReturn(cFiles > 0, VERR_INVALID_PARAMETER);

    /* Calculate the size necessary for the memory buffer. */
    size_t cbSize = 0;
    size_t cbMaxSize = 0;
    for (size_t i = 0; i < cFiles; ++i)
    {
        size_t cbTmp = strlen(RTPathFilename(paFiles[i].pszTestFile)) + strlen(paFiles[i].pszTestDigest) + 10;
        cbMaxSize = RT_MAX(cbMaxSize, cbTmp);
        cbSize += cbTmp;
    }

    /* Create the memory buffer */
    void *pvBuf = RTMemAlloc(cbSize);
    if (!pvBuf)
        return VERR_NO_MEMORY;

    /* Allocate a temporary string buffer. */
    char *pszTmp = RTStrAlloc(cbMaxSize + 1);
    if (!pszTmp)
    {
        RTMemFree(pvBuf);
        return VERR_NO_STR_MEMORY;
    }
    size_t cbPos = 0;
    for (size_t i = 0; i < cFiles; ++i)
    {
        size_t cch = RTStrPrintf(pszTmp, cbMaxSize + 1, "SHA1 (%s)= %s\n", RTPathFilename(paFiles[i].pszTestFile), paFiles[i].pszTestDigest);
        memcpy(&((char*)pvBuf)[cbPos], pszTmp, cch);
        cbPos += cch;
    }
    RTStrFree(pszTmp);

    /* Results */
    *ppvBuf = pvBuf;
    *pcbSize = cbSize;

    return VINF_SUCCESS;
}
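/*
 * Hedged usage sketch (not from the original sources): writing a two-entry
 * manifest into a memory buffer.  File names and SHA-1 digests are made up,
 * and the casts merely cover literal-to-string-member assignments.
 */
static int exampleWriteManifestBuf(void **ppvBuf, size_t *pcbSize)
{
    RTMANIFESTTEST aFiles[2];
    aFiles[0].pszTestFile   = (char *)"disk1.vmdk";
    aFiles[0].pszTestDigest = (char *)"0123456789abcdef0123456789abcdef01234567";
    aFiles[1].pszTestFile   = (char *)"box.ovf";
    aFiles[1].pszTestDigest = (char *)"fedcba9876543210fedcba9876543210fedcba98";

    /* On success the caller owns *ppvBuf and must free it with RTMemFree(). */
    return RTManifestWriteFilesBuf(ppvBuf, pcbSize, &aFiles[0], RT_ELEMENTS(aFiles));
}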
static SHFLROOT initWithWritableMapping(RTTEST hTest,
                                        VBOXHGCMSVCFNTABLE *psvcTable,
                                        VBOXHGCMSVCHELPERS *psvcHelpers,
                                        const char *pcszFolderName,
                                        const char *pcszMapping)
{
    VBOXHGCMSVCPARM aParms[RT_MAX(SHFL_CPARMS_ADD_MAPPING,
                                  SHFL_CPARMS_MAP_FOLDER)];
    struct TESTSHFLSTRING FolderName;
    struct TESTSHFLSTRING Mapping;
    VBOXHGCMCALLHANDLE_TYPEDEF callHandle = { VINF_SUCCESS };
    int rc;

    initTable(psvcTable, psvcHelpers);
    AssertReleaseRC(VBoxHGCMSvcLoad(psvcTable));
    AssertRelease(  psvcTable->pvService
                  = RTTestGuardedAllocTail(hTest, psvcTable->cbClient));
    RT_BZERO(psvcTable->pvService, psvcTable->cbClient);
    fillTestShflString(&FolderName, pcszFolderName);
    fillTestShflString(&Mapping, pcszMapping);
    aParms[0].setPointer(&FolderName,   RT_UOFFSETOF(SHFLSTRING, String)
                                      + FolderName.string.u16Size);
    aParms[1].setPointer(&Mapping,   RT_UOFFSETOF(SHFLSTRING, String)
                                   + Mapping.string.u16Size);
    aParms[2].setUInt32(1);
    rc = psvcTable->pfnHostCall(psvcTable->pvService, SHFL_FN_ADD_MAPPING,
                                SHFL_CPARMS_ADD_MAPPING, aParms);
    AssertReleaseRC(rc);
    aParms[0].setPointer(&Mapping,   RT_UOFFSETOF(SHFLSTRING, String)
                                   + Mapping.string.u16Size);
    aParms[1].setUInt32(0);  /* root */
    aParms[2].setUInt32('/');  /* delimiter */
    aParms[3].setUInt32(1);  /* case sensitive */
    psvcTable->pfnCall(psvcTable->pvService, &callHandle, 0,
                       psvcTable->pvService, SHFL_FN_MAP_FOLDER,
                       SHFL_CPARMS_MAP_FOLDER, aParms);
    AssertReleaseRC(callHandle.rc);
    return aParms[1].u.uint32;
}
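/*
 * Hedged sketch (not part of the original test): a test case that sets up a
 * writable mapping with the helper above, would exercise it, and then tears
 * it down again with unmapAndRemoveMapping().  Folder and mapping names are
 * illustrative only.
 */
static void exampleMapExerciseUnmap(RTTEST hTest)
{
    VBOXHGCMSVCFNTABLE svcTable;
    VBOXHGCMSVCHELPERS svcHelpers;
    SHFLROOT root = initWithWritableMapping(hTest, &svcTable, &svcHelpers,
                                            "/test/mapping", "testname");
    /* ... issue SHFL_FN_CREATE / SHFL_FN_WRITE calls against 'root' here ... */
    unmapAndRemoveMapping(hTest, &svcTable, root, "testname");
    /* Tear-down of the service instance itself is left to the caller/harness. */
}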
/**
 * Called by TRPM and CPUM assembly code to make sure the guest state is
 * ready for execution.
 *
 * @param   pVM                 The VM handle.
 */
DECLASM(void) CPUMRCAssertPreExecutionSanity(PVM pVM)
{
    /*
     * Check some important assumptions before resuming guest execution.
     */
    PVMCPU         pVCpu     = VMMGetCpu0(pVM);
    PCCPUMCTX      pCtx      = &pVCpu->cpum.s.Guest;
    uint8_t  const uRawCpl   = CPUMGetGuestCPL(pVCpu);
    uint32_t const u32EFlags = CPUMRawGetEFlags(pVCpu);
    bool     const fPatch    = PATMIsPatchGCAddr(pVM, pCtx->eip);
    AssertMsg(pCtx->eflags.Bits.u1IF,                ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    AssertMsg(pCtx->eflags.Bits.u2IOPL < RT_MAX(uRawCpl, 1U),
                                                     ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    if (!(u32EFlags & X86_EFL_VM))
    {
        AssertMsg((u32EFlags & X86_EFL_IF) || fPatch,("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
        AssertMsg((pCtx->cs.Sel & X86_SEL_RPL) > 0,  ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
        AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) > 0,  ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    }
    AssertMsg(CPUMIsGuestInRawMode(pVCpu),           ("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
    //Log2(("cs:eip=%04x:%08x ss:esp=%04x:%08x cpl=%u raw/efl=%#x/%#x%s\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, uRawCpl, u32EFlags, pCtx->eflags.u, fPatch ? " patch" : ""));
}
/** @copydoc RTRANDINT::pfnGetBytes */
static DECLCALLBACK(void) rtRandAdvPosixGetBytes(PRTRANDINT pThis, uint8_t *pb, size_t cb)
{
    ssize_t cbRead = read(pThis->u.File.hFile, pb, cb);
    if ((size_t)cbRead != cb)
    {
        /* S10 has been observed returning 1040 bytes at a time from /dev/urandom,
           which means we may need more than 256 rounds to reach 668171 bytes if
           that's what the caller demands (like tstRTMemWipe.cpp does). */
        ssize_t cTries = RT_MAX(256, cb / 64);
        do
        {
            if (cbRead > 0)
            {
                cb -= cbRead;
                pb += cbRead;
            }
            cbRead = read(pThis->u.File.hFile, pb, cb);
        } while (   (size_t)cbRead != cb
                 && cTries-- > 0);
        AssertReleaseMsg((size_t)cbRead == cb, ("%zu != %zu, cTries=%zd errno=%d\n", cbRead, cb, cTries, errno));
    }
}
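/*
 * Hedged usage sketch (not from the original sources): the callback above is
 * reached through the generic advanced RNG API; which backend ends up in
 * rtRandAdvPosixGetBytes() is platform dependent.
 */
static int exampleFillWithRandomBytes(void *pv, size_t cb)
{
    RTRAND hRand;
    int rc = RTRandAdvCreateSystemFaster(&hRand); /* typically /dev/urandom backed */
    if (RT_SUCCESS(rc))
    {
        RTRandAdvBytes(hRand, pv, cb);
        rc = RTRandAdvDestroy(hRand);
    }
    return rc;
}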
static int shaGetSizeCallback(void *pvUser, void *pvStorage, uint64_t *pcbSize)
{
    /* Validate input. */
    AssertPtrReturn(pvUser, VERR_INVALID_POINTER);
    AssertPtrReturn(pvStorage, VERR_INVALID_POINTER);

    PSHASTORAGE pShaStorage = (PSHASTORAGE)pvUser;
    PVDINTERFACEIO pIfIo = VDIfIoGet(pShaStorage->pVDImageIfaces);
    AssertPtrReturn(pIfIo, VERR_INVALID_PARAMETER);

    PSHASTORAGEINTERNAL pInt = (PSHASTORAGEINTERNAL)pvStorage;

    DEBUG_PRINT_FLOW();

    uint64_t cbSize;
    int rc = vdIfIoFileGetSize(pIfIo, pInt->pvStorage, &cbSize);
    if (RT_FAILURE(rc))
        return rc;

    *pcbSize = RT_MAX(pInt->cbCurAll, cbSize);

    return VINF_SUCCESS;
}
RTDECL(uint32_t) RTMpGetArraySize(void)
{
    /*
     * Cache the result here.  The whole point of this function is that it
     * always returns the same value, so that should be safe.
     *
     * Note! Because RTCPUSET may be too small to represent all the CPUs, we
     *       check with RTMpGetCount() as well.
     */
    static uint32_t s_cMaxCpus = 0;
    uint32_t cCpus = s_cMaxCpus;
    if (RT_UNLIKELY(cCpus == 0))
    {
        RTCPUSET    CpuSet;
        uint32_t    cCpus1 = RTCpuLastIndex(RTMpGetSet(&CpuSet)) + 1;
        uint32_t    cCpus2 = RTMpGetCount();
        cCpus              = RT_MAX(cCpus1, cCpus2);
        ASMAtomicCmpXchgU32(&s_cMaxCpus, cCpus, 0);
        return cCpus;
    }
    return s_cMaxCpus;
}
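/*
 * Hedged usage sketch (not from the original sources): sizing a per-CPU
 * counter array with the stable value returned above, so late-onlined CPUs
 * still fit.
 */
static uint64_t *exampleAllocPerCpuCounters(void)
{
    uint32_t const cEntries = RTMpGetArraySize();
    return (uint64_t *)RTMemAllocZ(cEntries * sizeof(uint64_t));
}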
DECLASM(int) VBoxDrvIOCtl(uint16_t sfn, uint8_t iCat, uint8_t iFunction, void *pvParm, void *pvData, uint16_t *pcbParm, uint16_t *pcbData)
{
    /*
     * Find the session.
     */
    const RTPROCESS     Process = RTProcSelf();
    const unsigned      iHash = SESSION_HASH(sfn);
    PSUPDRVSESSION      pSession;

    RTSpinlockAcquire(g_Spinlock);
    pSession = g_apSessionHashTab[iHash];
    if (pSession && pSession->Process != Process)
    {
        do pSession = pSession->pNextHash;
        while (     pSession
               &&   (   pSession->sfn != sfn
                     || pSession->Process != Process));

        if (RT_LIKELY(pSession))
            supdrvSessionRetain(pSession);
    }
    RTSpinlockReleaseNoInts(g_Spinlock);
    if (!pSession)
    {
        OSDBGPRINT(("VBoxDrvIoctl: WHUT?!? pSession == NULL! This must be a mistake... pid=%d\n", (int)Process));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Verify the category and dispatch the IOCtl.
     */
    if (RT_LIKELY(iCat == SUP_CTL_CATEGORY))
    {
        Log(("VBoxDrvIOCtl: pSession=%p iFunction=%#x pvParm=%p pvData=%p *pcbParm=%d *pcbData=%d\n", pSession, iFunction, pvParm, pvData, *pcbParm, *pcbData));
        Assert(pvParm);
        Assert(!pvData);

        /*
         * Lock the header.
         */
        PSUPREQHDR pHdr = (PSUPREQHDR)pvParm;
        AssertReturn(*pcbParm == sizeof(*pHdr), VERR_INVALID_PARAMETER);
        KernVMLock_t Lock;
        int rc = KernVMLock(VMDHL_WRITE, pHdr, *pcbParm, &Lock, (KernPageList_t *)-1, NULL);
        AssertMsgReturn(!rc, ("KernVMLock(VMDHL_WRITE, %p, %#x, %p, NULL, NULL) -> %d\n", pHdr, *pcbParm, &Lock, rc), VERR_LOCK_FAILED);

        /*
         * Validate the header.
         */
        if (RT_LIKELY((pHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) == SUPREQHDR_FLAGS_MAGIC))
        {
            uint32_t cbReq = RT_MAX(pHdr->cbIn, pHdr->cbOut);
            if (RT_LIKELY(    pHdr->cbIn >= sizeof(*pHdr)
                          &&  pHdr->cbOut >= sizeof(*pHdr)
                          &&  cbReq <= _1M*16))
            {
                /*
                 * Lock the rest of the buffer if necessary.
                 */
                if (((uintptr_t)pHdr & PAGE_OFFSET_MASK) + cbReq > PAGE_SIZE)
                {
                    rc = KernVMUnlock(&Lock);
                    AssertMsgReturn(!rc, ("KernVMUnlock(Lock) -> %#x\n", rc), VERR_LOCK_FAILED);

                    rc = KernVMLock(VMDHL_WRITE, pHdr, cbReq, &Lock, (KernPageList_t *)-1, NULL);
                    AssertMsgReturn(!rc, ("KernVMLock(VMDHL_WRITE, %p, %#x, %p, NULL, NULL) -> %d\n", pHdr, cbReq, &Lock, rc), VERR_LOCK_FAILED);
                }

                /*
                 * Process the IOCtl.
                 */
                rc = supdrvIOCtl(iFunction, &g_DevExt, pSession, pHdr);
            }
            else
            {
                OSDBGPRINT(("VBoxDrvIOCtl: max(%#x,%#x); iCmd=%#x\n", pHdr->cbIn, pHdr->cbOut, iFunction));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            OSDBGPRINT(("VBoxDrvIOCtl: bad magic fFlags=%#x; iCmd=%#x\n", pHdr->fFlags, iFunction));
            rc = VERR_INVALID_PARAMETER;
        }

        /*
         * Unlock and return.
         */
        int rc2 = KernVMUnlock(&Lock);
        AssertMsg(!rc2, ("rc2=%d\n", rc2)); NOREF(rc2);
        return rc;
    }

    /* Unknown category. */
    OSDBGPRINT(("VBoxDrvIOCtl: unknown category %#x; iFunction=%#x\n", iCat, iFunction));
    return VERR_NOT_SUPPORTED;
}
/**
 * Worker for VBoxDrvSolarisIOCtl that handles the slow IOCtl functions.
 *
 * @returns Solaris errno.
 *
 * @param   pSession    The session.
 * @param   Cmd         The IOCtl command.
 * @param   Mode        Information bitfield (for specifying ownership of data)
 * @param   iArg        User space address of the request buffer.
 */
static int VBoxDrvSolarisIOCtlSlow(PSUPDRVSESSION pSession, int iCmd, int Mode, intptr_t iArg)
{
    int         rc;
    uint32_t    cbBuf = 0;
    union
    {
        SUPREQHDR   Hdr;
        uint8_t     abBuf[64];
    }           StackBuf;
    PSUPREQHDR  pHdr;


    /*
     * Read the header.
     */
    if (RT_UNLIKELY(IOCPARM_LEN(iCmd) != sizeof(StackBuf.Hdr)))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: iCmd=%#x len %d expected %d\n", iCmd, IOCPARM_LEN(iCmd), sizeof(StackBuf.Hdr)));
        return EINVAL;
    }
    rc = ddi_copyin((void *)iArg, &StackBuf.Hdr, sizeof(StackBuf.Hdr), Mode);
    if (RT_UNLIKELY(rc))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: ddi_copyin(,%#lx,) failed; iCmd=%#x. rc=%d\n", iArg, iCmd, rc));
        return EFAULT;
    }
    if (RT_UNLIKELY((StackBuf.Hdr.fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: bad header magic %#x; iCmd=%#x\n", StackBuf.Hdr.fFlags & SUPREQHDR_FLAGS_MAGIC_MASK, iCmd));
        return EINVAL;
    }
    cbBuf = RT_MAX(StackBuf.Hdr.cbIn, StackBuf.Hdr.cbOut);
    if (RT_UNLIKELY(    StackBuf.Hdr.cbIn < sizeof(StackBuf.Hdr)
                    ||  StackBuf.Hdr.cbOut < sizeof(StackBuf.Hdr)
                    ||  cbBuf > _1M*16))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: max(%#x,%#x); iCmd=%#x\n", StackBuf.Hdr.cbIn, StackBuf.Hdr.cbOut, iCmd));
        return EINVAL;
    }

    /*
     * Buffer the request.
     */
    if (cbBuf <= sizeof(StackBuf))
        pHdr = &StackBuf.Hdr;
    else
    {
        pHdr = RTMemTmpAlloc(cbBuf);
        if (RT_UNLIKELY(!pHdr))
        {
            LogRel(("VBoxDrvSolarisIOCtlSlow: failed to allocate buffer of %d bytes for iCmd=%#x.\n", cbBuf, iCmd));
            return ENOMEM;
        }
    }
    rc = ddi_copyin((void *)iArg, pHdr, cbBuf, Mode);
    if (RT_UNLIKELY(rc))
    {
        LogRel(("VBoxDrvSolarisIOCtlSlow: copy_from_user(,%#lx, %#x) failed; iCmd=%#x. rc=%d\n", iArg, cbBuf, iCmd, rc));
        if (pHdr != &StackBuf.Hdr)
            RTMemFree(pHdr);
        return EFAULT;
    }

    /*
     * Process the IOCtl.
     */
    rc = supdrvIOCtl(iCmd, &g_DevExt, pSession, pHdr, cbBuf);

    /*
     * Copy ioctl data and output buffer back to user space.
     */
    if (RT_LIKELY(!rc))
    {
        uint32_t cbOut = pHdr->cbOut;
        if (RT_UNLIKELY(cbOut > cbBuf))
        {
            LogRel(("VBoxDrvSolarisIOCtlSlow: too much output! %#x > %#x; iCmd=%#x!\n", cbOut, cbBuf, iCmd));
            cbOut = cbBuf;
        }
        rc = ddi_copyout(pHdr, (void *)iArg, cbOut, Mode);
        if (RT_UNLIKELY(rc != 0))
        {
            /* this is really bad */
            LogRel(("VBoxDrvSolarisIOCtlSlow: ddi_copyout(,%p,%d) failed. rc=%d\n", (void *)iArg, cbBuf, rc));
            rc = EFAULT;
        }
    }
    else
        rc = EINVAL;

    if (pHdr != &StackBuf.Hdr)
        RTMemTmpFree(pHdr);
    return rc;
}
/**
 * Allocate memory from the heap.
 *
 * @returns Pointer to allocated memory.
 * @param   pHeap       Heap handle.
 * @param   enmTag      Statistics tag. Statistics are collected on a per tag
 *                      basis in addition to a global one. Thus we can easily
 *                      identify how memory is used by the VM.
 * @param   cb          Size of the block.
 * @param   fZero       Whether or not to zero the memory block.
 * @param   pR0Ptr      Where to return the ring-0 pointer.
 */
static void *mmR3UkHeapAlloc(PMMUKHEAP pHeap, MMTAG enmTag, size_t cb, bool fZero, PRTR0PTR pR0Ptr)
{
    if (pR0Ptr)
        *pR0Ptr = NIL_RTR0PTR;
    RTCritSectEnter(&pHeap->Lock);

#ifdef MMUKHEAP_WITH_STATISTICS
    /*
     * Find/alloc statistics nodes.
     */
    pHeap->Stat.cAllocations++;
    PMMUKHEAPSTAT pStat = (PMMUKHEAPSTAT)RTAvlULGet(&pHeap->pStatTree, (AVLULKEY)enmTag);
    if (pStat)
        pStat->cAllocations++;
    else
    {
        pStat = (PMMUKHEAPSTAT)MMR3HeapAllocZU(pHeap->pUVM, MM_TAG_MM, sizeof(MMUKHEAPSTAT));
        if (!pStat)
        {
            pHeap->Stat.cFailures++;
            AssertMsgFailed(("Failed to allocate heap stat record.\n"));
            RTCritSectLeave(&pHeap->Lock);
            return NULL;
        }
        pStat->Core.Key = (AVLULKEY)enmTag;
        RTAvlULInsert(&pHeap->pStatTree, &pStat->Core);

        pStat->cAllocations++;

        /* register the statistics */
        PUVM pUVM = pHeap->pUVM;
        const char *pszTag = mmGetTagName(enmTag);
        STAMR3RegisterFU(pUVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS,  STAMUNIT_BYTES, "Number of bytes currently allocated.",    "/MM/UkHeap/%s", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cAllocations,   STAMTYPE_U64, STAMVISIBILITY_ALWAYS,  STAMUNIT_CALLS, "Number or MMR3UkHeapAlloc() calls.",      "/MM/UkHeap/%s/cAllocations", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cReallocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS,  STAMUNIT_CALLS, "Number of MMR3UkHeapRealloc() calls.",    "/MM/UkHeap/%s/cReallocations", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cFrees,         STAMTYPE_U64, STAMVISIBILITY_ALWAYS,  STAMUNIT_CALLS, "Number of MMR3UkHeapFree() calls.",       "/MM/UkHeap/%s/cFrees", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cFailures,      STAMTYPE_U64, STAMVISIBILITY_ALWAYS,  STAMUNIT_COUNT, "Number of failures.",                     "/MM/UkHeap/%s/cFailures", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cbAllocated,    STAMTYPE_U64, STAMVISIBILITY_ALWAYS,  STAMUNIT_BYTES, "Total number of bytes allocated.",        "/MM/UkHeap/%s/cbAllocated", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cbFreed,        STAMTYPE_U64, STAMVISIBILITY_ALWAYS,  STAMUNIT_BYTES, "Total number of bytes freed.",            "/MM/UkHeap/%s/cbFreed", pszTag);
    }
#endif

    /*
     * Validate input.
     */
    if (cb == 0)
    {
#ifdef MMUKHEAP_WITH_STATISTICS
        pStat->cFailures++;
        pHeap->Stat.cFailures++;
#endif
        RTCritSectLeave(&pHeap->Lock);
        return NULL;
    }

    /*
     * Allocate heap block.
     */
    cb = RT_ALIGN_Z(cb, MMUKHEAP_SIZE_ALIGNMENT);
    void *pv = NULL;
    PMMUKHEAPSUB pSubHeapPrev = NULL;
    PMMUKHEAPSUB pSubHeap = pHeap->pSubHeapHead;
    while (pSubHeap)
    {
        if (fZero)
            pv = RTHeapSimpleAllocZ(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        else
            pv = RTHeapSimpleAlloc(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        if (pv)
        {
            /* Move the sub-heap with free memory to the head. */
            if (pSubHeapPrev)
            {
                pSubHeapPrev->pNext = pSubHeap->pNext;
                pSubHeap->pNext = pHeap->pSubHeapHead;
                pHeap->pSubHeapHead = pSubHeap;
            }
            break;
        }
        pSubHeapPrev = pSubHeap;
        pSubHeap = pSubHeap->pNext;
    }
    if (RT_UNLIKELY(!pv))
    {
        /*
         * Add another sub-heap.
         */
        pSubHeap = mmR3UkHeapAddSubHeap(pHeap, RT_MAX(RT_ALIGN_Z(cb, PAGE_SIZE) + PAGE_SIZE * 16, _256K));
        if (pSubHeap)
        {
            if (fZero)
                pv = RTHeapSimpleAllocZ(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
            else
                pv = RTHeapSimpleAlloc(pSubHeap->hSimple, cb, MMUKHEAP_SIZE_ALIGNMENT);
        }
        if (RT_UNLIKELY(!pv))
        {
            AssertMsgFailed(("Failed to allocate heap block %d, enmTag=%x(%.4s).\n", cb, enmTag, &enmTag));
#ifdef MMUKHEAP_WITH_STATISTICS
            pStat->cFailures++;
            pHeap->Stat.cFailures++;
#endif
            RTCritSectLeave(&pHeap->Lock);
            return NULL;
        }
    }

    /*
     * Update statistics
     */
#ifdef MMUKHEAP_WITH_STATISTICS
    size_t cbActual = RTHeapSimpleSize(pSubHeap->hSimple, pv);
    pStat->cbAllocated          += cbActual;
    pStat->cbCurAllocated       += cbActual;
    pHeap->Stat.cbAllocated     += cbActual;
    pHeap->Stat.cbCurAllocated  += cbActual;
#endif

    if (pR0Ptr)
        *pR0Ptr = (uintptr_t)pv - (uintptr_t)pSubHeap->pv + pSubHeap->pvR0;
    RTCritSectLeave(&pHeap->Lock);
    return pv;
}
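/*
 * Hedged sketch (not from the original sources): how a public wrapper might
 * forward to the worker above.  The pUVM->mm.s.pUkHeap member path and the
 * wrapper name are assumptions made for illustration.
 */
static void *exampleUkHeapAllocZ(PUVM pUVM, MMTAG enmTag, size_t cb, PRTR0PTR pR0Ptr)
{
    return mmR3UkHeapAlloc(pUVM->mm.s.pUkHeap, enmTag, cb, true /*fZero*/, pR0Ptr);
}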
/** @copydoc RTDBGMODVTDBG::pfnSegmentAdd */
static DECLCALLBACK(int) rtDbgModContainer_SegmentAdd(PRTDBGMODINT pMod, RTUINTPTR uRva, RTUINTPTR cb, const char *pszName, size_t cchName,
                                                      uint32_t fFlags, PRTDBGSEGIDX piSeg)
{
    PRTDBGMODCTN pThis = (PRTDBGMODCTN)pMod->pvDbgPriv;

    /*
     * Input validation (the bits the caller cannot do).
     */
    /* Overlapping segments are not yet supported. Will use flags to deal with it if it becomes necessary. */
    RTUINTPTR   uRvaLast    = uRva + RT_MAX(cb, 1) - 1;
    RTUINTPTR   uRvaLastMax = uRvaLast;
    RTDBGSEGIDX iSeg        = pThis->cSegs;
    while (iSeg-- > 0)
    {
        RTUINTPTR uCurRva     = pThis->paSegs[iSeg].off;
        RTUINTPTR uCurRvaLast = uCurRva + RT_MAX(pThis->paSegs[iSeg].cb, 1) - 1;
        if (   uRva      <= uCurRvaLast
            && uRvaLast  >= uCurRva
            && (   /* HACK ALERT! Allow empty segments to share space (bios/watcom, elf). */
                   (cb != 0 && pThis->paSegs[iSeg].cb != 0)
                || (   cb == 0
                    && uRva != uCurRva
                    && uRva != uCurRvaLast)
                || (    pThis->paSegs[iSeg].cb == 0
                    && uCurRva != uRva
                    && uCurRva != uRvaLast)
               )
           )
            AssertMsgFailedReturn(("uRva=%RTptr uRvaLast=%RTptr (cb=%RTptr) \"%s\";\n"
                                   "uRva=%RTptr uRvaLast=%RTptr (cb=%RTptr) \"%s\" iSeg=%#x\n",
                                   uRva, uRvaLast, cb, pszName,
                                   uCurRva, uCurRvaLast, pThis->paSegs[iSeg].cb, pThis->paSegs[iSeg].pszName, iSeg),
                                  VERR_DBG_SEGMENT_INDEX_CONFLICT);
        if (uRvaLastMax < uCurRvaLast)
            uRvaLastMax = uCurRvaLast;
    }
    /* Strict ordered segment addition at the moment. */
    iSeg = pThis->cSegs;
    AssertMsgReturn(!piSeg || *piSeg == NIL_RTDBGSEGIDX || *piSeg == iSeg,
                    ("iSeg=%#x *piSeg=%#x\n", iSeg, *piSeg),
                    VERR_DBG_INVALID_SEGMENT_INDEX);

    /*
     * Add an entry to the segment table, extending it if necessary.
     */
    if (!(iSeg % 8))
    {
        void *pvSegs = RTMemRealloc(pThis->paSegs, sizeof(RTDBGMODCTNSEGMENT) * (iSeg + 8));
        if (!pvSegs)
            return VERR_NO_MEMORY;
        pThis->paSegs = (PRTDBGMODCTNSEGMENT)pvSegs;
    }

    pThis->paSegs[iSeg].SymAddrTree     = NULL;
    pThis->paSegs[iSeg].LineAddrTree    = NULL;
    pThis->paSegs[iSeg].off             = uRva;
    pThis->paSegs[iSeg].cb              = cb;
    pThis->paSegs[iSeg].fFlags          = fFlags;
    pThis->paSegs[iSeg].pszName         = RTStrCacheEnterN(g_hDbgModStrCache, pszName, cchName);
    if (pThis->paSegs[iSeg].pszName)
    {
        if (piSeg)
            *piSeg = iSeg;
        pThis->cSegs++;
        pThis->cb = uRvaLastMax + 1;
        if (!pThis->cb)
            pThis->cb = RTUINTPTR_MAX;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
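/*
 * Hedged usage sketch (not from the original sources): adding two consecutive
 * segments through the public RTDbgMod API, which ends up in the container
 * callback above.  Segment names and sizes are made up.
 */
static int exampleAddSegments(RTDBGMOD hDbgMod)
{
    int rc = RTDbgModSegmentAdd(hDbgMod, 0 /*uRva*/, 0x1000 /*cb*/, ".text", 0 /*fFlags*/, NULL /*piSeg*/);
    if (RT_SUCCESS(rc))
        rc = RTDbgModSegmentAdd(hDbgMod, 0x1000, 0x800, ".data", 0 /*fFlags*/, NULL /*piSeg*/);
    return rc;
}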
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    PRTFILEAIOCTXINTERNAL pCtxInt = (PRTFILEAIOCTXINTERNAL)hAioCtx;
    struct timespec Timeout;
    struct timespec *pTimeout = NULL;
    uint64_t         StartNanoTS = 0;

    LogFlowFunc(("hAioCtx=%#p cMinReqs=%zu cMillies=%u pahReqs=%#p cReqs=%zu pcbReqs=%#p\n",
                 hAioCtx, cMinReqs, cMillies, pahReqs, cReqs, pcReqs));

    /* Check parameters. */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    rtFileAioCtxDump(pCtxInt);

    int32_t cRequestsWaiting = ASMAtomicReadS32(&pCtxInt->cRequests);

    if (   RT_UNLIKELY(cRequestsWaiting <= 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    if (RT_UNLIKELY(cMinReqs > (uint32_t)cRequestsWaiting))
        return VERR_INVALID_PARAMETER;

    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = (cMillies % 1000) * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    /* Update the waiting list once before we enter the loop. */
    rc = rtFileAioCtxProcessEvents(pCtxInt);

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
#ifdef RT_STRICT
        if (RT_UNLIKELY(!pCtxInt->iFirstFree))
        {
            for (unsigned i = 0; i < pCtxInt->cReqsWaitMax; i++)
                RTAssertMsg2Weak("wait[%d] = %#p\n", i, pCtxInt->apReqs[i]);

            AssertMsgFailed(("No request to wait for. pReqsWaitHead=%#p pReqsWaitTail=%#p\n",
                            pCtxInt->pReqsWaitHead, pCtxInt->pReqsWaitTail));
        }
#endif

        LogFlow(("Waiting for %d requests to complete\n", pCtxInt->iFirstFree));
        rtFileAioCtxDump(pCtxInt);

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        int rcPosix = aio_suspend((const struct aiocb * const *)pCtxInt->apReqs,
                                  pCtxInt->iFirstFree, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (rcPosix < 0)
        {
            LogFlow(("aio_suspend failed %d nent=%u\n", errno, pCtxInt->iFirstFree));
            /* Check that this is an external wakeup event. */
            if (errno == EINTR)
                rc = rtFileAioCtxProcessEvents(pCtxInt);
            else
                rc = RTErrConvertFromErrno(errno);
        }
        else
        {
            /* Requests finished. */
            unsigned iReqCurr = 0;
            unsigned cDone = 0;

            /* Remove completed requests from the waiting list. */
            while (   (iReqCurr < pCtxInt->iFirstFree)
                   && (cDone < cReqs))
            {
                PRTFILEAIOREQINTERNAL pReq = pCtxInt->apReqs[iReqCurr];
                int rcReq = aio_error(&pReq->AioCB);

                if (rcReq != EINPROGRESS)
                {
                    /* Completed; store the return code. */
                    if (rcReq == 0)
                    {
                        pReq->Rc = VINF_SUCCESS;
                        /* Call aio_return() to free resources. */
                        pReq->cbTransfered = aio_return(&pReq->AioCB);
                    }
                    else
                    {
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        pReq->Rc = RTErrConvertFromErrno(errno);
#else
                        pReq->Rc = RTErrConvertFromErrno(rcReq);
#endif
                    }

                    /* Mark the request as finished. */
                    RTFILEAIOREQ_SET_STATE(pReq, COMPLETED);
                    cDone++;

                    /* If there are other entries waiting put the head into the now free entry. */
                    if (pCtxInt->pReqsWaitHead)
                    {
                        PRTFILEAIOREQINTERNAL pReqInsert = pCtxInt->pReqsWaitHead;

                        pCtxInt->pReqsWaitHead = pReqInsert->pNext;
                        if (!pCtxInt->pReqsWaitHead)
                        {
                            /* List is empty now. Clear tail too. */
                            pCtxInt->pReqsWaitTail = NULL;
                        }

                        pReqInsert->iWaitingList = pReq->iWaitingList;
                        pCtxInt->apReqs[pReqInsert->iWaitingList] = pReqInsert;
                        iReqCurr++;
                    }
                    else
                    {
                        /*
                         * Move the last entry into the current position to avoid holes
                         * but only if it is not the last element already.
                         */
                        if (pReq->iWaitingList < pCtxInt->iFirstFree - 1)
                        {
                            pCtxInt->apReqs[pReq->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                            pCtxInt->apReqs[pReq->iWaitingList]->iWaitingList = pReq->iWaitingList;
                        }
                        else
                            pCtxInt->iFirstFree--;

                        pCtxInt->apReqs[pCtxInt->iFirstFree] = NULL;
                    }

                    /* Put the request into the completed list. */
                    pahReqs[cRequestsCompleted++] = pReq;
                    pReq->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
                }
                else
                    iReqCurr++;
            }

            AssertMsg(cDone <= cReqs, ("Overflow cReqs=%zu cMinReqs=%zu cDone=%u\n",
                                       cReqs, cMinReqs, cDone));
            cReqs    -= cDone;
            cMinReqs  = RT_MAX(cMinReqs, cDone) - cDone;
            ASMAtomicSubS32(&pCtxInt->cRequests, cDone);

            AssertMsg(pCtxInt->cRequests >= 0, ("Finished more requests than currently active\n"));

            if (!cMinReqs)
                break;

            if (cMillies != RT_INDEFINITE_WAIT)
            {
                uint64_t TimeDiff;

                /* Recalculate the timeout. */
                TimeDiff = RTTimeNanoTS() - StartNanoTS;
                Timeout.tv_sec  = Timeout.tv_sec  - (TimeDiff / RT_NS_1SEC);
                Timeout.tv_nsec = Timeout.tv_nsec - (TimeDiff % RT_NS_1SEC);
            }

            /* Check for new elements. */
            rc = rtFileAioCtxProcessEvents(pCtxInt);
        }
    }

    *pcReqs = cRequestsCompleted;
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
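/*
 * Hedged usage sketch (not from the original sources): draining completions
 * from a context whose requests were already created and submitted elsewhere.
 * Error handling is reduced to the bare minimum.
 */
static int exampleDrainCompletions(RTFILEAIOCTX hAioCtx, size_t cOutstanding)
{
    while (cOutstanding > 0)
    {
        RTFILEAIOREQ ahReqsDone[16];
        uint32_t     cDone = 0;
        int rc = RTFileAioCtxWait(hAioCtx, 1 /*cMinReqs*/, RT_INDEFINITE_WAIT,
                                  ahReqsDone, RT_ELEMENTS(ahReqsDone), &cDone);
        if (RT_FAILURE(rc))
            return rc;
        /* Check RTFileAioReqGetRC() on each of the cDone requests in ahReqsDone
           and recycle or destroy them here. */
        cOutstanding -= cDone;
    }
    return VINF_SUCCESS;
}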
int main(int argc, char **argv)
{
    RTR3InitExe(argc, &argv, 0);

    /*
     * Parse arguments.
     */
    static const RTGETOPTDEF    s_aOptions[] =
    {
        { "--iterations",     'i', RTGETOPT_REQ_UINT32 },
        { "--num-pages",      'n', RTGETOPT_REQ_UINT32 },
        { "--page-at-a-time", 'c', RTGETOPT_REQ_UINT32 },
        { "--page-file",      'f', RTGETOPT_REQ_STRING },
        { "--offset",         'o', RTGETOPT_REQ_UINT64 },
    };

    const char     *pszPageFile = NULL;
    uint64_t        offPageFile = 0;
    uint32_t        cIterations = 1;
    uint32_t        cPagesAtATime = 1;
    RTGETOPTUNION   Val;
    RTGETOPTSTATE   State;
    int rc = RTGetOptInit(&State, argc, argv, &s_aOptions[0], RT_ELEMENTS(s_aOptions), 1, 0);
    AssertRCReturn(rc, 1);

    while ((rc = RTGetOpt(&State, &Val)))
    {
        switch (rc)
        {
            case 'n':
                g_cPages = Val.u32;
                if (g_cPages * PAGE_SIZE * 4 / (PAGE_SIZE * 4) != g_cPages)
                    return Error("The specified page count is too high: %#x (%#llx bytes)\n", g_cPages, (uint64_t)g_cPages * PAGE_SHIFT);
                if (g_cPages < 1)
                    return Error("The specified page count is too low: %#x\n", g_cPages);
                break;

            case 'i':
                cIterations = Val.u32;
                if (cIterations < 1)
                    return Error("The number of iterations must be 1 or higher\n");
                break;

            case 'c':
                cPagesAtATime = Val.u32;
                if (cPagesAtATime < 1 || cPagesAtATime > 10240)
                    return Error("The specified pages-at-a-time count is out of range: %#x\n", cPagesAtATime);
                break;

            case 'f':
                pszPageFile = Val.psz;
                break;

            case 'o':
                offPageFile = Val.u64;
                break;

            case 'O':
                offPageFile = Val.u64 * PAGE_SIZE;
                break;

            case 'h':
                RTPrintf("syntax: tstCompressionBenchmark [options]\n"
                         "\n"
                         "Options:\n"
                         "  -h, --help\n"
                         "    Show this help page\n"
                         "  -i, --iterations <num>\n"
                         "    The number of iterations.\n"
                         "  -n, --num-pages <pages>\n"
                         "    The number of pages.\n"
                         "  -c, --pages-at-a-time <pages>\n"
                         "    Number of pages at a time.\n"
                         "  -f, --page-file <filename>\n"
                         "    File or device to read the page from. The default\n"
                         "    is to generate some garbage.\n"
                         "  -o, --offset <file-offset>\n"
                         "    Offset into the page file to start reading at.\n");
                return 0;

            case 'V':
                RTPrintf("%sr%s\n", RTBldCfgVersion(), RTBldCfgRevisionStr());
                return 0;

            default:
                return RTGetOptPrintError(rc, &Val);
        }
    }

    g_cbPages = g_cPages * PAGE_SIZE;
    uint64_t cbTotal = (uint64_t)g_cPages * PAGE_SIZE * cIterations;
    uint64_t cbTotalKB = cbTotal / _1K;
    if (cbTotal / cIterations != g_cbPages)
        return Error("cPages * cIterations -> overflow\n");

    /*
     * Gather the test memory.
     */
    if (pszPageFile)
    {
        size_t cbFile;
        rc = RTFileReadAllEx(pszPageFile, offPageFile, g_cbPages, RTFILE_RDALL_O_DENY_NONE, (void **)&g_pabSrc, &cbFile);
        if (RT_FAILURE(rc))
            return Error("Error reading %zu bytes from %s at %llu: %Rrc\n", g_cbPages, pszPageFile, offPageFile, rc);
        if (cbFile != g_cbPages)
            return Error("Error reading %zu bytes from %s at %llu: got %zu bytes\n", g_cbPages, pszPageFile, offPageFile, cbFile);
    }
    else
    {
        g_pabSrc = (uint8_t *)RTMemAlloc(g_cbPages);
        if (g_pabSrc)
        {
            /* Just fill it with something - warn about the low quality of the something. */
            RTPrintf("tstCompressionBenchmark: WARNING! No input file was specified so the source\n"
                     "buffer will be filled with generated data of questionable quality.\n");
#ifdef RT_OS_LINUX
            RTPrintf("To get real RAM on linux: sudo dd if=/dev/mem ... \n");
#endif
            uint8_t *pb    = g_pabSrc;
            uint8_t *pbEnd = &g_pabSrc[g_cbPages];
            for (; pb != pbEnd; pb += 16)
            {
                char szTmp[17];
                RTStrPrintf(szTmp, sizeof(szTmp), "aaaa%08Xzzzz", (uint32_t)(uintptr_t)pb);
                memcpy(pb, szTmp, 16);
            }
        }
    }

    g_pabDecompr = (uint8_t *)RTMemAlloc(g_cbPages);
    g_cbComprAlloc = RT_MAX(g_cbPages * 2, 256 * PAGE_SIZE);
    g_pabCompr   = (uint8_t *)RTMemAlloc(g_cbComprAlloc);
    if (!g_pabSrc || !g_pabDecompr || !g_pabCompr)
        return Error("failed to allocate memory buffers (g_cPages=%#x)\n", g_cPages);

    /*
     * Double loop compressing and uncompressing the data, where the outer does
     * the specified number of iterations while the inner applies the different
     * compression algorithms.
     */
    struct
    {
        /** The time spent decompressing. */
        uint64_t    cNanoDecompr;
        /** The time spent compressing. */
        uint64_t    cNanoCompr;
        /** The size of the compressed data. */
        uint64_t    cbCompr;
        /** First error. */
        int         rc;
        /** The compression style: block or stream. */
        bool        fBlock;
        /** Compression type.  */
        RTZIPTYPE   enmType;
        /** Compression level.  */
        RTZIPLEVEL  enmLevel;
        /** Method name. */
        const char *pszName;
    } aTests[] =
    {
        { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_STORE, RTZIPLEVEL_DEFAULT, "RTZip/Store"      },
        { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_LZF,   RTZIPLEVEL_DEFAULT, "RTZip/LZF"        },
/*      { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_ZLIB,  RTZIPLEVEL_DEFAULT, "RTZip/zlib"       }, - slow plus it randomly hits VERR_GENERAL_FAILURE atm. */
        { 0, 0, 0, VINF_SUCCESS, true,  RTZIPTYPE_STORE, RTZIPLEVEL_DEFAULT, "RTZipBlock/Store" },
        { 0, 0, 0, VINF_SUCCESS, true,  RTZIPTYPE_LZF,   RTZIPLEVEL_DEFAULT, "RTZipBlock/LZF"   },
        { 0, 0, 0, VINF_SUCCESS, true,  RTZIPTYPE_LZJB,  RTZIPLEVEL_DEFAULT, "RTZipBlock/LZJB"  },
        { 0, 0, 0, VINF_SUCCESS, true,  RTZIPTYPE_LZO,   RTZIPLEVEL_DEFAULT, "RTZipBlock/LZO"   },
    };
    RTPrintf("tstCompressionBenchmark: TESTING..");
    for (uint32_t i = 0; i < cIterations; i++)
    {
        for (uint32_t j = 0; j < RT_ELEMENTS(aTests); j++)
        {
            if (RT_FAILURE(aTests[j].rc))
                continue;
            memset(g_pabCompr,   0xaa, g_cbComprAlloc);
            memset(g_pabDecompr, 0xcc, g_cbPages);
            g_cbCompr = 0;
            g_offComprIn = 0;
            RTPrintf("."); RTStrmFlush(g_pStdOut);

            /*
             * Compress it.
             */
            uint64_t NanoTS = RTTimeNanoTS();
            if (aTests[j].fBlock)
            {
                size_t          cbLeft    = g_cbComprAlloc;
                uint8_t const  *pbSrcPage = g_pabSrc;
                uint8_t        *pbDstPage = g_pabCompr;
                for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime)
                {
                    AssertBreakStmt(cbLeft > PAGE_SIZE * 4, aTests[j].rc = rc = VERR_BUFFER_OVERFLOW);
                    uint32_t *pcb = (uint32_t *)pbDstPage;
                    pbDstPage    += sizeof(uint32_t);
                    cbLeft       -= sizeof(uint32_t);
                    size_t  cbSrc = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE;
                    size_t  cbDst;
                    rc = RTZipBlockCompress(aTests[j].enmType, aTests[j].enmLevel, 0 /*fFlags*/,
                                            pbSrcPage, cbSrc,
                                            pbDstPage, cbLeft, &cbDst);
                    if (RT_FAILURE(rc))
                    {
                        Error("RTZipBlockCompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
                        aTests[j].rc = rc;
                        break;
                    }
                    *pcb       = (uint32_t)cbDst;
                    cbLeft    -= cbDst;
                    pbDstPage += cbDst;
                    pbSrcPage += cbSrc;
                }
                if (RT_FAILURE(rc))
                    continue;
                g_cbCompr = pbDstPage - g_pabCompr;
            }
            else
            {
                PRTZIPCOMP pZipComp;
                rc = RTZipCompCreate(&pZipComp, NULL, ComprOutCallback, aTests[j].enmType, aTests[j].enmLevel);
                if (RT_FAILURE(rc))
                {
                    Error("Failed to create the compressor for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
                    aTests[j].rc = rc;
                    continue;
                }

                uint8_t const  *pbSrcPage = g_pabSrc;
                for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime)
                {
                    size_t cb = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE;
                    rc = RTZipCompress(pZipComp, pbSrcPage, cb);
                    if (RT_FAILURE(rc))
                    {
                        Error("RTZipCompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
                        aTests[j].rc = rc;
                        break;
                    }
                    pbSrcPage += cb;
                }
                if (RT_FAILURE(rc))
                    continue;
                rc = RTZipCompFinish(pZipComp);
                if (RT_FAILURE(rc))
                {
                    Error("RTZipCompFinish failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
                    aTests[j].rc = rc;
                    break;
                }
                RTZipCompDestroy(pZipComp);
            }
            NanoTS = RTTimeNanoTS() - NanoTS;
            aTests[j].cbCompr    += g_cbCompr;
            aTests[j].cNanoCompr += NanoTS;

            /*
             * Decompress it.
             */
            NanoTS = RTTimeNanoTS();
            if (aTests[j].fBlock)
            {
                uint8_t const  *pbSrcPage = g_pabCompr;
                size_t          cbLeft    = g_cbCompr;
                uint8_t        *pbDstPage = g_pabDecompr;
                for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime)
                {
                    size_t   cbDst = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE;
                    size_t   cbSrc = *(uint32_t *)pbSrcPage;
                    pbSrcPage     += sizeof(uint32_t);
                    cbLeft        -= sizeof(uint32_t);
                    rc = RTZipBlockDecompress(aTests[j].enmType, 0 /*fFlags*/,
                                              pbSrcPage, cbSrc, &cbSrc,
                                              pbDstPage, cbDst, &cbDst);
                    if (RT_FAILURE(rc))
                    {
                        Error("RTZipBlockDecompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
                        aTests[j].rc = rc;
                        break;
                    }
                    pbDstPage += cbDst;
                    cbLeft    -= cbSrc;
                    pbSrcPage += cbSrc;
                }
                if (RT_FAILURE(rc))
                    continue;
            }
            else
            {
                PRTZIPDECOMP pZipDecomp;
                rc = RTZipDecompCreate(&pZipDecomp, NULL, DecomprInCallback);
                if (RT_FAILURE(rc))
                {
                    Error("Failed to create the decompressor for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
                    aTests[j].rc = rc;
                    continue;
                }

                uint8_t *pbDstPage = g_pabDecompr;
                for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime)
                {
                    size_t cb = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE;
                    rc = RTZipDecompress(pZipDecomp, pbDstPage, cb, NULL);
                    if (RT_FAILURE(rc))
                    {
                        Error("RTZipDecompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
                        aTests[j].rc = rc;
                        break;
                    }
                    pbDstPage += cb;
                }
                RTZipDecompDestroy(pZipDecomp);
                if (RT_FAILURE(rc))
                    continue;
            }
            NanoTS = RTTimeNanoTS() - NanoTS;
            aTests[j].cNanoDecompr += NanoTS;

            if (memcmp(g_pabDecompr, g_pabSrc, g_cbPages))
            {
                Error("The compressed data doesn't match the source for '%s' (%#u)\n", aTests[j].pszName, j);
                aTests[j].rc = VERR_BAD_EXE_FORMAT;
                continue;
            }
        }
    }
    if (RT_SUCCESS(rc))
        RTPrintf("\n");

    /*
     * Report the results.
     */
    rc = 0;
    RTPrintf("tstCompressionBenchmark: BEGIN RESULTS\n");
    RTPrintf("%-20s           Compression                                             Decompression\n", "");
    RTPrintf("%-20s        In             Out      Ratio         Size                In             Out\n", "Method");
    RTPrintf("%.20s-----------------------------------------------------------------------------------------\n", "---------------------------------------------");
    for (uint32_t j = 0; j < RT_ELEMENTS(aTests); j++)
    {
        if (RT_SUCCESS(aTests[j].rc))
        {
            unsigned uComprSpeedIn    = (unsigned)(cbTotalKB         / (long double)aTests[j].cNanoCompr   * 1000000000.0);
            unsigned uComprSpeedOut   = (unsigned)(aTests[j].cbCompr / (long double)aTests[j].cNanoCompr   * 1000000000.0 / 1024);
            unsigned uRatio           = (unsigned)(aTests[j].cbCompr / cIterations * 100 / g_cbPages);
            unsigned uDecomprSpeedIn  = (unsigned)(aTests[j].cbCompr / (long double)aTests[j].cNanoDecompr * 1000000000.0 / 1024);
            unsigned uDecomprSpeedOut = (unsigned)(cbTotalKB         / (long double)aTests[j].cNanoDecompr * 1000000000.0);
            RTPrintf("%-20s %'9u KB/s  %'9u KB/s  %3u%%  %'11llu bytes   %'9u KB/s  %'9u KB/s",
                     aTests[j].pszName,
                     uComprSpeedIn,   uComprSpeedOut, uRatio, aTests[j].cbCompr / cIterations,
                     uDecomprSpeedIn, uDecomprSpeedOut);
#if 0
            RTPrintf("  [%'14llu / %'14llu ns]\n",
                     aTests[j].cNanoCompr / cIterations,
                     aTests[j].cNanoDecompr / cIterations);
#else
            RTPrintf("\n");
#endif
        }
        else
        {
            RTPrintf("%-20s: %Rrc\n", aTests[j].pszName, aTests[j].rc);
            rc = 1;
        }
    }
    if (pszPageFile)
        RTPrintf("Input: %'10zu pages from '%s' starting at offset %'lld (%#llx)\n"
                 "                                                           %'11zu bytes\n",
                 g_cPages, pszPageFile, offPageFile, offPageFile, g_cbPages);
    else
        RTPrintf("Input: %'10zu pages of generated rubbish               %'11zu bytes\n",
                 g_cPages, g_cbPages);

    /*
     * Count zero pages in the data set.
     */
    size_t cZeroPages = 0;
    for (size_t iPage = 0; iPage < g_cPages; iPage++)
    {
        if (!ASMMemIsAllU32(&g_pabSrc[iPage * PAGE_SIZE], PAGE_SIZE, 0))
            cZeroPages++;
    }
    RTPrintf("       %'10zu zero pages (%u %%)\n", cZeroPages, cZeroPages * 100 / g_cPages);

    /*
     * A little extension to the test, benchmark relevant CRCs.
     */
    RTPrintf("\n"
             "tstCompressionBenchmark: Hash/CRC - All In One\n");
    tstBenchmarkCRCsAllInOne(g_pabSrc, g_cbPages);

    RTPrintf("\n"
             "tstCompressionBenchmark: Hash/CRC - Page by Page\n");
    tstBenchmarkCRCsPageByPage(g_pabSrc, g_cbPages);

    RTPrintf("\n"
             "tstCompressionBenchmark: Hash/CRC - Zero Page Digest\n");
    static uint8_t s_abZeroPg[PAGE_SIZE];
    RT_ZERO(s_abZeroPg);
    tstBenchmarkCRCsAllInOne(s_abZeroPg, PAGE_SIZE);

    RTPrintf("\n"
             "tstCompressionBenchmark: Hash/CRC - Zero Half Page Digest\n");
    tstBenchmarkCRCsAllInOne(s_abZeroPg, PAGE_SIZE / 2);

    RTPrintf("tstCompressionBenchmark: END RESULTS\n");

    return rc;
}
/**
 * Method 1 - Block whenever possible, and when lagging behind
 * switch to spinning for 10-30ms with occasional blocking until
 * the lag has been eliminated.
 */
static DECLCALLBACK(int) vmR3HaltMethod1Halt(PUVMCPU pUVCpu, const uint32_t fMask, uint64_t u64Now)
{
    PUVM    pUVM    = pUVCpu->pUVM;
    PVMCPU  pVCpu   = pUVCpu->pVCpu;
    PVM     pVM     = pUVCpu->pVM;

    /*
     * To simplify things, we decide up-front whether we should switch to spinning or
     * not. This makes some ASSUMPTIONS about the cause of the spinning (PIT/RTC/PCNet)
     * and that it will generate interrupts or other events that will cause us to exit
     * the halt loop.
     */
    bool fBlockOnce = false;
    bool fSpinning = false;
    uint32_t u32CatchUpPct = TMVirtualSyncGetCatchUpPct(pVM);
    if (u32CatchUpPct /* non-zero if catching up */)
    {
        if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StopSpinningCfg;
            if (fSpinning)
            {
                uint64_t u64Lag = TMVirtualSyncGetLag(pVM);
                fBlockOnce = u64Now - pUVCpu->vm.s.Halt.Method12.u64LastBlockTS
                           > RT_MAX(pUVM->vm.s.Halt.Method12.u32MinBlockIntervalCfg,
                                    RT_MIN(u64Lag / pUVM->vm.s.Halt.Method12.u32LagBlockIntervalDivisorCfg,
                                           pUVM->vm.s.Halt.Method12.u32MaxBlockIntervalCfg));
            }
            else
            {
                //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
            }
        }
        else
        {
            fSpinning = TMVirtualSyncGetLag(pVM) >= pUVM->vm.s.Halt.Method12.u32StartSpinningCfg;
            if (fSpinning)
                pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = u64Now;
        }
    }
    else if (pUVCpu->vm.s.Halt.Method12.u64StartSpinTS)
    {
        //RTLogRelPrintf("Stopped spinning (%u ms)\n", (u64Now - pUVCpu->vm.s.Halt.Method12.u64StartSpinTS) / 1000000);
        pUVCpu->vm.s.Halt.Method12.u64StartSpinTS = 0;
    }

    /*
     * Halt loop.
     */
    int rc = VINF_SUCCESS;
    ASMAtomicWriteBool(&pUVCpu->vm.s.fWait, true);
    unsigned cLoops = 0;
    for (;; cLoops++)
    {
        /*
         * Work the timers and check if we can exit.
         */
        uint64_t const u64StartTimers   = RTTimeNanoTS();
        TMR3TimerQueuesDo(pVM);
        uint64_t const cNsElapsedTimers = RTTimeNanoTS() - u64StartTimers;
        STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltTimers, cNsElapsedTimers);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Estimate time left to the next event.
         */
        uint64_t u64NanoTS;
        TMTimerPollGIP(pVM, pVCpu, &u64NanoTS);
        if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
            ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
            break;

        /*
         * Block if we're not spinning and the interval isn't all that small.
         */
        if (    (   !fSpinning
                 || fBlockOnce)
#if 1 /* DEBUGGING STUFF - REMOVE LATER */
            &&  u64NanoTS >= 100000) /* 0.100 ms */
#else
            &&  u64NanoTS >= 250000) /* 0.250 ms */
#endif
        {
            const uint64_t Start = pUVCpu->vm.s.Halt.Method12.u64LastBlockTS = RTTimeNanoTS();
            VMMR3YieldStop(pVM);

            uint32_t cMilliSecs = RT_MIN(u64NanoTS / 1000000, 15);
            if (cMilliSecs <= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg)
                cMilliSecs = 1;
            else
                cMilliSecs -= pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg;

            //RTLogRelPrintf("u64NanoTS=%RI64 cLoops=%3d sleep %02dms (%7RU64) ", u64NanoTS, cLoops, cMilliSecs, u64NanoTS);
            uint64_t const u64StartSchedHalt   = RTTimeNanoTS();
            rc = RTSemEventWait(pUVCpu->vm.s.EventSemWait, cMilliSecs);
            uint64_t const cNsElapsedSchedHalt = RTTimeNanoTS() - u64StartSchedHalt;
            STAM_REL_PROFILE_ADD_PERIOD(&pUVCpu->vm.s.StatHaltBlock, cNsElapsedSchedHalt);

            if (rc == VERR_TIMEOUT)
                rc = VINF_SUCCESS;
            else if (RT_FAILURE(rc))
            {
                rc = vmR3FatalWaitError(pUVCpu, "RTSemEventWait->%Rrc\n", rc);
                break;
            }

            /*
             * Calc the statistics.
             * Update averages every 16th time, and flush parts of the history every 64th time.
             */
            const uint64_t Elapsed = RTTimeNanoTS() - Start;
            pUVCpu->vm.s.Halt.Method12.cNSBlocked += Elapsed;
            if (Elapsed > u64NanoTS)
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong += Elapsed - u64NanoTS;
            pUVCpu->vm.s.Halt.Method12.cBlocks++;
            if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0xf))
            {
                pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong / pUVCpu->vm.s.Halt.Method12.cBlocks;
                if (!(pUVCpu->vm.s.Halt.Method12.cBlocks & 0x3f))
                {
                    pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLong = pUVCpu->vm.s.Halt.Method12.cNSBlockedTooLongAvg * 0x40;
                    pUVCpu->vm.s.Halt.Method12.cBlocks = 0x40;
                }
            }
            //RTLogRelPrintf(" -> %7RU64 ns / %7RI64 ns delta%s\n", Elapsed, Elapsed - u64NanoTS, fBlockOnce ? " (block once)" : "");

            /*
             * Clear the block once flag if we actually blocked.
             */
            if (    fBlockOnce
                &&  Elapsed > 100000 /* 0.1 ms */)
                fBlockOnce = false;
        }
    }
    //if (fSpinning) RTLogRelPrintf("spun for %RU64 ns %u loops; lag=%RU64 pct=%d\n", RTTimeNanoTS() - u64Now, cLoops, TMVirtualSyncGetLag(pVM), u32CatchUpPct);

    ASMAtomicUoWriteBool(&pUVCpu->vm.s.fWait, false);
    return rc;
}
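
/*
 * Illustrative sketch (not part of the original sources): the block-once
 * decision above clamps "lag / divisor" between the configured minimum and
 * maximum block intervals.  The helper below reproduces that arithmetic on
 * plain integers so the clamping is easy to check in isolation; the
 * parameter names are hypothetical stand-ins for the Halt.Method12 fields.
 */
#include <stdint.h>

static uint64_t calcBlockIntervalNs(uint64_t cNsLag, uint32_t cNsMinCfg, uint32_t cNsMaxCfg, uint32_t uLagDivisorCfg)
{
    uint64_t cNsFromLag = cNsLag / uLagDivisorCfg;   /* scale the lag down */
    if (cNsFromLag > cNsMaxCfg)                      /* cap at the configured maximum */
        cNsFromLag = cNsMaxCfg;
    if (cNsFromLag < cNsMinCfg)                      /* floor at the configured minimum */
        cNsFromLag = cNsMinCfg;
    /* e.g. lag=4ms, div=4, min=1ms, max=2ms -> 1ms; lag=32ms -> capped at 2ms. */
    return cNsFromLag;
}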
Example #17
/**
 * Device I/O Control entry point.
 *
 * @param   pFilp       Associated file pointer.
 * @param   uCmd        The function specified to ioctl().
 * @param   ulArg       The argument specified to ioctl().
 * @param   pSession    The session instance.
 */
static int VBoxDrvLinuxIOCtlSlow(struct file *pFilp, unsigned int uCmd, unsigned long ulArg, PSUPDRVSESSION pSession)
{
    int                 rc;
    SUPREQHDR           Hdr;
    PSUPREQHDR          pHdr;
    uint32_t            cbBuf;

    Log6(("VBoxDrvLinuxIOCtl: pFilp=%p uCmd=%#x ulArg=%p pid=%d/%d\n", pFilp, uCmd, (void *)ulArg, RTProcSelf(), current->pid));

    /*
     * Read the header.
     */
    if (RT_UNLIKELY(copy_from_user(&Hdr, (void *)ulArg, sizeof(Hdr))))
    {
        Log(("VBoxDrvLinuxIOCtl: copy_from_user(,%#lx,) failed; uCmd=%#x\n", ulArg, uCmd));
        return -EFAULT;
    }
    if (RT_UNLIKELY((Hdr.fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC))
    {
        Log(("VBoxDrvLinuxIOCtl: bad header magic %#x; uCmd=%#x\n", Hdr.fFlags & SUPREQHDR_FLAGS_MAGIC_MASK, uCmd));
        return -EINVAL;
    }

    /*
     * Buffer the request.
     */
    cbBuf = RT_MAX(Hdr.cbIn, Hdr.cbOut);
    if (RT_UNLIKELY(cbBuf > _1M*16))
    {
        Log(("VBoxDrvLinuxIOCtl: too big cbBuf=%#x; uCmd=%#x\n", cbBuf, uCmd));
        return -E2BIG;
    }
    if (RT_UNLIKELY(_IOC_SIZE(uCmd) ? cbBuf != _IOC_SIZE(uCmd) : Hdr.cbIn < sizeof(Hdr)))
    {
        Log(("VBoxDrvLinuxIOCtl: bad ioctl cbBuf=%#x _IOC_SIZE=%#x; uCmd=%#x\n", cbBuf, _IOC_SIZE(uCmd), uCmd));
        return -EINVAL;
    }
    pHdr = RTMemAlloc(cbBuf);
    if (RT_UNLIKELY(!pHdr))
    {
        OSDBGPRINT(("VBoxDrvLinuxIOCtl: failed to allocate buffer of %d bytes for uCmd=%#x\n", cbBuf, uCmd));
        return -ENOMEM;
    }
    if (RT_UNLIKELY(copy_from_user(pHdr, (void *)ulArg, Hdr.cbIn)))
    {
        Log(("VBoxDrvLinuxIOCtl: copy_from_user(,%#lx, %#x) failed; uCmd=%#x\n", ulArg, Hdr.cbIn, uCmd));
        RTMemFree(pHdr);
        return -EFAULT;
    }
    if (Hdr.cbIn < cbBuf)
        RT_BZERO((uint8_t *)pHdr + Hdr.cbIn, cbBuf - Hdr.cbIn);

    /*
     * Process the IOCtl.
     */
    stac();
    rc = supdrvIOCtl(uCmd, &g_DevExt, pSession, pHdr, cbBuf);
    clac();

    /*
     * Copy ioctl data and output buffer back to user space.
     */
    if (RT_LIKELY(!rc))
    {
        uint32_t cbOut = pHdr->cbOut;
        if (RT_UNLIKELY(cbOut > cbBuf))
        {
            OSDBGPRINT(("VBoxDrvLinuxIOCtl: too much output! %#x > %#x; uCmd=%#x!\n", cbOut, cbBuf, uCmd));
            cbOut = cbBuf;
        }
        if (RT_UNLIKELY(copy_to_user((void *)ulArg, pHdr, cbOut)))
        {
            /* this is really bad! */
            OSDBGPRINT(("VBoxDrvLinuxIOCtl: copy_to_user(%#lx,,%#x); uCmd=%#x!\n", ulArg, cbOut, uCmd));
            rc = -EFAULT;
        }
    }
    else
    {
        Log(("VBoxDrvLinuxIOCtl: pFilp=%p uCmd=%#x ulArg=%p failed, rc=%d\n", pFilp, uCmd, (void *)ulArg, rc));
        rc = -EINVAL;
    }
    RTMemFree(pHdr);

    Log6(("VBoxDrvLinuxIOCtl: returns %d (pid=%d/%d)\n", rc, RTProcSelf(), current->pid));
    return rc;
}
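
/*
 * Hypothetical user-space sketch (not from the VirtualBox sources): it mirrors
 * the contract the handler above enforces -- the first bytes of the ioctl
 * buffer are a header whose fFlags carry a magic value and whose cbIn/cbOut
 * give the input and output sizes, with max(cbIn, cbOut) matching the size
 * encoded in the ioctl command.  All names (MYREQHDR, MYDRV_IOCTL_PING,
 * MYREQHDR_MAGIC, /dev/mydrv) are made up for illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

typedef struct MYREQHDR
{
    uint32_t cbIn;      /* size of the input part, including this header */
    uint32_t cbOut;     /* size of the output part, including this header */
    uint32_t fFlags;    /* magic + flag bits, validated by the driver */
    int32_t  rc;        /* status returned by the driver */
} MYREQHDR;

typedef struct MYDRVPINGREQ
{
    MYREQHDR Hdr;
    uint64_t u64Payload;
} MYDRVPINGREQ;

#define MYREQHDR_MAGIC      UINT32_C(0x42000042)
#define MYDRV_IOCTL_PING    _IOWR('V', 1, MYDRVPINGREQ)

int main(void)
{
    int fd = open("/dev/mydrv", O_RDWR);
    if (fd < 0)
        return 1;

    MYDRVPINGREQ Req;
    memset(&Req, 0, sizeof(Req));
    Req.Hdr.cbIn   = sizeof(Req);       /* both directions cover the whole struct here */
    Req.Hdr.cbOut  = sizeof(Req);
    Req.Hdr.fFlags = MYREQHDR_MAGIC;
    Req.u64Payload = 42;

    int rc = ioctl(fd, MYDRV_IOCTL_PING, &Req);
    if (rc == 0)
        printf("driver rc=%d payload=%llu\n", Req.Hdr.rc, (unsigned long long)Req.u64Payload);

    close(fd);
    return rc == 0 ? 0 : 1;
}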
static void supR3HardenedParseModule(PSUPHNTIMPDLL pDll)
{
    /*
     * Locate the PE header, do some basic validations.
     */
    IMAGE_DOS_HEADER const *pMzHdr = (IMAGE_DOS_HEADER const *)pDll->pbImageBase;
    uint32_t           offNtHdrs = 0;
    PIMAGE_NT_HEADERS  pNtHdrs;
    if (pMzHdr->e_magic == IMAGE_DOS_SIGNATURE)
    {
        offNtHdrs = pMzHdr->e_lfanew;
        if (offNtHdrs > _2K)
            SUPHNTIMP_ERROR(false, 2, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_MODULE_NOT_FOUND,
                            "%ls: e_lfanew=%#x, expected a lower value", pDll->pwszName, offNtHdrs);
    }
    pDll->pNtHdrs = pNtHdrs = (PIMAGE_NT_HEADERS)&pDll->pbImageBase[offNtHdrs];

    if (pNtHdrs->Signature != IMAGE_NT_SIGNATURE)
        SUPHNTIMP_ERROR(false, 3, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                        "%ls: Invalid PE signature: %#x", pDll->pwszName, pNtHdrs->Signature);
    if (pNtHdrs->FileHeader.SizeOfOptionalHeader != sizeof(pNtHdrs->OptionalHeader))
        SUPHNTIMP_ERROR(false, 4, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                        "%ls: Unexpected optional header size: %#x", pDll->pwszName, pNtHdrs->FileHeader.SizeOfOptionalHeader);
    if (pNtHdrs->OptionalHeader.Magic != RT_CONCAT3(IMAGE_NT_OPTIONAL_HDR,ARCH_BITS,_MAGIC))
        SUPHNTIMP_ERROR(false, 5, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                        "%ls: Unexpected optional header magic: %#x", pDll->pwszName, pNtHdrs->OptionalHeader.Magic);
    if (pNtHdrs->OptionalHeader.NumberOfRvaAndSizes != IMAGE_NUMBEROF_DIRECTORY_ENTRIES)
        SUPHNTIMP_ERROR(false, 6, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                        "%ls: Unexpected number of RVA and sizes: %#x", pDll->pwszName, pNtHdrs->OptionalHeader.NumberOfRvaAndSizes);

    pDll->offNtHdrs      = offNtHdrs;
    pDll->offEndSectHdrs = offNtHdrs
                         + sizeof(*pNtHdrs)
                         + pNtHdrs->FileHeader.NumberOfSections * sizeof(IMAGE_SECTION_HEADER);
    pDll->cbImage        = pNtHdrs->OptionalHeader.SizeOfImage;

    /*
     * Find the export directory.
     */
    IMAGE_DATA_DIRECTORY ExpDir = pNtHdrs->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
    if (   ExpDir.Size < sizeof(IMAGE_EXPORT_DIRECTORY)
        || ExpDir.VirtualAddress < pDll->offEndSectHdrs
        || ExpDir.VirtualAddress >= pNtHdrs->OptionalHeader.SizeOfImage
        || ExpDir.VirtualAddress + ExpDir.Size > pNtHdrs->OptionalHeader.SizeOfImage)
        SUPHNTIMP_ERROR(false, 7, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                        "%ls: Missing or invalid export directory: %#lx LB %#x", pDll->pwszName, ExpDir.VirtualAddress, ExpDir.Size);
    pDll->offExportDir = ExpDir.VirtualAddress;
    pDll->cbExportDir  = ExpDir.Size;

    IMAGE_EXPORT_DIRECTORY const *pExpDir = (IMAGE_EXPORT_DIRECTORY const *)&pDll->pbImageBase[ExpDir.VirtualAddress];

    if (   pExpDir->NumberOfFunctions >= _1M
        || pExpDir->NumberOfFunctions <  1
        || pExpDir->NumberOfNames     >= _1M
        || pExpDir->NumberOfNames     <  1)
        SUPHNTIMP_ERROR(false, 8, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                        "%ls: NumberOfNames or/and NumberOfFunctions are outside the expected range: nof=%#x non=%#x\n",
                        pDll->pwszName, pExpDir->NumberOfFunctions, pExpDir->NumberOfNames);
    pDll->cNamedExports = pExpDir->NumberOfNames;
    pDll->cExports      = RT_MAX(pExpDir->NumberOfNames,  pExpDir->NumberOfFunctions);

    if (   pExpDir->AddressOfFunctions < pDll->offEndSectHdrs
        || pExpDir->AddressOfFunctions >= pNtHdrs->OptionalHeader.SizeOfImage
        || pExpDir->AddressOfFunctions + pDll->cExports * sizeof(uint32_t) > pNtHdrs->OptionalHeader.SizeOfImage)
           SUPHNTIMP_ERROR(false, 9, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                           "%ls: Bad AddressOfFunctions: %#x\n", pDll->pwszName, pExpDir->AddressOfFunctions);
    pDll->paoffExports = (uint32_t const *)&pDll->pbImageBase[pExpDir->AddressOfFunctions];

    if (   pExpDir->AddressOfNames < pDll->offEndSectHdrs
        || pExpDir->AddressOfNames >= pNtHdrs->OptionalHeader.SizeOfImage
        || pExpDir->AddressOfNames + pExpDir->NumberOfNames * sizeof(uint32_t) > pNtHdrs->OptionalHeader.SizeOfImage)
           SUPHNTIMP_ERROR(false, 10, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                           "%ls: Bad AddressOfNames: %#x\n", pDll->pwszName, pExpDir->AddressOfNames);
    pDll->paoffNamedExports = (uint32_t const *)&pDll->pbImageBase[pExpDir->AddressOfNames];

    if (   pExpDir->AddressOfNameOrdinals < pDll->offEndSectHdrs
        || pExpDir->AddressOfNameOrdinals >= pNtHdrs->OptionalHeader.SizeOfImage
        || pExpDir->AddressOfNameOrdinals + pExpDir->NumberOfNames * sizeof(uint32_t) > pNtHdrs->OptionalHeader.SizeOfImage)
           SUPHNTIMP_ERROR(false, 11, "supR3HardenedParseModule", kSupInitOp_Misc, VERR_INVALID_EXE_SIGNATURE,
                           "%ls: Bad AddressOfNameOrdinals: %#x\n", pDll->pwszName, pExpDir->AddressOfNameOrdinals);
    pDll->pau16NameOrdinals = (uint16_t const *)&pDll->pbImageBase[pExpDir->AddressOfNameOrdinals];
}
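
/*
 * Illustrative sketch (not taken from the hardening code itself): given the
 * three export-directory tables the parser above records -- the name RVAs,
 * the name-ordinal table and the function RVAs -- a by-name lookup walks the
 * names, maps the hit to an ordinal, and uses the ordinal to index the
 * function array.  A real loader would binary search the (sorted) name
 * table; linear search keeps the sketch short.  Types and strcmp are assumed
 * to be available as in the file above.
 */
static uintptr_t findExportByName(const uint8_t *pbImageBase,
                                  const uint32_t *paoffNamedExports, /* RVAs of export name strings */
                                  const uint16_t *pau16NameOrdinals, /* parallel table of ordinals */
                                  const uint32_t *paoffExports,      /* RVAs of the exported functions */
                                  uint32_t cNamedExports,
                                  uint32_t cExports,
                                  const char *pszSymbol)
{
    for (uint32_t i = 0; i < cNamedExports; i++)
    {
        const char *pszName = (const char *)&pbImageBase[paoffNamedExports[i]];
        if (strcmp(pszName, pszSymbol) == 0)
        {
            uint16_t iOrdinal = pau16NameOrdinals[i];
            if (iOrdinal < cExports && paoffExports[iOrdinal] != 0)
                return (uintptr_t)&pbImageBase[paoffExports[iOrdinal]];
            break; /* forwarders and out-of-range entries are not handled in this sketch */
        }
    }
    return 0;
}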
Example #19
/**
 * Do the bi-directional transfer test.
 */
static void tstBidirectionalTransfer(PTSTSTATE pThis, uint32_t cbFrame)
{
    MYARGS Args0;
    RT_ZERO(Args0);
    Args0.hIf         = pThis->hIf0;
    Args0.pBuf        = pThis->pBuf0;
    Args0.Mac.au16[0] = 0x8086;
    Args0.Mac.au16[1] = 0;
    Args0.Mac.au16[2] = 0;
    Args0.cbFrame     = cbFrame;

    MYARGS Args1;
    RT_ZERO(Args1);
    Args1.hIf         = pThis->hIf1;
    Args1.pBuf        = pThis->pBuf1;
    Args1.Mac.au16[0] = 0x8086;
    Args1.Mac.au16[1] = 0;
    Args1.Mac.au16[2] = 1;
    Args1.cbFrame     = cbFrame;

    RTTHREAD ThreadRecv0 = NIL_RTTHREAD;
    RTTHREAD ThreadRecv1 = NIL_RTTHREAD;
    RTTHREAD ThreadSend0 = NIL_RTTHREAD;
    RTTHREAD ThreadSend1 = NIL_RTTHREAD;
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadRecv0, ReceiveThread, &Args0, 0, RTTHREADTYPE_IO,        RTTHREADFLAGS_WAITABLE, "RECV0"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadRecv1, ReceiveThread, &Args1, 0, RTTHREADTYPE_IO,        RTTHREADFLAGS_WAITABLE, "RECV1"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadSend0, SendThread,    &Args0, 0, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE, "SEND0"));
    RTTESTI_CHECK_RC_OK_RETV(RTThreadCreate(&ThreadSend1, SendThread,    &Args1, 0, RTTHREADTYPE_EMULATION, RTTHREADFLAGS_WAITABLE, "SEND1"));

    int rc2 = VINF_SUCCESS;
    int rc;
    RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadSend0, 5*60*1000, &rc2));
    if (RT_SUCCESS(rc))
    {
        RTTESTI_CHECK_RC_OK(rc2);
        ThreadSend0 = NIL_RTTHREAD;
        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadSend1, 5*60*1000, RT_SUCCESS(rc2) ? &rc2 : NULL));
        if (RT_SUCCESS(rc))
        {
            ThreadSend1 = NIL_RTTHREAD;
            RTTESTI_CHECK_RC_OK(rc2);
        }
    }
    if (RTTestErrorCount(g_hTest) == 0)
    {
        /*
         * Wait a bit for the receivers to finish up.
         */
        unsigned cYields = 100000;
        while (     (  IntNetRingHasMoreToRead(&pThis->pBuf0->Recv)
                    || IntNetRingHasMoreToRead(&pThis->pBuf1->Recv))
               &&   cYields-- > 0)
            RTThreadYield();

        uint64_t u64Elapsed = RT_MAX(Args0.u64End, Args1.u64End) - RT_MIN(Args0.u64Start, Args1.u64Start);
        uint64_t u64Speed = (uint64_t)((2 * g_cbTransfer / 1024) / (u64Elapsed / 1000000000.0));
        RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                     "transferred %u bytes in %'RU64 ns (%'RU64 KB/s)\n",
                     2 * g_cbTransfer, u64Elapsed, u64Speed);

        /*
         * Wait for the threads to finish up...
         */
        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadRecv0, 5000, &rc2));
        if (RT_SUCCESS(rc))
        {
            RTTESTI_CHECK_RC_OK(rc2);
            ThreadRecv0 = NIL_RTTHREAD;
        }

        RTTESTI_CHECK_RC_OK(rc = RTThreadWait(ThreadRecv1, 5000, &rc2));
        if (RT_SUCCESS(rc))
        {
            RTTESTI_CHECK_RC_OK(rc2);
            ThreadRecv1 = NIL_RTTHREAD;
        }
    }

    /*
     * Give them a chance to complete...
     */
    RTThreadWait(ThreadRecv0, 5000, NULL);
    RTThreadWait(ThreadRecv1, 5000, NULL);
    RTThreadWait(ThreadSend0, 5000, NULL);
    RTThreadWait(ThreadSend1, 5000, NULL);


    /*
     * Display statistics.
     */
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0: Yields-OK=%llu Yields-NOK=%llu Lost=%llu Bad=%llu\n",
                 pThis->pBuf0->cStatYieldsOk.c,
                 pThis->pBuf0->cStatYieldsNok.c,
                 pThis->pBuf0->cStatLost.c,
                 pThis->pBuf0->cStatBadFrames.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0.Recv: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf0->Recv.cStatFrames,
                 pThis->pBuf0->Recv.cbStatWritten.c,
                 pThis->pBuf0->Recv.cOverflows.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf0.Send: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf0->Send.cStatFrames,
                 pThis->pBuf0->Send.cbStatWritten.c,
                 pThis->pBuf0->Send.cOverflows.c);

    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1: Yields-OK=%llu Yields-NOK=%llu Lost=%llu Bad=%llu\n",
                 pThis->pBuf1->cStatYieldsOk.c,
                 pThis->pBuf1->cStatYieldsNok.c,
                 pThis->pBuf1->cStatLost.c,
                 pThis->pBuf1->cStatBadFrames.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1.Recv: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf1->Recv.cStatFrames,
                 pThis->pBuf1->Recv.cbStatWritten.c,
                 pThis->pBuf1->Recv.cOverflows.c);
    RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS,
                 "Buf1.Send: Frames=%llu Bytes=%llu Overflows=%llu\n",
                 pThis->pBuf1->Send.cStatFrames,
                 pThis->pBuf1->Send.cbStatWritten.c,
                 pThis->pBuf1->Send.cOverflows.c);

}
STDMETHODIMP UIFrameBufferQuartz2D::SetVisibleRegion(BYTE *pRectangles, ULONG aCount)
{
    LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Rectangle count=%lu\n",
             (unsigned long)aCount));

    /* Make sure rectangles were passed: */
    if (!pRectangles)
    {
        LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Invalid pRectangles pointer!\n"));

        return E_POINTER;
    }

    /* Lock access to frame-buffer: */
    lock();

    /* Make sure frame-buffer is used: */
    if (m_fIsMarkedAsUnused)
    {
        LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Ignored!\n"));

        /* Unlock access to frame-buffer: */
        unlock();

        /* Ignore SetVisibleRegion: */
        return E_FAIL;
    }

    /** @todo r=bird: Is this thread safe? If I remember the code flow correctly, the
     * GUI thread could be happily jogging along paintEvent now on another cpu core.
     * This function is called on the EMT (emulation thread). Which means, blocking
     * execution waiting for a lock is out of the question. A quick solution using
     * ASMAtomic(Cmp)XchgPtr and a struct { cAllocated; cRects; aRects[1]; }
     * *mRegion, *mUnusedRegion; should suffice (and permit you to reuse allocations). */
    RegionRects *rgnRcts = ASMAtomicXchgPtrT(&mRegionUnused, NULL, RegionRects *);
    if (rgnRcts && rgnRcts->allocated < aCount)
    {
        RTMemFree (rgnRcts);
        rgnRcts = NULL;
    }
    if (!rgnRcts)
    {
        ULONG allocated = RT_ALIGN_32(aCount + 1, 32);
        allocated = RT_MAX (128, allocated);
        rgnRcts = (RegionRects *)RTMemAlloc(RT_OFFSETOF(RegionRects, rcts[allocated]));
        if (!rgnRcts)
        {
            /* Unlock access to frame-buffer: */
            unlock();

            return E_OUTOFMEMORY;
        }
        rgnRcts->allocated = allocated;
    }
    rgnRcts->used = 0;

    /* Compose region: */
    QRegion reg;
    PRTRECT rects = (PRTRECT)pRectangles;
    QRect vmScreenRect(0, 0, width(), height());
    for (ULONG ind = 0; ind < aCount; ++ ind)
    {
        /* Get current rectangle: */
        QRect rect;
        rect.setLeft(rects->xLeft);
        rect.setTop(rects->yTop);
        /* Which is inclusive: */
        rect.setRight(rects->xRight - 1);
        rect.setBottom(rects->yBottom - 1);

        /* The rect should intersect with the vm screen. */
        rect = vmScreenRect.intersect(rect);
        ++rects;
        /* Make sure only valid rects are distributed: */
        if (rect.isValid() &&
           rect.width() > 0 && rect.height() > 0)
            reg += rect;
        else
            continue;

        /* That is some *magic* added by Knut in r27807: */
        CGRect *cgRct = &rgnRcts->rcts[rgnRcts->used];
        cgRct->origin.x = rect.x();
        cgRct->origin.y = height() - rect.y() - rect.height();
        cgRct->size.width = rect.width();
        cgRct->size.height = rect.height();
        rgnRcts->used++;
    }

    RegionRects *pOld = ASMAtomicXchgPtrT(&mRegion, rgnRcts, RegionRects *);
    if (    pOld
        &&  !ASMAtomicCmpXchgPtr(&mRegionUnused, pOld, NULL))
        RTMemFree(pOld);

    /* Send async signal to update asynchronous visible-region: */
    LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Sending to async-handler...\n"));
    emit sigSetVisibleRegion(reg);

    /* Unlock access to frame-buffer: */
    unlock();

    /* Confirm SetVisibleRegion: */
    return S_OK;
}
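
/*
 * Hypothetical consumer-side sketch (not from the framebuffer sources): the
 * writer above publishes the freshly built rectangle list with an atomic
 * exchange into mRegion and parks the previous list in mRegionUnused.  A
 * reader on another thread can take the current list the same way and, once
 * done, try to hand it back for reuse -- freeing it only if the recycling
 * slot is already occupied.  The RegionRects layout and the iprt/asm.h
 * macros are assumed to match the code above.
 */
static void paintWithVisibleRegion(RegionRects * volatile *ppRegion,
                                   RegionRects * volatile *ppRegionUnused)
{
    /* Take ownership of the currently published region (may be NULL). */
    RegionRects *pRgn = ASMAtomicXchgPtrT(ppRegion, NULL, RegionRects *);
    if (pRgn)
    {
        for (ULONG i = 0; i < pRgn->used; i++)
        {
            /* ... clip/draw using pRgn->rcts[i] ... */
        }

        /* Offer the list back for reuse; free it if the slot is already taken. */
        if (!ASMAtomicCmpXchgPtr(ppRegionUnused, pRgn, NULL))
            RTMemFree(pRgn);
    }
}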
Example #21
/**
 * Deal with the 'slow' I/O control requests.
 *
 * @returns 0 on success, appropriate errno on failure.
 * @param   pSession    The session.
 * @param   ulCmd       The command.
 * @param   pvData      The request data.
 * @param   pTd         The calling thread.
 */
static int VBoxDrvFreeBSDIOCtlSlow(PSUPDRVSESSION pSession, u_long ulCmd, caddr_t pvData, struct thread *pTd)
{
    PSUPREQHDR  pHdr;
    uint32_t    cbReq = IOCPARM_LEN(ulCmd);
    void       *pvUser = NULL;

    /*
     * Buffered request?
     */
    if ((IOC_DIRMASK & ulCmd) == IOC_INOUT)
    {
        pHdr = (PSUPREQHDR)pvData;
        if (RT_UNLIKELY(cbReq < sizeof(*pHdr)))
        {
            OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: cbReq=%#x < %#x; ulCmd=%#lx\n", cbReq, (int)sizeof(*pHdr), ulCmd));
            return EINVAL;
        }
        if (RT_UNLIKELY((pHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC))
        {
            OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: bad magic fFlags=%#x; ulCmd=%#lx\n", pHdr->fFlags, ulCmd));
            return EINVAL;
        }
        if (RT_UNLIKELY(    RT_MAX(pHdr->cbIn, pHdr->cbOut) != cbReq
                        ||  pHdr->cbIn < sizeof(*pHdr)
                        ||  pHdr->cbOut < sizeof(*pHdr)))
        {
            OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: max(%#x,%#x) != %#x; ulCmd=%#lx\n", pHdr->cbIn, pHdr->cbOut, cbReq, ulCmd));
            return EINVAL;
        }
    }
    /*
     * Big unbuffered request?
     */
    else if ((IOC_DIRMASK & ulCmd) == IOC_VOID && !cbReq)
    {
        /*
         * Read the header, validate it and figure out how much needs to be buffered.
         */
        SUPREQHDR Hdr;
        pvUser = *(void **)pvData;
        int rc = copyin(pvUser, &Hdr, sizeof(Hdr));
        if (RT_UNLIKELY(rc))
        {
            OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: copyin(%p,Hdr,) -> %#x; ulCmd=%#lx\n", pvUser, rc, ulCmd));
            return rc;
        }
        if (RT_UNLIKELY((Hdr.fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC))
        {
            OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: bad magic fFlags=%#x; ulCmd=%#lx\n", Hdr.fFlags, ulCmd));
            return EINVAL;
        }
        cbReq = RT_MAX(Hdr.cbIn, Hdr.cbOut);
        if (RT_UNLIKELY(    Hdr.cbIn < sizeof(Hdr)
                        ||  Hdr.cbOut < sizeof(Hdr)
                        ||  cbReq > _1M*16))
        {
            OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: max(%#x,%#x); ulCmd=%#lx\n", Hdr.cbIn, Hdr.cbOut, ulCmd));
            return EINVAL;
        }

        /*
         * Allocate buffer and copy in the data.
         */
        pHdr = (PSUPREQHDR)RTMemTmpAlloc(cbReq);
        if (RT_UNLIKELY(!pHdr))
        {
            OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: failed to allocate buffer of %d bytes; ulCmd=%#lx\n", cbReq, ulCmd));
            return ENOMEM;
        }
        rc = copyin(pvUser, pHdr, Hdr.cbIn);
        if (RT_UNLIKELY(rc))
        {
            OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: copyin(%p,%p,%#x) -> %#x; ulCmd=%#lx\n",
                        pvUser, pHdr, Hdr.cbIn, rc, ulCmd));
            RTMemTmpFree(pHdr);
            return rc;
        }
    }
    else
    {
        Log(("VBoxDrvFreeBSDIOCtlSlow: huh? cbReq=%#x ulCmd=%#lx\n", cbReq, ulCmd));
        return EINVAL;
    }

    /*
     * Process the IOCtl.
     */
    int rc = supdrvIOCtl(ulCmd, &g_VBoxDrvFreeBSDDevExt, pSession, pHdr);
    if (RT_LIKELY(!rc))
    {
        /*
         * If unbuffered, copy back the result before returning.
         */
        if (pvUser)
        {
            uint32_t cbOut = pHdr->cbOut;
            if (cbOut > cbReq)
            {
                OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: too much output! %#x > %#x; uCmd=%#lx!\n", cbOut, cbReq, ulCmd));
                cbOut = cbReq;
            }
            rc = copyout(pHdr, pvUser, cbOut);
            if (RT_UNLIKELY(rc))
                OSDBGPRINT(("VBoxDrvFreeBSDIOCtlSlow: copyout(%p,%p,%#x) -> %d; uCmd=%#lx!\n", pHdr, pvUser, cbOut, rc, ulCmd));

            Log(("VBoxDrvFreeBSDIOCtlSlow: returns %d / %d ulCmd=%lx\n", 0, pHdr->rc, ulCmd));

            /* cleanup */
            RTMemTmpFree(pHdr);
        }
    }
    else
    {
        /*
         * The request failed, just clean up.
         */
        if (pvUser)
            RTMemTmpFree(pHdr);

        Log(("VBoxDrvFreeBSDIOCtlSlow: ulCmd=%lx pData=%p failed, rc=%d\n", ulCmd, pvData, rc));
        rc = EINVAL;
    }

    return rc;
}
Example #22
/**
 * Worker for VBoxSupDrvIOCtl that takes the slow IOCtl functions.
 *
 * @returns Solaris errno.
 *
 * @param   pSession    The session.
 * @param   iCmd        The IOCtl command.
 * @param   Mode        Information bitfield (for specifying ownership of data)
 * @param   iArg        User space address of the request buffer.
 */
static int vgdrvSolarisIOCtlSlow(PVBOXGUESTSESSION pSession, int iCmd, int Mode, intptr_t iArg)
{
    int         rc;
    uint32_t    cbBuf = 0;
    union
    {
        VBGLREQHDR  Hdr;
        uint8_t     abBuf[64];
    }           StackBuf;
    PVBGLREQHDR  pHdr;


    /*
     * Read the header.
     */
    if (RT_UNLIKELY(IOCPARM_LEN(iCmd) != sizeof(StackBuf.Hdr)))
    {
        LogRel(("vgdrvSolarisIOCtlSlow: iCmd=%#x len %d expected %d\n", iCmd, IOCPARM_LEN(iCmd), sizeof(StackBuf.Hdr)));
        return EINVAL;
    }
    rc = ddi_copyin((void *)iArg, &StackBuf.Hdr, sizeof(StackBuf.Hdr), Mode);
    if (RT_UNLIKELY(rc))
    {
        LogRel(("vgdrvSolarisIOCtlSlow: ddi_copyin(,%#lx,) failed; iCmd=%#x. rc=%d\n", iArg, iCmd, rc));
        return EFAULT;
    }
    if (RT_UNLIKELY(StackBuf.Hdr.uVersion != VBGLREQHDR_VERSION))
    {
        LogRel(("vgdrvSolarisIOCtlSlow: bad header version %#x; iCmd=%#x\n", StackBuf.Hdr.uVersion, iCmd));
        return EINVAL;
    }
    cbBuf = RT_MAX(StackBuf.Hdr.cbIn, StackBuf.Hdr.cbOut);
    if (RT_UNLIKELY(   StackBuf.Hdr.cbIn < sizeof(StackBuf.Hdr)
                    || (StackBuf.Hdr.cbOut < sizeof(StackBuf.Hdr) && StackBuf.Hdr.cbOut != 0)
                    || cbBuf > _1M*16))
    {
        LogRel(("vgdrvSolarisIOCtlSlow: max(%#x,%#x); iCmd=%#x\n", StackBuf.Hdr.cbIn, StackBuf.Hdr.cbOut, iCmd));
        return EINVAL;
    }

    /*
     * Buffer the request.
     *
     * Note! Common code revalidates the header sizes and version. So it's
     *       fine to read it once more.
     */
    if (cbBuf <= sizeof(StackBuf))
        pHdr = &StackBuf.Hdr;
    else
    {
        pHdr = RTMemTmpAlloc(cbBuf);
        if (RT_UNLIKELY(!pHdr))
        {
            LogRel(("vgdrvSolarisIOCtlSlow: failed to allocate buffer of %d bytes for iCmd=%#x.\n", cbBuf, iCmd));
            return ENOMEM;
        }
    }
    rc = ddi_copyin((void *)iArg, pHdr, cbBuf, Mode);
    if (RT_UNLIKELY(rc))
    {
        LogRel(("vgdrvSolarisIOCtlSlow: copy_from_user(,%#lx, %#x) failed; iCmd=%#x. rc=%d\n", iArg, cbBuf, iCmd, rc));
        if (pHdr != &StackBuf.Hdr)
            RTMemFree(pHdr);
        return EFAULT;
    }

    /*
     * Process the IOCtl.
     */
    rc = VGDrvCommonIoCtl(iCmd, &g_DevExt, pSession, pHdr, cbBuf);

    /*
     * Copy ioctl data and output buffer back to user space.
     */
    if (RT_SUCCESS(rc))
    {
        uint32_t cbOut = pHdr->cbOut;
        if (RT_UNLIKELY(cbOut > cbBuf))
        {
            LogRel(("vgdrvSolarisIOCtlSlow: too much output! %#x > %#x; iCmd=%#x!\n", cbOut, cbBuf, iCmd));
            cbOut = cbBuf;
        }
        rc = ddi_copyout(pHdr, (void *)iArg, cbOut, Mode);
        if (RT_UNLIKELY(rc != 0))
        {
            /* this is really bad */
            LogRel(("vgdrvSolarisIOCtlSlow: ddi_copyout(,%p,%d) failed. rc=%d\n", (void *)iArg, cbBuf, rc));
            rc = EFAULT;
        }
    }
    else
        rc = EINVAL;

    if (pHdr != &StackBuf.Hdr)
        RTMemTmpFree(pHdr);
    return rc;
}
RTDECL(int) RTVfsIoStrmReadAll(RTVFSIOSTREAM hVfsIos, void **ppvBuf, size_t *pcbBuf)
{
    /*
     * Try query the object information and in case the stream has a known
     * size we could use for guidance.
     */
    RTFSOBJINFO ObjInfo;
    int    rc = RTVfsIoStrmQueryInfo(hVfsIos, &ObjInfo, RTFSOBJATTRADD_NOTHING);
    size_t cbAllocated = RT_SUCCESS(rc) && ObjInfo.cbObject > 0 && ObjInfo.cbObject < _1G
                       ? (size_t)ObjInfo.cbObject + 1 : _16K;
    cbAllocated += READ_ALL_HEADER_SIZE;
    void *pvBuf = RTMemAlloc(cbAllocated);
    if (pvBuf)
    {
        memset(pvBuf, 0xfe, READ_ALL_HEADER_SIZE);
        size_t off = 0;
        for (;;)
        {
            /*
             * Handle buffer growing and detecting the end of it all.
             */
            size_t cbToRead = cbAllocated - off - READ_ALL_HEADER_SIZE - 1;
            if (!cbToRead)
            {
                /* The end? */
                uint8_t bIgn;
                size_t cbIgn;
                rc = RTVfsIoStrmRead(hVfsIos, &bIgn, 0, true /*fBlocking*/, &cbIgn);
                if (rc == VINF_EOF)
                    break;

                /* Grow the buffer. */
                cbAllocated -= READ_ALL_HEADER_SIZE - 1;
                cbAllocated += RT_MAX(RT_MIN(cbAllocated, _32M), _1K); /* grow by the current size, capped at 32M and floored at 1K */
                cbAllocated  = RT_ALIGN_Z(cbAllocated, _4K);
                cbAllocated += READ_ALL_HEADER_SIZE + 1;

                void *pvNew = RTMemRealloc(pvBuf, cbAllocated);
                AssertBreakStmt(pvNew, rc = VERR_NO_MEMORY);
                pvBuf = pvNew;

                cbToRead = cbAllocated - off - READ_ALL_HEADER_SIZE - 1;
            }
            Assert(cbToRead < cbAllocated);

            /*
             * Read.
             */
            size_t cbActual;
            rc = RTVfsIoStrmRead(hVfsIos, (uint8_t *)pvBuf + READ_ALL_HEADER_SIZE + off, cbToRead,
                                 true /*fBlocking*/, &cbActual);
            if (RT_FAILURE(rc))
                break;
            Assert(cbActual > 0);
            Assert(cbActual <= cbToRead);
            off += cbActual;
            if (rc == VINF_EOF)
                break;
        }
        Assert(rc != VERR_EOF);
        if (RT_SUCCESS(rc))
        {
            ((size_t *)pvBuf)[0] = READ_ALL_HEADER_MAGIC;
            ((size_t *)pvBuf)[1] = off;
            ((uint8_t *)pvBuf)[READ_ALL_HEADER_SIZE + off] = 0;

            *ppvBuf = (uint8_t *)pvBuf + READ_ALL_HEADER_SIZE;
            *pcbBuf = off;
            return VINF_SUCCESS;
        }

        RTMemFree(pvBuf);
    }
    else
        rc = VERR_NO_MEMORY;
    *ppvBuf = NULL;
    *pcbBuf = 0;
    return rc;
}
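
/*
 * Usage sketch (assuming iprt/vfs.h and friends are included): the function
 * above returns a heap buffer that carries a hidden header in front of the
 * data, so the caller is expected to release it with the matching
 * RTVfsIoStrmReadAllFree(pvBuf, cbBuf) rather than RTMemFree -- that pairing
 * is inferred from the header/magic handling above.
 */
static int readWholeStreamExample(RTVFSIOSTREAM hVfsIos)
{
    void  *pvBuf = NULL;
    size_t cbBuf = 0;
    int rc = RTVfsIoStrmReadAll(hVfsIos, &pvBuf, &cbBuf);
    if (RT_SUCCESS(rc))
    {
        /* The data is zero terminated at byte offset cbBuf, see above. */
        RTPrintf("read %RU64 bytes\n", (uint64_t)cbBuf);
        RTVfsIoStrmReadAllFree(pvBuf, cbBuf);
    }
    return rc;
}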
Example #24
/**
 * Does one free space wipe, using the given filename.
 *
 * @returns RTEXITCODE_SUCCESS on success, RTEXITCODE_FAILURE on failure (fully
 *          bitched).
 * @param   pszFilename     The filename to use for wiping free space.  Will be
 *                          replaced and afterwards deleted.
 * @param   pvFiller        The filler block buffer.
 * @param   cbFiller        The size of the filler block buffer.
 * @param   cbMinLeftOpt    When to stop wiping.
 */
static RTEXITCODE doOneFreeSpaceWipe(const char *pszFilename, void const *pvFiller, size_t cbFiller, uint64_t cbMinLeftOpt)
{
    /*
     * Open the file.
     */
    RTEXITCODE  rcExit = RTEXITCODE_SUCCESS;
    RTFILE      hFile  = NIL_RTFILE;
    int rc = RTFileOpen(&hFile, pszFilename,
                        RTFILE_O_WRITE | RTFILE_O_DENY_NONE | RTFILE_O_CREATE_REPLACE | (0775 << RTFILE_O_CREATE_MODE_SHIFT));
    if (RT_SUCCESS(rc))
    {
        /*
         * Query the amount of available free space.  Figure out which API we should use.
         */
        RTFOFF cbTotal = 0;
        RTFOFF cbFree = 0;
        rc = RTFileQueryFsSizes(hFile, &cbTotal, &cbFree, NULL, NULL);
        bool const fFileHandleApiSupported = rc != VERR_NOT_SUPPORTED && rc != VERR_NOT_IMPLEMENTED;
        if (!fFileHandleApiSupported)
            rc = RTFsQuerySizes(pszFilename, &cbTotal, &cbFree, NULL, NULL);
        if (RT_SUCCESS(rc))
        {
            RTPrintf("%s: %'9RTfoff MiB out of %'9RTfoff are free\n", pszFilename, cbFree / _1M, cbTotal / _1M);

            /*
             * Start filling up the free space, down to the last 32MB.
             */
            uint64_t const  nsStart       = RTTimeNanoTS();     /* for speed calcs */
            uint64_t        nsStat        = nsStart;            /* for speed calcs */
            uint64_t        cbStatWritten = 0;                  /* for speed calcs */
            RTFOFF const    cbMinLeft     = RT_MAX(cbMinLeftOpt, cbFiller * 2);
            RTFOFF          cbLeftToWrite = cbFree - cbMinLeft;
            uint64_t        cbWritten     = 0;
            uint32_t        iLoop         = 0;
            while (cbLeftToWrite >= (RTFOFF)cbFiller)
            {
                rc = RTFileWrite(hFile, pvFiller, cbFiller, NULL);
                if (RT_FAILURE(rc))
                {
                    if (rc == VERR_DISK_FULL)
                        RTPrintf("%s: Disk full after writing %'9RU64 MiB\n", pszFilename, cbWritten / _1M);
                    else
                        rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: Write error after %'RU64 bytes: %Rrc\n",
                                                pszFilename, cbWritten, rc);
                    break;
                }

                /* Flush every now and then as we approach a completely full disk. */
                if (cbLeftToWrite <= _1G && (iLoop & (cbLeftToWrite  > _128M ? 15 : 3)) == 0)
                    RTFileFlush(hFile);

                /*
                 * Advance and maybe recheck the amount of free space.
                 */
                cbWritten     += cbFiller;
                cbLeftToWrite -= (ssize_t)cbFiller;
                iLoop++;
                if ((iLoop & (16 - 1)) == 0 || cbLeftToWrite < _256M)
                {
                    RTFOFF cbFreeUpdated;
                    if (fFileHandleApiSupported)
                        rc = RTFileQueryFsSizes(hFile, NULL, &cbFreeUpdated, NULL, NULL);
                    else
                        rc = RTFsQuerySizes(pszFilename, NULL, &cbFreeUpdated, NULL, NULL);
                    if (RT_SUCCESS(rc))
                    {
                        cbFree = cbFreeUpdated;
                        cbLeftToWrite = cbFree - cbMinLeft;
                    }
                    else
                    {
                        rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: Failed to query free space after %'RU64 bytes: %Rrc\n",
                                                pszFilename, cbWritten, rc);
                        break;
                    }
                    if ((iLoop & (512 - 1)) == 0)
                    {
                        uint64_t const nsNow = RTTimeNanoTS();
                        uint64_t cNsInterval = nsNow - nsStat;
                        uint64_t cbInterval  = cbWritten - cbStatWritten;
                        uint64_t cbIntervalPerSec = cbInterval ? (uint64_t)(cbInterval / (cNsInterval / (double)RT_NS_1SEC)) : 0;

                        RTPrintf("%s: %'9RTfoff MiB out of %'9RTfoff are free after writing %'9RU64 MiB (%'5RU64 MiB/s)\n",
                                 pszFilename, cbFree / _1M, cbTotal  / _1M, cbWritten  / _1M, cbIntervalPerSec / _1M);
                        nsStat        = nsNow;
                        cbStatWritten = cbWritten;
                    }
                }
            }

            /*
             * Now flush the file and then reduce the size a little before closing
             * it so the system won't entirely run out of space.  The flush should
             * ensure the data has actually hit the disk.
             */
            rc = RTFileFlush(hFile);
            if (RT_FAILURE(rc))
                rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: Flush failed at %'RU64 bytes: %Rrc\n", pszFilename, cbWritten, rc);

            uint64_t cbReduced = cbWritten > _512M ? cbWritten - _512M : cbWritten / 2;
            rc = RTFileSetSize(hFile, cbReduced);
            if (RT_FAILURE(rc))
                rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: Failed to reduce file size from %'RU64 to %'RU64 bytes: %Rrc\n",
                                        pszFilename, cbWritten, cbReduced, rc);

            /* Issue a summary statement. */
            uint64_t cNsElapsed = RTTimeNanoTS() - nsStart;
            uint64_t cbPerSec   = cbWritten ? (uint64_t)(cbWritten / (cNsElapsed / (double)RT_NS_1SEC)) : 0;
            RTPrintf("%s: Wrote %'RU64 MiB in %'RU64 s, avg %'RU64 MiB/s.\n",
                     pszFilename, cbWritten / _1M, cNsElapsed / RT_NS_1SEC, cbPerSec / _1M);
        }
        else
            rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: Initial free space query failed: %Rrc \n", pszFilename, rc);

        RTFileClose(hFile);

        /*
         * Delete the file.
         */
        rc = RTFileDelete(pszFilename);
        if (RT_FAILURE(rc))
            rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: Delete failed: %Rrc !!\n", pszFilename, rc);
    }
    else
        rcExit = RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: Open failed: %Rrc\n", pszFilename, rc);
    return rcExit;
}
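
/*
 * Hypothetical caller sketch: the wiper above only needs a filler block and a
 * stop threshold, so a minimal driver can allocate a pattern buffer, run one
 * pass and free it again.  The 0xf6 pattern, the 2 MiB block size and the
 * 32 MiB safety margin are arbitrary illustration values, not taken from the
 * original tool.
 */
static RTEXITCODE wipeOnce(const char *pszWipeFile)
{
    size_t const cbFiller = _2M;
    void *pvFiller = RTMemAlloc(cbFiller);
    if (!pvFiller)
        return RTMsgErrorExit(RTEXITCODE_FAILURE, "%s: failed to allocate the filler block\n", pszWipeFile);
    memset(pvFiller, 0xf6, cbFiller);   /* any non-trivial pattern will do */

    RTEXITCODE rcExit = doOneFreeSpaceWipe(pszWipeFile, pvFiller, cbFiller, _32M /* cbMinLeftOpt */);

    RTMemFree(pvFiller);
    return rcExit;
}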
Example #25
/**
 * Deal with the 'slow' I/O control requests.
 *
 * @returns 0 on success, appropriate errno on failure.
 * @param   pSession    The session.
 * @param   ulCmd       The command.
 * @param   pvData      The request data.
 * @param   pTd         The calling thread.
 */
static int vgdrvFreeBSDIOCtlSlow(PVBOXGUESTSESSION pSession, u_long ulCmd, caddr_t pvData, struct thread *pTd)
{
    PVBGLREQHDR pHdr;
    uint32_t    cbReq = IOCPARM_LEN(ulCmd);
    void       *pvUser = NULL;

    /*
     * Buffered request?
     */
    if ((IOC_DIRMASK & ulCmd) == IOC_INOUT)
    {
        pHdr = (PVBGLREQHDR)pvData;
        if (RT_UNLIKELY(cbReq < sizeof(*pHdr)))
        {
            LogRel(("vgdrvFreeBSDIOCtlSlow: cbReq=%#x < %#x; ulCmd=%#lx\n", cbReq, (int)sizeof(*pHdr), ulCmd));
            return EINVAL;
        }
        if (RT_UNLIKELY(pHdr->uVersion != VBGLREQHDR_VERSION))
        {
            LogRel(("vgdrvFreeBSDIOCtlSlow: bad uVersion=%#x; ulCmd=%#lx\n", pHdr->uVersion, ulCmd));
            return EINVAL;
        }
        if (RT_UNLIKELY(   RT_MAX(pHdr->cbIn, pHdr->cbOut) != cbReq
                        || pHdr->cbIn < sizeof(*pHdr)
                        || (pHdr->cbOut < sizeof(*pHdr) && pHdr->cbOut != 0)))
        {
            LogRel(("vgdrvFreeBSDIOCtlSlow: max(%#x,%#x) != %#x; ulCmd=%#lx\n", pHdr->cbIn, pHdr->cbOut, cbReq, ulCmd));
            return EINVAL;
        }
    }
    /*
     * Big unbuffered request?
     */
    else if ((IOC_DIRMASK & ulCmd) == IOC_VOID && !cbReq)
    {
        /*
         * Read the header, validate it and figure out how much needs to be buffered.
         */
        VBGLREQHDR Hdr;
        pvUser = *(void **)pvData;
        int rc = copyin(pvUser, &Hdr, sizeof(Hdr));
        if (RT_UNLIKELY(rc))
        {
            LogRel(("vgdrvFreeBSDIOCtlSlow: copyin(%p,Hdr,) -> %#x; ulCmd=%#lx\n", pvUser, rc, ulCmd));
            return rc;
        }
        if (RT_UNLIKELY(Hdr.uVersion != VBGLREQHDR_VERSION))
        {
            LogRel(("vgdrvFreeBSDIOCtlSlow: bad uVersion=%#x; ulCmd=%#lx\n", Hdr.uVersion, ulCmd));
            return EINVAL;
        }
        cbReq = RT_MAX(Hdr.cbIn, Hdr.cbOut);
        if (RT_UNLIKELY(   Hdr.cbIn < sizeof(Hdr)
                        || (Hdr.cbOut < sizeof(Hdr) && Hdr.cbOut != 0)
                        || cbReq > _1M*16))
        {
            LogRel(("vgdrvFreeBSDIOCtlSlow: max(%#x,%#x); ulCmd=%#lx\n", Hdr.cbIn, Hdr.cbOut, ulCmd));
            return EINVAL;
        }

        /*
         * Allocate buffer and copy in the data.
         */
        pHdr = (PVBGLREQHDR)RTMemTmpAlloc(cbReq);
        if (RT_UNLIKELY(!pHdr))
        {
            LogRel(("vgdrvFreeBSDIOCtlSlow: failed to allocate buffer of %d bytes; ulCmd=%#lx\n", cbReq, ulCmd));
            return ENOMEM;
        }
        rc = copyin(pvUser, pHdr, Hdr.cbIn);
        if (RT_UNLIKELY(rc))
        {
            LogRel(("vgdrvFreeBSDIOCtlSlow: copyin(%p,%p,%#x) -> %#x; ulCmd=%#lx\n",
                        pvUser, pHdr, Hdr.cbIn, rc, ulCmd));
            RTMemTmpFree(pHdr);
            return rc;
        }
        if (Hdr.cbIn < cbReq)
            RT_BZERO((uint8_t *)pHdr + Hdr.cbIn, cbReq - Hdr.cbIn);
    }
    else
    {
        Log(("vgdrvFreeBSDIOCtlSlow: huh? cbReq=%#x ulCmd=%#lx\n", cbReq, ulCmd));
        return EINVAL;
    }

    /*
     * Process the IOCtl.
     */
    int rc = VGDrvCommonIoCtl(ulCmd, &g_DevExt, pSession, pHdr, cbReq);
    if (RT_LIKELY(!rc))
    {
        /*
         * If unbuffered, copy back the result before returning.
         */
        if (pvUser)
        {
            uint32_t cbOut = pHdr->cbOut;
            if (cbOut > cbReq)
            {
                LogRel(("vgdrvFreeBSDIOCtlSlow: too much output! %#x > %#x; uCmd=%#lx!\n", cbOut, cbReq, ulCmd));
                cbOut = cbReq;
            }
            rc = copyout(pHdr, pvUser, cbOut);
            if (RT_UNLIKELY(rc))
                LogRel(("vgdrvFreeBSDIOCtlSlow: copyout(%p,%p,%#x) -> %d; uCmd=%#lx!\n", pHdr, pvUser, cbOut, rc, ulCmd));

            Log(("vgdrvFreeBSDIOCtlSlow: returns %d / %d ulCmd=%lx\n", 0, pHdr->rc, ulCmd));

            /* cleanup */
            RTMemTmpFree(pHdr);
        }
    }
    else
    {
        /*
         * The request failed, just clean up.
         */
        if (pvUser)
            RTMemTmpFree(pHdr);

        Log(("vgdrvFreeBSDIOCtlSlow: ulCmd=%lx pData=%p failed, rc=%d\n", ulCmd, pvData, rc));
        rc = EINVAL;
    }

    return rc;
}
Example #26
RTDECL(int) RTPathAppendEx(char *pszPath, size_t cbPathDst, const char *pszAppend, size_t cchAppendMax)
{
    char *pszPathEnd = RTStrEnd(pszPath, cbPathDst);
    AssertReturn(pszPathEnd, VERR_INVALID_PARAMETER);

    /*
     * Special cases.
     */
    if (!pszAppend)
        return VINF_SUCCESS;
    size_t cchAppend = RTStrNLen(pszAppend, cchAppendMax);
    if (!cchAppend)
        return VINF_SUCCESS;
    if (pszPathEnd == pszPath)
    {
        if (cchAppend >= cbPathDst)
            return VERR_BUFFER_OVERFLOW;
        memcpy(pszPath, pszAppend, cchAppend);
        pszPath[cchAppend] = '\0';
        return VINF_SUCCESS;
    }

    /*
     * Balance slashes and check for buffer overflow.
     */
    if (!RTPATH_IS_SLASH(pszPathEnd[-1]))
    {
        if (!RTPATH_IS_SLASH(pszAppend[0]))
        {
#if defined (RT_OS_OS2) || defined (RT_OS_WINDOWS)
            if (    (size_t)(pszPathEnd - pszPath) == 2
                &&  pszPath[1] == ':'
                &&  RT_C_IS_ALPHA(pszPath[0]))
            {
                if ((size_t)(pszPathEnd - pszPath) + cchAppend >= cbPathDst)
                    return VERR_BUFFER_OVERFLOW;
            }
            else
#endif
            {
                if ((size_t)(pszPathEnd - pszPath) + 1 + cchAppend >= cbPathDst)
                    return VERR_BUFFER_OVERFLOW;
                *pszPathEnd++ = RTPATH_SLASH;
            }
        }
        else
        {
            /* One slash is sufficient at this point. */
            while (cchAppend > 1 && RTPATH_IS_SLASH(pszAppend[1]))
                pszAppend++, cchAppend--;

            if ((size_t)(pszPathEnd - pszPath) + cchAppend >= cbPathDst)
                return VERR_BUFFER_OVERFLOW;
        }
    }
    else
    {
        /* No slashes needed in the appended bit. */
        while (cchAppend && RTPATH_IS_SLASH(*pszAppend))
            pszAppend++, cchAppend--;

        /* In the leading path we can skip unnecessary trailing slashes, but
           be sure to leave one. */
        size_t const cchRoot = rtPathRootSpecLen2(pszPath);
        while (     (size_t)(pszPathEnd - pszPath) > RT_MAX(1, cchRoot)
               &&   RTPATH_IS_SLASH(pszPathEnd[-2]))
            pszPathEnd--;

        if ((size_t)(pszPathEnd - pszPath) + cchAppend >= cbPathDst)
            return VERR_BUFFER_OVERFLOW;
    }

    /*
     * What remains now is just the copying.
     */
    memcpy(pszPathEnd, pszAppend, cchAppend);
    pszPathEnd[cchAppend] = '\0';
    return VINF_SUCCESS;
}
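
/*
 * Usage sketch (assuming iprt/path.h and iprt/string.h are included): append
 * a relative component to a buffer-backed path.  cchAppendMax bounds how much
 * of pszAppend is used; RTSTR_MAX means "the whole string".  The base and
 * appended names below are made-up example values.
 */
static int buildExamplePath(char *pszDst, size_t cbDst)
{
    int rc = RTStrCopy(pszDst, cbDst, "/etc/myapp");    /* hypothetical base directory */
    if (RT_SUCCESS(rc))
        rc = RTPathAppendEx(pszDst, cbDst, "conf.d/main.cfg", RTSTR_MAX);
    /* On success pszDst now holds "/etc/myapp/conf.d/main.cfg";
       VERR_BUFFER_OVERFLOW is returned if cbDst is too small. */
    return rc;
}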
Example #27
/**
 * Allocates a chunk of memory from the specified heap.
 * The caller validates the parameters of this request.
 *
 * @returns Pointer to the allocated chunk.
 * @returns NULL on failure.
 * @param   pHeap       The heap.
 * @param   cb          Size of the memory block to allocate.
 * @param   uAlignment  The alignment specifications for the allocated block.
 * @internal
 */
static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment)
{
    Log3(("mmHyperAllocChunk: Enter cb=%#x uAlignment=%#x\n", cb, uAlignment));
#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif
#ifdef MMHYPER_HEAP_STRICT_FENCE
    uint32_t cbFence = RT_MAX(MMHYPER_HEAP_STRICT_FENCE_SIZE, uAlignment);
    cb += cbFence;
#endif

    /*
     * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
     */
    if (pHeap->offFreeHead == NIL_OFFSET)
        return NULL;

    /*
     * Small alignments - from the front of the heap.
     *
     * Must split off free chunks at the end to prevent messing up the
     * last free node which we take the page aligned memory from the top of.
     */
    PMMHYPERCHUNK     pRet = NULL;
    PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeHead);
    while (pFree)
    {
        ASSERT_CHUNK_FREE(pHeap, pFree);
        if (pFree->cb >= cb)
        {
            unsigned offAlign = (uintptr_t)(&pFree->core + 1) & (uAlignment - 1);
            if (offAlign)
                offAlign = uAlignment - offAlign;
            if (!offAlign || pFree->cb - offAlign >= cb)
            {
                Log3(("mmHyperAllocChunk: Using pFree=%p pFree->cb=%d offAlign=%d\n", pFree, pFree->cb, offAlign));

                /*
                 * Adjust the node in front.
                 * Because of multiple alignments we need to special case allocation of the first block.
                 */
                if (offAlign)
                {
                    MMHYPERCHUNKFREE Free = *pFree;
                    if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
                    {
                        /* just add a bit of memory to it. */
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
                        pPrev->core.offNext += offAlign;
                        AssertMsg(!MMHYPERCHUNK_ISFREE(&pPrev->core), ("Impossible!\n"));
                        Log3(("mmHyperAllocChunk: Added %d bytes to %p\n", offAlign, pPrev));
                    }
                    else
                    {
                        /* make new head node, mark it USED for simplicity. */
                        PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)pHeap->CTX_SUFF(pbHeap);
                        Assert(pPrev == &pFree->core);
                        pPrev->offPrev = 0;
                        MMHYPERCHUNK_SET_TYPE(pPrev, MMHYPERCHUNK_FLAGS_USED);
                        pPrev->offNext = offAlign;
                        Log3(("mmHyperAllocChunk: Created new first node of %d bytes\n", offAlign));

                    }
                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
                    pHeap->cbFree -= offAlign;

                    /* Recreate pFree node and adjusting everything... */
                    pFree = (PMMHYPERCHUNKFREE)((char *)pFree + offAlign);
                    *pFree = Free;

                    pFree->cb -= offAlign;
                    if (pFree->core.offNext)
                    {
                        pFree->core.offNext -= offAlign;
                        PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
                        MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
                        ASSERT_CHUNK(pHeap, pNext);
                    }
                    if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
                        MMHYPERCHUNK_SET_OFFPREV(&pFree->core, MMHYPERCHUNK_GET_OFFPREV(&pFree->core) - offAlign);

                    if (pFree->offNext)
                    {
                        pFree->offNext -= offAlign;
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
                        pNext->offPrev = -(int32_t)pFree->offNext;
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                        pHeap->offFreeTail += offAlign;
                    if (pFree->offPrev)
                    {
                        pFree->offPrev -= offAlign;
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                        pPrev->offNext = -pFree->offPrev;
                        ASSERT_CHUNK_FREE(pHeap, pPrev);
                    }
                    else
                        pHeap->offFreeHead += offAlign;
                    pFree->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pFree;
                    pFree->core.offStat = 0;
                    ASSERT_CHUNK_FREE(pHeap, pFree);
                    Log3(("mmHyperAllocChunk: Realigned pFree=%p\n", pFree));
                }

                /*
                 * Split off a new FREE chunk?
                 */
                if (pFree->cb >= cb + RT_ALIGN(sizeof(MMHYPERCHUNKFREE), MMHYPER_HEAP_ALIGN_MIN))
                {
                    /*
                     * Move the FREE chunk up to make room for the new USED chunk.
                     */
                    const int           off = cb + sizeof(MMHYPERCHUNK);
                    PMMHYPERCHUNKFREE   pNew = (PMMHYPERCHUNKFREE)((char *)&pFree->core + off);
                    *pNew = *pFree;
                    pNew->cb -= off;
                    if (pNew->core.offNext)
                    {
                        pNew->core.offNext -= off;
                        PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pNew + pNew->core.offNext);
                        MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pNew->core.offNext);
                        ASSERT_CHUNK(pHeap, pNext);
                    }
                    pNew->core.offPrev  = -off;
                    MMHYPERCHUNK_SET_TYPE(pNew, MMHYPERCHUNK_FLAGS_FREE);

                    if (pNew->offNext)
                    {
                        pNew->offNext -= off;
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offNext);
                        pNext->offPrev = -(int32_t)pNew->offNext;
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                        pHeap->offFreeTail += off;
                    if (pNew->offPrev)
                    {
                        pNew->offPrev -= off;
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offPrev);
                        pPrev->offNext = -pNew->offPrev;
                        ASSERT_CHUNK_FREE(pHeap, pPrev);
                    }
                    else
                        pHeap->offFreeHead += off;
                    pNew->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pNew;
                    pNew->core.offStat = 0;
                    ASSERT_CHUNK_FREE(pHeap, pNew);

                    /*
                     * Update the old FREE node making it a USED node.
                     */
                    pFree->core.offNext = off;
                    MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);


                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
                          pHeap->cbFree - (cb + sizeof(MMHYPERCHUNK)), -(int)(cb + sizeof(MMHYPERCHUNK))));
                    pHeap->cbFree -= (uint32_t)(cb + sizeof(MMHYPERCHUNK));
                    pRet = &pFree->core;
                    ASSERT_CHUNK(pHeap, &pFree->core);
                    Log3(("mmHyperAllocChunk: Created free chunk pNew=%p cb=%d\n", pNew, pNew->cb));
                }
                else
                {
                    /*
                     * Link out of free list.
                     */
                    if (pFree->offNext)
                    {
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
                        if (pFree->offPrev)
                        {
                            pNext->offPrev += pFree->offPrev;
                            PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                            pPrev->offNext += pFree->offNext;
                            ASSERT_CHUNK_FREE(pHeap, pPrev);
                        }
                        else
                        {
                            pHeap->offFreeHead += pFree->offNext;
                            pNext->offPrev = 0;
                        }
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                    {
                        if (pFree->offPrev)
                        {
                            pHeap->offFreeTail += pFree->offPrev;
                            PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                            pPrev->offNext = 0;
                            ASSERT_CHUNK_FREE(pHeap, pPrev);
                        }
                        else
                        {
                            pHeap->offFreeHead = NIL_OFFSET;
                            pHeap->offFreeTail = NIL_OFFSET;
                        }
                    }

                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
                          pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
                    pHeap->cbFree -= pFree->cb;
                    MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
                    pRet = &pFree->core;
                    ASSERT_CHUNK(pHeap, &pFree->core);
                    Log3(("mmHyperAllocChunk: Converted free chunk %p to used chunk.\n", pFree));
                }
                Log3(("mmHyperAllocChunk: Returning %p\n", pRet));
                break;
            }
        }

        /* next */
        pFree = pFree->offNext ? (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext) : NULL;
    }

#ifdef MMHYPER_HEAP_STRICT_FENCE
    uint32_t *pu32End = (uint32_t *)((uint8_t *)(pRet + 1) + cb);
    uint32_t *pu32EndReal = pRet->offNext
                          ? (uint32_t *)((uint8_t *)pRet + pRet->offNext)
                          : (uint32_t *)(pHeap->CTX_SUFF(pbHeap) + pHeap->cbHeap);
    cbFence += (uintptr_t)pu32EndReal - (uintptr_t)pu32End; Assert(!(cbFence & 0x3));
    ASMMemFill32((uint8_t *)pu32EndReal - cbFence, cbFence, MMHYPER_HEAP_STRICT_FENCE_U32);
    pu32EndReal[-1] = cbFence;
#endif
#ifdef MMHYPER_HEAP_STRICT
    mmHyperHeapCheck(pHeap);
#endif
    return pRet;
}
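
/*
 * Illustrative sketch (standalone, not heap specific): the alignment fix-up
 * above assumes uAlignment is a power of two, so "address & (uAlignment - 1)"
 * yields the misalignment and "uAlignment - misalignment" the padding needed
 * in front of the user data.  The helper below shows that arithmetic on
 * plain integers.
 */
static unsigned calcAlignPadding(uintptr_t uAddr, unsigned uAlignment)
{
    Assert(uAlignment && !(uAlignment & (uAlignment - 1))); /* must be a power of two */
    unsigned offMis = (unsigned)(uAddr & (uAlignment - 1));
    return offMis ? uAlignment - offMis : 0;
}
/* e.g. calcAlignPadding(0x1004, 16) == 12, calcAlignPadding(0x1000, 16) == 0. */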
Example #28
/**
 * Parses a path.
 *
 * It figures the length of the directory component, the offset of
 * the file name and the location of the suffix dot.
 *
 * @returns The path length.
 *
 * @param   pszPath     Path to find filename in.
 * @param   pcchDir     Where to put the length of the directory component. If
 *                      no directory, this will be 0. Optional.
 * @param   poffName    Where to store the filename offset.
 *                      If empty string or if it's ending with a slash this
 *                      will be set to -1. Optional.
 * @param   poffSuff    Where to store the suffix offset (the last dot).
 *                      If empty string or if it's ending with a slash this
 *                      will be set to -1. Optional.
 */
RTDECL(size_t) RTPathParseSimple(const char *pszPath, size_t *pcchDir, ssize_t *poffName, ssize_t *poffSuff)
{
    const char *psz = pszPath;
    ssize_t     offRoot = 0;
    const char *pszName = pszPath;
    const char *pszLastDot = NULL;

    for (;; psz++)
    {
        switch (*psz)
        {
            /* handle separators. */
#if defined(RT_OS_WINDOWS) || defined(RT_OS_OS2)
            case ':':
                pszName = psz + 1;
                offRoot = pszName - psz;
                break;

            case '\\':
#endif
            case '/':
                pszName = psz + 1;
                break;

            case '.':
                pszLastDot = psz;
                break;

            /*
             * The end. Complete the results.
             */
            case '\0':
            {
                ssize_t offName = *pszName != '\0' ? pszName - pszPath : -1;
                if (poffName)
                    *poffName = offName;

                if (poffSuff)
                {
                    ssize_t offSuff = -1;
                    if (pszLastDot)
                    {
                        offSuff = pszLastDot - pszPath;
                        if (offSuff <= offName)
                            offSuff = -1;
                    }
                    *poffSuff = offSuff;
                }

                if (pcchDir)
                {
                    ssize_t off = offName - 1;
                    while (off >= offRoot && RTPATH_IS_SLASH(pszPath[off]))
                        off--;
                    *pcchDir = RT_MAX(off, offRoot) + 1;
                }

                return psz - pszPath;
            }
        }
    }

    /* will never get here */
    return 0;
}
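
/*
 * Usage sketch: picking a path apart with the offsets computed above.  The
 * values in the trailing comment follow directly from the code: for
 * "/usr/local/file.tar.gz" the filename starts at offset 11, the last dot is
 * at offset 19, and the directory part (minus its trailing slash) is 10
 * characters long.
 */
static void showPathPartsExample(void)
{
    const char *pszPath = "/usr/local/file.tar.gz";
    size_t      cchDir  = 0;
    ssize_t     offName = -1;
    ssize_t     offSuff = -1;

    size_t cchPath = RTPathParseSimple(pszPath, &cchDir, &offName, &offSuff);
    /* cchPath == 22, cchDir == 10 ("/usr/local"),
       offName == 11 ("file.tar.gz"), offSuff == 19 (".gz"). */
    NOREF(cchPath);
}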
Example #29
/**
 * Tests if the given string is a valid IPv6 address.
 *
 * @returns 0 if valid, some random number if not.  THIS IS NOT AN IPRT STATUS!
 * @param psz                  The string to test
 * @param pszResultAddress     plain address, optional read "valid addresses
 *                             and strings" above.
 * @param resultAddressSize    size of pszResultAddress
 * @param addressOnly          return only the plain address (no scope)
 *                             Ignored, and will always return the if id
 * @param followRfc            Whether to follow the RFC formatting rules when
 *                             reconstructing the address.
 */
static int rtNetIpv6CheckAddrStr(const char *psz, char *pszResultAddress, size_t resultAddressSize, bool addressOnly, bool followRfc)
{
    int rc;
    int rc2;
    int returnValue;

    char *p = NULL, *pl = NULL;

    size_t memAllocMaxSize = RT_MAX(strlen(psz), resultAddressSize) + 40;

    char *pszAddressOutLocal = (char *)RTMemTmpAlloc(memAllocMaxSize);
    char *pszIfIdOutLocal = (char *)RTMemTmpAlloc(memAllocMaxSize);
    char *pszAddressRfcOutLocal = (char *)RTMemTmpAlloc(memAllocMaxSize);

    if (!pszAddressOutLocal || !pszIfIdOutLocal || !pszAddressRfcOutLocal)
    {
        /* Don't leak whichever of the buffers were successfully allocated. */
        if (pszAddressOutLocal)
            RTMemTmpFree(pszAddressOutLocal);
        if (pszIfIdOutLocal)
            RTMemTmpFree(pszIfIdOutLocal);
        if (pszAddressRfcOutLocal)
            RTMemTmpFree(pszAddressRfcOutLocal);
        return VERR_NO_TMP_MEMORY;
    }

    memset(pszAddressOutLocal, '\0', memAllocMaxSize);
    memset(pszIfIdOutLocal, '\0', memAllocMaxSize);
    memset(pszAddressRfcOutLocal, '\0', memAllocMaxSize);

    rc = rtStrParseAddrStr6(psz, strlen(psz), pszAddressOutLocal, memAllocMaxSize, NULL, pszIfIdOutLocal, memAllocMaxSize, NULL, NULL, followRfc);

    if (rc == 0)
        returnValue = VINF_SUCCESS;

    if (rc == 0 && pszResultAddress)
    {
        // Convert the expanded 32-character form into a valid, shortened IPv6 address.

        rc2 = rtStrToIpAddr6Str((const char *)pszAddressOutLocal, pszAddressRfcOutLocal, memAllocMaxSize, NULL, 0, followRfc);

        if (rc2 != 0)
            returnValue = 951;

        // Temporary solution: the interface identifier is copied over _ALWAYS_,
        // i.e. the addressOnly flag is ignored (the check used to include "&& !addressOnly").
        if (!returnValue && strlen(pszIfIdOutLocal) > 0)
        {
            p = pszAddressRfcOutLocal + strlen(pszAddressRfcOutLocal);

            *p = '%';

            p++;

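            // Note: memcpy never returns NULL, so the NULL check below is effectively dead code.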
            pl = (char *)memcpy(p, pszIfIdOutLocal, strlen(pszIfIdOutLocal));

            if (!pl)
                returnValue = VERR_NOT_SUPPORTED;
        }

        pl = NULL;

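        // Note: this copies strlen() bytes without a terminating '\0'; the caller's
        // pszResultAddress buffer must already be zeroed and large enough.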
        pl = (char *)memcpy(pszResultAddress, pszAddressRfcOutLocal, strlen(pszAddressRfcOutLocal));

        if (!pl)
            returnValue = VERR_NOT_SUPPORTED;
    }

    if (rc != 0)
        returnValue = VERR_NOT_SUPPORTED;

    if (pszAddressOutLocal)
        RTMemTmpFree(pszAddressOutLocal);

    if (pszAddressRfcOutLocal)
        RTMemTmpFree(pszAddressRfcOutLocal);

    if (pszIfIdOutLocal)
        RTMemTmpFree(pszIfIdOutLocal);

    return returnValue;

}
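/*
 * The '%' handling above appends the interface (zone) id to the shortened
 * address by hand.  The standalone sketch below shows the same concatenation
 * with explicit bounds checking; appendZoneId and its parameters are
 * hypothetical and not part of the IPRT sources.
 */
#include <stdio.h>
#include <string.h>

/* Append "%<ifid>" to an already shortened IPv6 address.
   Returns 0 on success, -1 if the result would not fit in cbDst bytes. */
static int appendZoneId(char *pszDst, size_t cbDst, const char *pszAddr, const char *pszIfId)
{
    size_t cchNeeded = strlen(pszAddr) + 1 /* '%' */ + strlen(pszIfId) + 1 /* '\0' */;
    if (cchNeeded > cbDst)
        return -1;
    snprintf(pszDst, cbDst, "%s%%%s", pszAddr, pszIfId);
    return 0;
}

/* Example: appendZoneId(szBuf, sizeof(szBuf), "fe80::1", "en0") yields "fe80::1%en0". */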
Example #30
/**
 * Worker for VBoxDrvDarwinIOCtl that handles the slow IOCtl functions.
 *
 * @returns Darwin errno.
 *
 * @param   pSession    The session.
 * @param   iCmd        The IOCtl command.
 * @param   pData       Pointer to the kernel copy of the SUPDRVIOCTLDATA buffer.
 * @param   pProcess    The calling process.
 */
static int VBoxDrvDarwinIOCtlSlow(PSUPDRVSESSION pSession, u_long iCmd, caddr_t pData, struct proc *pProcess)
{
    LogFlow(("VBoxDrvDarwinIOCtlSlow: pSession=%p iCmd=%p pData=%p pProcess=%p\n", pSession, iCmd, pData, pProcess));


    /*
     * Buffered or unbuffered?
     */
    PSUPREQHDR pHdr;
    user_addr_t pUser = 0;
    void *pvPageBuf = NULL;
    uint32_t cbReq = IOCPARM_LEN(iCmd);
    if ((IOC_DIRMASK & iCmd) == IOC_INOUT)
    {
        pHdr = (PSUPREQHDR)pData;
        if (RT_UNLIKELY(cbReq < sizeof(*pHdr)))
        {
            OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: cbReq=%#x < %#x; iCmd=%#lx\n", cbReq, (int)sizeof(*pHdr), iCmd));
            return EINVAL;
        }
        if (RT_UNLIKELY((pHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC))
        {
            OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: bad magic fFlags=%#x; iCmd=%#lx\n", pHdr->fFlags, iCmd));
            return EINVAL;
        }
        if (RT_UNLIKELY(    RT_MAX(pHdr->cbIn, pHdr->cbOut) != cbReq
                        ||  pHdr->cbIn < sizeof(*pHdr)
                        ||  pHdr->cbOut < sizeof(*pHdr)))
        {
            OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: max(%#x,%#x) != %#x; iCmd=%#lx\n", pHdr->cbIn, pHdr->cbOut, cbReq, iCmd));
            return EINVAL;
        }
    }
    else if ((IOC_DIRMASK & iCmd) == IOC_VOID && !cbReq)
    {
        /*
         * Get the header and figure out how much we're gonna have to read.
         */
        SUPREQHDR Hdr;
        pUser = (user_addr_t)*(void **)pData;
        int rc = copyin(pUser, &Hdr, sizeof(Hdr));
        if (RT_UNLIKELY(rc))
        {
            OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: copyin(%llx,Hdr,) -> %#x; iCmd=%#lx\n", (unsigned long long)pUser, rc, iCmd));
            return rc;
        }
        if (RT_UNLIKELY((Hdr.fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC))
        {
            OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: bad magic fFlags=%#x; iCmd=%#lx\n", Hdr.fFlags, iCmd));
            return EINVAL;
        }
        cbReq = RT_MAX(Hdr.cbIn, Hdr.cbOut);
        if (RT_UNLIKELY(    Hdr.cbIn < sizeof(Hdr)
                        ||  Hdr.cbOut < sizeof(Hdr)
                        ||  cbReq > _1M*16))
        {
            OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: max(%#x,%#x); iCmd=%#lx\n", Hdr.cbIn, Hdr.cbOut, iCmd));
            return EINVAL;
        }

        /*
         * Allocate buffer and copy in the data.
         */
        pHdr = (PSUPREQHDR)RTMemTmpAlloc(cbReq);
        if (!pHdr)
            pvPageBuf = pHdr = (PSUPREQHDR)IOMallocAligned(RT_ALIGN_Z(cbReq, PAGE_SIZE), 8);
        if (RT_UNLIKELY(!pHdr))
        {
            OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: failed to allocate buffer of %d bytes; iCmd=%#lx\n", cbReq, iCmd));
            return ENOMEM;
        }
        rc = copyin(pUser, pHdr, Hdr.cbIn);
        if (RT_UNLIKELY(rc))
        {
            OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: copyin(%llx,%p,%#x) -> %#x; iCmd=%#lx\n",
                        (unsigned long long)pUser, pHdr, Hdr.cbIn, rc, iCmd));
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pHdr);
            return rc;
        }
    }
    else
    {
        Log(("VBoxDrvDarwinIOCtlSlow: huh? cbReq=%#x iCmd=%#lx\n", cbReq, iCmd));
        return EINVAL;
    }

    /*
     * Process the IOCtl.
     */
    int rc = supdrvIOCtl(iCmd, &g_DevExt, pSession, pHdr);
    if (RT_LIKELY(!rc))
    {
        /*
         * If not buffered, copy back the buffer before returning.
         */
        if (pUser)
        {
            uint32_t cbOut = pHdr->cbOut;
            if (cbOut > cbReq)
            {
                OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: too much output! %#x > %#x; uCmd=%#lx!\n", cbOut, cbReq, iCmd));
                cbOut = cbReq;
            }
            rc = copyout(pHdr, pUser, cbOut);
            if (RT_UNLIKELY(rc))
                OSDBGPRINT(("VBoxDrvDarwinIOCtlSlow: copyout(%p,%llx,%#x) -> %d; uCmd=%#lx!\n",
                            pHdr, (unsigned long long)pUser, cbOut, rc, iCmd));

            /* cleanup */
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pHdr);
        }
    }
    else
    {
        /*
         * The request failed, just clean up.
         */
        if (pUser)
        {
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pHdr);
        }

        Log(("VBoxDrvDarwinIOCtlSlow: pid=%d iCmd=%lx pData=%p failed, rc=%d\n", proc_pid(pProcess), iCmd, (void *)pData, rc));
        rc = EINVAL;
    }

    Log2(("VBoxDrvDarwinIOCtlSlow: returns %d\n", rc));
    return rc;
}
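/*
 * For the unbuffered (IOC_VOID) path above, user land passes a pointer to a
 * request that starts with a size header; the kernel copyin()s the header,
 * sizes its buffer from cbIn/cbOut, and copyout()s at most cbOut bytes back.
 * The sketch below shows that calling convention from user land against a
 * hypothetical driver built like the one above; the device node, request
 * layout, magic value and request code are placeholders, not the real SUP
 * interface.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct DemoHdr { uint32_t cbIn; uint32_t cbOut; uint32_t fFlags; };    /* placeholder header */
struct DemoReq { struct DemoHdr Hdr; char abPayload[64]; };            /* placeholder request */

#define DEMO_IOCTL  _IO('V', 42)  /* IOC_VOID: the pointer itself travels as the ioctl argument */

static int demoCallDriver(void)
{
    int fd = open("/dev/demodrv", O_RDWR);          /* placeholder device node */
    if (fd < 0)
        return -1;

    struct DemoReq Req;
    memset(&Req, 0, sizeof(Req));
    Req.Hdr.cbIn   = sizeof(Req);                   /* the driver copyin()s this many bytes */
    Req.Hdr.cbOut  = sizeof(Req);                   /* and copyout()s at most this many back */
    Req.Hdr.fFlags = 0x42000042;                    /* placeholder magic checked by the driver */

    int rc = ioctl(fd, DEMO_IOCTL, &Req);           /* the kernel reads this pointer back as user_addr_t */
    close(fd);
    return rc;
}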