/**
 * Service request callback function.
 *
 * @returns VBox status code.
 * @param   pSession    The caller's session.
 * @param   uOperation  The operation to perform (TSTRTR0MEMUSERKERNEL_XXX).
 * @param   u64Arg      64-bit integer argument; the user address to test.
 * @param   pReqHdr     The request header, followed by the error message
 *                      buffer. Input / Output. Mandatory.
 */
DECLEXPORT(int) TSTRTR0MemUserKernelSrvReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation,
                                                  uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    NOREF(pSession);
    if (!VALID_PTR(pReqHdr))
        return VERR_INVALID_PARAMETER;
    char   *pszErr = (char *)(pReqHdr + 1);
    size_t  cchErr = pReqHdr->cbReq - sizeof(*pReqHdr);
    if (cchErr < 32 || cchErr >= 0x10000)
        return VERR_INVALID_PARAMETER;
    *pszErr = '\0';
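    /* Reporting convention: test results travel through the message buffer
       rather than the status code.  A message starting with '!' indicates
       failure, anything else is informational (see the note at the end). */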

    /*
     * R3Ptr is expected to be a valid user address, good for up to one page,
     * with both the page before and the page after unmapped.  For the
     * INVALID_ADDRESS test it is a kernel page instead.
     */
    RTR3PTR R3Ptr = (RTR3PTR)u64Arg;
    if (R3Ptr != u64Arg) /* detects truncation when RTR3PTR is narrower than 64 bits */
        return VERR_INVALID_PARAMETER;

    /*
     * Allocate a kernel buffer.
     */
    uint8_t *pbKrnlBuf = (uint8_t *)RTMemAlloc(PAGE_SIZE * 2);
    if (!pbKrnlBuf)
    {
        RTStrPrintf(pszErr, cchErr, "!no memory for kernel buffers");
        return VINF_SUCCESS;
    }

    /*
     * The big switch.
     */
    switch (uOperation)
    {
        case TSTRTR0MEMUSERKERNEL_SANITY_OK:
            break;

        case TSTRTR0MEMUSERKERNEL_SANITY_FAILURE:
            RTStrPrintf(pszErr, cchErr, "!42failure42%1024s", "");
            break;

        case TSTRTR0MEMUSERKERNEL_BASIC:
        {
            int rc = RTR0MemUserCopyFrom(pbKrnlBuf, R3Ptr, PAGE_SIZE);
            if (rc == VINF_SUCCESS)
            {
                rc = RTR0MemUserCopyTo(R3Ptr, pbKrnlBuf, PAGE_SIZE);
                if (rc == VINF_SUCCESS)
                {
                    if (RTR0MemUserIsValidAddr(R3Ptr))
                    {
                        if (RTR0MemKernelIsValidAddr(pbKrnlBuf))
                        {
                            if (RTR0MemAreKrnlAndUsrDifferent())
                            {
                                RTStrPrintf(pszErr, cchErr, "RTR0MemAreKrnlAndUsrDifferent returns true");
                                if (!RTR0MemUserIsValidAddr((uintptr_t)pbKrnlBuf))
                                {
                                    if (!RTR0MemKernelIsValidAddr((void *)R3Ptr))
                                    {
                                        /* done */
                                    }
                                    else
                                        RTStrPrintf(pszErr, cchErr, "! #5 - RTR0MemKernelIsValidAddr -> true, expected false");
                                }
                                else
                                    RTStrPrintf(pszErr, cchErr, "! #5 - RTR0MemUserIsValidAddr -> true, expected false");
                            }
                            else
                                RTStrPrintf(pszErr, cchErr, "RTR0MemAreKrnlAndUsrDifferent returns false");
                        }
                        else
                            RTStrPrintf(pszErr, cchErr, "! #4 - RTR0MemKernelIsValidAddr -> false, expected true");
                    }
                    else
                        RTStrPrintf(pszErr, cchErr, "! #3 - RTR0MemUserIsValidAddr -> false, expected true");
                }
                else
                    RTStrPrintf(pszErr, cchErr, "! #2 - RTR0MemUserCopyTo -> %Rrc expected %Rrc", rc, VINF_SUCCESS);
            }
            else
                RTStrPrintf(pszErr, cchErr, "! #1 - RTR0MemUserCopyFrom -> %Rrc expected %Rrc", rc, VINF_SUCCESS);
            break;
        }

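/* Copies 'size' bytes at offset 'off' in both directions and records the
   first status code that differs from 'rcExpect'.  The
   'if (1) { ... } else do {} while (0)' wrapper forces a trailing semicolon
   at the expansion site while still letting the embedded 'break' leave the
   enclosing loop on failure. */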
#define TEST_OFF_SIZE(off, size, rcExpect) \
    if (1) \
    { \
        int rc = RTR0MemUserCopyFrom(pbKrnlBuf, R3Ptr + (off), (size)); \
        if (rc != (rcExpect)) \
        { \
            RTStrPrintf(pszErr, cchErr, "!RTR0MemUserCopyFrom(, +%#x, %#x) -> %Rrc, expected %Rrc", \
                        (off), (size), rc, (rcExpect)); \
            break; \
        } \
        rc = RTR0MemUserCopyTo(R3Ptr + (off), pbKrnlBuf, (size)); \
        if (rc != (rcExpect)) \
        { \
            RTStrPrintf(pszErr, cchErr, "!RTR0MemUserCopyTo(+%#x,, %#x) -> %Rrc, expected %Rrc", \
                        (off), (size), rc, (rcExpect)); \
            break; \
        } \
    } else do {} while (0)

        case TSTRTR0MEMUSERKERNEL_GOOD:
        {
            for (unsigned off = 0; off < 16 && !*pszErr; off++)
                for (unsigned cb = 0; cb < PAGE_SIZE - 16; cb++)
                    TEST_OFF_SIZE(off, cb, VINF_SUCCESS);
            break;
        }

        case TSTRTR0MEMUSERKERNEL_BAD:
        {
            for (unsigned off = 0; off < 16 && !*pszErr; off++)
                for (unsigned cb = 0; cb < PAGE_SIZE - 16; cb++)
                    TEST_OFF_SIZE(off, cb, cb > 0 ? VERR_ACCESS_DENIED : VINF_SUCCESS);
            break;
        }

        case TSTRTR0MEMUSERKERNEL_INVALID_ADDRESS:
        {
            if (    !RTR0MemUserIsValidAddr(R3Ptr)
                &&  RTR0MemKernelIsValidAddr((void *)R3Ptr))
            {
                for (unsigned off = 0; off < 16 && !*pszErr; off++)
                    for (unsigned cb = 0; cb < PAGE_SIZE - 16; cb++)
                        TEST_OFF_SIZE(off, cb, cb > 0 ? VERR_ACCESS_DENIED : VINF_SUCCESS); /* ... */
            }
            else
                RTStrPrintf(pszErr, cchErr, "RTR0MemUserIsValidAddr returns true");
            break;
        }

        default:
            RTStrPrintf(pszErr, cchErr, "!Unknown test #%d", uOperation);
            break;
    }

    /* The error indicator is the '!' in the message buffer. */
    RTMemFree(pbKrnlBuf);
    return VINF_SUCCESS;
}
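For context, here is a minimal sketch of how a ring-3 test driver could reach
the handler above through SUPR3CallR0Service().  The service name, the
guarded-page allocation and the 1 KB message buffer size are illustrative
assumptions, not taken from the actual ring-3 driver.

#include <iprt/param.h>
#include <iprt/test.h>
#include <VBox/sup.h>
#include "tstRTR0MemUserKernel.h" /* assumed to define TSTRTR0MEMUSERKERNEL_XXX */

static void doBasicTest(RTTEST hTest)
{
    /* Request packet: header followed by the message buffer the ring-0
       handler formats into (layout mirrors the cbReq arithmetic above). */
    struct
    {
        SUPR0SERVICEREQHDR Hdr;
        char               szMsg[1024];
    } Req;
    Req.Hdr.u32Magic = SUPR0SERVICEREQHDR_MAGIC;
    Req.Hdr.cbReq    = sizeof(Req);
    Req.szMsg[0]     = '\0';

    /* A user page whose neighboring pages are unmapped, as the test expects. */
    void *pvUser = NULL;
    int rc = RTTestGuardedAlloc(hTest, PAGE_SIZE, 0 /*cbAlign*/, false /*fHead*/, &pvUser);
    if (RT_FAILURE(rc))
    {
        RTTestFailed(hTest, "RTTestGuardedAlloc -> %Rrc", rc);
        return;
    }

    /* Hypothetical service name; a '!' prefix in the reply means failure. */
    rc = SUPR3CallR0Service("tstRTR0MemUserKernel", sizeof("tstRTR0MemUserKernel") - 1,
                            TSTRTR0MEMUSERKERNEL_BASIC, (uintptr_t)pvUser, &Req.Hdr);
    if (RT_FAILURE(rc))
        RTTestFailed(hTest, "SUPR3CallR0Service -> %Rrc", rc);
    else if (Req.szMsg[0] == '!')
        RTTestFailed(hTest, "%s", &Req.szMsg[1]);
    else if (Req.szMsg[0])
        RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "%s\n", Req.szMsg);
}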
Example #2
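/**
 * Locks down a range of kernel virtual memory (the Linux worker behind
 * RTR0MemObjLockKernel).
 *
 * @returns VBox status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   pv          Start of the range; expected to be page aligned.
 * @param   cb          Size of the range; expected to be a page multiple.
 * @param   fAccess     Access requirements (RTMEM_PROT_XXX); not used here.
 */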
int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    void           *pvLast = (uint8_t *)pv + cb - 1;
    size_t const    cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJLNX  pMemLnx;
    bool            fLinearMapping;
    int             rc;
    uint8_t        *pbPage;
    size_t          iPage;
    NOREF(fAccess);

    /*
     * Classify the memory and check that we can deal with it.  A "linear
     * mapping" means the range lives in the kernel's 1:1 lowmem mapping,
     * where virt_to_page() works directly; anything else (e.g. vmalloc
     * space) must be translated page by page with vmalloc_to_page() below.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    fLinearMapping = virt_addr_valid(pvLast)          && virt_addr_valid(pv);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
    fLinearMapping = VALID_PAGE(virt_to_page(pvLast)) && VALID_PAGE(virt_to_page(pv));
#else
# error "not supported"
#endif
    if (!fLinearMapping)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
        if (   !RTR0MemKernelIsValidAddr(pv)
            || !RTR0MemKernelIsValidAddr(pv + cb))
#endif
            return VERR_INVALID_PARAMETER;
    }

    /*
     * Allocate the memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;

    /*
     * Gather the pages.
     * We ASSUME all kernel pages are non-swappable, so looking up the page
     * structures is enough; the range is walked backwards from the last page.
     */
    rc     = VINF_SUCCESS;
    pbPage = (uint8_t *)pvLast;
    iPage  = cPages;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
    if (!fLinearMapping)
    {
        while (iPage-- > 0)
        {
            struct page *pPage = vmalloc_to_page(pbPage);
            if (RT_UNLIKELY(!pPage))
            {
                rc = VERR_LOCK_FAILED;
                break;
            }
            pMemLnx->apPages[iPage] = pPage;
            pbPage -= PAGE_SIZE;
        }
    }
    else
#endif
    {
        while (iPage-- > 0)
        {
            pMemLnx->apPages[iPage] = virt_to_page(pbPage);
            pbPage -= PAGE_SIZE;
        }
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Complete the memory object and return.
         */
        pMemLnx->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        pMemLnx->cPages = cPages;
        Assert(!pMemLnx->fMappedToRing0);
        *ppMem = &pMemLnx->Core;

        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    return rc;
}
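The worker above is what the public RTR0MemObjLockKernel() API lands on for
the Linux backend.  A minimal ring-0 usage sketch follows; the buffer size
and the RTMemPageAlloc-based allocation are illustrative assumptions, and
RTMemPageFree's size parameter follows current IPRT.

#include <iprt/mem.h>
#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/err.h>

static int lockKernelBufferSketch(void)
{
    /* A page-aligned buffer in the kernel's linear mapping. */
    void *pv = RTMemPageAlloc(2 * PAGE_SIZE);
    if (!pv)
        return VERR_NO_MEMORY;

    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjLockKernel(&hMemObj, pv, 2 * PAGE_SIZE,
                                  RTMEM_PROT_READ | RTMEM_PROT_WRITE);
    if (RT_SUCCESS(rc))
    {
        /* The pages are now tracked by the object; e.g. a physical address
           can be queried for DMA-style use. */
        RTHCPHYS HCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /*iPage*/);
        NOREF(HCPhys);

        rc = RTR0MemObjFree(hMemObj, false /*fFreeMappings*/);
    }
    RTMemPageFree(pv, 2 * PAGE_SIZE);
    return rc;
}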