Code example #1
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    /** @todo alignment  */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Phys.fAllocated = true;
        pMemOs2->Core.u.Phys.PhysBase = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
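For orientation, here is a minimal caller-side sketch of how such a native allocator is reached through the portable IPRT front-end (a sketch only, assuming the public RTR0MemObjAllocPhys/RTR0MemObjGetPagePhysAddr/RTR0MemObjFree declarations in iprt/memobj.h; the size and physical limit are illustrative):

#include <iprt/memobj.h>
#include <iprt/err.h>

/* Hypothetical driver snippet: allocate 64 KB below 4 GB, read back the
   physical base of the first page, then release the object. */
static int exampleAllocPhysBelow4G(void)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjAllocPhys(&hMemObj, _64K, _4G - 1 /* PhysHighest */);
    if (RT_SUCCESS(rc))
    {
        RTHCPHYS PhysBase = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
        NOREF(PhysBase);
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}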
Code example #2
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
    if (!rc)
    {
        ULONG cPagesRet = cPages;
        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
        if (!rc)
        {
            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
            *ppMem = &pMemOs2->Core;
            return VINF_SUCCESS;
        }
        KernVMFree(pMemOs2->Core.pv);
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    rc = RTErrConvertFromOS2(rc);
    return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
}
Code example #3
File: memobj-r0drv-freebsd.c Project: mcenirm/vbox
/**
 * Worker locking the memory in either kernel or user maps.
 */
static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
                                     vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
                                     RTR0PROCESS R0Process, int fFlags)
{
    int rc;
    NOREF(fAccess);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
    rc = vm_map_wire(pVmMap,                                         /* the map */
                     AddrStart,                                      /* start */
                     AddrStart + cb,                                 /* end */
                     fFlags);                                        /* flags */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY; /** @todo fix mach -> vbox error conversion for freebsd. */
}
Code example #4
/**
 * Worker locking the memory in either kernel or user maps.
 *
 * @returns IPRT status code.
 * @param   ppMem        Where to store the allocated memory object.
 * @param   pvStart      The starting address.
 * @param   cb           The size of the block.
 * @param   fAccess      The mapping protection to apply.
 * @param   R0Process    The process to map the memory to (use NIL_RTR0PROCESS
 *                       for the kernel)
 * @param   fFlags       Memory flags (B_READ_DEVICE indicates the memory is
 *                       intended to be written from a "device").
 */
static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvStart, size_t cb, uint32_t fAccess,
                                     RTR0PROCESS R0Process, int fFlags)
{
    NOREF(fAccess);
    int rc;
    team_id TeamId = B_SYSTEM_TEAM;

    LogFlowFunc(("ppMem=%p pvStart=%p cb=%u fAccess=%x R0Process=%d fFlags=%x\n", ppMem, pvStart, cb, fAccess, R0Process,
                 fFlags));

    /* Create the object. */
    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_LOCK, pvStart, cb);
    if (RT_UNLIKELY(!pMemHaiku))
        return VERR_NO_MEMORY;

    if (R0Process != NIL_RTR0PROCESS)
        TeamId = (team_id)R0Process;
    rc = lock_memory_etc(TeamId, pvStart, cb, fFlags);
    if (rc == B_OK)
    {
        pMemHaiku->AreaId = -1;
        pMemHaiku->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemHaiku->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemHaiku->Core);
    return RTErrConvertFromHaikuKernReturn(rc);
}
Code example #5
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
                        (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        Assert(cb == pMemOs2->Core.cb);
        Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
        pMemOs2->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
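As a rough illustration of the calling side (a sketch only, assuming the public RTR0MemObjLockUser front-end in iprt/memobj.h and RTR0ProcHandleSelf() from iprt/process.h), locking a user buffer of the current process might look like this:

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/process.h>
#include <iprt/err.h>

/* Hypothetical snippet: R3Ptr and cb would come from the request being serviced. */
static int exampleLockUserBuffer(RTR3PTR R3Ptr, size_t cb)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjLockUser(&hMemObj, R3Ptr, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
    {
        /* ... use RTR0MemObjGetPagePhysAddr() or map the pages into ring-0 ... */
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}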
Code example #6
int rtR0MemObjNativeReserveUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3PtrFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process)
{
    PRTR0MEMOBJLNX      pMemLnx;
    void               *pv;
    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
    if (!pTask)
        return VERR_NOT_SUPPORTED;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Let rtR0MemObjLinuxDoMmap do the difficult bits.
     */
    pv = rtR0MemObjLinuxDoMmap(R3PtrFixed, cb, uAlignment, pTask, RTMEM_PROT_NONE);
    if (pv == (void *)-1)
        return VERR_NO_MEMORY;

    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
    if (!pMemLnx)
    {
        MY_DO_MUNMAP(pTask->mm, (unsigned long)pv, cb);
        return VERR_NO_MEMORY;
    }

    pMemLnx->Core.u.ResVirt.R0Process = R0Process;
    *ppMem = &pMemLnx->Core;
    return VINF_SUCCESS;
}
Code example #7
int rtR0MemObjNativeReserveKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    const size_t cPages = cb >> PAGE_SHIFT;
    struct page *pDummyPage;
    struct page **papPages;

    /* check for unsupported stuff. */
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Allocate a dummy page and create a page pointer array for vmap such that
     * the dummy page is mapped all over the reserved area.
     */
    pDummyPage = alloc_page(GFP_HIGHUSER);
    if (!pDummyPage)
        return VERR_NO_MEMORY;
    papPages = RTMemAlloc(sizeof(*papPages) * cPages);
    if (papPages)
    {
        void *pv;
        size_t iPage = cPages;
        while (iPage-- > 0)
            papPages[iPage] = pDummyPage;
# ifdef VM_MAP
        pv = vmap(papPages, cPages, VM_MAP, PAGE_KERNEL_RO);
# else
        pv = vmap(papPages, cPages, VM_ALLOC, PAGE_KERNEL_RO);
# endif
        RTMemFree(papPages);
        if (pv)
        {
            PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_RES_VIRT, pv, cb);
            if (pMemLnx)
            {
                pMemLnx->Core.u.ResVirt.R0Process = NIL_RTR0PROCESS;
                pMemLnx->cPages = 1;
                pMemLnx->apPages[0] = pDummyPage;
                *ppMem = &pMemLnx->Core;
                return VINF_SUCCESS;
            }
            vunmap(pv);
        }
    }
    __free_page(pDummyPage);
    return VERR_NO_MEMORY;

#else   /* < 2.4.22 */
    /*
     * Could probably use ioremap here, but the caller is in a better position than us
     * to select some safe physical memory.
     */
    return VERR_NOT_SUPPORTED;
#endif
}
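For context, a minimal caller-side sketch of reserving kernel address space (assuming the public RTR0MemObjReserveKernel/RTR0MemObjAddress front-end and PAGE_SIZE from iprt/param.h; the parameters are illustrative):

#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/err.h>

/* Hypothetical snippet: reserve kernel address space without backing it, then release it. */
static int exampleReserveKernelRange(size_t cb)
{
    RTR0MEMOBJ hMemObj;
    int rc = RTR0MemObjReserveKernel(&hMemObj, (void *)-1 /* no fixed address */, cb, PAGE_SIZE);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMemObj);  /* start of the reserved range */
        NOREF(pv);
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}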
Code example #8
File: memobj-r0drv-freebsd.c Project: mcenirm/vbox
static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb,
                                           RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous, int rcNoMem)
{
    uint32_t   cPages = atop(cb);
    vm_paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       enmType, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, atop(cb));

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;

    int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
                                              uAlignment, fContiguous, true, rcNoMem);
    if (RT_SUCCESS(rc))
    {
        if (fContiguous)
        {
            Assert(enmType == RTR0MEMOBJTYPE_PHYS);
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
#endif
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
#endif
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }

        *ppMem = &pMemFreeBSD->Core;
    }
    else
    {
        vm_object_deallocate(pMemFreeBSD->pObject);
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    }

    return rc;
}
Code example #9
File: memobj-r0drv-freebsd.c Project: mcenirm/vbox
DECLHIDDEN(int) rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /* there is no allocation here, it needs to be mapped somewhere first. */
    pMemFreeBSD->Core.u.Phys.fAllocated = false;
    pMemFreeBSD->Core.u.Phys.PhysBase = Phys;
    pMemFreeBSD->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemFreeBSD->Core;
    return VINF_SUCCESS;
}
Code example #10
File: memobj-r0drv-freebsd.c Project: mcenirm/vbox
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, false, VERR_NO_LOW_MEMORY);
    if (RT_FAILURE(rc))
    {
        rtR0MemObjDelete(&pMemFreeBSD->Core);
        return rc;
    }

    *ppMem = &pMemFreeBSD->Core;
    return rc;
}
Code example #11
File: memobj-r0drv-freebsd.c Project: mcenirm/vbox
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, true, VERR_NO_CONT_MEMORY);
    if (RT_FAILURE(rc))
    {
        rtR0MemObjDelete(&pMemFreeBSD->Core);
        return rc;
    }

    pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
    *ppMem = &pMemFreeBSD->Core;
    return rc;
}
Code example #12
int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    AssertReturn(uCachePolicy == RTMEM_CACHE_POLICY_DONT_CARE, VERR_NOT_SUPPORTED);
    LogFlowFunc(("ppMem=%p Phys=%08x cb=%u uCachePolicy=%x\n", ppMem, Phys,(unsigned)cb, uCachePolicy));

    /* Create the object. */
    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemHaiku)
        return VERR_NO_MEMORY;

    /* There is no allocation here, it needs to be mapped somewhere first. */
    pMemHaiku->AreaId = -1;
    pMemHaiku->Core.u.Phys.fAllocated = false;
    pMemHaiku->Core.u.Phys.PhysBase = Phys;
    pMemHaiku->Core.u.Phys.uCachePolicy = uCachePolicy;
    *ppMem = &pMemHaiku->Core;
    return VINF_SUCCESS;
}
Code example #13
int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t cb, uint32_t uCachePolicy)
{
    /*
     * All we need to do here is to validate that we can use
     * ioremap on the specified address (32/64-bit dma_addr_t).
     */
    PRTR0MEMOBJLNX  pMemLnx;
    dma_addr_t      PhysAddr = Phys;
    AssertMsgReturn(PhysAddr == Phys, ("%#llx\n", (unsigned long long)Phys), VERR_ADDRESS_TOO_BIG);

    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;

    pMemLnx->Core.u.Phys.PhysBase = PhysAddr;
    pMemLnx->Core.u.Phys.fAllocated = false;
    pMemLnx->Core.u.Phys.uCachePolicy = uCachePolicy;
    Assert(!pMemLnx->cPages);
    *ppMem = &pMemLnx->Core;
    return VINF_SUCCESS;
}
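A hedged caller-side sketch of how such an "enter physical range" object is typically used, assuming the public RTR0MemObjEnterPhys/RTR0MemObjMapKernel front-end (the physical address, size and cache policy below are illustrative only):

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/err.h>

/* Hypothetical MMIO snippet: wrap a device register window and map it into ring-0. */
static int exampleMapMmio(RTHCPHYS PhysMmio, size_t cb, void **ppvR0)
{
    RTR0MEMOBJ hPhysObj;
    int rc = RTR0MemObjEnterPhys(&hPhysObj, PhysMmio, cb, RTMEM_CACHE_POLICY_MMIO);
    if (RT_SUCCESS(rc))
    {
        RTR0MEMOBJ hMapObj;
        rc = RTR0MemObjMapKernel(&hMapObj, hPhysObj, (void *)-1 /* no fixed address */,
                                 PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
            *ppvR0 = RTR0MemObjAddress(hMapObj);
        /* Both objects must eventually be released with RTR0MemObjFree(). */
    }
    return rc;
}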
Code example #14
File: memobj-r0drv-nt.cpp Project: jeppeter/vbox
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try allocate the memory and create an MDL for them so
     * we can query the physical addresses and do mappings later
     * without running into out-of-memory conditions and similar problems.
     */
    int rc = VERR_NO_PAGE_MEMORY;
    void *pv = ExAllocatePoolWithTag(NonPagedPool, cb, IPRT_NT_POOL_TAG);
    if (pv)
    {
        PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
        if (pMdl)
        {
            MmBuildMdlForNonPagedPool(pMdl);
#ifdef RT_ARCH_AMD64
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

            /*
             * Create the IPRT memory object.
             */
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
            if (pMemNt)
            {
                pMemNt->cMdls = 1;
                pMemNt->apMdls[0] = pMdl;
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            IoFreeMdl(pMdl);
        }
        ExFreePool(pv);
    }
    return rc;
}
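A hedged aside on the MDL bookkeeping above: once MmBuildMdlForNonPagedPool() has filled in the page frame numbers, the per-page physical address can be recovered from the MDL roughly like this (an illustrative sketch, not the actual IPRT helper):

/* Illustrative only, assuming a single MDL describing the whole allocation. */
static RTHCPHYS exampleMdlPagePhysAddr(PMDL pMdl, size_t iPage)
{
    PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);    /* PFN array backing the MDL */
    return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;   /* page frame number -> byte address */
}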
Code example #15
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Cont.Phys = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
Code example #16
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
                        pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
Code example #17
/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment,
                                        RTR0PROCESS R0Process)
{
    int rc;
    team_id TeamId = B_SYSTEM_TEAM;

    LogFlowFunc(("ppMem=%p pvFixed=%p cb=%u uAlignment=%u R0Process=%d\n", ppMem, pvFixed, (unsigned)cb, uAlignment, R0Process));

    if (R0Process != NIL_RTR0PROCESS)
        TeamId = (team_id)R0Process;

    /* Check that the specified alignment is supported. */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* Create the object. */
    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemHaiku)
        return VERR_NO_MEMORY;

    /* Ask the kernel to reserve the address range. */
    //XXX: vm_reserve_address_range ?
    rtR0MemObjDelete(&pMemHaiku->Core); /* don't leak the unused object */
    return VERR_NOT_SUPPORTED;
}
Code example #18
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int rc;
    void *pvR0;
    void *pvR3 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
#if 0/* this is wrong. */
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS | VMDHA_PROCESS, &pvR3, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
            }
            break;
#endif
            return VERR_NOT_SUPPORTED;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_5;
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Map the ring-0 memory into the current process.
     */
    if (!pvR3)
    {
        Assert(pvR0);
        ULONG flFlags = 0;
        if (uAlignment == PAGE_SIZE)
            flFlags |= VMDHGP_4MB;
        if (fProt & RTMEM_PROT_WRITE)
            flFlags |= VMDHGP_WRITE;
        rc = RTR0Os2DHVMGlobalToProcess(flFlags, pvR0, pMemToMapOs2->Core.cb, &pvR3);
        if (rc)
            return RTErrConvertFromOS2(rc);
    }
    Assert(pvR3);

    /*
     * Create a mapping object for it.
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR3, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        Assert(pMemOs2->Core.pv == pvR3);
        pMemOs2->Core.u.Mapping.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    KernVMFree(pvR3);
    return VERR_NO_MEMORY;
}
Code example #19
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    PRTR0MEMOBJHAIKU pMemToMapHaiku = (PRTR0MEMOBJHAIKU)pMemToMap;
    PRTR0MEMOBJHAIKU pMemHaiku;
    area_id area = -1;
    void *pvMap = pvFixed;
    uint32 uAddrSpec = B_EXACT_ADDRESS;
    uint32 fProtect = 0;
    int rc = VERR_MAP_FAILED;
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
#if 0
    /** @todo r=ramshankar: Wrong format specifiers, fix later! */
    dprintf("%s(%p, %p, %p, %d, %x, %u, %u)\n", __FUNCTION__, ppMem, pMemToMap, pvFixed, uAlignment,
            fProt, offSub, cbSub);
#endif
    /* Check that the specified alignment is supported. */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* We can't map anything to the first page, sorry. */
    if (pvFixed == 0)
        return VERR_NOT_SUPPORTED;

    if (fProt & RTMEM_PROT_READ)
        fProtect |= B_KERNEL_READ_AREA;
    if (fProt & RTMEM_PROT_WRITE)
        fProtect |= B_KERNEL_WRITE_AREA;

    /*
     * Either the object we map has an area associated with, which we can clone,
     * or it's a physical address range which we must map.
     */
    if (pMemToMapHaiku->AreaId > -1)
    {
        if (pvFixed == (void *)-1)
            uAddrSpec = B_ANY_KERNEL_ADDRESS;

        rc = area = clone_area("IPRT R0MemObj MapKernel", &pvMap, uAddrSpec, fProtect, pMemToMapHaiku->AreaId);
        LogFlow(("rtR0MemObjNativeMapKernel: clone_area uAddrSpec=%d fProtect=%x AreaId=%d rc=%d\n", uAddrSpec, fProtect,
                 pMemToMapHaiku->AreaId, rc));
    }
    else if (pMemToMapHaiku->Core.enmType == RTR0MEMOBJTYPE_PHYS)
    {
        /* map_physical_memory() won't let you choose where. */
        if (pvFixed != (void *)-1)
            return VERR_NOT_SUPPORTED;
        uAddrSpec = B_ANY_KERNEL_ADDRESS;

        rc = area = map_physical_memory("IPRT R0MemObj MapKernelPhys", (phys_addr_t)pMemToMapHaiku->Core.u.Phys.PhysBase,
                                        pMemToMapHaiku->Core.cb, uAddrSpec, fProtect, &pvMap);
    }
    else
        return VERR_NOT_SUPPORTED;

    if (rc >= B_OK)
    {
        /* Create the object. */
        pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), RTR0MEMOBJTYPE_MAPPING, pvMap,
                    pMemToMapHaiku->Core.cb);
        if (RT_UNLIKELY(!pMemHaiku))
            return VERR_NO_MEMORY;

        pMemHaiku->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        pMemHaiku->Core.pv = pvMap;
        pMemHaiku->AreaId = area;
        *ppMem = &pMemHaiku->Core;
        return VINF_SUCCESS;
    }
    rc = VERR_MAP_FAILED;

    /** @todo finish the implementation. */

    /* Note: no memory object has been created on this path, so there is nothing to delete. */
    return rc;
}
Code example #20
int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    void           *pvLast = (uint8_t *)pv + cb - 1;
    size_t const    cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJLNX  pMemLnx;
    bool            fLinearMapping;
    int             rc;
    uint8_t        *pbPage;
    size_t          iPage;
    NOREF(fAccess);

    /*
     * Classify the memory and check that we can deal with it.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    fLinearMapping = virt_addr_valid(pvLast)          && virt_addr_valid(pv);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
    fLinearMapping = VALID_PAGE(virt_to_page(pvLast)) && VALID_PAGE(virt_to_page(pv));
#else
# error "not supported"
#endif
    if (!fLinearMapping)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
        if (   !RTR0MemKernelIsValidAddr(pv)
            || !RTR0MemKernelIsValidAddr(pv + cb))
#endif
            return VERR_INVALID_PARAMETER;
    }

    /*
     * Allocate the memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;

    /*
     * Gather the pages.
     * We ASSUME all kernel pages are non-swappable.
     */
    rc     = VINF_SUCCESS;
    pbPage = (uint8_t *)pvLast;
    iPage  = cPages;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
    if (!fLinearMapping)
    {
        while (iPage-- > 0)
        {
            struct page *pPage = vmalloc_to_page(pbPage);
            if (RT_UNLIKELY(!pPage))
            {
                rc = VERR_LOCK_FAILED;
                break;
            }
            pMemLnx->apPages[iPage] = pPage;
            pbPage -= PAGE_SIZE;
        }
    }
    else
#endif
    {
        while (iPage-- > 0)
        {
            pMemLnx->apPages[iPage] = virt_to_page(pbPage);
            pbPage -= PAGE_SIZE;
        }
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Complete the memory object and return.
         */
        pMemLnx->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        pMemLnx->cPages = cPages;
        Assert(!pMemLnx->fMappedToRing0);
        *ppMem = &pMemLnx->Core;

        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    return rc;
}
Code example #21
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    const int cPages = cb >> PAGE_SHIFT;
    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
    struct vm_area_struct **papVMAs;
    PRTR0MEMOBJLNX pMemLnx;
    int rc = VERR_NO_MEMORY;
    NOREF(fAccess);

    /*
     * Check for valid task and size overflows.
     */
    if (!pTask)
        return VERR_NOT_SUPPORTED;
    if (((size_t)cPages << PAGE_SHIFT) != cb)
        return VERR_OUT_OF_RANGE;

    /*
     * Allocate the memory object and a temporary buffer for the VMAs.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;

    papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
    if (papVMAs)
    {
        down_read(&pTask->mm->mmap_sem);

        /*
         * Get user pages.
         */
        rc = get_user_pages(pTask,                  /* Task for fault accounting. */
                            pTask->mm,              /* Whose pages. */
                            R3Ptr,                  /* Where from. */
                            cPages,                 /* How many pages. */
                            1,                      /* Write to memory. */
                            0,                      /* force. */
                            &pMemLnx->apPages[0],   /* Page array. */
                            papVMAs);               /* vmas */
        if (rc == cPages)
        {
            /*
             * Flush dcache (required?), protect against fork and _really_ pin the page
             * table entries. get_user_pages() will protect against swapping out the
             * pages but it will NOT protect against removing page table entries. This
             * can be achieved with
             *   - using mlock / mmap(..., MAP_LOCKED, ...) from userland. This requires
             *     an appropriate limit set up with setrlimit(..., RLIMIT_MEMLOCK, ...).
             *     Usual Linux distributions support only a limited size of locked pages
             *     (e.g. 32KB).
             *   - setting the PageReserved bit (as we do in rtR0MemObjLinuxAllocPages())
             *     or by
             *   - setting the VM_LOCKED flag. This is the same as doing mlock() without
             *     a range check.
             */
            /** @todo The Linux fork() protection will require more work if this API
             * is to be used for anything but locking VM pages. */
            while (rc-- > 0)
            {
                flush_dcache_page(pMemLnx->apPages[rc]);
                papVMAs[rc]->vm_flags |= (VM_DONTCOPY | VM_LOCKED);
            }

            up_read(&pTask->mm->mmap_sem);

            RTMemFree(papVMAs);

            pMemLnx->Core.u.Lock.R0Process = R0Process;
            pMemLnx->cPages = cPages;
            Assert(!pMemLnx->fMappedToRing0);
            *ppMem = &pMemLnx->Core;

            return VINF_SUCCESS;
        }

        /*
         * Failed - we need to unlock any pages that we succeeded to lock.
         */
        while (rc-- > 0)
        {
            if (!PageReserved(pMemLnx->apPages[rc]))
                SetPageDirty(pMemLnx->apPages[rc]);
            page_cache_release(pMemLnx->apPages[rc]);
        }

        up_read(&pTask->mm->mmap_sem);

        RTMemFree(papVMAs);
        rc = VERR_LOCK_FAILED;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    return rc;
}
Code example #22
File: memobj-r0drv-freebsd.c Project: mcenirm/vbox
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
//  AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int                rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;

    /* calc protection */
    vm_prot_t       ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    vm_offset_t  Addr = vm_map_min(kernel_map);
    if (cbSub == 0)
        cbSub = pMemToMap->cb - offSub;

    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(kernel_map,            /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     offSub,                /* Start offset in the object */
                     &Addr,                 /* Start address IN/OUT */
                     cbSub,                 /* Size of the mapping */
#if __FreeBSD_version >= 1000055
                     0,                     /* Upper bound of mapping */
#endif
                     VMFS_ANY_SPACE,        /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        rc = vm_map_wire(kernel_map, Addr, Addr + cbSub, VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)Addr,
                                                                           cbSub);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == Addr);
            pMemFreeBSD->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }
        rc = vm_map_remove(kernel_map, Addr, Addr + cbSub);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
Code example #23
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;


/** @todo finish the implementation. */

    int rc;
    void *pvR0 = NULL;
    PRTR0MEMOBJOS2 pMemToMapOs2 = (PRTR0MEMOBJOS2)pMemToMap;
    switch (pMemToMapOs2->Core.enmType)
    {
        /*
         * These have kernel mappings.
         */
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_PHYS:
            pvR0 = pMemToMapOs2->Core.pv;
            if (!pvR0)
            {
                /* no ring-0 mapping, so allocate a mapping in the process. */
                AssertMsgReturn(fProt & RTMEM_PROT_WRITE, ("%#x\n", fProt), VERR_NOT_SUPPORTED);
                Assert(!pMemToMapOs2->Core.u.Phys.fAllocated);
                ULONG ulPhys = pMemToMapOs2->Core.u.Phys.PhysBase;
                rc = KernVMAlloc(pMemToMapOs2->Core.cb, VMDHA_PHYS, &pvR0, (PPVOID)&ulPhys, NULL);
                if (rc)
                    return RTErrConvertFromOS2(rc);
                pMemToMapOs2->Core.pv = pvR0;
            }
            break;

        case RTR0MEMOBJTYPE_PHYS_NC:
            AssertMsgFailed(("RTR0MEMOBJTYPE_PHYS_NC\n"));
            return VERR_INTERNAL_ERROR_3;
            break;

        case RTR0MEMOBJTYPE_LOCK:
            if (pMemToMapOs2->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                return VERR_NOT_SUPPORTED; /** @todo implement this... */
            pvR0 = pMemToMapOs2->Core.pv;
            break;

        case RTR0MEMOBJTYPE_RES_VIRT:
        case RTR0MEMOBJTYPE_MAPPING:
        default:
            AssertMsgFailed(("enmType=%d\n", pMemToMapOs2->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    /*
     * Create a dummy mapping object for it.
     *
     * All mappings are read/write/execute in OS/2 and there are no
     * cache options, so sharing is ok. And the main memory object
     * isn't actually freed until all the mappings have been freed up
     * (reference counting).
     */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_MAPPING, pvR0, pMemToMapOs2->Core.cb);
    if (pMemOs2)
    {
        pMemOs2->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
Code example #24
File: memobj-r0drv-freebsd.c Project: mcenirm/vbox
/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (   pvFixed != (void *)-1
        && (    (vm_offset_t)pvFixed      < vm_map_min(pMap)
            ||  (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    vm_offset_t MapAddress = pvFixed != (void *)-1
                           ? (vm_offset_t)pvFixed
                           : vm_map_min(pMap);
    if (pvFixed != (void *)-1)
        vm_map_remove(pMap,
                      MapAddress,
                      MapAddress + cb);

    rc = vm_map_find(pMap,                          /* map */
                     NULL,                          /* object */
                     0,                             /* offset */
                     &MapAddress,                   /* addr (IN/OUT) */
                     cb,                            /* length */
#if __FreeBSD_version >= 1000055
                     0,                             /* max addr */
#endif
                     pvFixed == (void *)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                                    /* find_space */
                     VM_PROT_NONE,                  /* protection */
                     VM_PROT_ALL,                   /* max(_prot) ?? */
                     0);                            /* cow (copy-on-write) */
    if (rc == KERN_SUCCESS)
    {
        if (R0Process != NIL_RTR0PROCESS)
        {
            rc = vm_map_inherit(pMap,
                                MapAddress,
                                MapAddress + cb,
                                VM_INHERIT_SHARE);
            AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
        }
        pMemFreeBSD->Core.pv = (void *)MapAddress;
        pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;

}
Code example #25
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    int rc = VERR_NO_MEMORY;
    PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
    PRTR0MEMOBJLNX pMemLnx;

    /* Fail if requested to do something we can't. */
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the IPRT memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
    if (pMemLnx)
    {
        if (pMemLnxToMap->cPages)
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
            /*
             * Use vmap - 2.4.22 and later.
             */
            pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
# ifdef VM_MAP
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
# else
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
# endif
            if (pMemLnx->Core.pv)
            {
                pMemLnx->fMappedToRing0 = true;
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_MAP_FAILED;

#else   /* < 2.4.22 */
            /*
             * Only option here is to share mappings if possible and forget about fProt.
             */
            if (rtR0MemObjIsRing3(pMemToMap))
                rc = VERR_NOT_SUPPORTED;
            else
            {
                rc = VINF_SUCCESS;
                if (!pMemLnxToMap->Core.pv)
                    rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
                if (RT_SUCCESS(rc))
                {
                    Assert(pMemLnxToMap->Core.pv);
                    pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
                }
            }
#endif
        }
        else
        {
            /*
             * MMIO / physical memory.
             */
            Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
            pMemLnx->Core.pv = pMemLnxToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO
                             ? ioremap_nocache(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb)
                             : ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
            if (pMemLnx->Core.pv)
            {
                /** @todo fix protection. */
                rc = VINF_SUCCESS;
            }
        }
        if (RT_SUCCESS(rc))
        {
            pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemLnx->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    return rc;
}
Code example #26
File: memobj-r0drv-freebsd.c Project: mcenirm/vbox
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                                        unsigned fProt, RTR0PROCESS R0Process)
{
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int                rc;
    PRTR0MEMOBJFREEBSD pMemToMapFreeBSD = (PRTR0MEMOBJFREEBSD)pMemToMap;
    struct proc       *pProc            = (struct proc *)R0Process;
    struct vm_map     *pProcMap         = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t       ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address */
    vm_offset_t AddrR3;
    if (R3PtrFixed == (RTR3PTR)-1)
    {
        /** @todo: is this needed?. */
        PROC_LOCK(pProc);
        AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
        PROC_UNLOCK(pProc);
    }
    else
        AddrR3 = (vm_offset_t)R3PtrFixed;

    /* Insert the pObject in the map. */
    vm_object_reference(pMemToMapFreeBSD->pObject);
    rc = vm_map_find(pProcMap,              /* Map to insert the object in */
                     pMemToMapFreeBSD->pObject, /* Object to map */
                     0,                     /* Start offset in the object */
                     &AddrR3,               /* Start address IN/OUT */
                     pMemToMap->cb,         /* Size of the mapping */
#if __FreeBSD_version >= 1000055
                     0,                     /* Upper bound of the mapping */
#endif
                     R3PtrFixed == (RTR3PTR)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                            /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* copy-on-write and similar flags */

    if (rc == KERN_SUCCESS)
    {
        rc = vm_map_wire(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        rc = vm_map_inherit(pProcMap, AddrR3, AddrR3 + pMemToMap->cb, VM_INHERIT_SHARE);
        AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));

        /*
         * Create a mapping object for it.
         */
        PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(RTR0MEMOBJFREEBSD),
                                                                           RTR0MEMOBJTYPE_MAPPING,
                                                                           (void *)AddrR3,
                                                                           pMemToMap->cb);
        if (pMemFreeBSD)
        {
            Assert((vm_offset_t)pMemFreeBSD->Core.pv == AddrR3);
            pMemFreeBSD->Core.u.Mapping.R0Process = R0Process;
            *ppMem = &pMemFreeBSD->Core;
            return VINF_SUCCESS;
        }

        rc = vm_map_remove(pProcMap, AddrR3, AddrR3 + pMemToMap->cb);
        AssertMsg(rc == KERN_SUCCESS, ("Deleting mapping failed\n"));
    }
    else
        vm_object_deallocate(pMemToMapFreeBSD->pObject);

    return VERR_NO_MEMORY;
}
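A minimal caller-side sketch for completeness, assuming the public RTR0MemObjMapUser/RTR0MemObjAddressR3 front-end (the protection flags and alignment are illustrative):

#include <iprt/memobj.h>
#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/process.h>
#include <iprt/err.h>

/* Hypothetical snippet: expose an existing ring-0 allocation to the calling process. */
static int exampleMapIntoCaller(RTR0MEMOBJ hMemObj, RTR3PTR *pR3Ptr)
{
    RTR0MEMOBJ hMapObj;
    int rc = RTR0MemObjMapUser(&hMapObj, hMemObj, (RTR3PTR)-1 /* no fixed address */,
                               PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                               RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
        *pR3Ptr = RTR0MemObjAddressR3(hMapObj);  /* ring-3 address of the new mapping */
    return rc;
}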
Code example #27
/**
 * Internal worker that allocates physical pages and creates the memory object for them.
 *
 * @returns IPRT status code.
 * @param   ppMemLnx    Where to store the memory object pointer.
 * @param   enmType     The object type.
 * @param   cb          The number of bytes to allocate.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid if fContiguous == true, ignored otherwise.
 * @param   fFlagsLnx   The page allocation flags (GPFs).
 * @param   fContiguous Whether the allocation must be contiguous.
 */
static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb,
                                     size_t uAlignment, unsigned fFlagsLnx, bool fContiguous)
{
    size_t          iPage;
    size_t const    cPages = cb >> PAGE_SHIFT;
    struct page    *paPages;

    /*
     * Allocate a memory object structure that's large enough to contain
     * the page pointer array.
     */
    PRTR0MEMOBJLNX  pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;
    pMemLnx->cPages = cPages;

    /*
     * Allocate the pages.
     * For small allocations we'll try contiguous first and then fall back on page by page.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    if (    fContiguous
        ||  cb <= PAGE_SIZE * 2)
    {
# ifdef VBOX_USE_INSERT_PAGE
        paPages = alloc_pages(fFlagsLnx |  __GFP_COMP, rtR0MemObjLinuxOrder(cPages));
# else
        paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
# endif
        if (paPages)
        {
            fContiguous = true;
            for (iPage = 0; iPage < cPages; iPage++)
                pMemLnx->apPages[iPage] = &paPages[iPage];
        }
        else if (fContiguous)
        {
            rtR0MemObjDelete(&pMemLnx->Core);
            return VERR_NO_MEMORY;
        }
    }

    if (!fContiguous)
    {
        for (iPage = 0; iPage < cPages; iPage++)
        {
            pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx);
            if (RT_UNLIKELY(!pMemLnx->apPages[iPage]))
            {
                while (iPage-- > 0)
                    __free_page(pMemLnx->apPages[iPage]);
                rtR0MemObjDelete(&pMemLnx->Core);
                return VERR_NO_MEMORY;
            }
        }
    }

#else /* < 2.4.22 */
    /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... */
    paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
    if (!paPages)
    {
        rtR0MemObjDelete(&pMemLnx->Core);
        return VERR_NO_MEMORY;
    }
    for (iPage = 0; iPage < cPages; iPage++)
    {
        pMemLnx->apPages[iPage] = &paPages[iPage];
        MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1);
        if (PageHighMem(pMemLnx->apPages[iPage]))
            BUG();
    }

    fContiguous = true;
#endif /* < 2.4.22 */
    pMemLnx->fContiguous = fContiguous;

    /*
     * Reserve the pages.
     */
    for (iPage = 0; iPage < cPages; iPage++)
        SetPageReserved(pMemLnx->apPages[iPage]);

    /*
     * Note that the physical address of memory allocated with alloc_pages(flags, order)
     * is always 2^(PAGE_SHIFT+order)-aligned.
     */
    if (   fContiguous
        && uAlignment > PAGE_SIZE)
    {
        /*
         * Check for alignment constraints. The physical address of memory allocated with
         * alloc_pages(flags, order) is always 2^(PAGE_SHIFT+order)-aligned.
         */
        if (RT_UNLIKELY(page_to_phys(pMemLnx->apPages[0]) & (uAlignment - 1)))
        {
            /*
             * This should never happen!
             */
            printk("rtR0MemObjLinuxAllocPages(cb=%ld, uAlignment=%ld): alloc_pages(..., %d) returned physical memory at %lu!\n",
                    (unsigned long)cb, (unsigned long)uAlignment, rtR0MemObjLinuxOrder(cPages), (unsigned long)page_to_phys(pMemLnx->apPages[0]));
            rtR0MemObjLinuxFreePages(pMemLnx);
            return VERR_NO_MEMORY;
        }
    }

    *ppMemLnx = pMemLnx;
    return VINF_SUCCESS;
}
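The comments above rely on alloc_pages(flags, order) returning memory that is 2^(PAGE_SHIFT+order)-byte aligned. As a rough stand-in for the order computation (not the actual rtR0MemObjLinuxOrder(), just the usual ceil-log2 idea):

/* Illustrative only: the smallest order such that (1 << order) pages cover cPages pages. */
static unsigned exampleCeilOrder(size_t cPages)
{
    unsigned iOrder = 0;
    while (((size_t)1 << iOrder) < cPages)
        iOrder++;
    return iOrder;
}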
Code example #28
File: memobj-r0drv-nt.cpp Project: jeppeter/vbox
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */

    /*
     * Try see if we get lucky first...
     * (We could probably just assume we're lucky on NT4.)
     */
    int rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc))
    {
        size_t iPage = cb >> PAGE_SHIFT;
        while (iPage-- > 0)
            if (rtR0MemObjNativeGetPagePhysAddr(*ppMem, iPage) >= _4G)
            {
                rc = VERR_NO_LOW_MEMORY;
                break;
            }
        if (RT_SUCCESS(rc))
            return rc;

        /* The following ASSUMES that rtR0MemObjNativeAllocPage returns a completed object. */
        RTR0MemObjFree(*ppMem, false);
        *ppMem = NULL;
    }

#ifndef IPRT_TARGET_NT4
    /*
     * Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
     */
    PHYSICAL_ADDRESS Zero;
    Zero.QuadPart = 0;
    PHYSICAL_ADDRESS HighAddr;
    HighAddr.QuadPart = _4G - 1;
    PMDL pMdl = MmAllocatePagesForMdl(Zero, HighAddr, Zero, cb);
    if (pMdl)
    {
        if (MmGetMdlByteCount(pMdl) >= cb)
        {
            __try
            {
                void *pv = MmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                        FALSE /* no bug check on failure */, NormalPagePriority);
                if (pv)
                {
                    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                    if (pMemNt)
                    {
                        pMemNt->fAllocatedPagesForMdl = true;
                        pMemNt->cMdls = 1;
                        pMemNt->apMdls[0] = pMdl;
                        *ppMem = &pMemNt->Core;
                        return VINF_SUCCESS;
                    }
                    MmUnmapLockedPages(pv, pMdl);
                }
            }
            __except(EXCEPTION_EXECUTE_HANDLER)
            {
                NTSTATUS rcNt = GetExceptionCode();
                Log(("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt));
                /* nothing */
            }
        }
        MmFreePagesFromMdl(pMdl);
        ExFreePool(pMdl);
    }
Code example #29
static int rtR0MemObjNativeAllocArea(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
                                     bool fExecutable, RTR0MEMOBJTYPE type, RTHCPHYS PhysHighest, size_t uAlignment)
{
    NOREF(fExecutable);

    int rc;
    void *pvMap         = NULL;
    const char *pszName = NULL;
    uint32 addressSpec  = B_ANY_KERNEL_ADDRESS;
    uint32 fLock        = ~0U;
    LogFlowFunc(("ppMem=%p cb=%u, fExecutable=%s, type=%08x, PhysHighest=%RX64 uAlignment=%u\n", ppMem,(unsigned)cb,
                 fExecutable ? "true" : "false", type, PhysHighest,(unsigned)uAlignment));

    switch (type)
    {
    case RTR0MEMOBJTYPE_PAGE:
        pszName = "IPRT R0MemObj Alloc";
        fLock = B_FULL_LOCK;
        break;
    case RTR0MEMOBJTYPE_LOW:
        pszName = "IPRT R0MemObj AllocLow";
        fLock = B_32_BIT_FULL_LOCK;
        break;
    case RTR0MEMOBJTYPE_CONT:
        pszName = "IPRT R0MemObj AllocCont";
        fLock = B_32_BIT_CONTIGUOUS;
        break;
#if 0
    case RTR0MEMOBJTYPE_MAPPING:
        pszName = "IPRT R0MemObj Mapping";
        fLock = B_FULL_LOCK;
        break;
#endif
    case RTR0MEMOBJTYPE_PHYS:
        /** @todo alignment  */
        if (uAlignment != PAGE_SIZE)
            return VERR_NOT_SUPPORTED;
    /** @todo r=ramshankar: no 'break' here?? */
    case RTR0MEMOBJTYPE_PHYS_NC:
        pszName = "IPRT R0MemObj AllocPhys";
        fLock   = (PhysHighest < _4G ? B_LOMEM : B_32_BIT_CONTIGUOUS);
        break;
#if 0
    case RTR0MEMOBJTYPE_LOCK:
        break;
#endif
    default:
        return VERR_INTERNAL_ERROR;
    }

    /* Create the object. */
    PRTR0MEMOBJHAIKU pMemHaiku;
    pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), type, NULL, cb);
    if (RT_UNLIKELY(!pMemHaiku))
        return VERR_NO_MEMORY;

    rc = pMemHaiku->AreaId = create_area(pszName, &pvMap, addressSpec, cb, fLock, B_READ_AREA | B_WRITE_AREA);
    if (pMemHaiku->AreaId >= 0)
    {
        physical_entry physMap[2];
        pMemHaiku->Core.pv = pvMap;   /* store start address */
        switch (type)
        {
        case RTR0MEMOBJTYPE_CONT:
            rc = get_memory_map(pvMap, cb, physMap, 2);
            if (rc == B_OK)
                pMemHaiku->Core.u.Cont.Phys = physMap[0].address;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            rc = get_memory_map(pvMap, cb, physMap, 2);
            if (rc == B_OK)
            {
                pMemHaiku->Core.u.Phys.PhysBase = physMap[0].address;
                pMemHaiku->Core.u.Phys.fAllocated = true;
            }
            break;

        default:
            break;
        }
        if (rc >= B_OK)
        {
            *ppMem = &pMemHaiku->Core;
            return VINF_SUCCESS;
        }

        delete_area(pMemHaiku->AreaId);
    }

    rtR0MemObjDelete(&pMemHaiku->Core);
    return RTErrConvertFromHaikuKernReturn(rc);
}