int rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_HIGHUSER, false /* non-contiguous */);
#else
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_PAGE, cb, PAGE_SIZE, GFP_USER, false /* non-contiguous */);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
            *ppMem = &pMemLnx->Core;
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    return rc;
}
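For context, the workers in this listing sit behind IPRT's public ring-0 memory-object API. The following is a minimal usage sketch of that front end, not code taken from the port above; it assumes the RTR0MemObjAllocPage / RTR0MemObjAddress / RTR0MemObjFree declarations from iprt/memobj.h and is illustrative only.

#include <iprt/memobj.h>
#include <iprt/param.h>
#include <iprt/err.h>

/* Hypothetical caller: allocate one page-backed buffer through the generic API
 * (which ends up in rtR0MemObjNativeAllocPage above on Linux), use the ring-0
 * mapping it sets up, then free the object together with any mappings. */
static int exampleUsePageObject(void)
{
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocPage(&hMemObj, PAGE_SIZE, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        void *pv = RTR0MemObjAddress(hMemObj);   /* ring-0 mapping created via rtR0MemObjLinuxVMap */
        NOREF(pv);                               /* ... use pv ... */
        rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
    }
    return rc;
}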
int rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

    /* Try to avoid GFP_DMA. GFP_DMA32 was introduced with Linux 2.6.15. */
#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
    /* ZONE_DMA32: 0-4GB */
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA32, false /* non-contiguous */);
    if (RT_FAILURE(rc))
#endif
#ifdef RT_ARCH_AMD64
        /* ZONE_DMA: 0-16MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_DMA, false /* non-contiguous */);
#else
# ifdef CONFIG_X86_PAE
# endif
        /* ZONE_NORMAL: 0-896MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_LOW, cb, PAGE_SIZE, GFP_USER, false /* non-contiguous */);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
            *ppMem = &pMemLnx->Core;
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    return rc;
}
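The zone fallback above (DMA32 first, then DMA on 64-bit or NORMAL on 32-bit) can be read as a small decision ladder. Below is a stand-alone sketch of the same pattern for a single page, written as a hypothetical helper; the function name is made up and only standard gfp flags and config symbols are used.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate one page below 4GB if the kernel offers
 * ZONE_DMA32, otherwise fall back to the more restrictive ZONE_DMA on
 * 64-bit, or plain GFP_USER on 32-bit where ZONE_NORMAL is already low. */
static struct page *example_alloc_low_page(void)
{
    struct page *pPage = NULL;
#if defined(CONFIG_ZONE_DMA32)
    pPage = alloc_page(GFP_USER | GFP_DMA32);       /* 0-4GB */
#endif
    if (!pPage)
    {
#if defined(CONFIG_X86_64)
        pPage = alloc_page(GFP_USER | GFP_DMA);     /* ZONE_DMA: 0-16MB, last resort */
#else
        pPage = alloc_page(GFP_USER);               /* ZONE_NORMAL: 0-896MB on 32-bit x86 */
#endif
    }
    return pPage;
}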
Example No. 3
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                                         RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
                        (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        Assert(cb == pMemOs2->Core.cb);
        Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
        pMemOs2->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
Example No. 4
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHighest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    /** @todo alignment  */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0), &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Phys.fAllocated = true;
        pMemOs2->Core.u.Phys.PhysBase = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
Example No. 5
/**
 * Worker locking the memory in either kernel or user maps.
 */
static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, vm_map_t pVmMap,
                                     vm_offset_t AddrStart, size_t cb, uint32_t fAccess,
                                     RTR0PROCESS R0Process, int fFlags)
{
    int rc;
    NOREF(fAccess);

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)AddrStart, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    /*
     * We could've used vslock here, but we don't wish to be subject to
     * resource usage restrictions, so we'll call vm_map_wire directly.
     */
    rc = vm_map_wire(pVmMap,                                         /* the map */
                     AddrStart,                                      /* start */
                     AddrStart + cb,                                 /* end */
                     fFlags);                                        /* flags */
    if (rc == KERN_SUCCESS)
    {
        pMemFreeBSD->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return VERR_NO_MEMORY;/** @todo fix mach -> vbox error conversion for freebsd. */
}
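The worker above is parameterised on the map and the wire flags; the kernel and user front ends presumably differ only in those two arguments. A hedged sketch of how such callers could look is given below. The helper bodies and exact flag combinations are assumptions (not copied from this listing); it presumes the usual <sys/param.h>, <sys/proc.h>, <vm/vm.h> and <vm/vm_map.h> includes plus the IPRT internal headers.

/* Hypothetical kernel-side caller: wire pages in the kernel map. */
static int exampleLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    return rtR0MemObjNativeLockInMap(ppMem, kernel_map, (vm_offset_t)pv, cb, fAccess,
                                     NIL_RTR0PROCESS,
                                     VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/* Hypothetical user-side caller: wire pages in the target process's own map. */
static int exampleLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess,
                           RTR0PROCESS R0Process)
{
    struct proc *pProc = (struct proc *)R0Process;
    return rtR0MemObjNativeLockInMap(ppMem, &pProc->p_vmspace->vm_map, (vm_offset_t)R3Ptr, cb, fAccess,
                                     R0Process,
                                     VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
}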
Example No. 6
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
    if (!rc)
    {
        ULONG cPagesRet = cPages;
        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
        if (!rc)
        {
            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
            *ppMem = &pMemOs2->Core;
            return VINF_SUCCESS;
        }
        KernVMFree(pMemOs2->Core.pv);
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    rc = RTErrConvertFromOS2(rc);
    return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
}
Example No. 7
/**
 * Worker locking the memory in either kernel or user maps.
 *
 * @returns IPRT status code.
 * @param   ppMem        Where to store the allocated memory object.
 * @param   pvStart      The starting address.
 * @param   cb           The size of the block.
 * @param   fAccess      The mapping protection to apply.
 * @param   R0Process    The process to map the memory to (use NIL_RTR0PROCESS
 *                       for the kernel)
 * @param   fFlags       Memory flags (B_READ_DEVICE indicates the memory is
 *                       intended to be written from a "device").
 */
static int rtR0MemObjNativeLockInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvStart, size_t cb, uint32_t fAccess,
                                     RTR0PROCESS R0Process, int fFlags)
{
    NOREF(fAccess);
    int rc;
    team_id TeamId = B_SYSTEM_TEAM;

    LogFlowFunc(("ppMem=%p pvStart=%p cb=%u fAccess=%x R0Process=%d fFlags=%x\n", ppMem, pvStart, cb, fAccess, R0Process,
                 fFlags));

    /* Create the object. */
    PRTR0MEMOBJHAIKU pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(*pMemHaiku), RTR0MEMOBJTYPE_LOCK, pvStart, cb);
    if (RT_UNLIKELY(!pMemHaiku))
        return VERR_NO_MEMORY;

    if (R0Process != NIL_RTR0PROCESS)
        TeamId = (team_id)R0Process;
    rc = lock_memory_etc(TeamId, pvStart, cb, fFlags);
    if (rc == B_OK)
    {
        pMemHaiku->AreaId = -1;
        pMemHaiku->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemHaiku->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemHaiku->Core);
    return RTErrConvertFromHaikuKernReturn(rc);
}
Example No. 8
static int rtR0MemObjFreeBSDAllocPhysPages(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                           size_t cb,
                                           RTHCPHYS PhysHighest, size_t uAlignment,
                                           bool fContiguous, int rcNoMem)
{
    uint32_t   cPages = atop(cb);
    vm_paddr_t VmPhysAddrHigh;

    /* create the object. */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       enmType, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, atop(cb));

    if (PhysHighest != NIL_RTHCPHYS)
        VmPhysAddrHigh = PhysHighest;
    else
        VmPhysAddrHigh = ~(vm_paddr_t)0;

    int rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages, VmPhysAddrHigh,
                                              uAlignment, fContiguous, true, rcNoMem);
    if (RT_SUCCESS(rc))
    {
        if (fContiguous)
        {
            Assert(enmType == RTR0MEMOBJTYPE_PHYS);
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
#endif
            pMemFreeBSD->Core.u.Phys.PhysBase = VM_PAGE_TO_PHYS(vm_page_find_least(pMemFreeBSD->pObject, 0));
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
#endif
            pMemFreeBSD->Core.u.Phys.fAllocated = true;
        }

        *ppMem = &pMemFreeBSD->Core;
    }
    else
    {
        vm_object_deallocate(pMemFreeBSD->pObject);
        rtR0MemObjDelete(&pMemFreeBSD->Core);
    }

    return rc;
}
Example No. 9
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, false, VERR_NO_LOW_MEMORY);
    if (RT_FAILURE(rc))
    {
        rtR0MemObjDelete(&pMemFreeBSD->Core);
        return rc;
    }

    *ppMem = &pMemFreeBSD->Core;
    return rc;
}
Example No. 10
static int rtR0MemObjFreeBSDAllocHelper(PRTR0MEMOBJFREEBSD pMemFreeBSD, bool fExecutable,
                                        vm_paddr_t VmPhysAddrHigh, bool fContiguous, int rcNoMem)
{
    vm_offset_t MapAddress = vm_map_min(kernel_map);
    size_t      cPages = atop(pMemFreeBSD->Core.cb);
    int         rc;

    pMemFreeBSD->pObject = vm_object_allocate(OBJT_PHYS, cPages);

    /* No additional object reference for auto-deallocation upon unmapping. */
#if __FreeBSD_version >= 1000055
    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
                     &MapAddress, pMemFreeBSD->Core.cb, 0, VMFS_ANY_SPACE,
                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
#else
    rc = vm_map_find(kernel_map, pMemFreeBSD->pObject, 0,
                     &MapAddress, pMemFreeBSD->Core.cb, VMFS_ANY_SPACE,
                     fExecutable ? VM_PROT_ALL : VM_PROT_RW, VM_PROT_ALL, 0);
#endif

    if (rc == KERN_SUCCESS)
    {
        rc = rtR0MemObjFreeBSDPhysAllocHelper(pMemFreeBSD->pObject, cPages,
                                              VmPhysAddrHigh, PAGE_SIZE, fContiguous,
                                              false, rcNoMem);
        if (RT_SUCCESS(rc))
        {
            vm_map_wire(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb,
                        VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

            /* Store start address */
            pMemFreeBSD->Core.pv = (void *)MapAddress;
            return VINF_SUCCESS;
        }

        vm_map_remove(kernel_map, MapAddress, MapAddress + pMemFreeBSD->Core.cb);
    }
    else
    {
        rc = rcNoMem; /** @todo fix translation (borrow from darwin) */
        vm_object_deallocate(pMemFreeBSD->pObject);
    }

    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;
}
Example No. 11
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD),
                                                                       RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    int rc = rtR0MemObjFreeBSDAllocHelper(pMemFreeBSD, fExecutable, _4G - 1, true, VERR_NO_CONT_MEMORY);
    if (RT_FAILURE(rc))
    {
        rtR0MemObjDelete(&pMemFreeBSD->Core);
        return rc;
    }

    pMemFreeBSD->Core.u.Cont.Phys = vtophys(pMemFreeBSD->Core.pv);
    *ppMem = &pMemFreeBSD->Core;
    return rc;
}
Example No. 12
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_CONT, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG, &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc)
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Cont.Phys = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
Example No. 13
DECLHIDDEN(int) rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    /* create the object. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
                        pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc)
    {
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        pMemOs2->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
Example No. 14
int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

#if (defined(RT_ARCH_AMD64) || defined(CONFIG_X86_PAE)) && defined(GFP_DMA32)
    /* ZONE_DMA32: 0-4GB */
    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA32, true /* contiguous */);
    if (RT_FAILURE(rc))
#endif
#ifdef RT_ARCH_AMD64
        /* ZONE_DMA: 0-16MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_DMA, true /* contiguous */);
#else
        /* ZONE_NORMAL (32-bit hosts): 0-896MB */
        rc = rtR0MemObjLinuxAllocPages(&pMemLnx, RTR0MEMOBJTYPE_CONT, cb, PAGE_SIZE, GFP_USER, true /* contiguous */);
#endif
    if (RT_SUCCESS(rc))
    {
        rc = rtR0MemObjLinuxVMap(pMemLnx, fExecutable);
        if (RT_SUCCESS(rc))
        {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(CONFIG_HIGHMEM64G))
            size_t iPage = pMemLnx->cPages;
            while (iPage-- > 0)
                Assert(page_to_phys(pMemLnx->apPages[iPage]) < _4G);
#endif
            pMemLnx->Core.u.Cont.Phys = page_to_phys(pMemLnx->apPages[0]);
            *ppMem = &pMemLnx->Core;
            return rc;
        }

        rtR0MemObjLinuxFreePages(pMemLnx);
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    return rc;
}
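A caller typically reaches this worker through the public RTR0MemObjAllocCont API and then needs both the ring-0 address and the physical base, e.g. to program a device. A minimal, hedged usage sketch follows; the API names and signatures are as I recall them from iprt/memobj.h and the example function itself is invented.

#include <iprt/memobj.h>
#include <iprt/err.h>
#include <iprt/param.h>

/* Hypothetical caller: allocate a physically contiguous, below-4GB buffer
 * and query its physical base address for DMA programming. */
static int exampleUseContObject(size_t cb, void **ppv, RTHCPHYS *pPhysBase, PRTR0MEMOBJ phMemObj)
{
    int rc = RTR0MemObjAllocCont(phMemObj, cb, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        *ppv       = RTR0MemObjAddress(*phMemObj);                        /* kernel mapping set up by vmap above    */
        *pPhysBase = RTR0MemObjGetPagePhysAddr(*phMemObj, 0 /* iPage */); /* contiguous, so page 0 is the base      */
    }
    return rc;
}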
Example No. 15
/**
 * Worker for rtR0MemObjLinuxAllocPhysSub that tries one allocation strategy.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object pointer on success.
 * @param   enmType     The object type.
 * @param   cb          The size of the allocation.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid for fContiguous == true, ignored otherwise.
 * @param   PhysHighest See rtR0MemObjNativeAllocPhys.
 * @param   fGfp        The Linux GFP flags to use for the allocation.
 */
static int rtR0MemObjLinuxAllocPhysSub2(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJTYPE enmType,
                                        size_t cb, size_t uAlignment, RTHCPHYS PhysHighest, unsigned fGfp)
{
    PRTR0MEMOBJLNX pMemLnx;
    int rc;

    rc = rtR0MemObjLinuxAllocPages(&pMemLnx, enmType, cb, uAlignment, fGfp,
                                   enmType == RTR0MEMOBJTYPE_PHYS /* contiguous / non-contiguous */);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Check the addresses if necessary. (Can be optimized a bit for PHYS.)
     */
    if (PhysHighest != NIL_RTHCPHYS)
    {
        size_t iPage = pMemLnx->cPages;
        while (iPage-- > 0)
            if (page_to_phys(pMemLnx->apPages[iPage]) >= PhysHighest)
            {
                rtR0MemObjLinuxFreePages(pMemLnx);
                rtR0MemObjDelete(&pMemLnx->Core);
                return VERR_NO_MEMORY;
            }
    }

    /*
     * Complete the object.
     */
    if (enmType == RTR0MEMOBJTYPE_PHYS)
    {
        pMemLnx->Core.u.Phys.PhysBase = page_to_phys(pMemLnx->apPages[0]);
        pMemLnx->Core.u.Phys.fAllocated = true;
    }
    *ppMem = &pMemLnx->Core;
    return rc;
}
Example No. 16
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    PRTR0MEMOBJHAIKU pMemToMapHaiku = (PRTR0MEMOBJHAIKU)pMemToMap;
    PRTR0MEMOBJHAIKU pMemHaiku;
    area_id area = -1;
    void *pvMap = pvFixed;
    uint32 uAddrSpec = B_EXACT_ADDRESS;
    uint32 fProtect = 0;
    int rc = VERR_MAP_FAILED;
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
#if 0
    /** @todo r=ramshankar: Wrong format specifiers, fix later! */
    dprintf("%s(%p, %p, %p, %d, %x, %u, %u)\n", __FUNCTION__, ppMem, pMemToMap, pvFixed, uAlignment,
            fProt, offSub, cbSub);
#endif
    /* Check that the specified alignment is supported. */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* We can't map anything to the first page, sorry. */
    if (pvFixed == 0)
        return VERR_NOT_SUPPORTED;

    if (fProt & RTMEM_PROT_READ)
        fProtect |= B_KERNEL_READ_AREA;
    if (fProt & RTMEM_PROT_WRITE)
        fProtect |= B_KERNEL_WRITE_AREA;

    /*
     * Either the object we map has an area associated with, which we can clone,
     * or it's a physical address range which we must map.
     */
    if (pMemToMapHaiku->AreaId > -1)
    {
        if (pvFixed == (void *)-1)
            uAddrSpec = B_ANY_KERNEL_ADDRESS;

        rc = area = clone_area("IPRT R0MemObj MapKernel", &pvMap, uAddrSpec, fProtect, pMemToMapHaiku->AreaId);
        LogFlow(("rtR0MemObjNativeMapKernel: clone_area uAddrSpec=%d fProtect=%x AreaId=%d rc=%d\n", uAddrSpec, fProtect,
                 pMemToMapHaiku->AreaId, rc));
    }
    else if (pMemToMapHaiku->Core.enmType == RTR0MEMOBJTYPE_PHYS)
    {
        /* map_physical_memory() won't let you choose where. */
        if (pvFixed != (void *)-1)
            return VERR_NOT_SUPPORTED;
        uAddrSpec = B_ANY_KERNEL_ADDRESS;

        rc = area = map_physical_memory("IPRT R0MemObj MapKernelPhys", (phys_addr_t)pMemToMapHaiku->Core.u.Phys.PhysBase,
                                        pMemToMapHaiku->Core.cb, uAddrSpec, fProtect, &pvMap);
    }
    else
        return VERR_NOT_SUPPORTED;

    if (rc >= B_OK)
    {
        /* Create the object. */
        pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), RTR0MEMOBJTYPE_MAPPING, pvMap,
                    pMemToMapHaiku->Core.cb);
        if (RT_UNLIKELY(!pMemHaiku))
            return VERR_NO_MEMORY;

        pMemHaiku->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
        pMemHaiku->Core.pv = pvMap;
        pMemHaiku->AreaId = area;
        *ppMem = &pMemHaiku->Core;
        return VINF_SUCCESS;
    }
    rc = VERR_MAP_FAILED;

    /** @todo finish the implementation. */

    /* Note: no memory object has been created on this failure path, so there is nothing to delete here. */
    return rc;
}
Example No. 17
int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
    void           *pvLast = (uint8_t *)pv + cb - 1;
    size_t const    cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJLNX  pMemLnx;
    bool            fLinearMapping;
    int             rc;
    uint8_t        *pbPage;
    size_t          iPage;
    NOREF(fAccess);

    /*
     * Classify the memory and check that we can deal with it.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    fLinearMapping = virt_addr_valid(pvLast)          && virt_addr_valid(pv);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
    fLinearMapping = VALID_PAGE(virt_to_page(pvLast)) && VALID_PAGE(virt_to_page(pv));
#else
# error "not supported"
#endif
    if (!fLinearMapping)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
        if (   !RTR0MemKernelIsValidAddr(pv)
            || !RTR0MemKernelIsValidAddr(pv + cb))
#endif
            return VERR_INVALID_PARAMETER;
    }

    /*
     * Allocate the memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, pv, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;

    /*
     * Gather the pages.
     * We ASSUME all kernel pages are non-swappable.
     */
    rc     = VINF_SUCCESS;
    pbPage = (uint8_t *)pvLast;
    iPage  = cPages;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
    if (!fLinearMapping)
    {
        while (iPage-- > 0)
        {
            struct page *pPage = vmalloc_to_page(pbPage);
            if (RT_UNLIKELY(!pPage))
            {
                rc = VERR_LOCK_FAILED;
                break;
            }
            pMemLnx->apPages[iPage] = pPage;
            pbPage -= PAGE_SIZE;
        }
    }
    else
#endif
    {
        while (iPage-- > 0)
        {
            pMemLnx->apPages[iPage] = virt_to_page(pbPage);
            pbPage -= PAGE_SIZE;
        }
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Complete the memory object and return.
         */
        pMemLnx->Core.u.Lock.R0Process = NIL_RTR0PROCESS;
        pMemLnx->cPages = cPages;
        Assert(!pMemLnx->fMappedToRing0);
        *ppMem = &pMemLnx->Core;

        return VINF_SUCCESS;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    return rc;
}
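The key idea in the classification above is that a kernel address is backed either by the linear (direct) mapping, where virt_to_page() is valid, or by vmalloc space, where vmalloc_to_page() must be used. A stand-alone sketch of that distinction for modern kernels is shown below; the helper name is made up.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: resolve a kernel virtual address to its struct page,
 * picking the lookup that matches the mapping type. */
static struct page *example_kva_to_page(void *pv)
{
    if (virt_addr_valid(pv))        /* address lies in the linear mapping */
        return virt_to_page(pv);
    if (is_vmalloc_addr(pv))        /* address lies in vmalloc space */
        return vmalloc_to_page(pv); /* may still be NULL, e.g. for ioremap'ed MMIO */
    return NULL;                    /* neither: cannot be locked this way */
}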
Example No. 18
int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    const int cPages = cb >> PAGE_SHIFT;
    struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
    struct vm_area_struct **papVMAs;
    PRTR0MEMOBJLNX pMemLnx;
    int rc = VERR_NO_MEMORY;
    NOREF(fAccess);

    /*
     * Check for valid task and size overflows.
     */
    if (!pTask)
        return VERR_NOT_SUPPORTED;
    if (((size_t)cPages << PAGE_SHIFT) != cb)
        return VERR_OUT_OF_RANGE;

    /*
     * Allocate the memory object and a temporary buffer for the VMAs.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;

    papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
    if (papVMAs)
    {
        down_read(&pTask->mm->mmap_sem);

        /*
         * Get user pages.
         */
        rc = get_user_pages(pTask,                  /* Task for fault accounting. */
                            pTask->mm,              /* Whose pages. */
                            R3Ptr,                  /* Where from. */
                            cPages,                 /* How many pages. */
                            1,                      /* Write to memory. */
                            0,                      /* force. */
                            &pMemLnx->apPages[0],   /* Page array. */
                            papVMAs);               /* vmas */
        if (rc == cPages)
        {
            /*
             * Flush dcache (required?), protect against fork and _really_ pin the page
             * table entries. get_user_pages() will protect against swapping out the
             * pages but it will NOT protect against removing page table entries. This
             * can be achieved with
             *   - using mlock / mmap(..., MAP_LOCKED, ...) from userland. This requires
             *     an appropriate limit set up with setrlimit(..., RLIMIT_MEMLOCK, ...).
             *     Usual Linux distributions support only a limited size of locked pages
             *     (e.g. 32KB).
             *   - setting the PageReserved bit (as we do in rtR0MemObjLinuxAllocPages()),
             *     or by
             *   - setting the VM_LOCKED flag. This is the same as doing mlock() without
             *     a range check.
             */
            /** @todo The Linux fork() protection will require more work if this API
             * is to be used for anything but locking VM pages. */
            while (rc-- > 0)
            {
                flush_dcache_page(pMemLnx->apPages[rc]);
                papVMAs[rc]->vm_flags |= (VM_DONTCOPY | VM_LOCKED);
            }

            up_read(&pTask->mm->mmap_sem);

            RTMemFree(papVMAs);

            pMemLnx->Core.u.Lock.R0Process = R0Process;
            pMemLnx->cPages = cPages;
            Assert(!pMemLnx->fMappedToRing0);
            *ppMem = &pMemLnx->Core;

            return VINF_SUCCESS;
        }

        /*
         * Failed - we need to unlock any pages that we succeeded to lock.
         */
        while (rc-- > 0)
        {
            if (!PageReserved(pMemLnx->apPages[rc]))
                SetPageDirty(pMemLnx->apPages[rc]);
            page_cache_release(pMemLnx->apPages[rc]);
        }

        up_read(&pTask->mm->mmap_sem);

        RTMemFree(papVMAs);
        rc = VERR_LOCK_FAILED;
    }

    rtR0MemObjDelete(&pMemLnx->Core);
    return rc;
}
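The first alternative listed in the big comment above, locking from ring-3 with mlock() under an adjusted RLIMIT_MEMLOCK, would look roughly like the userland snippet below. It uses plain POSIX calls and is shown only to illustrate that option; it is not part of the ring-0 code.

#include <sys/mman.h>
#include <sys/resource.h>
#include <stdio.h>

/* Userland illustration: raise the memlock limit (needs privilege) and pin a buffer. */
static int example_lock_from_ring3(void *pv, size_t cb)
{
    struct rlimit rl;
    if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0 && rl.rlim_cur < cb)
    {
        rl.rlim_cur = cb;                 /* raising rlim_max as well requires CAP_SYS_RESOURCE */
        if (setrlimit(RLIMIT_MEMLOCK, &rl) != 0)
            perror("setrlimit(RLIMIT_MEMLOCK)");
    }
    if (mlock(pv, cb) != 0)               /* pins the pages; page table entries can still change */
    {
        perror("mlock");
        return -1;
    }
    return 0;
}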
Example No. 19
/**
 * Internal worker that allocates physical pages and creates the memory object for them.
 *
 * @returns IPRT status code.
 * @param   ppMemLnx    Where to store the memory object pointer.
 * @param   enmType     The object type.
 * @param   cb          The number of bytes to allocate.
 * @param   uAlignment  The alignment of the physical memory.
 *                      Only valid if fContiguous == true, ignored otherwise.
 * @param   fFlagsLnx   The page allocation flags (GFPs).
 * @param   fContiguous Whether the allocation must be contiguous.
 */
static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE enmType, size_t cb,
                                     size_t uAlignment, unsigned fFlagsLnx, bool fContiguous)
{
    size_t          iPage;
    size_t const    cPages = cb >> PAGE_SHIFT;
    struct page    *paPages;

    /*
     * Allocate a memory object structure that's large enough to contain
     * the page pointer array.
     */
    PRTR0MEMOBJLNX  pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJLNX, apPages[cPages]), enmType, NULL, cb);
    if (!pMemLnx)
        return VERR_NO_MEMORY;
    pMemLnx->cPages = cPages;

    /*
     * Allocate the pages.
     * For small allocations we'll try contiguous first and then fall back on page by page.
     */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
    if (    fContiguous
        ||  cb <= PAGE_SIZE * 2)
    {
# ifdef VBOX_USE_INSERT_PAGE
        paPages = alloc_pages(fFlagsLnx |  __GFP_COMP, rtR0MemObjLinuxOrder(cPages));
# else
        paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
# endif
        if (paPages)
        {
            fContiguous = true;
            for (iPage = 0; iPage < cPages; iPage++)
                pMemLnx->apPages[iPage] = &paPages[iPage];
        }
        else if (fContiguous)
        {
            rtR0MemObjDelete(&pMemLnx->Core);
            return VERR_NO_MEMORY;
        }
    }

    if (!fContiguous)
    {
        for (iPage = 0; iPage < cPages; iPage++)
        {
            pMemLnx->apPages[iPage] = alloc_page(fFlagsLnx);
            if (RT_UNLIKELY(!pMemLnx->apPages[iPage]))
            {
                while (iPage-- > 0)
                    __free_page(pMemLnx->apPages[iPage]);
                rtR0MemObjDelete(&pMemLnx->Core);
                return VERR_NO_MEMORY;
            }
        }
    }

#else /* < 2.4.22 */
    /** @todo figure out why we didn't allocate page-by-page on 2.4.21 and older... */
    paPages = alloc_pages(fFlagsLnx, rtR0MemObjLinuxOrder(cPages));
    if (!paPages)
    {
        rtR0MemObjDelete(&pMemLnx->Core);
        return VERR_NO_MEMORY;
    }
    for (iPage = 0; iPage < cPages; iPage++)
    {
        pMemLnx->apPages[iPage] = &paPages[iPage];
        MY_SET_PAGES_EXEC(pMemLnx->apPages[iPage], 1);
        if (PageHighMem(pMemLnx->apPages[iPage]))
            BUG();
    }

    fContiguous = true;
#endif /* < 2.4.22 */
    pMemLnx->fContiguous = fContiguous;

    /*
     * Reserve the pages.
     */
    for (iPage = 0; iPage < cPages; iPage++)
        SetPageReserved(pMemLnx->apPages[iPage]);

    /*
     * Note that the physical address of memory allocated with alloc_pages(flags, order)
     * is always 2^(PAGE_SHIFT+order)-aligned.
     */
    if (   fContiguous
        && uAlignment > PAGE_SIZE)
    {
        /*
         * Check for alignment constraints. The physical address of memory allocated with
         * alloc_pages(flags, order) is always 2^(PAGE_SHIFT+order)-aligned.
         */
        if (RT_UNLIKELY(page_to_phys(pMemLnx->apPages[0]) & (uAlignment - 1)))
        {
            /*
             * This should never happen!
             */
            printk("rtR0MemObjLinuxAllocPages(cb=%ld, uAlignment=%ld): alloc_pages(..., %d) returned physical memory at %lu!\n",
                    (unsigned long)cb, (unsigned long)uAlignment, rtR0MemObjLinuxOrder(cPages), (unsigned long)page_to_phys(pMemLnx->apPages[0]));
            rtR0MemObjLinuxFreePages(pMemLnx);
            return VERR_NO_MEMORY;
        }
    }

    *ppMemLnx = pMemLnx;
    return VINF_SUCCESS;
}
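rtR0MemObjLinuxOrder() is not included in this listing; its job follows from the comment above: alloc_pages(flags, order) hands back 2^order pages that are 2^(PAGE_SHIFT+order)-byte aligned, so the order must be the smallest value covering the requested page count. A hedged sketch of such a helper (the real IPRT implementation may differ; the stock kernel's get_order() does the same for a byte count):

#include <linux/mm.h>

/* Hypothetical order helper: smallest order with (1 << order) >= cPages. */
static unsigned example_pages_to_order(size_t cPages)
{
    unsigned iOrder = 0;
    while ((size_t)1 << iOrder < cPages)
        iOrder++;
    return iOrder;  /* alloc_pages(flags, iOrder) then yields PAGE_SIZE << iOrder bytes,
                       aligned to that same size. */
}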
Example No. 20
int rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                              unsigned fProt, size_t offSub, size_t cbSub)
{
    int rc = VERR_NO_MEMORY;
    PRTR0MEMOBJLNX pMemLnxToMap = (PRTR0MEMOBJLNX)pMemToMap;
    PRTR0MEMOBJLNX pMemLnx;

    /* Fail if requested to do something we can't. */
    AssertMsgReturn(!offSub && !cbSub, ("%#x %#x\n", offSub, cbSub), VERR_NOT_SUPPORTED);
    AssertMsgReturn(pvFixed == (void *)-1, ("%p\n", pvFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the IPRT memory object.
     */
    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(sizeof(*pMemLnx), RTR0MEMOBJTYPE_MAPPING, NULL, pMemLnxToMap->Core.cb);
    if (pMemLnx)
    {
        if (pMemLnxToMap->cPages)
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
            /*
             * Use vmap - 2.4.22 and later.
             */
            pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
# ifdef VM_MAP
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_MAP, fPg);
# else
            pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[0], pMemLnxToMap->cPages, VM_ALLOC, fPg);
# endif
            if (pMemLnx->Core.pv)
            {
                pMemLnx->fMappedToRing0 = true;
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_MAP_FAILED;

#else   /* < 2.4.22 */
            /*
             * Only option here is to share mappings if possible and forget about fProt.
             */
            if (rtR0MemObjIsRing3(pMemToMap))
                rc = VERR_NOT_SUPPORTED;
            else
            {
                rc = VINF_SUCCESS;
                if (!pMemLnxToMap->Core.pv)
                    rc = rtR0MemObjLinuxVMap(pMemLnxToMap, !!(fProt & RTMEM_PROT_EXEC));
                if (RT_SUCCESS(rc))
                {
                    Assert(pMemLnxToMap->Core.pv);
                    pMemLnx->Core.pv = pMemLnxToMap->Core.pv;
                }
            }
#endif
        }
        else
        {
            /*
             * MMIO / physical memory.
             */
            Assert(pMemLnxToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS && !pMemLnxToMap->Core.u.Phys.fAllocated);
            pMemLnx->Core.pv = pMemLnxToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO
                             ? ioremap_nocache(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb)
                             : ioremap(pMemLnxToMap->Core.u.Phys.PhysBase, pMemLnxToMap->Core.cb);
            if (pMemLnx->Core.pv)
            {
                /** @todo fix protection. */
                rc = VINF_SUCCESS;
            }
        }
        if (RT_SUCCESS(rc))
        {
            pMemLnx->Core.u.Mapping.R0Process = NIL_RTR0PROCESS;
            *ppMem = &pMemLnx->Core;
            return VINF_SUCCESS;
        }
        rtR0MemObjDelete(&pMemLnx->Core);
    }

    return rc;
}
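The branch above distinguishes page-backed objects, which get a kernel virtual mapping via vmap(), from raw physical/MMIO ranges, which go through ioremap(). A minimal stand-alone vmap sketch for kernels 2.6 and later is shown below; the helper name is invented and PAGE_KERNEL_EXEC availability depends on the architecture.

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: give an array of already-allocated pages a contiguous
 * kernel virtual mapping; undo with vunmap() when done with it. */
static void *example_map_pages(struct page **papPages, unsigned cPages, bool fExecutable)
{
    pgprot_t fPg = fExecutable ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
    return vmap(papPages, cPages, VM_MAP, fPg);   /* NULL on failure */
}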
Example No. 21
/**
 * Worker for the two virtual address space reservers.
 *
 * We're leaning on the examples provided by mmap and vm_mmap in vm_mmap.c here.
 */
static int rtR0MemObjNativeReserveInMap(PPRTR0MEMOBJINTERNAL ppMem, void *pvFixed, size_t cb, size_t uAlignment, RTR0PROCESS R0Process, vm_map_t pMap)
{
    int rc;

    /*
     * The pvFixed address range must be within the VM space when specified.
     */
    if (   pvFixed != (void *)-1
        && (    (vm_offset_t)pvFixed      < vm_map_min(pMap)
            ||  (vm_offset_t)pvFixed + cb > vm_map_max(pMap)))
        return VERR_INVALID_PARAMETER;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * Create the object.
     */
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_RES_VIRT, NULL, cb);
    if (!pMemFreeBSD)
        return VERR_NO_MEMORY;

    vm_offset_t MapAddress = pvFixed != (void *)-1
                           ? (vm_offset_t)pvFixed
                           : vm_map_min(pMap);
    if (pvFixed != (void *)-1)
        vm_map_remove(pMap,
                      MapAddress,
                      MapAddress + cb);

    rc = vm_map_find(pMap,                          /* map */
                     NULL,                          /* object */
                     0,                             /* offset */
                     &MapAddress,                   /* addr (IN/OUT) */
                     cb,                            /* length */
#if __FreeBSD_version >= 1000055
                     0,                             /* max addr */
#endif
                     pvFixed == (void *)-1 ? VMFS_ANY_SPACE : VMFS_NO_SPACE,
                                                    /* find_space */
                     VM_PROT_NONE,                  /* protection */
                     VM_PROT_ALL,                   /* max(_prot) ?? */
                     0);                            /* cow (copy-on-write) */
    if (rc == KERN_SUCCESS)
    {
        if (R0Process != NIL_RTR0PROCESS)
        {
            rc = vm_map_inherit(pMap,
                                MapAddress,
                                MapAddress + cb,
                                VM_INHERIT_SHARE);
            AssertMsg(rc == KERN_SUCCESS, ("%#x\n", rc));
        }
        pMemFreeBSD->Core.pv = (void *)MapAddress;
        pMemFreeBSD->Core.u.ResVirt.R0Process = R0Process;
        *ppMem = &pMemFreeBSD->Core;
        return VINF_SUCCESS;
    }

    rc = VERR_NO_MEMORY; /** @todo fix translation (borrow from darwin) */
    rtR0MemObjDelete(&pMemFreeBSD->Core);
    return rc;

}
Example No. 22
static int rtR0MemObjNativeAllocArea(PPRTR0MEMOBJINTERNAL ppMem, size_t cb,
                                     bool fExecutable, RTR0MEMOBJTYPE type, RTHCPHYS PhysHighest, size_t uAlignment)
{
    NOREF(fExecutable);

    int rc;
    void *pvMap         = NULL;
    const char *pszName = NULL;
    uint32 addressSpec  = B_ANY_KERNEL_ADDRESS;
    uint32 fLock        = ~0U;
    LogFlowFunc(("ppMem=%p cb=%u, fExecutable=%s, type=%08x, PhysHighest=%RX64 uAlignment=%u\n", ppMem,(unsigned)cb,
                 fExecutable ? "true" : "false", type, PhysHighest,(unsigned)uAlignment));

    switch (type)
    {
    case RTR0MEMOBJTYPE_PAGE:
        pszName = "IPRT R0MemObj Alloc";
        fLock = B_FULL_LOCK;
        break;
    case RTR0MEMOBJTYPE_LOW:
        pszName = "IPRT R0MemObj AllocLow";
        fLock = B_32_BIT_FULL_LOCK;
        break;
    case RTR0MEMOBJTYPE_CONT:
        pszName = "IPRT R0MemObj AllocCont";
        fLock = B_32_BIT_CONTIGUOUS;
        break;
#if 0
    case RTR0MEMOBJTYPE_MAPPING:
        pszName = "IPRT R0MemObj Mapping";
        fLock = B_FULL_LOCK;
        break;
#endif
    case RTR0MEMOBJTYPE_PHYS:
        /** @todo alignment  */
        if (uAlignment != PAGE_SIZE)
            return VERR_NOT_SUPPORTED;
    /** @todo r=ramshankar: no 'break' here?? */
    case RTR0MEMOBJTYPE_PHYS_NC:
        pszName = "IPRT R0MemObj AllocPhys";
        fLock   = (PhysHighest < _4G ? B_LOMEM : B_32_BIT_CONTIGUOUS);
        break;
#if 0
    case RTR0MEMOBJTYPE_LOCK:
        break;
#endif
    default:
        return VERR_INTERNAL_ERROR;
    }

    /* Create the object. */
    PRTR0MEMOBJHAIKU pMemHaiku;
    pMemHaiku = (PRTR0MEMOBJHAIKU)rtR0MemObjNew(sizeof(RTR0MEMOBJHAIKU), type, NULL, cb);
    if (RT_UNLIKELY(!pMemHaiku))
        return VERR_NO_MEMORY;

    rc = pMemHaiku->AreaId = create_area(pszName, &pvMap, addressSpec, cb, fLock, B_READ_AREA | B_WRITE_AREA);
    if (pMemHaiku->AreaId >= 0)
    {
        physical_entry physMap[2];
        pMemHaiku->Core.pv = pvMap;   /* store start address */
        switch (type)
        {
        case RTR0MEMOBJTYPE_CONT:
            rc = get_memory_map(pvMap, cb, physMap, 2);
            if (rc == B_OK)
                pMemHaiku->Core.u.Cont.Phys = physMap[0].address;
            break;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            rc = get_memory_map(pvMap, cb, physMap, 2);
            if (rc == B_OK)
            {
                pMemHaiku->Core.u.Phys.PhysBase = physMap[0].address;
                pMemHaiku->Core.u.Phys.fAllocated = true;
            }
            break;

        default:
            break;
        }
        if (rc >= B_OK)
        {
            *ppMem = &pMemHaiku->Core;
            return VINF_SUCCESS;
        }

        delete_area(pMemHaiku->AreaId);
    }

    rtR0MemObjDelete(&pMemHaiku->Core);
    return RTErrConvertFromHaikuKernReturn(rc);
}