/**
 * Worker that allocates a physically contiguous run of pages below
 * VmPhysAddrHigh and associates them with the given VM object.
 *
 * @returns The first vm_page_t of the run on success, NULL on failure.
 * @param   pObject         VM object the pages are inserted into.
 * @param   iPIndex         Starting page index within pObject.
 * @param   cPages          Number of contiguous pages to allocate.
 * @param   VmPhysAddrHigh  Upper physical address limit for the allocation.
 * @param   uAlignment      Required physical alignment of the run, in bytes.
 * @param   fWire           Whether to wire (pin) the pages in memory.
 */
static vm_page_t rtR0MemObjFreeBSDContigPhysAllocHelper(vm_object_t pObject, vm_pindex_t iPIndex,
                                                        u_long cPages, vm_paddr_t VmPhysAddrHigh,
                                                        u_long uAlignment, bool fWire)
{
    vm_page_t pPages;
    int cTries = 0;

#if __FreeBSD_version > 1000000
    /*
     * FreeBSD 10+ path: vm_page_alloc_contig() both allocates the run and
     * enters the pages into the object; wiring is requested via the
     * VM_ALLOC_WIRED flag rather than done by hand.
     */
    int fFlags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
    if (fWire)
        fFlags |= VM_ALLOC_WIRED;

    /* Two attempts: after the first failure ask the kernel to reclaim
       pages below the limit, then retry once. */
    while (cTries <= 1)
    {
        VM_OBJECT_LOCK(pObject);
        pPages = vm_page_alloc_contig(pObject, iPIndex, fFlags, cPages, 0,
                                      VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
        VM_OBJECT_UNLOCK(pObject);
        if (pPages)
            break;
        /* NOTE(review): presumably grows the cache/free pool below
           VmPhysAddrHigh so the retry can succeed — confirm against the
           target kernel version. */
        vm_pageout_grow_cache(cTries, 0, VmPhysAddrHigh);
        cTries++;
    }

    return pPages;
#else
    /*
     * Pre-10 path: allocate raw physical pages first; object insertion,
     * validity marking, and wiring are performed manually below.
     */
    while (cTries <= 1)
    {
        pPages = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
        if (pPages)
            break;
        vm_contig_grow_cache(cTries, 0, VmPhysAddrHigh);
        cTries++;
    }

    if (!pPages)
        return pPages; /* both attempts failed -> NULL */
    /* Enter each page into the object under the object lock, mark it fully
       valid, and wire it when requested. */
    VM_OBJECT_LOCK(pObject);
    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
    {
        vm_page_t pPage = pPages + iPage;
        vm_page_insert(pPage, pObject, iPIndex + iPage);
        pPage->valid = VM_PAGE_BITS_ALL;
        if (fWire)
        {
            /* Manual wiring: per-page wire count plus the global counter. */
            pPage->wire_count = 1;
            atomic_add_int(&cnt.v_wire_count, 1);
        }
    }
    VM_OBJECT_UNLOCK(pObject);
    return pPages;
#endif
}
/* Example #2 */
/*
 * vm_contig_pg_alloc:
 *
 * Allocate contiguous pages from the VM.  This function does not
 * map the allocated pages into the kernel map, otherwise it is
 * impossible to make large allocations (i.e. >2G).
 *
 * Malloc()'s data structures have been used for collection of
 * statistics and for allocations of less than a page.
 *
 * Parameters:
 *   size      - requested size in bytes (rounded up to whole pages).
 *   low, high - acceptable physical address range for the run.
 *   alignment - required physical alignment; must be a power of 2.
 *   boundary  - the run must not cross a multiple of this; power of 2.
 *   mflags    - malloc flags; M_WAITOK enables the slow active-queue flush.
 *
 * Returns the index of the first page of the run within vm_page_array,
 * or -1 on failure.  On success the pages are wired and not busied.
 */
static int
vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
                   unsigned long alignment, unsigned long boundary, int mflags)
{
    int i, q, start, pass;
    vm_offset_t phys;
    vm_page_t pga = vm_page_array;
    vm_page_t m;
    int pqtype;

    /* Validate arguments; these are programming errors, hence panic. */
    size = round_page(size);
    if (size == 0)
        panic("vm_contig_pg_alloc: size must not be 0");
    if ((alignment & (alignment - 1)) != 0)
        panic("vm_contig_pg_alloc: alignment must be a power of 2");
    if ((boundary & (boundary - 1)) != 0)
        panic("vm_contig_pg_alloc: boundary must be a power of 2");

    /*
     * See if we can get the pages from the contiguous page reserve
     * alist.  The returned pages will be allocated and wired but not
     * busied.
     */
    m = vm_page_alloc_contig(low, high, alignment, boundary, size);
    if (m)
        return (m - &pga[0]); /* convert page pointer to array index */

    /*
     * Three passes (0, 1, 2).  Each pass scans the VM page list for
     * free or cached pages.  After each pass if the entire scan failed
     * we attempt to flush inactive pages and reset the start index back
     * to 0.  For passes 1 and 2 we also attempt to flush active pages.
     */
    start = 0;
    for (pass = 0; pass < 3; pass++) {
        /*
         * Find first page in array that is free, within range,
         * aligned, and such that the boundary won't be crossed.
         */
again:
        for (i = start; i < vmstats.v_page_count; i++) {
            m = &pga[i];
            phys = VM_PAGE_TO_PHYS(m);
            /* queue - pc strips the page-color offset, leaving the base
             * queue type (PQ_FREE, PQ_CACHE, ...). */
            pqtype = m->queue - m->pc;
            if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
                    (phys >= low) && (phys < high) &&
                    ((phys & (alignment - 1)) == 0) &&
                    /* first and last byte of the run must fall in the
                     * same boundary-sized window */
                    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0) &&
                    m->busy == 0 && m->wire_count == 0 &&
                    m->hold_count == 0 &&
                    (m->flags & (PG_BUSY | PG_NEED_COMMIT)) == 0)
            {
                break;
            }
        }

        /*
         * If we cannot find the page in the given range, or we have
         * crossed the boundary, call the vm_contig_pg_clean() function
         * for flushing out the queues, and returning it back to
         * normal state.
         */
        if ((i == vmstats.v_page_count) ||
                ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {

            /*
             * Best effort flush of all inactive pages.
             * This is quite quick, for now stall all
             * callers, even if they've specified M_NOWAIT.
             */
            for (q = 0; q < PQ_L2_SIZE; ++q) {
                vm_contig_pg_clean(PQ_INACTIVE + q,
                                   vmstats.v_inactive_count);
                lwkt_yield();
            }

            /*
             * Best effort flush of active pages.
             *
             * This is very, very slow.
             * Only do this if the caller has agreed to M_WAITOK.
             *
             * If enough pages are flushed, we may succeed on
             * next (final) pass, if not the caller, contigmalloc(),
             * will fail in the index < 0 case.
             */
            if (pass > 0 && (mflags & M_WAITOK)) {
                for (q = 0; q < PQ_L2_SIZE; ++q) {
                    vm_contig_pg_clean(PQ_ACTIVE + q,
                                       vmstats.v_active_count);
                }
                lwkt_yield();
            }

            /*
             * We're already too high in the address space
             * to succeed, reset to 0 for the next iteration.
             */
            start = 0;
            continue;	/* next pass */
        }
        start = i;

        /*
         * Check successive pages for contiguous and free.
         * Any page failing the test aborts this run; restart the scan
         * one page further along.
         *
         * (still in critical section)
         */
        for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
            m = &pga[i];
            pqtype = m->queue - m->pc;
            if ((VM_PAGE_TO_PHYS(&m[0]) !=
                    (VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
                    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
                    m->busy || m->wire_count ||
                    m->hold_count ||
                    (m->flags & (PG_BUSY | PG_NEED_COMMIT)))
            {
                start++;
                goto again;
            }
        }

        /*
         * Try to allocate the pages, wiring them as we go.  On any
         * failure mid-run, free what we already took and restart the
         * scan one page further along.
         *
         * (still in critical section)
         */
        for (i = start; i < (start + size / PAGE_SIZE); i++) {
            m = &pga[i];

            if (vm_page_busy_try(m, TRUE)) {
                /* lost a race for this page; release the partial run */
                vm_contig_pg_free(start,
                                  (i - start) * PAGE_SIZE);
                start++;
                goto again;
            }
            pqtype = m->queue - m->pc;
            if (pqtype == PQ_CACHE &&
                    m->hold_count == 0 &&
                    m->wire_count == 0 &&
                    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0) {
                /* A clean cached page: strip mappings and free it so it
                 * lands on the free queue, then retry the same index. */
                vm_page_protect(m, VM_PROT_NONE);
                KKASSERT((m->flags & PG_MAPPED) == 0);
                KKASSERT(m->dirty == 0);
                vm_page_free(m);
                --i;
                continue;	/* retry the page */
            }
            if (pqtype != PQ_FREE || m->hold_count) {
                /* page changed state under us; give up on this run */
                vm_page_wakeup(m);
                vm_contig_pg_free(start,
                                  (i - start) * PAGE_SIZE);
                start++;
                goto again;
            }
            KKASSERT((m->valid & m->dirty) == 0);
            KKASSERT(m->wire_count == 0);
            KKASSERT(m->object == NULL);
            /* Claim the page: off the free queue, fully valid. */
            vm_page_unqueue_nowakeup(m);
            m->valid = VM_PAGE_BITS_ALL;
            if (m->flags & PG_ZERO)
                vm_page_zero_count--; /* page leaves the pre-zeroed pool */
            KASSERT(m->dirty == 0,
                    ("vm_contig_pg_alloc: page %p was dirty", m));
            KKASSERT(m->wire_count == 0);
            KKASSERT(m->busy == 0);

            /*
             * Clear all flags except PG_BUSY, PG_ZERO, and
             * PG_WANTED, then unbusy the now allocated page.
             */
            vm_page_flag_clear(m, ~(PG_BUSY | PG_SBUSY |
                                    PG_ZERO | PG_WANTED));
            vm_page_wire(m);
            vm_page_wakeup(m);
        }

        /*
         * Our job is done, return the index page of vm_page_array.
         */
        return (start); /* aka &pga[start] */
    }

    /*
     * Failed.
     */
    return (-1);
}