Example #1
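/* Release the domheap page backing the compat-mode guest L4 pagetable
 * (if one is present) and clear both the kernel and user guest table
 * pointers. */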
static void release_compat_l4(struct vcpu *v)
{
    if ( !pagetable_is_null(v->arch.guest_table) )
        free_domheap_page(pagetable_get_page(v->arch.guest_table));
    v->arch.guest_table = pagetable_null();
    v->arch.guest_table_user = pagetable_null();
}
Example #2
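/* Tear down the p2m: under the p2m lock, return every page on the p2m
 * page list to the domheap and drop the pointer to the first-level
 * table. */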
void p2m_teardown(struct domain *d)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    struct page_info *pg;

    spin_lock(&p2m->lock);

    while ( (pg = page_list_remove_head(&p2m->pages)) )
        free_domheap_page(pg);

    p2m->first_level = NULL;

    spin_unlock(&p2m->lock);
}
Example #3
void hap_free_p2m_page(struct domain *d, struct page_info *pg)
{
    hap_lock(d);
    ASSERT(page_get_owner(pg) == d);
    /* Should have just the one ref we gave it in alloc_p2m_page() */
    if ( (pg->count_info & PGC_count_mask) != 1 )
        HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
                  pg->count_info, pg->u.inuse.type_info);
    pg->count_info = 0;
    /* Free should not decrement domain's total allocation, since
     * these pages were allocated without an owner. */
    page_set_owner(pg, NULL);
    free_domheap_page(pg);
    d->arch.paging.hap.p2m_pages--;
    ASSERT(d->arch.paging.hap.p2m_pages >= 0);
    hap_unlock(d);
}
Example #4
/* Set the pool of pages to the required number of pages.
 * Returns 0 for success, non-zero for failure. */
static unsigned int
hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
{
    struct page_info *pg;

    ASSERT(hap_locked_by_me(d));

    while ( d->arch.paging.hap.total_pages != pages )
    {
        if ( d->arch.paging.hap.total_pages < pages )
        {
            /* Need to allocate more memory from domheap */
            pg = alloc_domheap_page(NULL);
            if ( pg == NULL )
            {
                HAP_PRINTK("failed to allocate hap pages.\n");
                return -ENOMEM;
            }
            d->arch.paging.hap.free_pages++;
            d->arch.paging.hap.total_pages++;
            list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
        }
        else if ( d->arch.paging.hap.total_pages > pages )
        {
            /* Need to return memory to domheap */
            ASSERT(!list_empty(&d->arch.paging.hap.freelist));
            pg = list_entry(d->arch.paging.hap.freelist.next,
                            struct page_info, list);
            list_del(&pg->list);
            d->arch.paging.hap.free_pages--;
            d->arch.paging.hap.total_pages--;
            pg->count_info = 0;
            free_domheap_page(pg);
        }

        /* Check to see if we need to yield and try again */
        if ( preempted && hypercall_preempt_check() )
        {
            *preempted = 1;
            return 0;
        }
    }

    return 0;
}
Example #5
static struct page_info *hap_alloc_p2m_page(struct domain *d)
{
    struct page_info *pg;

    hap_lock(d);
    pg = hap_alloc(d);

#if CONFIG_PAGING_LEVELS == 3
    /* Under PAE mode, top-level P2M table should be allocated below 4GB space
     * because the size of h_cr3 is only 32-bit. We use alloc_domheap_pages to
     * force this requirement, and exchange the guaranteed 32-bit-clean
     * page for the one we just hap_alloc()ed. */
    if ( d->arch.paging.hap.p2m_pages == 0
         && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
    {
        free_domheap_page(pg);
        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
        if ( likely(pg != NULL) )
        {
            void *p = hap_map_domain_page(page_to_mfn(pg));
            clear_page(p);
            hap_unmap_domain_page(p);
        }
    }
#endif

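    /* Move the page from the general HAP pool accounting into the p2m
     * count, make the domain its owner and give it a single reference. */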
    if ( likely(pg != NULL) )
    {
        d->arch.paging.hap.total_pages--;
        d->arch.paging.hap.p2m_pages++;
        page_set_owner(pg, d);
        pg->count_info = 1;
    }

    hap_unlock(d);
    return pg;
}
Example #6
File: p2m.c Project: Fantu/Xen
/*
 * 0   == (P2M_ONE_DESCEND) continue to descend the tree
 * +ve == (P2M_ONE_PROGRESS_*) handled at this level, continue, flush,
 *        entry, addr and maddr updated.  Return value is an
 *        indication of the amount of work done (for preemption).
 * -ve == (-Exxx) error.
 */
static int apply_one_level(struct domain *d,
                           lpae_t *entry,
                           unsigned int level,
                           bool_t flush_cache,
                           enum p2m_operation op,
                           paddr_t start_gpaddr,
                           paddr_t end_gpaddr,
                           paddr_t *addr,
                           paddr_t *maddr,
                           bool_t *flush,
                           int mattr,
                           p2m_type_t t,
                           p2m_access_t a)
{
    const paddr_t level_size = level_sizes[level];
    const paddr_t level_mask = level_masks[level];
    const paddr_t level_shift = level_shifts[level];

    struct p2m_domain *p2m = &d->arch.p2m;
    lpae_t pte;
    const lpae_t orig_pte = *entry;
    int rc;

    BUG_ON(level > 3);

    switch ( op )
    {
    case ALLOCATE:
        ASSERT(level < 3 || !p2m_valid(orig_pte));
        ASSERT(*maddr == 0);

        if ( p2m_valid(orig_pte) )
            return P2M_ONE_DESCEND;

        if ( is_mapping_aligned(*addr, end_gpaddr, 0, level_size) &&
           /* We only create superpages when mem_access is not in use. */
             (level == 3 || (level < 3 && !p2m->mem_access_enabled)) )
        {
            struct page_info *page;

            page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
            if ( page )
            {
                rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
                if ( rc < 0 )
                {
                    free_domheap_page(page);
                    return rc;
                }

                pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
                if ( level < 3 )
                    pte.p2m.table = 0;
                p2m_write_pte(entry, pte, flush_cache);
                p2m->stats.mappings[level]++;

                *addr += level_size;

                return P2M_ONE_PROGRESS;
            }
            else if ( level == 3 )
                return -ENOMEM;
        }

        /* L3 is always suitably aligned for mapping (handled, above) */
        BUG_ON(level == 3);

        /*
         * If we get here then we failed to allocate a sufficiently
         * large contiguous region for this level (which can't be
         * L3) or mem_access is in use. Create a page table and
         * continue to descend so we try smaller allocations.
         */
        rc = p2m_create_table(d, entry, 0, flush_cache);
        if ( rc < 0 )
            return rc;

        return P2M_ONE_DESCEND;

    case INSERT:
        if ( is_mapping_aligned(*addr, end_gpaddr, *maddr, level_size) &&
           /*
            * We do not handle replacing an existing table with a superpage
            * or when mem_access is in use.
            */
             (level == 3 || (!p2m_table(orig_pte) && !p2m->mem_access_enabled)) )
        {
            rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
            if ( rc < 0 )
                return rc;

            /* New mapping is superpage aligned, make it */
            pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
            if ( level < 3 )
                pte.p2m.table = 0; /* Superpage entry */

            p2m_write_pte(entry, pte, flush_cache);

            *flush |= p2m_valid(orig_pte);

            *addr += level_size;
            *maddr += level_size;

            if ( p2m_valid(orig_pte) )
            {
                /*
                 * We can't currently get here for an existing table
                 * mapping, since we don't handle replacing an
                 * existing table with a superpage. If we did we would
                 * need to handle freeing (and accounting) for the bit
                 * of the p2m tree which we would be about to lop off.
                 */
                BUG_ON(level < 3 && p2m_table(orig_pte));
                if ( level == 3 )
                    p2m_put_l3_page(orig_pte);
            }
            else /* New mapping */
                p2m->stats.mappings[level]++;

            return P2M_ONE_PROGRESS;
        }
        else
        {
Example #7
void *vm_alloc(unsigned int nr, unsigned int align)
{
    unsigned int start, bit;

    if ( !align )
        align = 1;
    else if ( align & (align - 1) )
        align &= -align;

    spin_lock(&vm_lock);
    for ( ; ; )
    {
        struct page_info *pg;

        ASSERT(vm_low == vm_top || !test_bit(vm_low, vm_bitmap));
        for ( start = vm_low; start < vm_top; )
        {
            bit = find_next_bit(vm_bitmap, vm_top, start + 1);
            if ( bit > vm_top )
                bit = vm_top;
            /*
             * Note that this skips the first bit, making the
             * corresponding page a guard one.
             */
            start = (start + align) & ~(align - 1);
            if ( bit < vm_top )
            {
                if ( start + nr < bit )
                    break;
                start = find_next_zero_bit(vm_bitmap, vm_top, bit + 1);
            }
            else
            {
                if ( start + nr <= bit )
                    break;
                start = bit;
            }
        }

        if ( start < vm_top )
            break;

        spin_unlock(&vm_lock);

        if ( vm_top >= vm_end )
            return NULL;

        pg = alloc_domheap_page(NULL, 0);
        if ( !pg )
            return NULL;

        spin_lock(&vm_lock);

        if ( start >= vm_top )
        {
            unsigned long va = (unsigned long)vm_bitmap + vm_top / 8;

            if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
            {
                clear_page((void *)va);
                vm_top += PAGE_SIZE * 8;
                if ( vm_top > vm_end )
                    vm_top = vm_end;
                continue;
            }
        }

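        /*
         * Either mapping the page as a bitmap extension failed, or another
         * CPU grew the bitmap while the lock was dropped; in both cases the
         * freshly allocated page is unused, so hand it back to the domheap.
         */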
        free_domheap_page(pg);

        if ( start >= vm_top )
        {
            spin_unlock(&vm_lock);
            return NULL;
        }
    }

    for ( bit = start; bit < start + nr; ++bit )
        __set_bit(bit, vm_bitmap);
    if ( bit < vm_top )
        ASSERT(!test_bit(bit, vm_bitmap));
    else
        ASSERT(bit == vm_top);
    if ( start <= vm_low + 2 )
        vm_low = bit;
    spin_unlock(&vm_lock);

    return vm_base + start * PAGE_SIZE;
}
Example #8
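/* Free a pagetable page identified by its machine address; a zero maddr
 * means no page was allocated and is ignored. */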
void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}