Example #1
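A version of Xen's populate_physmap(): it walks the guest-supplied extent list, allocates each extent from the domain heap with alloc_domheap_pages(), adds it to the guest physmap and, for non-translated domains, records the machine-to-physical mapping and copies the allocated MFN back to the guest. Hypercall preemption is checked on every iteration, and progress is saved in a->nr_done.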
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( (a->extent_order != 0) &&
         !multipage_allocation_permitted(current->domain) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        page = alloc_domheap_pages(d, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) ) 
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        mfn = page_to_mfn(page);
        guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

        if ( !paging_mode_translate(d) )
        {
            for ( j = 0; j < (1 << a->extent_order); j++ )
                set_gpfn_from_mfn(mfn + j, gpfn + j);

            /* Inform the domain of the new page's machine address. */ 
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
}
Example #2
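pvh_populate_memory_range() populates a PVH/HVM memory range using the largest page order that fits the remaining page count. When an allocation fails it retries with a smaller order (and, once it reaches order 0, once more without dom0_memflags), and it calls process_pending_softirqs() every MAP_MAX_ITER (64) successful allocations so the loop does not monopolise the CPU.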
/* Populate a HVM memory range using the biggest possible order. */
static int __init pvh_populate_memory_range(struct domain *d,
                                            unsigned long start,
                                            unsigned long nr_pages)
{
    unsigned int order, i = 0;
    struct page_info *page;
    int rc;
#define MAP_MAX_ITER 64

    order = MAX_ORDER;
    while ( nr_pages != 0 )
    {
        unsigned int range_order = get_order_from_pages(nr_pages + 1);

        order = min(range_order ? range_order - 1 : 0, order);
        page = alloc_domheap_pages(d, order, dom0_memflags);
        if ( page == NULL )
        {
            if ( order == 0 && dom0_memflags )
            {
                /* Try again without any dom0_memflags. */
                dom0_memflags = 0;
                order = MAX_ORDER;
                continue;
            }
            if ( order == 0 )
            {
                printk("Unable to allocate memory with order 0!\n");
                return -ENOMEM;
            }
            order--;
            continue;
        }

        rc = guest_physmap_add_page(d, _gfn(start), _mfn(page_to_mfn(page)),
                                    order);
        if ( rc != 0 )
        {
            printk("Failed to populate memory: [%#lx,%lx): %d\n",
                   start, start + (1UL << order), rc);
            return -ENOMEM;
        }
        start += 1UL << order;
        nr_pages -= 1UL << order;
        if ( (++i % MAP_MAX_ITER) == 0 )
            process_pending_softirqs();
    }

    return 0;
#undef MAP_MAX_ITER
}
Example #3
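A more featureful variant of populate_physmap(). Extents flagged MEMF_populate_on_demand are only marked in the p2m via guest_physmap_mark_populate_on_demand(); for direct-mapped domains the GFN itself is used as the MFN, after checking that every page of the extent is valid and owned by the domain; all other extents are allocated from the domain heap as in Example #1.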
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned int i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( a->extent_order > (a->memflags & MEMF_populate_on_demand ? MAX_ORDER :
                            max_order(current->domain)) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( i != a->nr_done && hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        if ( a->memflags & MEMF_populate_on_demand )
        {
            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
                                                       a->extent_order) < 0 )
                goto out;
        }
        else
        {
            if ( is_domain_direct_mapped(d) )
            {
                mfn = gpfn;

                for ( j = 0; j < (1U << a->extent_order); j++, mfn++ )
                {
                    if ( !mfn_valid(mfn) )
                    {
                        gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_xen_pfn"\n",
                                 mfn);
                        goto out;
                    }

                    page = mfn_to_page(mfn);
                    if ( !get_page(page, d) )
                    {
                        gdprintk(XENLOG_INFO,
                                 "mfn %#"PRI_xen_pfn" doesn't belong to d%d\n",
                                 mfn, d->domain_id);
                        goto out;
                    }
                    put_page(page);
                }

                mfn = gpfn;
                page = mfn_to_page(mfn);
            }
            else
            {
                page = alloc_domheap_pages(d, a->extent_order, a->memflags);

                if ( unlikely(!page) )
                {
                    if ( !opt_tmem || a->extent_order )
                        gdprintk(XENLOG_INFO,
                                 "Could not allocate order=%u extent: id=%d memflags=%#x (%u of %u)\n",
                                 a->extent_order, d->domain_id, a->memflags,
                                 i, a->nr_extents);
                    goto out;
                }

                mfn = page_to_mfn(page);
            }

            guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( j = 0; j < (1U << a->extent_order); j++ )
                    set_gpfn_from_mfn(mfn + j, gpfn + j);

                /* Inform the domain of the new page's machine address. */ 
                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                    goto out;
            }
        }
    }

out:
    a->nr_done = i;
}
Example #4
File: mm.c Project: abligh/xen
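xenmem_add_to_physmap_one(), apparently from the ARM port in this tree: it resolves an MFN for the requested map space (a grant-table shared or status frame, the shared-info page, or a foreign domain's GFN looked up through p2m_lookup()) and then maps it at the requested GPFN with guest_physmap_add_page() while holding the domain lock.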
static int xenmem_add_to_physmap_one(
    struct domain *d,
    uint16_t space,
    domid_t foreign_domid,
    unsigned long idx,
    xen_pfn_t gpfn)
{
    unsigned long mfn = 0;
    int rc;

    switch ( space )
    {
    case XENMAPSPACE_grant_table:
        spin_lock(&d->grant_table->lock);

        if ( d->grant_table->gt_version == 0 )
            d->grant_table->gt_version = 1;

        if ( d->grant_table->gt_version == 2 &&
                (idx & XENMAPIDX_grant_table_status) )
        {
            idx &= ~XENMAPIDX_grant_table_status;
            if ( idx < nr_status_frames(d->grant_table) )
                mfn = virt_to_mfn(d->grant_table->status[idx]);
        }
        else
        {
            if ( (idx >= nr_grant_frames(d->grant_table)) &&
                    (idx < max_nr_grant_frames) )
                gnttab_grow_table(d, idx + 1);

            if ( idx < nr_grant_frames(d->grant_table) )
                mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
        }
        
        d->arch.grant_table_gpfn[idx] = gpfn;

        spin_unlock(&d->grant_table->lock);
        break;
    case XENMAPSPACE_shared_info:
        if ( idx == 0 )
            mfn = virt_to_mfn(d->shared_info);
        break;
    case XENMAPSPACE_gmfn_foreign:
    {
        paddr_t maddr;
        struct domain *od;
        rc = rcu_lock_target_domain_by_id(foreign_domid, &od);
        if ( rc < 0 )
            return rc;

        maddr = p2m_lookup(od, idx << PAGE_SHIFT);
        if ( maddr == INVALID_PADDR )
        {
            dump_p2m_lookup(od, idx << PAGE_SHIFT);
            rcu_unlock_domain(od);
            return -EINVAL;
        }

        mfn = maddr >> PAGE_SHIFT;

        rcu_unlock_domain(od);
        break;
    }

    default:
        return -ENOSYS;
    }

    domain_lock(d);

    /* Map at new location. */
    rc = guest_physmap_add_page(d, gpfn, mfn, 0);

    domain_unlock(d);

    return rc;
}