Example #1: p2m_alloc_table()
// Allocate a new p2m table for a domain.
//
// The structure of the p2m table is that of a pagetable for xen (i.e. it is
// controlled by CONFIG_PAGING_LEVELS).
//
// Returns 0 for success or -errno.
//
int p2m_alloc_table(struct p2m_domain *p2m)
{
    struct page_info *p2m_top;
    struct domain *d = p2m->domain;

    p2m_lock(p2m);

    if ( !p2m_is_nestedp2m(p2m)
         && !page_list_empty(&d->page_list) )
    {
        P2M_ERROR("dom %d already has memory allocated\n", d->domain_id);
        p2m_unlock(p2m);
        return -EINVAL;
    }

    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
    {
        P2M_ERROR("p2m already allocated for this domain\n");
        p2m_unlock(p2m);
        return -EINVAL;
    }

    P2M_PRINTK("allocating p2m table\n");

    p2m_top = p2m_alloc_ptp(p2m, PGT_l4_page_table);
    if ( p2m_top == NULL )
    {
        p2m_unlock(p2m);
        return -ENOMEM;
    }

    p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));

    /* With HAP, the IOMMU can share the CPU's p2m pagetables. */
    if ( hap_enabled(d) )
        iommu_share_p2m_table(d);

    P2M_PRINTK("populating p2m table\n");

    /* Initialise physmap tables for slot zero. Other code assumes this. */
    p2m->defer_nested_flush = 1;
    if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
                        p2m_invalid, p2m->default_access) )
        goto error;
    p2m->defer_nested_flush = 0;

    P2M_PRINTK("p2m table initialised\n");
    p2m_unlock(p2m);
    return 0;

 error:
    /* Only the slot-zero initialisation above can bring us here. */
    P2M_PRINTK("failed to initialise p2m table\n");
    p2m_unlock(p2m);
    return -ENOMEM;
}
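The header comment above notes that the p2m table is laid out like an ordinary Xen pagetable. As a minimal standalone sketch (not Xen code), the following shows how a guest frame number decomposes into per-level table indices under 4-level paging with 4KB leaves; the constants are the usual x86 long-mode values, written out here rather than taken from the Xen headers.

#include <stdio.h>

#define PAGETABLE_ORDER 9                               /* 512 entries per table */
#define PT_INDEX_MASK   ((1UL << PAGETABLE_ORDER) - 1)

int main(void)
{
    unsigned long gfn = 0x123456UL;  /* made-up guest frame number */
    unsigned int level;

    /* Print the index used at each pagetable level, from L4 down to L1. */
    for ( level = 4; level >= 1; level-- )
    {
        unsigned int shift = (level - 1) * PAGETABLE_ORDER;
        printf("L%u index = %lu\n", level, (gfn >> shift) & PT_INDEX_MASK);
    }
    return 0;
}

Each index selects one of 512 entries at its level; p2m_next_level in the next example performs this descent one level at a time.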
Example #2: p2m_next_level()
/* Returns: 0 for success, -errno for failure */
static int
p2m_next_level(struct p2m_domain *p2m, void **table,
               unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
               u32 max, unsigned long type, bool_t unmap)
{
    l1_pgentry_t *l1_entry;
    l1_pgentry_t *p2m_entry;
    l1_pgentry_t new_entry;
    void *next;
    int i;

    if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                      shift, max)) )
        return -ENOENT;

    /* PoD/paging: Not present doesn't imply empty. */
    if ( !l1e_get_flags(*p2m_entry) )
    {
        struct page_info *pg;

        pg = p2m_alloc_ptp(p2m, type);
        if ( pg == NULL )
            return -ENOMEM;

        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                 P2M_BASE_FLAGS | _PAGE_RW);

        /* Install the new table at the level implied by 'type'. */
        switch ( type ) {
        case PGT_l3_page_table:
            p2m_add_iommu_flags(&new_entry, 3, IOMMUF_readable|IOMMUF_writable);
            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 4);
            break;
        case PGT_l2_page_table:
            p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
            break;
        case PGT_l1_page_table:
            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
            break;
        default:
            BUG();
            break;
        }
    }

    ASSERT(l1e_get_flags(*p2m_entry) & (_PAGE_PRESENT|_PAGE_PSE));

    /* split 1GB pages into 2MB pages */
    if ( type == PGT_l2_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
    {
        unsigned long flags, pfn;
        struct page_info *pg;

        pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
        if ( pg == NULL )
            return -ENOMEM;

        flags = l1e_get_flags(*p2m_entry);
        pfn = l1e_get_pfn(*p2m_entry);

        l1_entry = __map_domain_page(pg);
        for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
        {
            new_entry = l1e_from_pfn(pfn + (i * L1_PAGETABLE_ENTRIES), flags);
            p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
        }
        unmap_domain_page(l1_entry);
        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                 P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
        p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
    }

    /* split single 2MB large page into 4KB pages in P2M table */
    if ( type == PGT_l1_page_table && (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
    {
        unsigned long flags, pfn;
        struct page_info *pg;

        pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
        if ( pg == NULL )
            return -ENOMEM;

        /* New splintered mappings inherit the flags of the old superpage, 
         * with a little reorganisation for the _PAGE_PSE_PAT bit. */
        flags = l1e_get_flags(*p2m_entry);
        pfn = l1e_get_pfn(*p2m_entry);
        if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
            pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
        else
            flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
        
        l1_entry = __map_domain_page(pg);
        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
        {
            new_entry = l1e_from_pfn(pfn + i, flags);
            p2m_add_iommu_flags(&new_entry, 0, 0);
            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1);
        }
        unmap_domain_page(l1_entry);
        
        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                 P2M_BASE_FLAGS | _PAGE_RW);
        p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
    }

    /* Descend: map the next-level table and hand it back to the caller. */
    next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
    if ( unmap )
        unmap_domain_page(*table);
    *table = next;

    return 0;
}
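A note on the PAT-bit shuffle in the 2MB-to-4KB splintering path above: in a large-page entry the PAT bit sits at bit 12 (_PAGE_PSE_PAT), which aliases the low bit of the frame-number field, whereas in a 4KB entry PAT sits at bit 7, the same position as _PAGE_PSE. The standalone sketch below (not Xen code; the bit values are the ordinary x86 ones, and the example entry is made up) replays that transformation.

#include <stdio.h>

#define _PAGE_PSE      0x080UL   /* bit 7: PSE in L2+ entries, PAT in 4KB entries */
#define _PAGE_PSE_PAT  0x1000UL  /* bit 12: PAT in large-page entries */

int main(void)
{
    /* Hypothetical 2MB superpage entry: flags carry _PAGE_PSE, and the low
     * bit of the extracted "pfn" carries _PAGE_PSE_PAT (bit 12 of the entry). */
    unsigned long flags = 0x067UL | _PAGE_PSE;  /* present|rw|user|accessed|dirty + PSE */
    unsigned long pfn   = 0x80001UL;            /* 2MB-aligned frame, PAT bit set */

    if ( pfn & 1 )           /* _PAGE_PSE_PAT was set */
        pfn -= 1;            /* clear it; bit 7 now acts as _PAGE_PAT */
    else
        flags &= ~_PAGE_PSE; /* no PAT requested: clear bit 7 so it is not misread as PAT */

    printf("4KB flags = %#lx, base pfn = %#lx\n", flags, pfn);
    return 0;
}

The 1GB-to-2MB split earlier in the function copies the flags unchanged because 1GB and 2MB entries keep their PAT bit in the same position (bit 12), so no reshuffle is needed on that path.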