Example #1
/* Returns: mfn for the given (hvm guest) vaddr */
static mfn_t
dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn)
{
    mfn_t mfn;
    uint32_t pfec = PFEC_page_present;
    p2m_type_t gfntype;

    DBGP2("vaddr:%lx domid:%d\n", vaddr, dp->domain_id);

    *gfn = _gfn(paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec));
    if ( gfn_eq(*gfn, INVALID_GFN) )
    {
        DBGP2("kdb:bad gfn from gva_to_gfn\n");
        return INVALID_MFN;
    }

    mfn = get_gfn(dp, gfn_x(*gfn), &gfntype);
    if ( p2m_is_readonly(gfntype) && toaddr )
    {
        DBGP2("kdb:p2m_is_readonly: gfntype:%x\n", gfntype);
        mfn = INVALID_MFN;
    }
    else
        DBGP2("X: vaddr:%lx domid:%d mfn:%#"PRI_mfn"\n",
              vaddr, dp->domain_id, mfn_x(mfn));

    if ( mfn_eq(mfn, INVALID_MFN) )
    {
        put_gfn(dp, gfn_x(*gfn));
        *gfn = INVALID_GFN;
    }

    return mfn;
}
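
The helper returns with the reference taken by get_gfn() still held on success, so the caller owns the matching put_gfn(). A minimal, hypothetical caller sketch (dbg_read_byte is illustrative and not part of the listing; it assumes the typed map_domain_page(mfn_t) variant seen in Example #11):

/* Hypothetical caller: read one byte at a guest virtual address. */
static int dbg_read_byte(struct domain *dp, dbgva_t vaddr, uint8_t *val)
{
    gfn_t gfn;
    mfn_t mfn = dbg_hvm_va2mfn(vaddr, dp, 0 /* toaddr=0: read */, &gfn);
    void *map;

    if ( mfn_eq(mfn, INVALID_MFN) )
        return -EFAULT;

    map = map_domain_page(mfn);
    *val = *(uint8_t *)(map + (vaddr & (PAGE_SIZE - 1)));
    unmap_domain_page(map);

    put_gfn(dp, gfn_x(gfn));    /* drop the reference taken by get_gfn() */
    return 0;
}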
Example #2
static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
                          struct p2m_domain *ap2m, p2m_access_t a,
                          gfn_t gfn)
{
    int rc = 0;

    if ( ap2m )
    {
        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
        /* If the corresponding mfn is invalid we will want to just skip it */
        if ( rc == -ESRCH )
            rc = 0;
    }
    else
    {
        mfn_t mfn;
        p2m_access_t _a;
        p2m_type_t t;
        unsigned long gfn_l = gfn_x(gfn);

        mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
        rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
    }

    return rc;
}
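
Note that the altp2m branch deliberately swallows -ESRCH: if the gfn has no valid mfn in the alternate view there is nothing to restrict, and a range operation (see Example #3) should skip the hole rather than abort.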
Example #3
/*
 * Set access type for a region of gfns.
 * If gfn == INVALID_GFN, sets the default access type.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
    p2m_access_t a;
    unsigned long gfn_l;
    long rc = 0;

    /* altp2m view 0 is treated as the hostp2m */
    if ( altp2m_idx )
    {
        if ( altp2m_idx >= MAX_ALTP2M ||
             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = d->arch.altp2m_p2m[altp2m_idx];
    }

    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
        return -EINVAL;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_lock(p2m);
    if ( ap2m )
        p2m_lock(ap2m);

    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
    {
        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));

        if ( rc )
            break;

        /* Check for continuation if it's not the last iteration. */
        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    if ( ap2m )
        p2m_unlock(ap2m);
    p2m_unlock(p2m);

    return rc;
}
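
A positive return value is not an error but a continuation point: the next 'start' offset to resume from after preemption. A simplified, hypothetical resume loop (upstream instead re-issues the operation through a hypercall continuation):

/* Hypothetical wrapper: drive p2m_set_mem_access() to completion. */
static long set_mem_access_all(struct domain *d, gfn_t gfn, uint32_t nr,
                               uint32_t mask, xenmem_access_t access,
                               unsigned int altp2m_idx)
{
    uint32_t start = 0;
    long rc;

    do {
        rc = p2m_set_mem_access(d, gfn, nr, start, mask, access, altp2m_idx);
        if ( rc > 0 )
            start = rc;    /* preempted: resume at the returned offset */
    } while ( rc > 0 );

    return rc;
}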
Example #4
int domain_vpl011_init(struct domain *d, struct vpl011_init_info *info)
{
    int rc;
    struct vpl011 *vpl011 = &d->arch.vpl011;

    if ( vpl011->ring_buf )
        return -EINVAL;

    /* Map the guest PFN to Xen address space. */
    rc = prepare_ring_for_helper(d,
                                 gfn_x(info->gfn),
                                 &vpl011->ring_page,
                                 &vpl011->ring_buf);
    if ( rc < 0 )
        goto out;

    /* Note: vgic_reserve_virq() returns a boolean, true on success. */
    rc = vgic_reserve_virq(d, GUEST_VPL011_SPI);
    if ( !rc )
    {
        rc = -EINVAL;
        goto out1;
    }

    rc = alloc_unbound_xen_event_channel(d, 0, info->console_domid,
                                         vpl011_notification);
    if ( rc < 0 )
        goto out2;

    vpl011->evtchn = info->evtchn = rc;

    spin_lock_init(&vpl011->lock);

    register_mmio_handler(d, &vpl011_mmio_handler,
                          GUEST_PL011_BASE, GUEST_PL011_SIZE, NULL);

    return 0;

out2:
    vgic_free_virq(d, GUEST_VPL011_SPI);

out1:
    destroy_ring_for_helper(&vpl011->ring_buf, vpl011->ring_page);

out:
    return rc;
}
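
The error paths unwind strictly in reverse order of setup: out2 releases the reserved virq and falls through to out1, which tears down the ring mapping, and out returns the error with nothing left to undo.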
Example #5
/*
 * Get access type for a gfn.
 * If gfn == INVALID_GFN, gets the default access type.
 */
static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn,
                               xenmem_access_t *access)
{
    p2m_type_t t;
    p2m_access_t a;
    mfn_t mfn;

    static const xenmem_access_t memaccess[] = {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
            ACCESS(n),
            ACCESS(r),
            ACCESS(w),
            ACCESS(rw),
            ACCESS(x),
            ACCESS(rx),
            ACCESS(wx),
            ACCESS(rwx),
            ACCESS(rx2rw),
            ACCESS(n2rwx),
#undef ACCESS
    };

    /* If request to get default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        *access = memaccess[p2m->default_access];
        return 0;
    }

    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn_x(gfn), &t, &a, 0, NULL, NULL);
    gfn_unlock(p2m, gfn, 0);

    if ( mfn_eq(mfn, INVALID_MFN) )
        return -ESRCH;

    if ( (unsigned int)a >= ARRAY_SIZE(memaccess) )
        return -ERANGE;

    *access = memaccess[a];
    return 0;
}
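
Since locking is handled internally (gfn_lock/gfn_unlock around the single get_entry call), a public wrapper can stay trivial. A hypothetical sketch:

/* Hypothetical wrapper: query the host p2m of a domain. */
int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access)
{
    return _p2m_get_mem_access(p2m_get_hostp2m(d), gfn, access);
}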
Example #6
int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
                              struct p2m_domain *ap2m, p2m_access_t a,
                              gfn_t gfn)
{
    mfn_t mfn;
    p2m_type_t t;
    p2m_access_t old_a;
    unsigned int page_order;
    unsigned long gfn_l = gfn_x(gfn);
    int rc;

    mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);

    /* Check host p2m if no valid entry in alternate */
    if ( !mfn_valid(mfn) )
    {
        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);

        rc = -ESRCH;
        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
            return rc;

        /* If this is a superpage, copy that first */
        if ( page_order != PAGE_ORDER_4K )
        {
            unsigned long mask = ~((1UL << page_order) - 1);
            unsigned long gfn2_l = gfn_l & mask;
            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);

            rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
            if ( rc )
                return rc;
        }
    }

    return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
                           (current->domain != d));
}
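
For a 2MiB superpage (page_order = 9, i.e. 512 4KiB entries), mask = ~0x1ff, so a gfn of 0x12345 gives gfn2_l = 0x12200, the superpage boundary. The whole superpage is first mirrored into the alternate p2m with its old access rights, and only then is the single 4K entry overridden with the new ones.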
Example #7
unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
    struct vcpu *v, unsigned long gva, uint32_t *pfec)
{
    unsigned long cr3;
    uint32_t missing;
    mfn_t top_mfn;
    void *top_map;
    p2m_type_t p2mt;
    walk_t gw;

    /* Get the top-level table's MFN */
    cr3 = v->arch.hvm_vcpu.guest_cr[3];
    top_mfn = gfn_to_mfn(v->domain, _gfn(cr3 >> PAGE_SHIFT), &p2mt);
    if ( !p2m_is_ram(p2mt) )
    {
        pfec[0] &= ~PFEC_page_present;
        return INVALID_GFN;
    }

    /* Map the top-level table and call the tree-walker */
    ASSERT(mfn_valid(mfn_x(top_mfn)));
    top_map = map_domain_page(mfn_x(top_mfn));
#if GUEST_PAGING_LEVELS == 3
    top_map += (cr3 & ~(PAGE_MASK | 31));
#endif
    missing = guest_walk_tables(v, gva, &gw, pfec[0], top_mfn, top_map);
    unmap_domain_page(top_map);

    /* Interpret the answer */
    if ( missing == 0 )
        return gfn_x(guest_l1e_get_gfn(gw.l1e));

    if ( missing & _PAGE_PRESENT )
        pfec[0] &= ~PFEC_page_present;

    return INVALID_GFN;
}
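
pfec is an in/out parameter: callers seed it with the access being attempted (Example #1 passes PFEC_page_present), and on a failed walk the code rewrites it to describe the fault, here clearing PFEC_page_present when a non-present entry was hit.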
Example #8
int arch_iommu_populate_page_table(struct domain *d)
{
    const struct domain_iommu *hd = dom_iommu(d);
    struct page_info *page;
    int rc = 0, n = 0;

    d->need_iommu = -1;

    this_cpu(iommu_dont_flush_iotlb) = 1;
    spin_lock(&d->page_alloc_lock);

    if ( unlikely(d->is_dying) )
        rc = -ESRCH;

    while ( !rc && (page = page_list_remove_head(&d->page_list)) )
    {
        if ( has_hvm_container_domain(d) ||
            (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
        {
            unsigned long mfn = page_to_mfn(page);
            unsigned long gfn = mfn_to_gmfn(d, mfn);

            if ( gfn != gfn_x(INVALID_GFN) )
            {
                ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
                BUG_ON(SHARED_M2P(gfn));
                rc = hd->platform_ops->map_page(d, gfn, mfn,
                                                IOMMUF_readable |
                                                IOMMUF_writable);
            }
            if ( rc )
            {
                page_list_add(page, &d->page_list);
                break;
            }
        }
        /* ... (listing truncated here; the loop and function continue upstream) */
Example #9
unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
    struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
    paddr_t ga, uint32_t *pfec, unsigned int *page_order)
{
    uint32_t missing;
    mfn_t top_mfn;
    void *top_map;
    p2m_type_t p2mt;
    walk_t gw;
    unsigned long top_gfn;
    struct page_info *top_page;

    /* Get the top-level table's MFN */
    top_gfn = cr3 >> PAGE_SHIFT;
    top_page = get_page_from_gfn_p2m(p2m->domain, p2m, top_gfn,
                                     &p2mt, NULL, P2M_ALLOC | P2M_UNSHARE);
    if ( p2m_is_paging(p2mt) )
    {
        ASSERT(p2m_is_hostp2m(p2m));
        pfec[0] = PFEC_page_paged;
        if ( top_page )
            put_page(top_page);
        p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
        return INVALID_GFN;
    }
    if ( p2m_is_shared(p2mt) )
    {
        pfec[0] = PFEC_page_shared;
        if ( top_page )
            put_page(top_page);
        return INVALID_GFN;
    }
    if ( !top_page )
    {
        pfec[0] &= ~PFEC_page_present;
        return INVALID_GFN;
    }
    top_mfn = _mfn(page_to_mfn(top_page));

    /* Map the top-level table and call the tree-walker */
    ASSERT(mfn_valid(mfn_x(top_mfn)));
    top_map = map_domain_page(mfn_x(top_mfn));
#if GUEST_PAGING_LEVELS == 3
    top_map += (cr3 & ~(PAGE_MASK | 31));
#endif
    missing = guest_walk_tables(v, p2m, ga, &gw, pfec[0], top_mfn, top_map);
    unmap_domain_page(top_map);
    put_page(top_page);

    /* Interpret the answer */
    if ( missing == 0 )
    {
        gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
        struct page_info *page;
        page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), &p2mt,
                                     NULL, P2M_ALLOC | P2M_UNSHARE);
        if ( page )
            put_page(page);
        if ( p2m_is_paging(p2mt) )
        {
            ASSERT(p2m_is_hostp2m(p2m));
            pfec[0] = PFEC_page_paged;
            p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
            return INVALID_GFN;
        }
        if ( p2m_is_shared(p2mt) )
        {
            pfec[0] = PFEC_page_shared;
            return INVALID_GFN;
        }

        if ( page_order )
            *page_order = guest_walk_to_page_order(&gw);

        return gfn_x(gfn);
    }

    if ( missing & _PAGE_PRESENT )
        pfec[0] &= ~PFEC_page_present;

    if ( missing & _PAGE_INVALID_BITS ) 
        pfec[0] |= PFEC_reserved_bit;

    if ( missing & _PAGE_PAGED )
        pfec[0] = PFEC_page_paged;

    if ( missing & _PAGE_SHARED )
        pfec[0] = PFEC_page_shared;

    return INVALID_GFN;
}
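
Note the asymmetry in the fixups: the reserved-bit case is OR-ed into pfec[0], while the paged and shared cases overwrite it outright, since those conditions supersede any other fault information.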
Example #10
File: none.c Project: mirage/xen
static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
                                 unsigned long va, uint32_t *pfec)
{
    ASSERT_UNREACHABLE();
    return gfn_x(INVALID_GFN);
}
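
An intentionally unreachable stub: for a paging mode with no guest virtual-address translation (presumably the 'none' mode, as the filename suggests), any call is a bug, so it asserts and returns gfn_x(INVALID_GFN) merely to satisfy the function-table signature.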
Example #11
unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
    struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
    paddr_t ga, uint32_t *pfec, unsigned int *page_order)
{
    uint32_t missing;
    mfn_t top_mfn;
    void *top_map;
    p2m_type_t p2mt;
    walk_t gw;
    unsigned long top_gfn;
    struct page_info *top_page;

    /* Get the top-level table's MFN */
    top_gfn = cr3 >> PAGE_SHIFT;
    top_page = get_page_from_gfn_p2m(p2m->domain, p2m, top_gfn,
                                     &p2mt, NULL, P2M_ALLOC | P2M_UNSHARE);
    if ( p2m_is_paging(p2mt) )
    {
        ASSERT(p2m_is_hostp2m(p2m));
        pfec[0] = PFEC_page_paged;
        if ( top_page )
            put_page(top_page);
        p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
        return gfn_x(INVALID_GFN);
    }
    if ( p2m_is_shared(p2mt) )
    {
        pfec[0] = PFEC_page_shared;
        if ( top_page )
            put_page(top_page);
        return gfn_x(INVALID_GFN);
    }
    if ( !top_page )
    {
        pfec[0] &= ~PFEC_page_present;
        goto out_tweak_pfec;
    }
    top_mfn = _mfn(page_to_mfn(top_page));

    /* Map the top-level table and call the tree-walker */
    ASSERT(mfn_valid(top_mfn));
    top_map = map_domain_page(top_mfn);
#if GUEST_PAGING_LEVELS == 3
    top_map += (cr3 & ~(PAGE_MASK | 31));
#endif
    missing = guest_walk_tables(v, p2m, ga, &gw, pfec[0], top_mfn, top_map);
    unmap_domain_page(top_map);
    put_page(top_page);

    /* Interpret the answer */
    if ( missing == 0 )
    {
        gfn_t gfn = guest_walk_to_gfn(&gw);
        struct page_info *page;
        page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), &p2mt,
                                     NULL, P2M_ALLOC | P2M_UNSHARE);
        if ( page )
            put_page(page);
        if ( p2m_is_paging(p2mt) )
        {
            ASSERT(p2m_is_hostp2m(p2m));
            pfec[0] = PFEC_page_paged;
            p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
            return gfn_x(INVALID_GFN);
        }
        if ( p2m_is_shared(p2mt) )
        {
            pfec[0] = PFEC_page_shared;
            return gfn_x(INVALID_GFN);
        }

        if ( page_order )
            *page_order = guest_walk_to_page_order(&gw);

        return gfn_x(gfn);
    }

    if ( missing & _PAGE_PRESENT )
        pfec[0] &= ~PFEC_page_present;

    if ( missing & _PAGE_INVALID_BITS ) 
        pfec[0] |= PFEC_reserved_bit;

    if ( missing & _PAGE_PKEY_BITS )
        pfec[0] |= PFEC_prot_key;

    if ( missing & _PAGE_PAGED )
        pfec[0] = PFEC_page_paged;

    if ( missing & _PAGE_SHARED )
        pfec[0] = PFEC_page_shared;

 out_tweak_pfec:
    /*
     * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
     * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
     */
    if ( !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
        pfec[0] &= ~PFEC_insn_fetch;

    return gfn_x(INVALID_GFN);
}
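
Compared with Example #9, this later revision passes a typed mfn_t straight to map_domain_page(), consistently returns gfn_x(INVALID_GFN) rather than a bare constant, reports protection-key faults via _PAGE_PKEY_BITS, and, per the quoted SDM rule, strips PFEC_insn_fetch unless NX or SMEP is enabled.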
Example #12
File: p2m.c Project: Fantu/Xen
static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
                                xenmem_access_t *access)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    void *i;
    unsigned int index;

    static const xenmem_access_t memaccess[] = {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
            ACCESS(n),
            ACCESS(r),
            ACCESS(w),
            ACCESS(rw),
            ACCESS(x),
            ACCESS(rx),
            ACCESS(wx),
            ACCESS(rwx),
            ACCESS(rx2rw),
            ACCESS(n2rwx),
#undef ACCESS
    };

    /* If no setting was ever set, just return rwx. */
    if ( !p2m->mem_access_enabled )
    {
        *access = XENMEM_access_rwx;
        return 0;
    }

    /* If request to get default access. */
    if ( gfn_x(gfn) == INVALID_GFN )
    {
        *access = memaccess[p2m->default_access];
        return 0;
    }

    i = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));

    if ( !i )
    {
        /*
         * No setting was found in the Radix tree. Check if the
         * entry exists in the page-tables.
         */
        paddr_t maddr = p2m_lookup(d, gfn_x(gfn) << PAGE_SHIFT, NULL);
        if ( INVALID_PADDR == maddr )
            return -ESRCH;

        /* If entry exists then its rwx. */
        *access = XENMEM_access_rwx;
    }
    else
    {
        /* Setting was found in the Radix tree. */
        index = radix_tree_ptr_to_int(i);
        if ( index >= ARRAY_SIZE(memaccess) )
            return -ERANGE;

        *access = memaccess[index];
    }

    return 0;
}
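
The read side decodes settings with radix_tree_ptr_to_int(); the write side presumably stores them with the matching encoder. A hedged sketch of the store path (the helper name is illustrative):

/* Hypothetical store: encode a p2m_access_t into the radix tree. */
static int mem_access_radix_set(struct p2m_domain *p2m, gfn_t gfn,
                                p2m_access_t a)
{
    int rc = radix_tree_insert(&p2m->mem_access_settings, gfn_x(gfn),
                               radix_tree_int_to_ptr(a));

    if ( rc == -EEXIST )
    {
        /* An entry already exists: overwrite it in place. */
        radix_tree_replace_slot(
            radix_tree_lookup_slot(&p2m->mem_access_settings, gfn_x(gfn)),
            radix_tree_int_to_ptr(a));
        rc = 0;
    }

    return rc;
}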
Example #13
/*
 * Set access type for a region of gfns.
 * If gfn == INVALID_GFN, sets the default access type.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_access_t a;
    unsigned int order;
    long rc = 0;

    static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
        ACCESS(n),
        ACCESS(r),
        ACCESS(w),
        ACCESS(rw),
        ACCESS(x),
        ACCESS(rx),
        ACCESS(wx),
        ACCESS(rwx),
        ACCESS(rx2rw),
        ACCESS(n2rwx),
#undef ACCESS
    };

    switch ( access )
    {
    case 0 ... ARRAY_SIZE(memaccess) - 1:
        a = memaccess[access];
        break;
    case XENMEM_access_default:
        a = p2m->default_access;
        break;
    default:
        return -EINVAL;
    }

    /*
     * Flip mem_access_enabled to true when a permission is set, as to prevent
     * allocating or inserting super-pages.
     */
    p2m->mem_access_enabled = true;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_write_lock(p2m);

    for ( gfn = gfn_add(gfn, start); nr > start;
          gfn = gfn_next_boundary(gfn, order) )
    {
        p2m_type_t t;
        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);

        if ( !mfn_eq(mfn, INVALID_MFN) )
        {
            order = 0;
            rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
            if ( rc )
                break;
        }

        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);
        /* Check for continuation if it is not the last iteration */
        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    p2m_write_unlock(p2m);

    return rc;
}
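
gfn_next_boundary() is not shown in the listing; a plausible sketch, assuming it rounds the gfn up to the next 2^order boundary (so an untouched superpage is skipped in one step, while order = 0 after a successful set_entry advances a single 4K page):

/* Assumed behaviour: round gfn up to the next 2^order boundary. */
static inline gfn_t gfn_next_boundary(gfn_t gfn, unsigned int order)
{
    unsigned long mask = (1UL << order) - 1;

    return _gfn((gfn_x(gfn) + (1UL << order)) & ~mask);
}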