Example #1
File: vmce.c Project: avsm/xen-1
/*
 * Currently all CPUs rendezvous at the MCE softirq handler, so there is
 * no need to consider the paging p2m type.
 * Currently only HVM guests with EPT paging mode are supported.
 * XXX the following situations are not yet handled:
 * PoD, foreign mapped, granted, shared
 */
int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
{
    mfn_t r_mfn;
    p2m_type_t pt;
    int rc;

    /* Always trust that dom0's MCE handler will prevent future access */
    if ( d == dom0 )
        return 0;

    if ( !mfn_valid(mfn_x(mfn)) )
        return -EINVAL;

    if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) )
        return -ENOSYS;

    rc = -1;
    r_mfn = get_gfn_query(d, gfn, &pt);
    if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES )
    {
        ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
        p2m_change_type(d, gfn, pt, p2m_ram_broken);
        rc = 0;
    }
    put_gfn(d, gfn);

    return rc;
}
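
For context, a hedged sketch of how such a helper might be driven from an MCE handler once a memory error has been attributed to a guest frame; the caller shown here is illustrative, not the actual call site in Xen's mcheck code:

    /* Illustrative caller: isolate a broken guest page after an MCE.
     * d, mfn and gfn are assumed to come from the MCA telemetry. */
    if ( unmmap_broken_page(d, mfn, gfn) )
        mce_printk(MCE_QUIET, "MCE: failed to isolate d%d gfn %lx\n",
                   d->domain_id, gfn);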
Example #2
File: vmce.c Project: avsm/xen-1
/*
 * For Intel MCE, broadcast the vMCE to all vCPUs;
 * for AMD MCE, inject the vMCE to vcpu0 only.
 *
 * @d: domain into which the vMCE is injected
 * @vcpu:
 *   -1 (VMCE_INJECT_BROADCAST): broadcast the vMCE to all vCPUs
 *   >= 0: ID of the vCPU to inject the vMCE into
 */
int inject_vmce(struct domain *d, int vcpu)
{
    struct vcpu *v;
    int ret = -ESRCH;

    for_each_vcpu ( d, v )
    {
        if ( vcpu != VMCE_INJECT_BROADCAST && vcpu != v->vcpu_id )
            continue;

        if ( (has_hvm_container_domain(d) ||
              guest_has_trap_callback(d, v->vcpu_id, TRAP_machine_check)) &&
             !test_and_set_bool(v->mce_pending) )
        {
            mce_printk(MCE_VERBOSE, "MCE: inject vMCE to d%d:v%d\n",
                       d->domain_id, v->vcpu_id);
            vcpu_kick(v);
            ret = 0;
        }
        else
        {
            mce_printk(MCE_QUIET, "Failed to inject vMCE to d%d:v%d\n",
                       d->domain_id, v->vcpu_id);
            ret = -EBUSY;
            break;
        }

        if ( vcpu != VMCE_INJECT_BROADCAST )
            break;
    }

    return ret;
}
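
A hedged usage sketch of the two modes described in the comment above; the error handling shown is illustrative only:

    /* Illustrative only: Intel-style broadcast to every vCPU... */
    if ( inject_vmce(d, VMCE_INJECT_BROADCAST) < 0 )
        mce_printk(MCE_QUIET, "MCE: broadcast vMCE to d%d failed\n",
                   d->domain_id);

    /* ...versus AMD-style injection into vcpu0 only. */
    if ( inject_vmce(d, 0) < 0 )
        mce_printk(MCE_QUIET, "MCE: vMCE to d%d:v0 failed\n", d->domain_id);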
Example #3
int arch_iommu_populate_page_table(struct domain *d)
{
    const struct domain_iommu *hd = dom_iommu(d);
    struct page_info *page;
    int rc = 0, n = 0;

    d->need_iommu = -1; /* transient state: page-table population in progress */

    this_cpu(iommu_dont_flush_iotlb) = 1; /* batch IOTLB flushes until done */
    spin_lock(&d->page_alloc_lock);

    if ( unlikely(d->is_dying) )
        rc = -ESRCH;

    while ( !rc && (page = page_list_remove_head(&d->page_list)) )
    {
        if ( has_hvm_container_domain(d) ||
            (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
        {
            unsigned long mfn = page_to_mfn(page);
            unsigned long gfn = mfn_to_gmfn(d, mfn);

            if ( gfn != gfn_x(INVALID_GFN) )
            {
                ASSERT(!(gfn >> DEFAULT_DOMAIN_ADDRESS_WIDTH));
                BUG_ON(SHARED_M2P(gfn));
                rc = hd->platform_ops->map_page(d, gfn, mfn,
                                                IOMMUF_readable |
                                                IOMMUF_writable);
            }
            if ( rc )
            {
                page_list_add(page, &d->page_list);
                break;
            }
        }
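
The snippet is cut off before the loop ends. A minimal sketch of the epilogue such a population loop needs, assuming the deferred-flush setup above; the exact bookkeeping in arch_iommu_populate_page_table, including its page re-queueing and preemption handling, is not shown here:

        /* NB: the real function also re-queues each removed page on a
         * side list and checks for preemption; omitted in this sketch. */
    }

    spin_unlock(&d->page_alloc_lock);
    this_cpu(iommu_dont_flush_iotlb) = 0;

    if ( !rc )
        /* One IOTLB flush for the whole batch, not one per mapping. */
        rc = iommu_iotlb_flush_all(d);

    return rc;
}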
Example #4
/* Returns: number of bytes remaining to be copied */
unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
                              void * __user buf, unsigned int len,
                              bool_t toaddr, uint64_t pgd3)
{
    while ( len > 0 )
    {
        char *va;
        unsigned long addr = (unsigned long)gaddr;
        mfn_t mfn;
        gfn_t gfn = INVALID_GFN;
        unsigned long pagecnt;

        /* Copy at most up to the end of the current page. */
        pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);

        /* Translate the guest virtual address to an MFN, via the HVM or
         * the PV path as appropriate. */
        mfn = (has_hvm_container_domain(dp)
               ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
               : dbg_pv_va2mfn(addr, dp, pgd3));
        if ( mfn_eq(mfn, INVALID_MFN) )
            break;

        va = map_domain_page(mfn);
        va = va + (addr & (PAGE_SIZE-1));

        if ( toaddr )
        {
            copy_from_user(va, buf, pagecnt);    /* va = buf */
            paging_mark_dirty(dp, mfn_x(mfn));
        }
        else
        {