Example #1
0
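/*
 * Identity-map the physically contiguous region [phys_addr, phys_addr + size)
 * into the domain's IO page tables with the requested write (iw) and read
 * (ir) permissions. Returns 0 on success, -EFAULT if an IO page table entry
 * cannot be resolved.
 */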
int amd_iommu_reserve_domain_unity_map(
    struct domain *domain,
    unsigned long phys_addr,
    unsigned long size, int iw, int ir)
{
    u64 iommu_l2e;
    unsigned long flags, npages, i;
    struct hvm_iommu *hd = domain_hvm_iommu(domain);

    npages = region_to_pages(phys_addr, size);

    spin_lock_irqsave(&hd->mapping_lock, flags);
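    /* Install one identity-mapped (gfn == mfn) PTE per page of the region. */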
    for ( i = 0; i < npages; ++i )
    {
        iommu_l2e = iommu_l2e_from_pfn(
            hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);

        if ( iommu_l2e == 0 )
        {
            amd_iov_error(
                "Invalid IO pagetable entry phys_addr = %lx\n", phys_addr);
            spin_unlock_irqrestore(&hd->mapping_lock, flags);
            return -EFAULT;
        }

        set_iommu_l1e_present(iommu_l2e,
            (phys_addr >> PAGE_SHIFT), phys_addr, iw, ir);

        phys_addr += PAGE_SIZE;
    }
    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    return 0;
}
Example #2
0
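/*
 * Map guest frame gfn to machine frame mfn in the domain's IO page tables
 * with read and write access. For HVM domains the mapping is skipped until
 * the p2m table has been synchronized into the IO page tables by
 * amd_iommu_sync_p2m().
 */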
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    u64 iommu_l2e;
    unsigned long flags;
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    int iw = IOMMU_IO_WRITE_ENABLED;
    int ir = IOMMU_IO_READ_ENABLED;

    BUG_ON( !hd->root_table );

    spin_lock_irqsave(&hd->mapping_lock, flags);

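    /*
     * For HVM guests, defer mappings until the p2m table has been copied
     * into the IO page tables; amd_iommu_sync_p2m() will establish them.
     */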
    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
        goto out;

    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
    if ( iommu_l2e == 0 )
    {
        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return -EFAULT;
    }
    set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);

out:
    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    return 0;
}
Example #3
0
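/*
 * Prepare a VCPU for VM entry: (re)load its VMCS on the current pCPU, keep
 * guest caches consistent when a pass-through vCPU migrates between pCPUs,
 * and update the exception bitmap when a debugger attaches to or detaches
 * from the domain.
 */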
void vmx_do_resume(struct vcpu *v)
{
    bool_t debug_state;

    if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
    {
        if ( v->arch.hvm_vmx.vmcs != this_cpu(current_vmcs) )
            vmx_load_vmcs(v);
    }
    else
    {
        /*
         * For a pass-through domain, the guest's PCI-E device driver may use
         * "Non-Snoop" I/O and explicitly WBINVD or CLFLUSH a RAM region.
         * Since the vCPU may migrate to another pCPU before the WBINVD or
         * CLFLUSH, we maintain data consistency either by:
         *  1: flushing the cache (wbinvd) when the guest is scheduled out if
         *     there is no wbinvd exiting, or
         *  2: executing wbinvd on all dirty pCPUs when the guest's wbinvd
         *     exits.
         */
        if ( !list_empty(&(domain_hvm_iommu(v->domain)->pdev_list)) &&
             !cpu_has_wbinvd_exiting )
        {
            int cpu = v->arch.hvm_vmx.active_cpu;
            if ( cpu != -1 )
                on_selected_cpus(cpumask_of_cpu(cpu), wbinvd_ipi, NULL, 1, 1);
        }

        vmx_clear_vmcs(v);
        vmx_load_vmcs(v);
        hvm_migrate_timers(v);
        vmx_set_host_env(v);
    }

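    /* Intercept #DB and #BP only while a debugger is attached to the domain. */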
    debug_state = v->domain->debugger_attached;
    if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
    {
        unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
        unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);
        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            intercepts |= mask;
        else
            intercepts &= ~mask;
        __vmwrite(EXCEPTION_BITMAP, intercepts);
    }

    hvm_do_resume(v);
    reset_stack_and_jump(vmx_asm_do_vmentry);
}
Example #4
0
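/*
 * Mirror an HVM domain's p2m (gfn -> mfn) mappings into its IO page tables.
 * Runs at most once per domain: once p2m_synchronized is set, later calls
 * are no-ops.
 */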
int amd_iommu_sync_p2m(struct domain *d)
{
    unsigned long mfn, gfn, flags;
    u64 iommu_l2e;
    struct list_head *entry;
    struct page_info *page;
    struct hvm_iommu *hd;
    int iw = IOMMU_IO_WRITE_ENABLED;
    int ir = IOMMU_IO_READ_ENABLED;

    if ( !is_hvm_domain(d) )
        return 0;

    hd = domain_hvm_iommu(d);

    spin_lock_irqsave(&hd->mapping_lock, flags);

    if ( hd->p2m_synchronized )
        goto out;

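    /*
     * For every page owned by the domain, translate its mfn back to a gfn
     * via the M2P table and install the corresponding IO page table entry.
     */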
    for ( entry = d->page_list.next; entry != &d->page_list;
            entry = entry->next )
    {
        page = list_entry(entry, struct page_info, list);
        mfn = page_to_mfn(page);
        gfn = get_gpfn_from_mfn(mfn);

        if ( gfn == INVALID_M2P_ENTRY )
            continue;

        iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);

        if ( iommu_l2e == 0 )
        {
            amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
            spin_unlock_irqrestore(&hd->mapping_lock, flags);
            return -EFAULT;
        }

        set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
    }

    hd->p2m_synchronized = 1;

out:
    spin_unlock_irqrestore(&hd->mapping_lock, flags);
    return 0;
}
Example #5
0
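/*
 * Remove the IO page table mapping for guest frame gfn and invalidate the
 * corresponding entry on every AMD IOMMU in the system.
 */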
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
    u64 iommu_l2e;
    unsigned long flags;
    struct amd_iommu *iommu;
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    BUG_ON( !hd->root_table );

    spin_lock_irqsave(&hd->mapping_lock, flags);

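    /* Nothing has been mapped yet if the p2m is not synchronized. */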
    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
    {
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return 0;
    }

    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);

    if ( iommu_l2e == 0 )
    {
        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return -EFAULT;
    }

    /* mark PTE as 'page not present' */
    clear_iommu_l1e_present(iommu_l2e, gfn);
    spin_unlock_irqrestore(&hd->mapping_lock, flags);

    /* send INVALIDATE_IOMMU_PAGES command */
    for_each_amd_iommu ( iommu )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        invalidate_iommu_page(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id);
        flush_command_buffer(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }

    return 0;
}