Example #1
/* Flush every cached IO translation for domain d on all IOMMUs */
void invalidate_all_iommu_pages(struct domain *d)
{
    u32 cmd[4], entry;
    unsigned long flags;
    struct amd_iommu *iommu;
    int domain_id = d->domain_id;
    /* all-ones page address: with the S flag set below, this means
     * "invalidate the entire address range" */
    u64 addr_lo = 0x7FFFFFFFFFFFF000ULL & DMA_32BIT_MASK;
    u64 addr_hi = 0x7FFFFFFFFFFFF000ULL >> 32;

    /* cmd[1]: target domain ID plus the INVALIDATE_IOMMU_PAGES opcode */
    set_field_in_reg_u32(domain_id, 0,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;

    /* cmd[2]: S=1 selects a range; combined with the all-ones page
     * address above, that range is the whole address space. PDE=1 also
     * flushes cached page directory entries. */
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
    cmd[2] = entry;

    /* cmd[3]: upper 32 address bits; cmd[0] is unused by this command */
    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
    cmd[3] = entry;

    cmd[0] = 0;

    /* queue the command on every IOMMU and wait for it to complete */
    for_each_amd_iommu ( iommu )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        send_iommu_command(iommu, cmd);
        flush_command_buffer(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }
}
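Example #2 below calls invalidate_iommu_page(), the single-address counterpart of the function above. As a reference point, here is a minimal sketch of how that helper can be built, assuming the same set_field_in_reg_u32() helper and mask/shift constants as Example #1; the command layout is identical except that the S and PDE flags are cleared and the address comes from the caller. The caller is expected to hold iommu->lock and to follow up with flush_command_buffer(), as both examples do.

/* Sketch: single-page INVALIDATE_IOMMU_PAGES, built like the function
 * above (assumes the same helpers and register constants) */
void invalidate_iommu_page(struct amd_iommu *iommu, u64 io_addr,
                           u16 domain_id)
{
    u32 cmd[4], entry;
    u64 addr_lo = io_addr & DMA_32BIT_MASK;
    u64 addr_hi = io_addr >> 32;

    /* cmd[1]: target domain ID plus the opcode, as in Example #1 */
    set_field_in_reg_u32(domain_id, 0,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &entry);
    cmd[1] = entry;

    /* cmd[2]: S=0 and PDE=0 restrict the flush to one 4K page */
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, 0,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
    cmd[2] = entry;

    /* cmd[3]: upper address bits; cmd[0] is unused by this format */
    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
    cmd[3] = entry;
    cmd[0] = 0;

    send_iommu_command(iommu, cmd);
}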
Example #2
/* Unmap gfn from domain d's IO page table and invalidate the stale
 * translation on every IOMMU */
int amd_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
    u64 iommu_l2e;
    unsigned long flags;
    struct amd_iommu *iommu;
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    BUG_ON( !hd->root_table );

    spin_lock_irqsave(&hd->mapping_lock, flags);

    /* the IO page table is only populated once the p2m table has been
     * synchronized into it; until then there is nothing to unmap */
    if ( is_hvm_domain(d) && !hd->p2m_synchronized )
    {
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return 0;
    }

    iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);

    if ( iommu_l2e == 0 )
    {
        amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
        spin_unlock_irqrestore(&hd->mapping_lock, flags);
        return -EFAULT;
    }

    /* mark PTE as 'page not present' */
    clear_iommu_l1e_present(iommu_l2e, gfn);
    spin_unlock_irqrestore(&hd->mapping_lock, flags);

    /* send INVALIDATE_IOMMU_PAGES command */
    for_each_amd_iommu ( iommu )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        invalidate_iommu_page(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id);
        flush_command_buffer(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }

    return 0;
}
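The unmap path above depends on clear_iommu_l1e_present() to zero the level-1 PTE before the invalidation is sent. A minimal sketch of that step, assuming Xen's map_domain_page()/unmap_domain_page() and treating PTE_PER_TABLE_MASK and IOMMU_PAGE_TABLE_ENTRY_SIZE as the usual 512-entries-per-table, 8-bytes-per-entry constants:

/* Sketch (assumed constants): zero the 8-byte level-1 PTE for gfn.
 * iommu_l2e carries the machine address of the level-1 table page. */
static void clear_iommu_l1e_present(u64 iommu_l2e, unsigned long gfn)
{
    void *l1_table = map_domain_page(iommu_l2e >> PAGE_SHIFT);
    int offset = gfn & ~PTE_PER_TABLE_MASK;   /* index within one table */
    u32 *l1e = (u32 *)(l1_table + offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);

    /* clearing both halves drops the present bit: DMA to this gfn now
     * faults until the entry is repopulated */
    l1e[0] = 0;
    l1e[1] = 0;

    unmap_domain_page(l1_table);
}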
Example #3
void reset_command_buffer() {
    /* submit anything still queued, then start over with a fresh buffer */
    flush_command_buffer();
    create_command_buffer();
}
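All three examples rely on flush_command_buffer() to make queued invalidations take effect: commands sit in a ring buffer until the IOMMU consumes them. In the Xen code base Examples #1 and #2 come from, the flush works by queuing an empty COMPLETION_WAIT command and polling the status register. The sketch below follows that pattern; the IOMMU_COMP_WAIT_I_FLAG_* and IOMMU_STATUS_COMP_WAIT_INT_* constants and the retry bound are assumptions:

/* Sketch: wait until the IOMMU has executed everything queued so far */
static void flush_command_buffer(struct amd_iommu *iommu)
{
    u32 cmd[4], status;
    int loops = 1000;                     /* arbitrary retry bound */

    /* queue an empty COMPLETION_WAIT with the interrupt flag set */
    cmd[0] = cmd[2] = cmd[3] = 0;
    set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0,
                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
                         &cmd[1]);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, cmd[0],
                         IOMMU_COMP_WAIT_I_FLAG_MASK,
                         IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]);
    send_iommu_command(iommu, cmd);

    /* spin until the completion-wait interrupt bit appears */
    while ( loops-- )
    {
        status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
        if ( get_field_from_reg_u32(status,
                                    IOMMU_STATUS_COMP_WAIT_INT_MASK,
                                    IOMMU_STATUS_COMP_WAIT_INT_SHIFT) )
            break;
        cpu_relax();
    }
}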