Example #1: gen_cc_inv_dsc() writes a context-cache invalidation descriptor into the IOMMU's queued-invalidation ring.
static int gen_cc_inv_dsc(struct iommu *iommu, int index,
    u16 did, u16 source_id, u8 function_mask, u8 granu)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
    qinval_entry->q.cc_inv_dsc.lo.granu = granu;
    qinval_entry->q.cc_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.cc_inv_dsc.lo.did = did;
    qinval_entry->q.cc_inv_dsc.lo.sid = source_id;
    qinval_entry->q.cc_inv_dsc.lo.fm = function_mask;
    qinval_entry->q.cc_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.cc_inv_dsc.hi.res = 0;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);

    return 0;
}
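A generator like this is only half of the story: a caller still has to reserve a slot in the invalidation queue, fill it, and then advance the queue tail register so the hardware fetches the new descriptor. A minimal sketch of such a wrapper; qinval_next_index() and qinval_update_qtail() are assumed helpers for slot allocation and tail updates, and their names/signatures may differ from this project's:

static int queue_invalidate_context_sketch(struct iommu *iommu,
    u16 did, u16 source_id, u8 function_mask, u8 granu)
{
    int ret;
    int index = qinval_next_index(iommu);      /* assumed helper: reserve a free slot */

    if ( index == -1 )
        return -1;                             /* queue full */
    ret = gen_cc_inv_dsc(iommu, index, did, source_id,
                         function_mask, granu);
    ret |= qinval_update_qtail(iommu, index);  /* assumed helper: make the entry visible to hw */
    return ret;
}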
Example #2: gen_iec_inv_dsc() writes an interrupt-entry-cache (IEC) invalidation descriptor.
static int gen_iec_inv_dsc(struct iommu *iommu, int index,
    u8 granu, u8 im, u16 iidx)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
    qinval_entry->q.iec_inv_dsc.lo.granu = granu;
    qinval_entry->q.iec_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.iec_inv_dsc.lo.im = im;
    qinval_entry->q.iec_inv_dsc.lo.iidx = iidx;
    qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.iec_inv_dsc.hi.res = 0;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}
Example #3: gen_dev_iotlb_inv_dsc() writes a device-IOTLB (ATS) invalidation descriptor.
static int gen_dev_iotlb_inv_dsc(struct iommu *iommu, int index,
    u32 max_invs_pend, u16 sid, u16 size, u64 addr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = max_invs_pend;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = sid;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0;

    qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size;
    qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}
Example #4: gen_wait_dsc() writes an invalidation-wait descriptor, used to detect when earlier invalidations have completed.
static int gen_wait_dsc(struct iommu *iommu, int index,
    u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
    qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
    qinval_entry->q.inv_wait_dsc.lo.sw = sw;
    qinval_entry->q.inv_wait_dsc.lo.fn = fn;
    qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
    qinval_entry->q.inv_wait_dsc.lo.sdata = sdata;
    qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
    qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(saddr) >> 2;
    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}
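The wait descriptor is what gives software a completion signal: with sw set, the hardware writes sdata to the status address once all preceding descriptors in the queue have been processed. Below is a sketch of how a synchronous flush might poll on that write; QINVAL_STAT_INIT/QINVAL_STAT_DONE and the queueing helpers are assumptions, not necessarily this project's names:

static int invalidate_sync_sketch(struct iommu *iommu)
{
    volatile u32 poll_slot = QINVAL_STAT_INIT;  /* assumed status values */
    int index = qinval_next_index(iommu);       /* assumed helper */

    if ( index == -1 )
        return -1;
    /* sw=1: hardware stores sdata (QINVAL_STAT_DONE) to &poll_slot
     * after all earlier invalidation descriptors have completed. */
    gen_wait_dsc(iommu, index, 0, 1, 0, QINVAL_STAT_DONE, &poll_slot);
    qinval_update_qtail(iommu, index);          /* assumed helper */

    while ( poll_slot != QINVAL_STAT_DONE )
        cpu_relax();                            /* ideally bounded by a timeout */
    return 0;
}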
Example #5: gen_iotlb_inv_dsc() writes an IOTLB invalidation descriptor.
static int gen_iotlb_inv_dsc(struct iommu *iommu, int index,
    u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
    qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
    /* Propagate the caller's drain-read/drain-write hints. */
    qinval_entry->q.iotlb_inv_dsc.lo.dr = dr;
    qinval_entry->q.iotlb_inv_dsc.lo.dw = dw;
    qinval_entry->q.iotlb_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.iotlb_inv_dsc.lo.did = did;
    qinval_entry->q.iotlb_inv_dsc.lo.res_2 = 0;

    qinval_entry->q.iotlb_inv_dsc.hi.am = am;
    qinval_entry->q.iotlb_inv_dsc.hi.ih = ih;
    qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
    /* The ADDR field holds the 4K-aligned page address (bits 63:12). */
    qinval_entry->q.iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}
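Note how the second qword encodes the range of a page-selective invalidation: am is an address-mask exponent, so the descriptor covers 2^am contiguous 4K pages and addr must be aligned to that range. A small illustrative helper (the name is made up) for deriving am from a page count:

static u8 npages_to_am(unsigned long npages)
{
    u8 am = 0;

    /* Round up to the next power of two of 4K pages. */
    while ( (1UL << am) < npages )
        am++;
    return am;   /* invalidate 2^am pages starting at the aligned addr */
}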
Example #6: print_vtd_entries() dumps the VT-d root entry, context entry and page-table walk for a given device and guest frame.
File: utils.c Project: 0day-ci/xen
void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn)
{
    struct context_entry *ctxt_entry;
    struct root_entry *root_entry;
    struct dma_pte pte;
    u64 *l, val;
    u32 l_index, level;

    printk("print_vtd_entries: iommu %p dev %04x:%02x:%02x.%u gmfn %"PRIx64"\n",
           iommu, iommu->intel->drhd->segment, bus,
           PCI_SLOT(devfn), PCI_FUNC(devfn), gmfn);

    if ( iommu->root_maddr == 0 )
    {
        printk("    iommu->root_maddr = 0\n");
        return;
    }

    root_entry = (struct root_entry *)map_vtd_domain_page(iommu->root_maddr);
    if ( root_entry == NULL )
    {
        printk("    root_entry == NULL\n");
        return;
    }

    printk("    root_entry = %p\n", root_entry);
    printk("    root_entry[%x] = %"PRIx64"\n", bus, root_entry[bus].val);
    if ( !root_present(root_entry[bus]) )
    {
        unmap_vtd_domain_page(root_entry);
        printk("    root_entry[%x] not present\n", bus);
        return;
    }

    val = root_entry[bus].val;
    unmap_vtd_domain_page(root_entry);
    ctxt_entry = map_vtd_domain_page(val);
    if ( ctxt_entry == NULL )
    {
        printk("    ctxt_entry == NULL\n");
        return;
    }

    printk("    context = %p\n", ctxt_entry);
    val = ctxt_entry[devfn].lo;
    printk("    context[%x] = %"PRIx64"_%"PRIx64"\n",
           devfn, ctxt_entry[devfn].hi, val);
    if ( !context_present(ctxt_entry[devfn]) )
    {
        unmap_vtd_domain_page(ctxt_entry);
        printk("    ctxt_entry[%x] not present\n", devfn);
        return;
    }

    level = agaw_to_level(context_address_width(ctxt_entry[devfn]));
    unmap_vtd_domain_page(ctxt_entry);
    if ( level != VTD_PAGE_TABLE_LEVEL_3 &&
         level != VTD_PAGE_TABLE_LEVEL_4)
    {
        printk("Unsupported VTD page table level (%d)!\n", level);
        return;
    }

    do
    {
        l = map_vtd_domain_page(val);
        printk("    l%d = %p\n", level, l);
        if ( l == NULL )
        {
            printk("    l%d == NULL\n", level);
            break;
        }
        l_index = get_level_index(gmfn, level);
        printk("    l%d_index = %x\n", level, l_index);

        pte.val = l[l_index];
        unmap_vtd_domain_page(l);
        printk("    l%d[%x] = %"PRIx64"\n", level, l_index, pte.val);

        if ( !dma_pte_present(pte) )
        {
            printk("    l%d[%x] not present\n", level, l_index);
            break;
        }
        if ( dma_pte_superpage(pte) )
            break;
        val = dma_pte_addr(pte);
    } while ( --level );
}
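The walk relies on get_level_index() to pick the 9-bit slice of the guest frame number that indexes each level, since VT-d page tables hold 512 entries per 4K table. Its definition is not shown in this example; a plausible equivalent, purely for illustration:

/* Illustrative only: the project's own macro may differ in name and details. */
#define LEVEL_STRIDE   9
#define LEVEL_MASK     ((1u << LEVEL_STRIDE) - 1)

static inline u32 level_index_sketch(u64 gmfn, u32 level)
{
    /* Level 1 uses gmfn bits [8:0], level 2 bits [17:9], and so on. */
    return (gmfn >> ((level - 1) * LEVEL_STRIDE)) & LEVEL_MASK;
}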