Example #1
/*
 * Look for a free intr remap entry.
 * The caller must hold iremap_lock and set up the returned entry before
 * releasing the lock.
 */
static int alloc_remap_entry(struct iommu *iommu)
{
    struct iremap_entry *iremap_entries = NULL;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int i;

    ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );

    for ( i = 0; i < IREMAP_ENTRY_NR; i++ )
    {
        struct iremap_entry *p;
        if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
        {
            /* This entry crosses a page boundary */
            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);

            GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i,
                             iremap_entries, p);
        }
        else
            p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

        if ( p->lo_val == 0 && p->hi_val == 0 ) /* a free entry */
            break;
    }

    if ( iremap_entries )
        unmap_vtd_domain_page(iremap_entries);

    if ( i < IREMAP_ENTRY_NR )
        ir_ctrl->iremap_num++;
    return i;
}
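
A minimal caller sketch, not from the Xen tree, to make the locking contract in the comment concrete; the wrapper name and the -ENOSPC convention are illustrative assumptions.

/* Hypothetical caller: illustrates the iremap_lock discipline only. */
static int example_claim_remap_entry(struct iommu *iommu)
{
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    unsigned long flags;
    int index;

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    index = alloc_remap_entry(iommu);
    if ( index < IREMAP_ENTRY_NR )
    {
        /*
         * Program the entry here, before dropping the lock: a slot counts
         * as free only while lo_val/hi_val are zero, so another CPU could
         * otherwise claim the same index.
         */
    }

    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);

    return index < IREMAP_ENTRY_NR ? index : -ENOSPC;
}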
Example #2
static int remap_entry_to_ioapic_rte(
    struct iommu *iommu, int index, struct IO_xAPIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctrl is not ready\n");
        return -EFAULT;
    }

    if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) for remap table is invalid!\n",
                __func__, index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
                     iremap_entries, iremap_entry);

    if ( iremap_entry->hi_val == 0 && iremap_entry->lo_val == 0 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) get an empty entry!\n",
                __func__, index);
        unmap_vtd_domain_page(iremap_entries);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
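
A hedged usage sketch, not from the Xen tree: the wrapper below (an illustrative assumption) reads back the vector currently programmed at a given remap index.

static int example_irte_vector(struct iommu *iommu, int index)
{
    struct IO_xAPIC_route_entry rte = { };

    /* Fails for a missing ir_ctrl, an out-of-range index, or an empty entry. */
    if ( remap_entry_to_ioapic_rte(iommu, index, &rte) != 0 )
        return -1;

    return rte.vector;
}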
Example #3
static int gen_iec_inv_dsc(struct iommu *iommu, int index,
    u8 granu, u8 im, u16 iidx)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC;
    qinval_entry->q.iec_inv_dsc.lo.granu = granu;
    qinval_entry->q.iec_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.iec_inv_dsc.lo.im = im;
    qinval_entry->q.iec_inv_dsc.lo.iidx = iidx;
    qinval_entry->q.iec_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.iec_inv_dsc.hi.res = 0;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}
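
Writing a descriptor is only half of the protocol: the hardware processes nothing until the queue tail register moves past the new slot. A submission sketch under assumed helpers, qinval_next_index() (slot allocation, -1 when full) and qinval_update_qtail() (tail update); the same pattern applies to the gen_*_inv_dsc helpers in the examples that follow.

static int example_queue_invalidate_iec(struct iommu *iommu,
    u8 granu, u8 im, u16 iidx)
{
    int ret;
    int index = qinval_next_index(iommu);   /* assumed helper */

    if ( index == -1 )
        return -1;

    /* Fill the slot, then publish it by advancing the queue tail. */
    ret = gen_iec_inv_dsc(iommu, index, granu, im, iidx);
    ret |= qinval_update_qtail(iommu, index);
    return ret;
}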
Example #4
static int gen_cc_inv_dsc(struct iommu *iommu, int index,
    u16 did, u16 source_id, u8 function_mask, u8 granu)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT;
    qinval_entry->q.cc_inv_dsc.lo.granu = granu;
    qinval_entry->q.cc_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.cc_inv_dsc.lo.did = did;
    qinval_entry->q.cc_inv_dsc.lo.sid = source_id;
    qinval_entry->q.cc_inv_dsc.lo.fm = function_mask;
    qinval_entry->q.cc_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.cc_inv_dsc.hi.res = 0;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);

    return 0;
}
Example #5
static int gen_dev_iotlb_inv_dsc(struct iommu *iommu, int index,
    u32 max_invs_pend, u16 sid, u16 size, u64 addr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = max_invs_pend;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_2 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = sid;
    qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0;

    qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size;
    qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0;
    qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}
Example #6
static int gen_wait_dsc(struct iommu *iommu, int index,
    u8 iflag, u8 sw, u8 fn, u32 sdata, volatile u32 *saddr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);
    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT;
    qinval_entry->q.inv_wait_dsc.lo.iflag = iflag;
    qinval_entry->q.inv_wait_dsc.lo.sw = sw;
    qinval_entry->q.inv_wait_dsc.lo.fn = fn;
    qinval_entry->q.inv_wait_dsc.lo.res_1 = 0;
    qinval_entry->q.inv_wait_dsc.lo.sdata = sdata;
    qinval_entry->q.inv_wait_dsc.hi.res_1 = 0;
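    /* Bits 63:2 of the status address are stored below, so saddr must be
     * 4-byte aligned (VT-d invalidation wait descriptor format). */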
    qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(saddr) >> 2;
    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}
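
With sw set, the hardware writes sdata to saddr once every descriptor queued ahead of the wait descriptor has completed; polling that status word is how software synchronises with the queue. A sketch under the same assumed helpers as above; QINVAL_STAT_INIT/QINVAL_STAT_DONE are assumed "pending"/"done" markers.

static int example_invalidate_sync(struct iommu *iommu)
{
    volatile u32 poll_slot = QINVAL_STAT_INIT;
    int ret, index = qinval_next_index(iommu);

    if ( index == -1 )
        return -1;

    /* iflag=0: no fault interrupt; sw=1: status write; fn=0: no fencing. */
    ret = gen_wait_dsc(iommu, index, 0, 1, 0, QINVAL_STAT_DONE, &poll_slot);
    ret |= qinval_update_qtail(iommu, index);
    if ( ret )
        return ret;

    /* A production caller would bound this loop with a timeout. */
    while ( poll_slot != QINVAL_STAT_DONE )
        cpu_relax();

    return 0;
}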
Example #7
static int gen_iotlb_inv_dsc(struct iommu *iommu, int index,
    u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
{
    unsigned long flags;
    struct qinval_entry *qinval_entry = NULL, *qinval_entries;
    struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu);

    if ( index == -1 )
        return -1;
    spin_lock_irqsave(&qi_ctrl->qinval_lock, flags);

    qinval_entries =
        (struct qinval_entry *)map_vtd_domain_page(qi_ctrl->qinval_maddr);
    qinval_entry = &qinval_entries[index];
    qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB;
    qinval_entry->q.iotlb_inv_dsc.lo.granu = granu;
    qinval_entry->q.iotlb_inv_dsc.lo.dr = dr;
    qinval_entry->q.iotlb_inv_dsc.lo.dw = dw;
    qinval_entry->q.iotlb_inv_dsc.lo.res_1 = 0;
    qinval_entry->q.iotlb_inv_dsc.lo.did = did;
    qinval_entry->q.iotlb_inv_dsc.lo.res_2 = 0;

    qinval_entry->q.iotlb_inv_dsc.hi.am = am;
    qinval_entry->q.iotlb_inv_dsc.hi.ih = ih;
    qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0;
    qinval_entry->q.iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K;

    unmap_vtd_domain_page(qinval_entries);
    spin_unlock_irqrestore(&qi_ctrl->qinval_lock, flags);
    return 0;
}
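
For a page-selective flush, am is an address mask: the descriptor covers 2^am 4K pages starting at addr, which must be aligned to that size. A hedged caller sketch; the granularity value 3 (page-selective within a domain) comes from the VT-d descriptor format, and the queue helpers are the same assumptions as above.

static int example_flush_iotlb_range(struct iommu *iommu, u16 did,
                                     u64 addr, unsigned int order)
{
    int ret, index = qinval_next_index(iommu);

    if ( index == -1 )
        return -1;

    /* addr must be 2^(12 + order)-byte aligned for am to cover the range. */
    ASSERT( !(addr & ((1UL << (PAGE_SHIFT_4K + order)) - 1)) );

    ret = gen_iotlb_inv_dsc(iommu, index, 3 /* page-selective */,
                            0, 0, did, order, 0, addr);
    ret |= qinval_update_qtail(iommu, index);
    return ret;
}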
Example #8
/*
 * Look for a free intr remap entry (or a contiguous set thereof).
 * The caller must hold iremap_lock and set up the returned entries before
 * releasing the lock.
 */
static unsigned int alloc_remap_entry(struct iommu *iommu, unsigned int nr)
{
    struct iremap_entry *iremap_entries = NULL;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    unsigned int i, found;

    ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );

    for ( found = i = 0; i < IREMAP_ENTRY_NR; i++ )
    {
        struct iremap_entry *p;
        if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
        {
            /* This entry crosses a page boundary */
            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);

            GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i,
                             iremap_entries, p);
        }
        else
            p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

        if ( p->lo_val || p->hi_val ) /* not a free entry */
            found = 0;
        else if ( ++found == nr )
            break;
    }

    if ( iremap_entries )
        unmap_vtd_domain_page(iremap_entries);

    if ( i < IREMAP_ENTRY_NR )
        ir_ctrl->iremap_num += nr;
    return i;
}
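
Note the return convention: on success i is the index of the last entry of the free run, so the first usable index is i - nr + 1. A hypothetical wrapper making that explicit:

/* Claims nr consecutive IRTEs and returns the index of the first one,
 * or -ENOSPC.  iremap_lock must already be held. */
static int example_claim_remap_block(struct iommu *iommu, unsigned int nr)
{
    unsigned int last = alloc_remap_entry(iommu, nr);

    if ( last >= IREMAP_ENTRY_NR )
        return -ENOSPC;

    return last - nr + 1;
}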
Example #9
/* Mark specified intr remap entry as free */
static void free_remap_entry(struct iommu *iommu, int index)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
        return;

    ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );

    GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
                     iremap_entries, iremap_entry);

    memset(iremap_entry, 0, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
    iommu_flush_iec_index(iommu, 0, index);

    unmap_vtd_domain_page(iremap_entries);
    ir_ctrl->iremap_num--;
}
Example #10
void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn)
{
    struct context_entry *ctxt_entry;
    struct root_entry *root_entry;
    struct dma_pte pte;
    u64 *l, val;
    u32 l_index, level;

    printk("print_vtd_entries: iommu %p dev %04x:%02x:%02x.%u gmfn %"PRIx64"\n",
           iommu, iommu->intel->drhd->segment, bus,
           PCI_SLOT(devfn), PCI_FUNC(devfn), gmfn);

    if ( iommu->root_maddr == 0 )
    {
        printk("    iommu->root_maddr = 0\n");
        return;
    }

    root_entry = (struct root_entry *)map_vtd_domain_page(iommu->root_maddr);
    if ( root_entry == NULL )
    {
        printk("    root_entry == NULL\n");
        return;
    }

    printk("    root_entry = %p\n", root_entry);
    printk("    root_entry[%x] = %"PRIx64"\n", bus, root_entry[bus].val);
    if ( !root_present(root_entry[bus]) )
    {
        unmap_vtd_domain_page(root_entry);
        printk("    root_entry[%x] not present\n", bus);
        return;
    }

    val = root_entry[bus].val;
    unmap_vtd_domain_page(root_entry);
    ctxt_entry = map_vtd_domain_page(val);
    if ( ctxt_entry == NULL )
    {
        printk("    ctxt_entry == NULL\n");
        return;
    }

    printk("    context = %p\n", ctxt_entry);
    val = ctxt_entry[devfn].lo;
    printk("    context[%x] = %"PRIx64"_%"PRIx64"\n",
           devfn, ctxt_entry[devfn].hi, val);
    if ( !context_present(ctxt_entry[devfn]) )
    {
        unmap_vtd_domain_page(ctxt_entry);
        printk("    ctxt_entry[%x] not present\n", devfn);
        return;
    }

    level = agaw_to_level(context_address_width(ctxt_entry[devfn]));
    unmap_vtd_domain_page(ctxt_entry);
    if ( level != VTD_PAGE_TABLE_LEVEL_3 &&
         level != VTD_PAGE_TABLE_LEVEL_4 )
    {
        printk("Unsupported VTD page table level (%d)!\n", level);
        return;
    }

    do
    {
        l = map_vtd_domain_page(val);
        printk("    l%d = %p\n", level, l);
        if ( l == NULL )
        {
            printk("    l%d == NULL\n", level);
            break;
        }
        l_index = get_level_index(gmfn, level);
        printk("    l%d_index = %x\n", level, l_index);

        pte.val = l[l_index];
        unmap_vtd_domain_page(l);
        printk("    l%d[%x] = %"PRIx64"\n", level, l_index, pte.val);

        if ( !dma_pte_present(pte) )
        {
            printk("    l%d[%x] not present\n", level, l_index);
            break;
        }
        if ( dma_pte_superpage(pte) )
            break;
        val = dma_pte_addr(pte);
    } while ( --level );
}
Example #11
void vtd_dump_iommu_info(unsigned char key)
{
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int i;

    for_each_drhd_unit ( drhd )
    {
        u32 status = 0;

        iommu = drhd->iommu;
        printk("\niommu %x: nr_pt_levels = %x.\n", iommu->index,
            iommu->nr_pt_levels);

        if ( ecap_queued_inval(iommu->ecap) || ecap_intr_remap(iommu->ecap) )
            status = dmar_readl(iommu->reg, DMAR_GSTS_REG);

        printk("  Queued Invalidation: %ssupported%s.\n",
               ecap_queued_inval(iommu->ecap) ? "" : "not ",
               (status & DMA_GSTS_QIES) ? " and enabled" : "");

        printk("  Interrupt Remapping: %ssupported%s.\n",
               ecap_intr_remap(iommu->ecap) ? "" : "not ",
               (status & DMA_GSTS_IRES) ? " and enabled" : "");

        printk("  Interrupt Posting: %ssupported.\n",
               cap_intr_post(iommu->cap) ? "" : "not ");

        if ( status & DMA_GSTS_IRES )
        {
            /* Dump interrupt remapping table. */
            u64 iremap_maddr = dmar_readq(iommu->reg, DMAR_IRTA_REG);
            int nr_entry = 1 << ((iremap_maddr & 0xF) + 1);
            struct iremap_entry *iremap_entries = NULL;
            int print_cnt = 0;

            printk("  Interrupt remapping table (nr_entry=%#x. "
                "Only dump P=1 entries here):\n", nr_entry);
            printk("R means remapped format, P means posted format.\n");
            printk("R:       SVT  SQ   SID  V  AVL FPD      DST DLM TM RH DM P\n");
            printk("P:       SVT  SQ   SID  V  AVL FPD              PDA  URG P\n");
            for ( i = 0; i < nr_entry; i++ )
            {
                struct iremap_entry *p;
                if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
                {
                    /* This entry crosses a page boundary */
                    if ( iremap_entries )
                        unmap_vtd_domain_page(iremap_entries);

                    GET_IREMAP_ENTRY(iremap_maddr, i,
                                     iremap_entries, p);
                }
                else
                    p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

                if ( !p->remap.p )
                    continue;
                if ( !p->remap.im )
                    printk("R:  %04x:  %x   %x  %04x %02x    %x   %x %08x   %x  %x  %x  %x %x\n",
                           i,
                           p->remap.svt, p->remap.sq, p->remap.sid,
                           p->remap.vector, p->remap.avail, p->remap.fpd,
                           p->remap.dst, p->remap.dlm, p->remap.tm, p->remap.rh,
                           p->remap.dm, p->remap.p);
                else
                    printk("P:  %04x:  %x   %x  %04x %02x    %x   %x %16lx    %x %x\n",
                           i,
                           p->post.svt, p->post.sq, p->post.sid, p->post.vector,
                           p->post.avail, p->post.fpd,
                           ((u64)p->post.pda_h << 32) | (p->post.pda_l << 6),
                           p->post.urg, p->post.p);

                print_cnt++;
            }
            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);
            if ( iommu_ir_ctrl(iommu)->iremap_num != print_cnt )
                printk("Warning: printed %d IRTEs, but iremap_num is %d!\n",
                       print_cnt, iommu_ir_ctrl(iommu)->iremap_num);

        }
    }

    /* Dump the I/O xAPIC redirection table(s). */
    if ( iommu_enabled )
    {
        int apic;
        union IO_APIC_reg_01 reg_01;
        struct IO_APIC_route_remap_entry *remap;
        struct ir_ctrl *ir_ctrl;

        for ( apic = 0; apic < nr_ioapics; apic++ )
        {
            iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
            ir_ctrl = iommu_ir_ctrl(iommu);
            if ( !ir_ctrl || !ir_ctrl->iremap_maddr || !ir_ctrl->iremap_num )
                continue;

            printk("\nRedirection table of IOAPIC %x:\n", apic);

            /* IO xAPIC Version Register. */
            reg_01.raw = __io_apic_read(apic, 1);

            printk("  #entry IDX FMT MASK TRIG IRR POL STAT DELI  VECTOR\n");
            for ( i = 0; i <= reg_01.bits.entries; i++ )
            {
                struct IO_APIC_route_entry rte =
                    __ioapic_read_entry(apic, i, TRUE);

                remap = (struct IO_APIC_route_remap_entry *) &rte;
                if ( !remap->format )
                    continue;

                printk("   %02x:  %04x   %x    %x   %x   %x   %x    %x"
                    "    %x     %02x\n", i,
                    remap->index_0_14 | (remap->index_15 << 15),
                    remap->format, remap->mask, remap->trigger, remap->irr,
                    remap->polarity, remap->delivery_status, remap->delivery_mode,
                    remap->vector);
            }
        }
    }
}