Example #1
/*
 * Look for a free intr remap entry.
 * The caller must hold iremap_lock, and must set up the returned entry
 * before releasing the lock.
 */
static int alloc_remap_entry(struct iommu *iommu)
{
    struct iremap_entry *iremap_entries = NULL;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    int i;

    ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );

    for ( i = 0; i < IREMAP_ENTRY_NR; i++ )
    {
        struct iremap_entry *p;
        if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
        {
            /* This entry crosses a page boundary */
            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);

            GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i,
                             iremap_entries, p);
        }
        else
            p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

        if ( p->lo_val == 0 && p->hi_val == 0 ) /* a free entry */
            break;
    }

    if ( iremap_entries )
        unmap_vtd_domain_page(iremap_entries);

    if ( i < IREMAP_ENTRY_NR ) 
        ir_ctrl->iremap_num++;
    return i;
}
Example #2
static int ioapic_rte_to_remap_entry(struct iommu *iommu,
                                     int apic, unsigned int ioapic_pin,
                                     struct IO_xAPIC_route_entry *old_rte,
                                     unsigned int rte_upper, unsigned int value)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct iremap_entry new_ire;
    struct IO_APIC_route_remap_entry *remap_rte;
    struct IO_xAPIC_route_entry new_rte;
    int index;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    /* Reuse any IRTE index previously allocated for this (apic, pin). */
    index = apic_pin_2_ir_idx[apic][ioapic_pin];
    if ( index < 0 )
    {
        index = alloc_remap_entry(iommu);
        if ( index < IREMAP_ENTRY_NR )
            apic_pin_2_ir_idx[apic][ioapic_pin] = index;
    }

    if ( index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: intremap index (%d) is larger than"
                " the maximum index (%d)!\n",
                __func__, index, IREMAP_ENTRY_NR - 1);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
                     iremap_entries, iremap_entry);

    memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));

    if ( rte_upper )
    {
#if defined(__i386__) || defined(__x86_64__)
        if ( x2apic_enabled )
            new_ire.lo.dst = value;
        else
            new_ire.lo.dst = (value >> 24) << 8;
#endif
    }
    else
    {
Example #3
static int remap_entry_to_ioapic_rte(
    struct iommu *iommu, int index, struct IO_xAPIC_route_entry *old_rte)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    unsigned long flags;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( ir_ctrl == NULL )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "remap_entry_to_ioapic_rte: ir_ctl is not ready\n");
        return -EFAULT;
    }

    if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) for remap table is invalid !\n",
                __func__, index);
        return -EFAULT;
    }

    spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);

    GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
                     iremap_entries, iremap_entry);

    if ( iremap_entry->hi_val == 0 && iremap_entry->lo_val == 0 )
    {
        dprintk(XENLOG_ERR VTDPREFIX,
                "%s: index (%d) get an empty entry!\n",
                __func__, index);
        unmap_vtd_domain_page(iremap_entries);
        spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
        return -EFAULT;
    }

    /* Translate the IRTE fields back into xAPIC RTE format. */
    old_rte->vector = iremap_entry->lo.vector;
    old_rte->delivery_mode = iremap_entry->lo.dlm;
    old_rte->dest_mode = iremap_entry->lo.dm;
    old_rte->trigger = iremap_entry->lo.tm;
    old_rte->__reserved_2 = 0;
    old_rte->dest.logical.__reserved_1 = 0;
    old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;

    unmap_vtd_domain_page(iremap_entries);
    spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
    return 0;
}
Example #4
/* Mark specified intr remap entry as free */
static void free_remap_entry(struct iommu *iommu, int index)
{
    struct iremap_entry *iremap_entry = NULL, *iremap_entries;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);

    if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
        return;

    ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );

    GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
                     iremap_entries, iremap_entry);

    memset(iremap_entry, 0, sizeof(struct iremap_entry));
    iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
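    /* Invalidate the interrupt entry cache (IEC) for this index. */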
    iommu_flush_iec_index(iommu, 0, index);

    unmap_vtd_domain_page(iremap_entries);
    ir_ctrl->iremap_num--;
}
Example #5
/*
 * Look for a free intr remap entry (or a contiguous set thereof).
 * The caller must hold iremap_lock, and must set up the returned entries
 * before releasing the lock.
 */
static unsigned int alloc_remap_entry(struct iommu *iommu, unsigned int nr)
{
    struct iremap_entry *iremap_entries = NULL;
    struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
    unsigned int i, found;

    ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );

    for ( found = i = 0; i < IREMAP_ENTRY_NR; i++ )
    {
        struct iremap_entry *p;
        if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
        {
            /* This entry crosses a page boundary */
            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);

            GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i,
                             iremap_entries, p);
        }
        else
            p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

        /* A used entry breaks the run, so restart the count of free entries. */
        if ( p->lo_val || p->hi_val ) /* not a free entry */
            found = 0;
        else if ( ++found == nr )
            break;
    }

    if ( iremap_entries )
        unmap_vtd_domain_page(iremap_entries);

    if ( i < IREMAP_ENTRY_NR ) 
        ir_ctrl->iremap_num += nr;
    return i;
}
Example #6
void vtd_dump_iommu_info(unsigned char key)
{
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int i;

    for_each_drhd_unit ( drhd )
    {
        u32 status = 0;

        iommu = drhd->iommu;
        printk("\niommu %x: nr_pt_levels = %x.\n", iommu->index,
            iommu->nr_pt_levels);

        if ( ecap_queued_inval(iommu->ecap) || ecap_intr_remap(iommu->ecap) )
            status = dmar_readl(iommu->reg, DMAR_GSTS_REG);

        printk("  Queued Invalidation: %ssupported%s.\n",
            ecap_queued_inval(iommu->ecap) ? "" : "not ",
            (status & DMA_GSTS_QIES) ? " and enabled" : "" );

        printk("  Interrupt Remapping: %ssupported%s.\n",
            ecap_intr_remap(iommu->ecap) ? "" : "not ",
            (status & DMA_GSTS_IRES) ? " and enabled" : "" );

        printk("  Interrupt Posting: %ssupported.\n",
               cap_intr_post(iommu->cap) ? "" : "not ");

        if ( status & DMA_GSTS_IRES )
        {
            /* Dump interrupt remapping table. */
            u64 iremap_maddr = dmar_readq(iommu->reg, DMAR_IRTA_REG);
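            /* Bits 3:0 of IRTA encode the table size as 2^(X+1) entries. */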
            int nr_entry = 1 << ((iremap_maddr & 0xF) + 1);
            struct iremap_entry *iremap_entries = NULL;
            int print_cnt = 0;

            printk("  Interrupt remapping table (nr_entry=%#x. "
                "Only dump P=1 entries here):\n", nr_entry);
            printk("R means remapped format, P means posted format.\n");
            printk("R:       SVT  SQ   SID  V  AVL FPD      DST DLM TM RH DM P\n");
            printk("P:       SVT  SQ   SID  V  AVL FPD              PDA  URG P\n");
            for ( i = 0; i < nr_entry; i++ )
            {
                struct iremap_entry *p;
                if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
                {
                    /* This entry crosses a page boundary */
                    if ( iremap_entries )
                        unmap_vtd_domain_page(iremap_entries);

                    GET_IREMAP_ENTRY(iremap_maddr, i,
                                     iremap_entries, p);
                }
                else
                    p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

                if ( !p->remap.p )
                    continue;
                if ( !p->remap.im )
                    printk("R:  %04x:  %x   %x  %04x %02x    %x   %x %08x   %x  %x  %x  %x %x\n",
                           i,
                           p->remap.svt, p->remap.sq, p->remap.sid,
                           p->remap.vector, p->remap.avail, p->remap.fpd,
                           p->remap.dst, p->remap.dlm, p->remap.tm, p->remap.rh,
                           p->remap.dm, p->remap.p);
                else
                    printk("P:  %04x:  %x   %x  %04x %02x    %x   %x %16lx    %x %x\n",
                           i,
                           p->post.svt, p->post.sq, p->post.sid, p->post.vector,
                           p->post.avail, p->post.fpd,
                           ((u64)p->post.pda_h << 32) | (p->post.pda_l << 6),
                           p->post.urg, p->post.p);

                print_cnt++;
            }
            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);
            if ( iommu_ir_ctrl(iommu)->iremap_num != print_cnt )
                printk("Warning: Print %d IRTE (actually have %d)!\n",
                        print_cnt, iommu_ir_ctrl(iommu)->iremap_num);

        }
    }

    /* Dump the I/O xAPIC redirection table(s). */
    if ( iommu_enabled )
    {
        int apic;
        union IO_APIC_reg_01 reg_01;
        struct IO_APIC_route_remap_entry *remap;
        struct ir_ctrl *ir_ctrl;

        for ( apic = 0; apic < nr_ioapics; apic++ )
        {
            iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
            ir_ctrl = iommu_ir_ctrl(iommu);
            if ( !ir_ctrl || !ir_ctrl->iremap_maddr || !ir_ctrl->iremap_num )
                continue;

            printk( "\nRedirection table of IOAPIC %x:\n", apic);

            /* IO xAPIC Version Register. */
            reg_01.raw = __io_apic_read(apic, 1);

            printk("  #entry IDX FMT MASK TRIG IRR POL STAT DELI  VECTOR\n");
            for ( i = 0; i <= reg_01.bits.entries; i++ )
            {
                struct IO_APIC_route_entry rte =
                    __ioapic_read_entry(apic, i, TRUE);

                remap = (struct IO_APIC_route_remap_entry *) &rte;
                if ( !remap->format )
                    continue;

                printk("   %02x:  %04x   %x    %x   %x   %x   %x    %x"
                    "    %x     %02x\n", i,
                    remap->index_0_14 | (remap->index_15 << 15),
                    remap->format, remap->mask, remap->trigger, remap->irr,
                    remap->polarity, remap->delivery_status, remap->delivery_mode,
                    remap->vector);
            }
        }
    }
}