/* Dump the Queued Invalidation registers (IQA/IQH/IQT) of an IOMMU. */
static void print_qi_regs(struct iommu *iommu)
{
    u64 iqa, iqh, iqt;

    iqa = dmar_readq(iommu->reg, DMAR_IQA_REG);
    iqh = dmar_readq(iommu->reg, DMAR_IQH_REG);
    iqt = dmar_readq(iommu->reg, DMAR_IQT_REG);

    printk("DMAR_IQA_REG = %"PRIx64"\n", iqa);
    printk("DMAR_IQH_REG = %"PRIx64"\n", iqh);
    printk("DMAR_IQT_REG = %"PRIx64"\n", iqt);
}
/* * Disable Interrupt Remapping. */ static void iommu_disable_intr_remapping(struct intel_iommu *iommu) { unsigned long flags; u32 sts; if (!ecap_ir_support(iommu->ecap)) return; /* * global invalidation of interrupt entry cache before disabling * interrupt-remapping. */ qi_global_iec(iommu); spin_lock_irqsave(&iommu->register_lock, flags); sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); if (!(sts & DMA_GSTS_IRES)) goto end; iommu->gcmd &= ~DMA_GCMD_IRE; writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, !(sts & DMA_GSTS_IRES), sts); end: spin_unlock_irqrestore(&iommu->register_lock, flags); }
void print_iommu_regs(struct acpi_drhd_unit *drhd) { struct iommu *iommu = drhd->iommu; u64 cap; printk("---- print_iommu_regs ----\n"); printk(" drhd->address = %"PRIx64"\n", drhd->address); printk(" VER = %x\n", dmar_readl(iommu->reg, DMAR_VER_REG)); printk(" CAP = %"PRIx64"\n", cap = dmar_readq(iommu->reg, DMAR_CAP_REG)); printk(" n_fault_reg = %"PRIx64"\n", cap_num_fault_regs(cap)); printk(" fault_recording_offset = %"PRIx64"\n", cap_fault_reg_offset(cap)); if ( cap_fault_reg_offset(cap) < PAGE_SIZE ) { printk(" fault_recording_reg_l = %"PRIx64"\n", dmar_readq(iommu->reg, cap_fault_reg_offset(cap))); printk(" fault_recording_reg_h = %"PRIx64"\n", dmar_readq(iommu->reg, cap_fault_reg_offset(cap) + 8)); } printk(" ECAP = %"PRIx64"\n", dmar_readq(iommu->reg, DMAR_ECAP_REG)); printk(" GCMD = %x\n", dmar_readl(iommu->reg, DMAR_GCMD_REG)); printk(" GSTS = %x\n", dmar_readl(iommu->reg, DMAR_GSTS_REG)); printk(" RTADDR = %"PRIx64"\n", dmar_readq(iommu->reg,DMAR_RTADDR_REG)); printk(" CCMD = %"PRIx64"\n", dmar_readq(iommu->reg, DMAR_CCMD_REG)); printk(" FSTS = %x\n", dmar_readl(iommu->reg, DMAR_FSTS_REG)); printk(" FECTL = %x\n", dmar_readl(iommu->reg, DMAR_FECTL_REG)); printk(" FEDATA = %x\n", dmar_readl(iommu->reg, DMAR_FEDATA_REG)); printk(" FEADDR = %x\n", dmar_readl(iommu->reg, DMAR_FEADDR_REG)); printk(" FEUADDR = %x\n", dmar_readl(iommu->reg, DMAR_FEUADDR_REG)); }
/*
 * Flush the interrupt entry cache: queue an IEC invalidation descriptor,
 * wait for it to complete, then drain via an architectural register read.
 * Returns 0 on success, non-zero if either step failed.
 */
int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
{
    int rc = queue_invalidate_iec(iommu, granu, im, iidx);

    rc |= invalidate_sync(iommu);

    /*
     * reading vt-d architecture register will ensure
     * draining happens in implementation independent way.
     */
    (void)dmar_readq(iommu->reg, DMAR_CAP_REG);

    return rc;
}
/*
 * Inherit the interrupt remapping table left programmed by the previous
 * (crashed) kernel when booting into a kdump kernel: copy the old table
 * into our own and mark its live entries as allocated.
 *
 * Returns 0 on success, -EINVAL if not in kdump mode or the old table
 * size does not match ours, -ENOMEM if the old table cannot be mapped.
 */
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	/*
	 * Outside of kdump there is no previous kernel to inherit from;
	 * disable the stale IR setup instead of reusing it.
	 */
	if (!is_kdump_kernel()) {
		pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
			iommu->name);
		clear_ir_pre_enabled(iommu);
		iommu_disable_irq_remapping(iommu);
		return -EINVAL;
	}

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size     = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	/* Make the copied table visible to the IOMMU hardware. */
	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}
/* * Disable Interrupt Remapping. */ static void disable_intr_remapping(struct intel_iommu *iommu) { unsigned long flags; u32 sts; if (!ecap_ir_support(iommu->ecap)) return; spin_lock_irqsave(&iommu->register_lock, flags); sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); if (!(sts & DMA_GSTS_IRES)) goto end; iommu->gcmd &= ~DMA_GCMD_IRE; writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, !(sts & DMA_GSTS_IRES), sts); end: spin_unlock_irqrestore(&iommu->register_lock, flags); }
/*
 * Return the index of the next free slot in the invalidation queue,
 * taken from the hardware queue tail register.
 */
static int qinval_next_index(struct iommu *iommu)
{
    u64 tail = dmar_readq(iommu->reg, DMAR_IQT_REG);

    /* Descriptors are 16 bytes, so bits 18:4 of IQT hold the index. */
    return tail >> 4;
}
/*
 * Debug-key handler: dump per-IOMMU capabilities and status, the
 * interrupt remapping table of every IOMMU with IR enabled, and the
 * remapped-format entries of every I/O xAPIC redirection table.
 */
void vtd_dump_iommu_info(unsigned char key)
{
    struct acpi_drhd_unit *drhd;
    struct iommu *iommu;
    int i;

    for_each_drhd_unit ( drhd )
    {
        u32 status = 0;

        iommu = drhd->iommu;
        printk("\niommu %x: nr_pt_levels = %x.\n",
               iommu->index, iommu->nr_pt_levels);

        /* GSTS is only meaningful when QI or IR is supported. */
        if ( ecap_queued_inval(iommu->ecap) || ecap_intr_remap(iommu->ecap) )
            status = dmar_readl(iommu->reg, DMAR_GSTS_REG);

        printk(" Queued Invalidation: %ssupported%s.\n",
               ecap_queued_inval(iommu->ecap) ? "" : "not ",
               (status & DMA_GSTS_QIES) ? " and enabled" : "" );

        printk(" Interrupt Remapping: %ssupported%s.\n",
               ecap_intr_remap(iommu->ecap) ? "" : "not ",
               (status & DMA_GSTS_IRES) ? " and enabled" : "" );

        printk(" Interrupt Posting: %ssupported.\n",
               cap_intr_post(iommu->cap) ? "" : "not ");

        if ( status & DMA_GSTS_IRES )
        {
            /* Dump interrupt remapping table. */
            u64 iremap_maddr = dmar_readq(iommu->reg, DMAR_IRTA_REG);
            /* Low 4 bits of IRTA encode table size as 2^(X+1) entries. */
            int nr_entry = 1 << ((iremap_maddr & 0xF) + 1);
            struct iremap_entry *iremap_entries = NULL;
            int print_cnt = 0;

            printk(" Interrupt remapping table (nr_entry=%#x. "
                   "Only dump P=1 entries here):\n", nr_entry);
            printk("R means remapped format, P means posted format.\n");
            printk("R: SVT SQ SID V AVL FPD DST DLM TM RH DM P\n");
            printk("P: SVT SQ SID V AVL FPD PDA URG P\n");

            for ( i = 0; i < nr_entry; i++ )
            {
                struct iremap_entry *p;

                /* Re-map the backing page whenever the index crosses one. */
                if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
                {
                    /* This entry across page boundry */
                    if ( iremap_entries )
                        unmap_vtd_domain_page(iremap_entries);

                    GET_IREMAP_ENTRY(iremap_maddr, i,
                                     iremap_entries, p);
                }
                else
                    p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];

                /* Skip entries whose Present bit is clear. */
                if ( !p->remap.p )
                    continue;

                /* IM bit selects remapped vs posted entry layout. */
                if ( !p->remap.im )
                    printk("R: %04x: %x %x %04x %02x %x %x %08x %x %x %x %x %x\n",
                           i, p->remap.svt, p->remap.sq, p->remap.sid,
                           p->remap.vector, p->remap.avail, p->remap.fpd,
                           p->remap.dst, p->remap.dlm, p->remap.tm,
                           p->remap.rh, p->remap.dm, p->remap.p);
                else
                    printk("P: %04x: %x %x %04x %02x %x %x %16lx %x %x\n",
                           i, p->post.svt, p->post.sq, p->post.sid,
                           p->post.vector, p->post.avail, p->post.fpd,
                           ((u64)p->post.pda_h << 32) | (p->post.pda_l << 6),
                           p->post.urg, p->post.p);

                print_cnt++;
            }

            if ( iremap_entries )
                unmap_vtd_domain_page(iremap_entries);

            /* Cross-check against the software bookkeeping count. */
            if ( iommu_ir_ctrl(iommu)->iremap_num != print_cnt )
                printk("Warning: Print %d IRTE (actually have %d)!\n",
                       print_cnt, iommu_ir_ctrl(iommu)->iremap_num);
        }
    }

    /* Dump the I/O xAPIC redirection table(s). */
    if ( iommu_enabled )
    {
        int apic;
        union IO_APIC_reg_01 reg_01;
        struct IO_APIC_route_remap_entry *remap;
        struct ir_ctrl *ir_ctrl;

        for ( apic = 0; apic < nr_ioapics; apic++ )
        {
            iommu = ioapic_to_iommu(mp_ioapics[apic].mpc_apicid);
            ir_ctrl = iommu_ir_ctrl(iommu);

            /* Skip IOAPICs whose IOMMU has no live remapping table. */
            if ( !ir_ctrl || !ir_ctrl->iremap_maddr || !ir_ctrl->iremap_num )
                continue;

            printk( "\nRedirection table of IOAPIC %x:\n", apic);

            /* IO xAPIC Version Register. */
            reg_01.raw = __io_apic_read(apic, 1);

            printk(" #entry IDX FMT MASK TRIG IRR POL STAT DELI VECTOR\n");
            for ( i = 0; i <= reg_01.bits.entries; i++ )
            {
                struct IO_APIC_route_entry rte =
                    __ioapic_read_entry(apic, i, TRUE);

                remap = (struct IO_APIC_route_remap_entry *) &rte;

                /* Only dump entries in remapped format. */
                if ( !remap->format )
                    continue;

                printk(" %02x: %04x %x %x %x %x %x %x"
                       " %x %02x\n", i,
                       remap->index_0_14 | (remap->index_15 << 15),
                       remap->format, remap->mask, remap->trigger,
                       remap->irr, remap->polarity, remap->delivery_status,
                       remap->delivery_mode, remap->vector);
            }
        }
    }
}