int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	/* Quiesce the page request queue by clearing its registers. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	/* Release the page-request interrupt. */
	free_irq(iommu->pr_irq, iommu);
	dmar_free_hwirq(iommu->pr_irq);
	iommu->pr_irq = 0;

	/* Free the queue pages themselves. */
	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of interrupt entry cache to make sure the
	 * hardware uses the new irq remapping table.
	 */
	qi_global_iec(iommu);
}
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Globally invalidate the interrupt entry cache before enabling
	 * interrupt-remapping, so no stale entries can be used.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static int qinval_update_qtail(struct iommu *iommu, int index)
{
    u64 val;

    /* The caller must hold the register lock when updating the tail. */
    ASSERT(spin_is_locked(&iommu->register_lock));

    val = (index < (QINVAL_ENTRY_NR - 1)) ? (index + 1) : 0;
    dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << 4));

    return 0;
}
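/*
 * Illustrative sketch, not part of the driver source above: the tail
 * update in qinval_update_qtail() is a standard circular-buffer
 * increment. (index < (N-1)) ? (index + 1) : 0 is equivalent to
 * (index + 1) % N, and the "<< 4" scales the entry index into a byte
 * offset for 16-byte queue descriptors before it reaches the tail
 * register. QUEUE_ENTRIES, ENTRY_SHIFT and this standalone demo are
 * hypothetical stand-ins, chosen only to make the arithmetic runnable.
 */
#include <assert.h>
#include <stdio.h>

#define QUEUE_ENTRIES 256	/* stand-in for QINVAL_ENTRY_NR */
#define ENTRY_SHIFT   4		/* 16-byte descriptors */

static unsigned long next_tail(unsigned long index)
{
	/* Wrap to slot 0 after the last slot, as the driver code does. */
	return (index < (QUEUE_ENTRIES - 1)) ? (index + 1) : 0;
}

int main(void)
{
	assert(next_tail(0) == 1);
	assert(next_tail(QUEUE_ENTRIES - 1) == 0);	/* wrap-around */
	assert(next_tail(5) == (5 + 1) % QUEUE_ENTRIES);

	/* Byte offset that would be written to the tail register. */
	printf("tail register value: %#lx\n", next_tail(5) << ENTRY_SHIFT);
	return 0;
}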
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id,
			       iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
	err:
		free_pages((unsigned long)iommu->prq, PRQ_ORDER);
		iommu->prq = NULL;
		return ret;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name),
		 "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		dmar_free_hwirq(irq);
		iommu->pr_irq = 0;
		goto err;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG,
		    virt_to_phys(iommu->prq) | PRQ_ORDER);

	return 0;
}
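/*
 * Illustrative sketch, not driver code: the final dmar_writeq() above
 * packs the physical base of the page request queue and its size (as a
 * page order) into one 64-bit register value. Because the base is page
 * aligned, its low bits are guaranteed zero and can carry the order
 * field. This standalone demo shows the same pack/unpack arithmetic;
 * PAGE_SHIFT_DEMO and pack_queue_reg() are assumptions for illustration,
 * not the kernel's API.
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT_DEMO 12				/* 4 KiB pages */
#define ORDER_MASK	((1ULL << PAGE_SHIFT_DEMO) - 1)

static uint64_t pack_queue_reg(uint64_t phys_base, unsigned order)
{
	/* The base must be page aligned, so the low 12 bits are free. */
	assert((phys_base & ORDER_MASK) == 0);
	return phys_base | order;
}

int main(void)
{
	uint64_t reg = pack_queue_reg(0x12345000ULL, 3);

	assert((reg & ~ORDER_MASK) == 0x12345000ULL);	/* recover base  */
	assert((reg & ORDER_MASK) == 3);		/* recover order */
	return 0;
}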
static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;	/* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}
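/*
 * Illustrative sketch of the write-then-wait pattern used throughout the
 * interrupt-remapping functions above: software sets a bit in a command
 * register, then spins on a status register until the hardware
 * acknowledges (IOMMU_WAIT_OP in the real driver, which additionally
 * enforces a timeout and panics on failure). Everything here, including
 * struct fake_hw and the bit names, is a hypothetical stand-in that
 * models the hardware with plain variables so the control flow is
 * visible and runnable.
 */
#include <assert.h>
#include <stdint.h>

struct fake_hw {
	uint32_t gcmd;	/* command register (software writes)  */
	uint32_t gsts;	/* status register (hardware acks here) */
};

#define CMD_ENABLE	(1u << 31)
#define STS_ENABLED	(1u << 31)

/* Stand-in for the hardware reacting to the command write. */
static void hw_tick(struct fake_hw *hw)
{
	if (hw->gcmd & CMD_ENABLE)
		hw->gsts |= STS_ENABLED;
}

static void enable_and_wait(struct fake_hw *hw)
{
	hw->gcmd |= CMD_ENABLE;		/* writel(gcmd, GCMD_REG) */

	/* IOMMU_WAIT_OP analogue: poll until the status bit appears. */
	while (!(hw->gsts & STS_ENABLED))
		hw_tick(hw);		/* real code re-reads GSTS */
}

int main(void)
{
	struct fake_hw hw = { 0, 0 };

	enable_and_wait(&hw);
	assert(hw.gsts & STS_ENABLED);
	return 0;
}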