int modify_irte(int irq, struct irte *irte_modified) { int rc; int index; struct irte *irte; struct intel_iommu *iommu; struct irq_2_iommu *irq_iommu; unsigned long flags; spin_lock_irqsave(&irq_2_ir_lock, flags); irq_iommu = valid_irq_2_iommu(irq); if (!irq_iommu) { spin_unlock_irqrestore(&irq_2_ir_lock, flags); return -1; } iommu = irq_iommu->iommu; index = irq_iommu->irte_index + irq_iommu->sub_handle; irte = &iommu->ir_table->base[index]; set_64bit((unsigned long *)&irte->low, irte_modified->low); set_64bit((unsigned long *)&irte->high, irte_modified->high); __iommu_flush_cache(iommu, irte, sizeof(*irte)); rc = qi_flush_iec(iommu, index, 0); spin_unlock_irqrestore(&irq_2_ir_lock, flags); return rc; }
int free_irte(int irq) { int index, i; struct irte *irte; struct intel_iommu *iommu; struct irq_2_iommu *irq_iommu; spin_lock(&irq_2_ir_lock); irq_iommu = valid_irq_2_iommu(irq); if (!irq_iommu) { spin_unlock(&irq_2_ir_lock); return -1; } iommu = irq_iommu->iommu; index = irq_iommu->irte_index + irq_iommu->sub_handle; irte = &iommu->ir_table->base[index]; if (!irq_iommu->sub_handle) { for (i = 0; i < (1 << irq_iommu->irte_mask); i++) set_64bit((unsigned long *)irte, 0); qi_flush_iec(iommu, index, irq_iommu->irte_mask); } irq_iommu->iommu = NULL; irq_iommu->irte_index = 0; irq_iommu->sub_handle = 0; irq_iommu->irte_mask = 0; spin_unlock(&irq_2_ir_lock); return 0; }
int modify_irte(int irq, struct irte *irte_modified) { int index; struct irte *irte; struct intel_iommu *iommu; struct irq_2_iommu *irq_iommu; spin_lock(&irq_2_ir_lock); irq_iommu = valid_irq_2_iommu(irq); if (!irq_iommu) { spin_unlock(&irq_2_ir_lock); return -1; } iommu = irq_iommu->iommu; index = irq_iommu->irte_index + irq_iommu->sub_handle; irte = &iommu->ir_table->base[index]; set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); __iommu_flush_cache(iommu, irte, sizeof(*irte)); qi_flush_iec(iommu, index, 0); spin_unlock(&irq_2_ir_lock); return 0; }
/*
 * modify_irte - replace the IRTE described by @irq_iommu with
 * @irte_modified and invalidate the hardware's cached copy.
 *
 * When either the old or the new entry has the posted (pst) format, the
 * full 128-bit entry is swapped atomically with cmpxchg_double (where
 * the architecture supports it); otherwise the two 64-bit halves are
 * written separately.  irq_iommu->mode is refreshed from the resulting
 * entry so software bookkeeping tracks the hardware format.
 *
 * Returns the result of qi_flush_iec(), or -1 for a NULL @irq_iommu.
 */
static int modify_irte(struct irq_2_iommu *irq_iommu, struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
		bool ret;

		ret = cmpxchg_double(&irte->low, &irte->high,
				     irte->low, irte->high,
				     irte_modified->low, irte_modified->high);
		/*
		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
		 * and it cannot be updated by the hardware or other processors
		 * behind us, so the return value of cmpxchg16 should be the
		 * same as the old value.
		 */
		WARN_ON(!ret);
	} else
#endif
	{
		/* Non-posted entries tolerate a two-step 64-bit update. */
		set_64bit(&irte->low, irte_modified->low);
		set_64bit(&irte->high, irte_modified->high);
	}
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);

	/* Update iommu mode according to the IRTE mode */
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
/*
 * clear_entries - zero and invalidate the block of IRTEs owned by
 * @irq_iommu.
 *
 * Only the base mapping (sub_handle == 0) owns entries; sub-handles
 * return 0 without touching hardware.  Returns the qi_flush_iec()
 * result for the cleared block.
 */
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct intel_iommu *iommu;
	struct irte *base;
	int index, i, count;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	base = iommu->ir_table->base + index;
	count = 1 << irq_iommu->irte_mask;

	/* Zero both 64-bit halves of every entry in the block. */
	for (i = 0; i < count; i++) {
		set_64bit((unsigned long *)&base[i].low, 0);
		set_64bit((unsigned long *)&base[i].high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
/*
 * flush_irte - invalidate the cached interrupt-entry block for @irq.
 *
 * Returns the result of qi_flush_iec(), or -1 when @irq has no valid
 * interrupt-remapping mapping.
 */
int flush_irte(int irq)
{
	int rc;
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;

	/*
	 * irq_2_ir_lock is acquired irq-safe by the other users
	 * (modify_irte); a plain spin_lock here could deadlock against an
	 * interrupt taken on this CPU while the lock is held.
	 */
	spin_lock_irqsave(&irq_2_ir_lock, flags);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	/* Report the invalidation result instead of discarding it. */
	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}
/*
 * flush_irte - invalidate the cached interrupt-entry block for @irq.
 *
 * Returns the result of qi_flush_iec(), or -1 when @irq has no valid
 * interrupt-remapping mapping.
 */
int flush_irte(int irq)
{
	struct irq_2_iommu *map;
	unsigned long irq_flags;
	int ret = -1;

	spin_lock_irqsave(&irq_2_ir_lock, irq_flags);

	map = valid_irq_2_iommu(irq);
	if (map) {
		struct intel_iommu *iommu = map->iommu;
		int idx = map->irte_index + map->sub_handle;

		ret = qi_flush_iec(iommu, idx, map->irte_mask);
	}

	spin_unlock_irqrestore(&irq_2_ir_lock, irq_flags);

	return ret;
}