int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) { struct irq_cfg *cfg; struct ht_irq_msg msg; unsigned dest; int err; if (disable_apic) return -ENXIO; cfg = irq_cfg(irq); err = assign_irq_vector(irq, cfg, apic->target_cpus()); if (err) return err; err = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(), &dest); if (err) return err; msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); msg.address_lo = HT_IRQ_LOW_BASE | HT_IRQ_LOW_DEST_ID(dest) | HT_IRQ_LOW_VECTOR(cfg->vector) | ((apic->irq_dest_mode == 0) ? HT_IRQ_LOW_DM_PHYSICAL : HT_IRQ_LOW_DM_LOGICAL) | HT_IRQ_LOW_RQEOI_EDGE | ((apic->irq_delivery_mode != dest_LowestPrio) ? HT_IRQ_LOW_MT_FIXED : HT_IRQ_LOW_MT_ARBITRATED) | HT_IRQ_LOW_IRQ_MASKED; write_ht_irq_msg(irq, &msg); irq_set_chip_and_handler_name(irq, &ht_irq_chip, handle_edge_irq, "edge"); dev_dbg(&dev->dev, "irq %d for HT\n", irq); return 0; }
/*
 * Allocate @count consecutive interrupt-remapping table entries (IRTEs)
 * for @irq on @iommu and record the allocation in the irq's
 * irq_2_iommu bookkeeping.
 *
 * Returns the index of the first allocated IRTE, or -1 on failure
 * (zero count, no irq_2_iommu for the irq, mask too large for the
 * hardware, or no free region in the table).
 */
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	/*
	 * irq_2_iommu() returns NULL exactly when irq_cfg() does, so a
	 * non-NULL irq_iommu also guarantees cfg is valid below.
	 */
	if (!count || !irq_iommu)
		return -1;

	/*
	 * Multi-IRTE allocations must be a power-of-two sized block;
	 * mask carries log2 of the block size for the hardware's
	 * invalidation-handle encoding.
	 */
	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	/* Reject blocks larger than the IOMMU advertises support for. */
	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	/* Bitmap search and bookkeeping update must be atomic. */
	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
/*
 * Bind @irq to an already-allocated IRTE at @index/@subhandle on
 * @iommu, marking the irq's configuration as remapped.
 *
 * Returns 0 on success, -1 when the irq has no irq_2_iommu data.
 */
static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
			u16 subhandle)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	struct irq_2_iommu *info = irq_2_iommu(irq);
	unsigned long flags;

	if (!info)
		return -1;

	/* Update the mapping atomically w.r.t. other IRTE bookkeeping. */
	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	info->iommu = iommu;
	info->irte_index = index;
	info->sub_handle = subhandle;
	info->irte_mask = 0;
	cfg->remapped = 1;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
/*
 * Install @new as the sole action for @vector.  Fails with -EBUSY if
 * an action is already installed.
 *
 * NOTE(review): the excerpt visible here is truncated — it ends after
 * clearing the status flags, before the matching
 * spin_unlock_irqrestore() and return statement.  Do not assume the
 * remainder; verify against the full file.
 */
int setup_vector(unsigned int vector, struct irqaction * new)
{
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_descp(vector);
	/* cfg is unused in the visible portion — presumably used later. */
	struct irq_cfg *cfg = irq_cfg(vector);

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	/* Refuse to install over an existing action. */
	if ((old = *p) != NULL) {
		spin_unlock_irqrestore(&desc->lock,flags);
		return -EBUSY;
	}
	*p = new;

	/* Re-enable: zero disable depth, clear stale status flags. */
	desc->depth = 0;
	desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_GUEST);
/*
 * Return the irq_2_iommu bookkeeping embedded in the irq's irq_cfg,
 * or NULL when no irq_cfg exists for @irq.
 */
static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (!cfg)
		return NULL;

	return &cfg->irq_2_iommu;
}