/*
 * irq_host map hook for the m82xx PCI interrupt controller: mark the
 * virq as level sensitive and attach the PCI irq_chip.  No flow
 * handler is installed here.  Always succeeds.
 */
static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip(virq, &m82xx_pci_ic);
	return 0;
}
/*
 * irq_host map hook for the cascaded i8259: hwirq 2 is the internal
 * cascade and is blocked from being requested by drivers; everything
 * is wired up as level triggered for now.  Always succeeds.
 */
static int i8259_host_map(struct irq_host *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);

	/* We block the internal cascade */
	if (hw == 2)
		get_irq_desc(virq)->status |= IRQ_NOREQUEST;

	/* We use the level handler only for now, we might want to
	 * be more cautious here but that works for now
	 */
	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq);
	return 0;
}
/*
 * irq_host map hook for the pq2ads PCI interrupt controller: every
 * source is level sensitive; the host's private data is stashed as
 * chip data for the chip callbacks.  Always succeeds.
 */
static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_data(virq, h->host_data);
	set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq);
	return 0;
}
/*
 * irq_host map hook for the CPM PIC: sources are level sensitive and
 * use the fasteoi flow (acked via the chip's eoi).  Always succeeds.
 */
static int cpm_pic_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
	return 0;
}
/* When an interrupt is being configured, this call allows some flexibilty
 * in deciding which irq_chip structure is used
 */
static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	/* All interrupts are LEVEL sensitive */
	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);

	return 0;
}
/*
 * Record the requested trigger type in the irq descriptor.  The old
 * sense bits (and IRQ_LEVEL) are cleared, the new sense is stored, and
 * IRQ_LEVEL is set for either level polarity.  Always succeeds.
 */
static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type)
{
	struct irq_desc *desc = get_irq_desc(virq);

	/* Replace the previous sense information wholesale. */
	desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
	desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
	if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		desc->status |= IRQ_LEVEL;

	return 0;
}
/*
 * irq_host map hook for tsi108 PCI interrupts.  Only virqs 1-4
 * (PCI INTA..INTD) are handled: each is offset onto the
 * IRQ_PCI_INTAD_BASE range, marked level sensitive and given the
 * tsi108 PCI irq_chip.  Other virqs are silently ignored.
 * Always returns 0.
 *
 * Fix: use standard C99 __func__ instead of the deprecated
 * GCC-specific __FUNCTION__ extension.
 */
static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	unsigned int irq;

	DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);

	/* Only PCI INTA..INTD (virq 1-4) are remapped here. */
	if ((virq >= 1) && (virq <= 4)) {
		irq = virq + IRQ_PCI_INTAD_BASE - 1;
		get_irq_desc(irq)->status |= IRQ_LEVEL;
		set_irq_chip(irq, &tsi108_pci_irq);
	}
	return 0;
}
/*
 * irq_host map hook for the XICS: record the virq<->hwirq mapping in
 * the radix tree for fast reverse lookup, then mark the interrupt as
 * level sensitive with the fasteoi flow.  Always succeeds.
 */
static int xics_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);

	/* Insert the interrupt mapping into the radix tree for fast lookup */
	irq_radix_revmap_insert(xics_host, virq, hw);

	get_irq_desc(virq)->status |= IRQ_LEVEL;
	set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq);
	return 0;
}
/*
 * irq_host map hook for the mv64x60: the level-1 cause group is
 * decoded from the hwirq and selects which irq_chip services this
 * source.  An out-of-range group is a wiring bug, hence BUG_ON.
 * All sources are level sensitive.  Always returns 0.
 */
static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	int group;

	get_irq_desc(virq)->status |= IRQ_LEVEL;

	/* Pick the chip for this hwirq's level-1 cause group. */
	group = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
	BUG_ON(group > MV64x60_LEVEL1_GPP);
	set_irq_chip_and_handler(virq, mv64x60_chips[group], handle_level_irq);

	return 0;
}
/*
 * This is called out of iSeries_fixup to activate interrupt
 * generation for usable slots: every irq whose chip provides a
 * startup hook gets it invoked under the descriptor lock.
 *
 * Fix: declare the parameter list as (void) — an empty () in a C
 * definition is an obsolescent unprototyped declaration.
 */
void __init iSeries_activate_IRQs(void)
{
	int irq;
	unsigned long flags;

	for_each_irq (irq) {
		irq_desc_t *desc = get_irq_desc(irq);

		if (desc && desc->chip && desc->chip->startup) {
			/* Hold the desc lock while poking the chip. */
			spin_lock_irqsave(&desc->lock, flags);
			desc->chip->startup(irq);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
}
/*
 * Configure the trigger type of a CPM2 interrupt source.
 *
 * Updates the irq descriptor's sense/level status bits and flow
 * handler, then programs the edge/level select bit in the SIEXR
 * register for the sources that are actually programmable (external
 * IRQ1-7 and Port C 15-0).  Rising-edge sense is rejected outright.
 * Returns 0 on success, -EINVAL for unsupported combinations.
 */
static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type)
{
	unsigned int src = virq_to_hw(virq);
	struct irq_desc *desc = get_irq_desc(virq);
	unsigned int vold, vnew, edibit;

	/* No explicit request: fall back to the hardware default sense. */
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* Rising edge is not supported by this controller. */
	if (flow_type & IRQ_TYPE_EDGE_RISING) {
		printk(KERN_ERR "CPM2 PIC: sense type 0x%x not supported\n",
		       flow_type);
		return -EINVAL;
	}

	/* Record the new sense and pick the matching flow handler. */
	desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
	desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
	if (flow_type & IRQ_TYPE_LEVEL_LOW) {
		desc->status |= IRQ_LEVEL;
		desc->handle_irq = handle_level_irq;
	} else
		desc->handle_irq = handle_edge_irq;

	/* internal IRQ senses are LEVEL_LOW
	 * EXT IRQ and Port C IRQ senses are programmable
	 */
	if (src >= CPM2_IRQ_EXT1 && src <= CPM2_IRQ_EXT7)
		edibit = (14 - (src - CPM2_IRQ_EXT1));
	else if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
		edibit = (31 - (src - CPM2_IRQ_PORTC15));
	else
		/* Non-programmable source: ok only if level-low was asked. */
		return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;

	/* Read-modify-write the edge select bit for this source. */
	vold = in_be32(&cpm2_intctl->ic_siexr);
	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING)
		vnew = vold | (1 << edibit);
	else
		vnew = vold & ~(1 << edibit);
	/* Avoid a register write when nothing changed. */
	if (vold != vnew)
		out_be32(&cpm2_intctl->ic_siexr, vnew);
	return 0;
}
/*
 * irq_host map hook for the PowerMac pfm interrupt controller.
 *
 * Rejects hwirqs beyond max_irqs, then wires up the irq_chip with a
 * level or edge flow handler depending on 'level'.  Returns 0 on
 * success, -EINVAL for an out-of-range hwirq.
 */
static int pmac_pfm_host_map(struct irq_host *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	struct irq_desc *desc = get_irq_desc(virq);
	int level = 0;

	if (hw >= max_irqs)
		return -EINVAL;

	/* Mark level interrupts, set delayed disable for edge ones and set
	 * handlers
	 */
	// level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f)));
	// if (level)
	// desc->status |= IRQ_LEVEL;
	/* NOTE(review): the level_mask lookup above is commented out, so
	 * 'level' is always 0 and every source gets handle_edge_irq with
	 * IRQ_LEVEL cleared — confirm this is intentional, not a leftover. */
	desc->status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler(virq, &pmac_pfm,
				 level ? handle_level_irq : handle_edge_irq);
	return 0;
}
[0 ... NR_IRQS-1] = { .lock = SPIN_LOCK_UNLOCKED } }; int __irq_offset_value; int ppc_spurious_interrupts; unsigned long lpevent_count; int setup_irq(unsigned int irq, struct irqaction * new) { int shared = 0; unsigned long flags; struct irqaction *old, **p; irq_desc_t *desc = get_irq_desc(irq); /* * Some drivers like serial.c use request_irq() heavily, * so we have to be careful not to interfere with a * running system. */ if (new->flags & SA_SAMPLE_RANDOM) { /* * This function might sleep, we want to call it first, * outside of the atomic block. * Yes, this might clear the entropy pool if the wrong * driver is attempted to be loaded, without actually * installing a new handler, but is this really a problem, * only the sysadmin is able to do this. */