/*
 * xintc_map() - irq_domain map callback for the Xilinx interrupt controller.
 *
 * The domain's host_data smuggles a bitmask of edge-triggered sources:
 * bit @hw set means the line is edge sensitive, otherwise it is level
 * sensitive.  Cast the pointer through uintptr_t first so narrowing it
 * to u32 is explicit and does not trip "cast from pointer to integer of
 * different size" on 64-bit builds.
 */
int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	u32 intr_mask = (u32)(uintptr_t)d->host_data;

	if (intr_mask & (1U << hw)) {
		irq_set_chip_and_handler_name(irq, &intc_dev,
					      handle_edge_irq, "edge");
		irq_clear_status_flags(irq, IRQ_LEVEL);
	} else {
		irq_set_chip_and_handler_name(irq, &intc_dev,
					      handle_level_irq, "level");
		irq_set_status_flags(irq, IRQ_LEVEL);
	}
	return 0;
}
/*
 * sh7372_init_irq() - early interrupt setup for the SH-Mobile sh7372 SoC.
 *
 * Maps the two INTCS register windows, registers the four INTC
 * controller descriptions, then wires a dummy cascade IRQ that demuxes
 * secondary (INTCS) interrupts via the INTEVTSA register, and finally
 * unmasks INTCS.
 *
 * NOTE(review): the ioremap_nocache() return values are not checked;
 * this runs at __init time where a failure would presumably be fatal
 * anyway — confirm.
 */
void __init sh7372_init_irq(void)
{
	void __iomem *intevtsa;
	int n;

	/* 0xffd20000 window; INTEVTSA lives at offset 0x100 within it */
	intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE);
	intevtsa = intcs_ffd2 + 0x100;
	intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE);

	register_intc_controller(&intca_desc);
	register_intc_controller(&intca_irq_pins_lo_desc);
	register_intc_controller(&intca_irq_pins_hi_desc);
	register_intc_controller(&intcs_desc);

	/* setup dummy cascade chip for INTCS */
	n = evt2irq(0xf80);
	irq_alloc_desc_at(n, numa_node_id());
	irq_set_chip_and_handler_name(n, &dummy_irq_chip,
				      handle_level_irq, "level");
	set_irq_flags(n, IRQF_VALID); /* yuck */

	/* demux using INTEVTSA */
	irq_set_handler_data(n, (void *)intevtsa);
	irq_set_chained_handler(n, intcs_demux);

	/* unmask INTCS in INTAMASK */
	iowrite16(0, intcs_ffd2 + 0x104);
}
/*
 * Bind @irq to the SE7206 FPGA irq chip with a level-triggered flow,
 * leaving the line masked until a driver enables it.
 */
static void make_se7206_irq(unsigned int irq)
{
	struct irq_data *data;

	disable_irq_nosync(irq);
	irq_set_chip_and_handler_name(irq, &se7206_irq_chip,
				      handle_level_irq, "level");
	data = irq_get_irq_data(irq);
	disable_se7206_irq(data);
}
__devinit int ce4100_gpio_irq_setup(struct intelce_gpio_chip *c, struct pci_dev *pdev) { int i; int irq; int ret; c->irq_base = irq_alloc_descs(-1, 0, CE4100_PUB_GPIOS_PER_BANK, -1); if (c->irq_base < 0) return c->irq_base; /* mask + ACK all interrupt sources */ intelce_gpio_mmio_write32(0, c->reg_base + CE4100_PUB_GPIO_INT_EN); intelce_gpio_mmio_write32(0xFFF, c->reg_base + CE4100_PUB_GPIO_INT_STAT); ret = request_irq(pdev->irq, ce4100_gpio_irq_handler, IRQF_SHARED, "ce4100_gpio", c); if (ret) goto out_free_desc; /* * This gpio irq controller latches level irqs. Testing shows that if * we unmask & ACK the IRQ before the source of the interrupt is gone * then the interrupt is active again. */ irq = c->irq_base; for (i=0; i < c->chip.ngpio; i++) { irq_set_chip_and_handler_name(irq, &ce4100_irq_chip, handle_fasteoi_irq, "gpio_irq"); irq_set_chip_data(irq, c); irq++; } return 0; out_free_desc: irq_free_descs(c->irq_base, CE4100_PUB_GPIOS_PER_BANK); return ret; }
/* * The shift value is now the number of bits to shift, not the number of * bits/4. This is to make it easier to read the value directly from the * datasheets. The IPR address is calculated using the ipr_offset table. */ void register_ipr_controller(struct ipr_desc *desc) { int i; desc->chip.irq_mask = disable_ipr_irq; desc->chip.irq_unmask = enable_ipr_irq; for (i = 0; i < desc->nr_irqs; i++) { struct ipr_data *p = desc->ipr_data + i; int res; BUG_ON(p->ipr_idx >= desc->nr_offsets); BUG_ON(!desc->ipr_offsets[p->ipr_idx]); res = irq_alloc_desc_at(p->irq, numa_node_id()); if (unlikely(res != p->irq && res != -EEXIST)) { printk(KERN_INFO "can not get irq_desc for %d\n", p->irq); continue; } disable_irq_nosync(p->irq); irq_set_chip_and_handler_name(p->irq, &desc->chip, handle_level_irq, "level"); irq_set_chip_data(p->irq, p); disable_ipr_irq(irq_get_irq_data(p->irq)); } }
/*
 * init_7343se_IRQ() - create virtual IRQs for the SE7343 FPGA sources
 * and cascade them off the four external interrupt pins.
 */
void __init init_7343se_IRQ(void)
{
	int i, irq;

	__raw_writew(0, PA_CPLD_IMSK);
	__raw_writew(0x2000, 0xb03fffec);

	for (i = 0; i < SE7343_FPGA_IRQ_NR; i++) {
		irq = create_irq();
		if (irq < 0)
			return;
		se7343_fpga_irq[i] = irq;
		irq_set_chip_and_handler_name(irq, &se7343_irq_chip,
					      handle_level_irq, "level");
		irq_set_chip_data(irq, (void *)i);
	}

	{
		/* All four pins demux into the same FPGA handler. */
		const int cascades[] = { IRQ0_IRQ, IRQ1_IRQ, IRQ4_IRQ, IRQ5_IRQ };

		for (i = 0; i < 4; i++) {
			irq_set_chained_handler(cascades[i], se7343_irq_demux);
			irq_set_irq_type(cascades[i], IRQ_TYPE_LEVEL_LOW);
		}
	}
}
/* Hook the RTC interrupt to the dummy chip and the timer irqaction. */
void __init init_rtc_irq(void)
{
	irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
				      handle_simple_irq, "RTC");
	setup_irq(RTC_IRQ, &timer_irqaction);
}
/* irq_domain map callback: every baseboard GPIO gets the simple flow. */
static int x3proto_gpio_irq_map(struct irq_domain *domain, unsigned int virq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler_name(virq, &dummy_irq_chip,
				      handle_simple_irq, "gpio");

	return 0;
}
/*
 * x3proto_gpio_setup() - register the baseboard GPIO chip and route its
 * interrupts through the ILSEL key source.
 *
 * One virtual IRQ is created per baseboard GPIO; the whole bank is then
 * chained off the ILSEL-provided upstream interrupt.  On failure the
 * already-created IRQs and the GPIO chip are torn down again.
 *
 * Fixes vs. original: the pr_info() range overstated the last GPIO by
 * one (base + ngpio instead of base + ngpio - 1), and the err_irq path
 * let a successful gpiochip_remove() overwrite the error code in @ret,
 * making the function return 0 on failure.
 *
 * Returns 0 on success or a negative errno.
 */
int __init x3proto_gpio_setup(void)
{
	int ilsel;
	int ret, i;

	ilsel = ilsel_enable(ILSEL_KEY);
	if (unlikely(ilsel < 0))
		return ilsel;

	ret = gpiochip_add(&x3proto_gpio_chip);
	if (unlikely(ret))
		goto err_gpio;

	for (i = 0; i < NR_BASEBOARD_GPIOS; i++) {
		unsigned long flags;
		int irq = create_irq();

		if (unlikely(irq < 0)) {
			ret = -EINVAL;
			goto err_irq;
		}

		spin_lock_irqsave(&x3proto_gpio_lock, flags);
		x3proto_gpio_irq_map[i] = irq;
		irq_set_chip_and_handler_name(irq, &dummy_irq_chip,
					      handle_simple_irq, "gpio");
		spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
	}

	/* GPIO numbers are inclusive: base .. base + ngpio - 1. */
	pr_info("registering '%s' support, handling GPIOs %u -> %u, "
		"bound to IRQ %u\n",
		x3proto_gpio_chip.label, x3proto_gpio_chip.base,
		x3proto_gpio_chip.base + x3proto_gpio_chip.ngpio - 1,
		ilsel);

	irq_set_chained_handler(ilsel, x3proto_gpio_irq_handler);
	irq_set_irq_wake(ilsel, 1);

	return 0;

err_irq:
	for (; i >= 0; --i)
		if (x3proto_gpio_irq_map[i])
			destroy_irq(x3proto_gpio_irq_map[i]);

	/* Don't let a successful removal clobber the original error. */
	if (unlikely(gpiochip_remove(&x3proto_gpio_chip)))
		pr_err("Failed deregistering GPIO\n");

err_gpio:
	synchronize_irq(ilsel);
	ilsel_disable(ILSEL_KEY);

	return ret;
}
static int crystalcove_gpio_probe(struct platform_device *pdev) { int irq = platform_get_irq(pdev, 0); struct crystalcove_gpio *cg = &gpio_info; int retval; int i; int gpio_base, irq_base; struct device *dev = intel_mid_pmic_dev(); mutex_init(&cg->buslock); cg->irq_base = VV_PMIC_GPIO_IRQBASE; cg->chip.label = "intel_crystalcove"; cg->chip.direction_input = crystalcove_gpio_direction_input; cg->chip.direction_output = crystalcove_gpio_direction_output; cg->chip.get = crystalcove_gpio_get; cg->chip.set = crystalcove_gpio_set; cg->chip.to_irq = crystalcove_gpio_to_irq; cg->chip.base = VV_PMIC_GPIO_BASE; cg->chip.ngpio = NUM_GPIO; cg->chip.can_sleep = 1; cg->chip.dev = dev; cg->chip.dbg_show = crystalcove_gpio_dbg_show; retval = gpiochip_add(&cg->chip); if (retval) { pr_warn("crystalcove: add gpio chip error: %d\n", retval); return retval; } irq_base = irq_alloc_descs(cg->irq_base, 0, NUM_GPIO, 0); if (cg->irq_base != irq_base) panic("gpio base irq fail, needs %d, return %d\n", cg->irq_base, irq_base); for (i = 0; i < NUM_GPIO; i++) { pr_err("gpio %x: set handler: %d\n", cg, i + cg->irq_base); irq_set_chip_data(i + cg->irq_base, cg); irq_set_chip_and_handler_name(i + cg->irq_base, &crystalcove_irqchip, handle_simple_irq, "demux"); } retval = request_threaded_irq(irq, NULL, crystalcove_gpio_irq_handler, IRQF_ONESHOT, "crystalcove_gpio", cg); if (retval) { pr_warn("Interrupt request failed\n"); return retval; } retval = sysfs_create_file(&dev->kobj, &platform_hwid_attr.attr); if(retval) pr_warn("%s, sysfs_create_file failed, %d\n", __func__, retval); return 0; }
static void sch_gpio_resume_irqs_deinit(struct sch_gpio *chip, unsigned int num) { int i; for (i = 0; i < num; i++) { irq_set_chip_data(i + chip->irq_base_core, 0); irq_set_chip_and_handler_name(i + chip->irq_base_core, 0, 0, 0); } }
/*
 * irq_domain map callback for the Xtensa MX interrupt distributor.
 * The low HW_IRQ_IPI_COUNT hwirqs are per-CPU IPIs; everything else is
 * delegated to the generic xtensa mapper.
 */
static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
			     irq_hw_number_t hw)
{
	struct irq_chip *chip;

	if (hw >= HW_IRQ_IPI_COUNT)
		return xtensa_irq_map(d, irq, hw);

	chip = d->host_data;
	irq_set_chip_and_handler_name(irq, chip, handle_percpu_irq, "ipi");
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}
/* irq_domain map callback: bind a PL061 virq to the simple flow. */
static int pl061_irq_map(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct pl061_gpio *pl061 = d->host_data;

	irq_set_chip_data(virq, pl061);
	irq_set_chip_and_handler_name(virq, &pl061_irqchip,
				      handle_simple_irq, "pl061");
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}
/*
 * Bind the LPE audio interrupt to its chip with the simple flow.
 * Returns the result of attaching dev_priv as chip data.
 */
static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
	int lpe_irq = dev_priv->lpe_audio.irq;

	WARN_ON(!intel_irqs_enabled(dev_priv));
	irq_set_chip_and_handler_name(lpe_irq, &lpe_audio_irqchip,
				      handle_simple_irq,
				      "hdmi_lpe_audio_irq_handler");

	return irq_set_chip_data(lpe_irq, dev_priv);
}
/* irq_domain map callback: bind a hi6401 virq to the simple flow. */
static int hi6401_irq_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct hi6401_irq *priv = d->host_data;

	irq_set_chip_data(virq, priv);
	irq_set_chip_and_handler_name(virq, &hi6401_irqchip,
				      handle_simple_irq, "hi6401_irq");
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}
/* irq_domain map callback: bind a Langwell GPIO virq to the simple flow. */
static int lnw_gpio_irq_map(struct irq_domain *d, unsigned int virq,
			    irq_hw_number_t hw)
{
	struct lnw_gpio *priv = d->host_data;

	irq_set_chip_data(virq, priv);
	irq_set_chip_and_handler_name(virq, &lnw_irqchip,
				      handle_simple_irq, "demux");
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}
/*
 * sch_gpio_resume_irqs_init() - attach chip data and the simple flow to
 * the first @num resume-well GPIO interrupts.
 */
static void sch_gpio_resume_irqs_init(struct sch_gpio *chip, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int irq = chip->irq_base_resume + i;

		irq_set_chip_data(irq, chip);
		irq_set_chip_and_handler_name(irq, &sch_irq_resume,
					      handle_simple_irq,
					      "sch_gpio_irq_resume");
	}
}
/*
 * init_msc_irqs() - map the MSC01 interrupt controller and hook up the
 * board-supplied irq map.
 *
 * Each map entry selects an edge or level flow and programs the per-irq
 * MSC01_IC_SUP register accordingly (VEIC systems encode the level in
 * hardware, so only the edge bit is written there).
 *
 * Fixes vs. original: the switch had no default case and the final case
 * had no break, leaving an implicit fallthrough hazard if a new im_type
 * were added; unknown entry types are now explicitly skipped.
 *
 * NOTE(review): the loop runs for nirq+1 entries (nirq >= 0) — confirm
 * callers pass the highest index rather than a count.
 */
void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase,
			  msc_irqmap_t *imp, int nirq)
{
	_icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);

	/* MSC01_IC_RST_RST_BIT: reset the interrupt controller */
	MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);

	board_bind_eic_interrupt = &msc_bind_eic_interrupt;

	for (; nirq >= 0; nirq--, imp++) {
		int n = imp->im_irq;

		switch (imp->im_type) {
		case MSC01_IRQ_EDGE:
			irq_set_chip_and_handler_name(irqbase + n,
						      &msc_edgeirq_type,
						      handle_edge_irq,
						      "edge");
			if (cpu_has_veic)
				MSCIC_WRITE(MSC01_IC_SUP+n*8,
					    MSC01_IC_SUP_EDGE_BIT);
			else
				MSCIC_WRITE(MSC01_IC_SUP+n*8,
					    MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
			break;
		case MSC01_IRQ_LEVEL:
			irq_set_chip_and_handler_name(irqbase + n,
						      &msc_levelirq_type,
						      handle_level_irq,
						      "level");
			if (cpu_has_veic)
				MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
			else
				MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);
			break;
		default:
			/* Unknown map entry type: skip it. */
			break;
		}
	}

	irq_base = irqbase;

	MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT);
}
/*
 * Interrupt descriptors are allocated as-needed, but low-numbered ones are
 * reserved by the generic x86 code. So we ignore irq_alloc_desc_at if it
 * tells us the irq is already used: other errors (ie. ENOMEM) we take
 * seriously.
 */
int lguest_setup_irq(unsigned int irq)
{
	/* Returns -ve error or vector number. */
	int err = irq_alloc_desc_at(irq, 0);

	/* -EEXIST just means the descriptor already exists — that's fine. */
	if (err >= 0 || err == -EEXIST) {
		irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
					      handle_level_irq, "level");
		return 0;
	}

	return err;
}
/*
 * arch_setup_dmar_msi() - compose and program the DMAR fault MSI, then
 * hook it to the edge flow.  Returns 0 on success or a negative errno
 * from message composition.
 */
int arch_setup_dmar_msi(unsigned int irq)
{
	struct msi_msg msg;
	int ret = msi_compose_msg(NULL, irq, &msg);

	if (ret < 0)
		return ret;

	dmar_msi_write(irq, &msg);
	irq_set_chip_and_handler_name(irq, &dmar_msi_type,
				      handle_edge_irq, "edge");

	return 0;
}
/*
 * init_ISA_irqs() - initialise the legacy PIC and give every legacy IRQ
 * the PIC's chip with a level flow, named after the chip.
 */
void __init init_ISA_irqs(void)
{
	struct irq_chip *pic_chip = legacy_pic->chip;
	const char *chip_name = pic_chip->name;
	int irq;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	init_bsp_APIC();
#endif
	legacy_pic->init(0);

	for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
		irq_set_chip_and_handler_name(irq, pic_chip,
					      handle_level_irq, chip_name);
}
/*
 * irq_domain map callback for DaVinci GPIOs: stash the register block
 * as chip data and the per-bit mask as handler data.
 */
static int davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	struct davinci_gpio_regs __iomem *regs = gpio2regs(hw);

	irq_set_chip_and_handler_name(irq, &gpio_irqchip,
				      handle_simple_irq, "davinci_gpio");
	irq_set_chip_data(irq, (__force void *)regs);
	irq_set_handler_data(irq, (void *)__gpio_mask(hw));
	irq_set_irq_type(irq, IRQ_TYPE_NONE);

	return 0;
}
/*
 * bcsr_init_irq() - hook up the BCSR interrupt range [csc_start,
 * csc_end] and cascade it off @hook_irq.
 */
void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
{
	unsigned int irq;

	/* mask & enable & ack all */
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSET);
	__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
	wmb();

	bcsr_csc_base = csc_start;

	irq = csc_start;
	while (irq <= (unsigned int)csc_end) {
		irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
					      handle_level_irq, "level");
		irq++;
	}

	irq_set_chained_handler(hook_irq, bcsr_csc_handler);
}
/*
 * init_se7724_IRQ() - create the SE7724 FPGA virtual IRQs at their
 * expected numbers and cascade them off IRQ0/1/2.  Bails out (leaving
 * later IRQs unhooked) if a descriptor cannot be created at the wanted
 * number.
 */
void __init init_se7724_IRQ(void)
{
	int i, nid = cpu_to_node(boot_cpu_data);

	__raw_writew(0xffff, IRQ0_MR);
	__raw_writew(0xffff, IRQ1_MR);
	__raw_writew(0xffff, IRQ2_MR);
	__raw_writew(0x0000, IRQ0_SR);
	__raw_writew(0x0000, IRQ1_SR);
	__raw_writew(0x0000, IRQ2_SR);
	__raw_writew(0x002a, IRQ_MODE);

	for (i = 0; i < SE7724_FPGA_IRQ_NR; i++) {
		int wanted = SE7724_FPGA_IRQ_BASE + i;
		int irq = create_irq_nr(wanted, nid);

		if (unlikely(irq == 0)) {
			pr_err("%s: failed hooking irq %d for FPGA\n",
			       __func__, wanted);
			return;
		}

		if (unlikely(irq != wanted)) {
			pr_err("%s: got irq %d but wanted %d, bailing.\n",
			       __func__, irq, wanted);
			destroy_irq(irq);
			return;
		}

		irq_set_chip_and_handler_name(irq, &se7724_irq_chip,
					      handle_level_irq, "level");
	}

	{
		const int cascades[] = { IRQ0_IRQ, IRQ1_IRQ, IRQ2_IRQ };

		for (i = 0; i < 3; i++) {
			irq_set_chained_handler(cascades[i],
						se7724_irq_demux);
			irq_set_irq_type(cascades[i], IRQ_TYPE_LEVEL_LOW);
		}
	}
}
/*
 * arch_setup_ht_irq() - program a HyperTransport interrupt for @dev.
 *
 * Allocates a CPU vector for @irq, composes the HT irq message
 * (destination id, physical/logical destination mode, edge EOI, fixed
 * vs. arbitrated delivery) and writes it with the line initially masked
 * (HT_IRQ_LOW_IRQ_MASKED), then installs the edge flow handler.
 *
 * Returns 0 on success or a negative errno from vector assignment /
 * apicid lookup; -ENXIO when the APIC is disabled.
 */
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	struct ht_irq_msg msg;
	unsigned dest;
	int err;

	/* HT interrupts are delivered via the APIC. */
	if (disable_apic)
		return -ENXIO;

	cfg = irq_cfg(irq);
	err = assign_irq_vector(irq, cfg, apic->target_cpus());
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain,
					   apic->target_cpus(), &dest);
	if (err)
		return err;

	msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

	/*
	 * irq_dest_mode == 0 selects physical destination mode;
	 * lowest-priority delivery maps to the arbitrated message type.
	 * The line stays masked until explicitly unmasked.
	 */
	msg.address_lo =
		HT_IRQ_LOW_BASE |
		HT_IRQ_LOW_DEST_ID(dest) |
		HT_IRQ_LOW_VECTOR(cfg->vector) |
		((apic->irq_dest_mode == 0) ?
			HT_IRQ_LOW_DM_PHYSICAL :
			HT_IRQ_LOW_DM_LOGICAL) |
		HT_IRQ_LOW_RQEOI_EDGE |
		((apic->irq_delivery_mode != dest_LowestPrio) ?
			HT_IRQ_LOW_MT_FIXED :
			HT_IRQ_LOW_MT_ARBITRATED) |
		HT_IRQ_LOW_IRQ_MASKED;

	write_ht_irq_msg(irq, &msg);

	irq_set_chip_and_handler_name(irq, &ht_irq_chip,
				      handle_edge_irq, "edge");

	dev_dbg(&dev->dev, "irq %d for HT\n", irq);

	return 0;
}
/*
 * arch_enable_uv_irq() - allocate a vector for @irq targeted at @cpu
 * and program the SGI UV hub MMR that routes the interrupt.
 *
 * Builds a uv_IO_APIC_route_entry (unmasked, edge trigger, polarity 0)
 * in a scratch unsigned long and writes it to @mmr_offset on the hub of
 * @mmr_blade.  When @limit is UV_AFFINITY_CPU the irq is pinned
 * (IRQ_NO_BALANCING), otherwise it may be migrated in process context
 * (IRQ_MOVE_PCNTXT).
 *
 * Returns @irq on success or a negative errno from vector assignment.
 */
static int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu,
			      int mmr_blade, unsigned long mmr_offset,
			      int limit)
{
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode, err;

	/* The route entry must exactly overlay the 64-bit MMR value. */
	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
		     sizeof(unsigned long));

	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

	if (limit == UV_AFFINITY_CPU)
		irq_set_status_flags(irq, IRQ_NO_BALANCING);
	else
		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	entry->vector = cfg->vector;
	entry->delivery_mode = apic->irq_delivery_mode;
	entry->dest_mode = apic->irq_dest_mode;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	return irq;
}
/** * arch_setup_msi_irq() - Set up an MSI for Endpoint * @pdev: Pointer to PCI device structure of requesting EP * @desc: Pointer to MSI descriptor data * * Assigns an MSI to endpoint and sets up corresponding irq. Also passes the MSI * information to the endpont. */ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) { int ret, irq; struct msi_msg msg; if (msi_irq < 0) { printk(KERN_ERR "PCI: MSI irq pin not specified\n"); return msi_irq; } ret = get_free_msi(); if (ret < 0) { printk(KERN_ERR "PCI: Failed to get free MSI\n"); } else { irq = msi_irq_base + ret; #if defined(MSI_SUPPORT_1_DEV_1_BIT) msg.data = ret; #elif defined(MSI_SUPPORT_1_DEV_32_BIT) msg.data = (ret&0x7)<<5; #endif dynamic_irq_init(irq); ret = irq_set_msi_desc(irq, desc); if (!ret) { msg.address_hi = MSI_LOCAL_UPPER_ADDR; msg.address_lo = MSI_LOCAL_LOWER_ADDR; #if defined(MSI_SUPPORT_1_DEV_1_BIT) printk(KERN_ERR "PCI:MSI %d @%x:%x, irq = %d\n", msg.data, msg.address_hi, msg.address_lo, irq); #elif defined(MSI_SUPPORT_1_DEV_32_BIT) printk(KERN_ERR "PCI:MSI#%d[0-31] @%x:%x, irq = %d\n", (msg.data>>5)&0x7, msg.address_hi, msg.address_lo, irq); #endif write_msi_msg(irq, &msg); irq_set_chip_and_handler_name(irq, &balong_msi_chip, handle_level_irq,NULL); set_irq_flags(irq, IRQF_VALID); } }
static unsigned int sun4c_build_device_irq(struct platform_device *op, unsigned int real_irq) { unsigned int irq; if (real_irq >= 16) { prom_printf("Bogus sun4c IRQ %u\n", real_irq); prom_halt(); } irq = irq_alloc(real_irq, real_irq); if (irq) { unsigned long mask = 0UL; switch (real_irq) { case 1: mask = SUN4C_INT_E1; break; case 8: mask = SUN4C_INT_E8; break; case 10: mask = SUN4C_INT_E10; break; case 14: mask = SUN4C_INT_E14; break; default: /* All the rest are either always enabled, * or are for signalling software interrupts. */ break; } irq_set_chip_and_handler_name(irq, &sun4c_irq, handle_level_irq, "level"); irq_set_chip_data(irq, (void *)mask); } return irq; }
static int sparc64_setup_msi_irq(unsigned int *irq_p, struct pci_dev *pdev, struct msi_desc *entry) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; const struct sparc64_msiq_ops *ops = pbm->msi_ops; struct msi_msg msg; int msi, err; u32 msiqid; *irq_p = irq_alloc(0, 0); err = -ENOMEM; if (!*irq_p) goto out_err; irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq, "MSI"); err = alloc_msi(pbm); if (unlikely(err < 0)) goto out_irq_free; msi = err; msiqid = pick_msiq(pbm); err = ops->msi_setup(pbm, msiqid, msi, (entry->msi_attrib.is_64 ? 1 : 0)); if (err) goto out_msi_free; pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p; if (entry->msi_attrib.is_64) { msg.address_hi = pbm->msi64_start >> 32; msg.address_lo = pbm->msi64_start & 0xffffffff; } else {
static unsigned int sun4m_build_device_irq(struct platform_device *op, unsigned int real_irq) { struct sun4m_handler_data *handler_data; unsigned int irq; unsigned int pil; if (real_irq >= OBP_INT_LEVEL_VME) { prom_printf("Bogus sun4m IRQ %u\n", real_irq); prom_halt(); } pil = (real_irq & 0xf); irq = irq_alloc(real_irq, pil); if (irq == 0) goto out; handler_data = irq_get_handler_data(irq); if (unlikely(handler_data)) goto out; handler_data = kzalloc(sizeof(struct sun4m_handler_data), GFP_ATOMIC); if (unlikely(!handler_data)) { prom_printf("IRQ: kzalloc(sun4m_handler_data) failed.\n"); prom_halt(); } handler_data->mask = sun4m_imask[real_irq]; handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD; irq_set_chip_and_handler_name(irq, &sun4m_irq, handle_level_irq, "level"); irq_set_handler_data(irq, handler_data); out: return irq; }