/*
 * Register the external UART platform device, resolving its two IRQ
 * lines through the expansion-board interrupt domain first.
 */
static int __init mxc_init_extuart(void)
{
	unsigned int virq_a, virq_b;

	virq_a = irq_find_mapping(domain, EXPIO_INT_XUART_INTA);
	virq_b = irq_find_mapping(domain, EXPIO_INT_XUART_INTB);

	serial_platform_data[0].irq = virq_a;
	serial_platform_data[1].irq = virq_b;

	return platform_device_register(&serial_device);
}
/*
 * Threaded IRQ handler for the Arizona codec.
 *
 * Resumes the device, dispatches the AOD (always-on domain) and main
 * nested IRQ domains, then — if a level-detect GPIO is available —
 * keeps looping while the IRQ line is still asserted, since the parent
 * interrupt controller may not re-trigger for us.
 */
static irqreturn_t arizona_irq_thread(int irq, void *data)
{
	struct arizona *arizona = data;
	bool poll;
	unsigned int val, nest_irq;
	int ret;

	/* The chip must be resumed before its IRQ registers are readable. */
	ret = pm_runtime_get_sync(arizona->dev);
	if (ret < 0) {
		dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
		return IRQ_NONE;
	}

	do {
		poll = false;

		/* AOD domain is hwirq 0 in the chip's virtual IRQ domain. */
		if (arizona->aod_irq_chip)
			handle_nested_irq(irq_find_mapping(arizona->virq, 0));

		if (arizona->irq_chip) {
			/*
			 * Check if one of the main interrupts is asserted and
			 * only check that domain if it is.
			 */
			ret = regmap_read(arizona->regmap,
					  ARIZONA_IRQ_PIN_STATUS, &val);
			if (ret == 0 && val & ARIZONA_IRQ1_STS) {
				nest_irq = irq_find_mapping(arizona->virq, 1);
				handle_nested_irq(nest_irq);
			} else if (ret != 0) {
				dev_err(arizona->dev,
					"Failed to read main IRQ status: %d\n",
					ret);
			}
		}

		/*
		 * Poll the IRQ pin status to see if we're really done
		 * if the interrupt controller can't do it for us.
		 */
		if (!arizona->pdata.irq_gpio) {
			break;
		} else if (arizona->pdata.irq_flags & IRQF_TRIGGER_RISING &&
			   gpio_get_value_cansleep(arizona->pdata.irq_gpio)) {
			/* Rising-edge IRQ and the line is still high. */
			poll = true;
		} else if (arizona->pdata.irq_flags & IRQF_TRIGGER_FALLING &&
			   !gpio_get_value_cansleep(arizona->pdata.irq_gpio)) {
			/* Falling-edge IRQ and the line is still low. */
			poll = true;
		}
	} while (poll);

	pm_runtime_mark_last_busy(arizona->dev);
	pm_runtime_put_autosuspend(arizona->dev);

	return IRQ_HANDLED;
}
/*
 * Register the on-board CS8900 Ethernet device, pointing its IRQ
 * resource at the virq mapped from the expansion-board domain.
 */
static void __init mxc_init_ext_ethernet(void)
{
	unsigned int virq = irq_find_mapping(domain, EXPIO_INT_ENET_INT);

	mx31ads_cs8900_resources[1].start = virq;
	mx31ads_cs8900_resources[1].end = virq;

	platform_device_register_full(
		(struct platform_device_info *)&mx31ads_cs8900_devinfo);
}
/*
 * Resolve the FPGA interrupt mappings for the UARTs and USB block,
 * then register the board's platform devices.
 */
static int __init sh7343se_devices_setup(void)
{
	struct irq_domain *d = se7343_irq_domain;
	unsigned int usb_virq;

	/* Wire-up dynamic vectors */
	serial_platform_data[0].irq = irq_find_mapping(d, SE7343_FPGA_IRQ_UARTA);
	serial_platform_data[1].irq = irq_find_mapping(d, SE7343_FPGA_IRQ_UARTB);

	usb_virq = irq_find_mapping(d, SE7343_FPGA_IRQ_USB);
	usb_resources[2].start = usb_virq;
	usb_resources[2].end = usb_virq;

	return platform_add_devices(sh7343se_platform_devices,
				    ARRAY_SIZE(sh7343se_platform_devices));
}
/*
 * Chained handler for the AT91 PIO banks.  Walks every pending bit in
 * ISR & IMR and dispatches the mapped virq for each; follows the
 * ->next link so that banks sharing one parent IRQ (e.g. PIOC/D/E on
 * sam9263) are all serviced from this single entry.
 */
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata);
	void __iomem *pio = at91_gpio->regbase;
	unsigned long isr;
	int n;

	chained_irq_enter(chip, desc);
	for (;;) {
		/* Reading ISR acks pending (edge triggered) GPIO interrupts.
		 * When there none are pending, we're finished unless we need
		 * to process multiple banks (like ID_PIOCDE on sam9263).
		 */
		isr = __raw_readl(pio + PIO_ISR) & __raw_readl(pio + PIO_IMR);
		if (!isr) {
			/* Advance to the next chained bank, if any. */
			if (!at91_gpio->next)
				break;
			at91_gpio = at91_gpio->next;
			pio = at91_gpio->regbase;
			continue;
		}

		/* Dispatch every set bit of this bank's snapshot. */
		n = find_first_bit(&isr, BITS_PER_LONG);
		while (n < BITS_PER_LONG) {
			generic_handle_irq(irq_find_mapping(at91_gpio->domain, n));
			n = find_next_bit(&isr, BITS_PER_LONG, n + 1);
		}
	}
	chained_irq_exit(chip, desc);
	/* now it may re-trigger */
}
/*
 * Drain all outstanding OPAL events.
 *
 * Dispatches each pending event bit (highest hwirq first) through the
 * opal_event_irqchip domain, then re-polls OPAL and loops again if new
 * masked-in events arrived in the meantime.
 */
void opal_handle_events(void)
{
	__be64 events = 0;
	u64 e;

	/* Snapshot the outstanding events, restricted to enabled ones. */
	e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
again:
	while (e) {
		int virq, hwirq;

		/* Service the highest-numbered pending event first. */
		hwirq = fls64(e) - 1;
		e &= ~BIT_ULL(hwirq);

		/* generic_handle_irq() must run with IRQs disabled and in
		 * (fake) hard-IRQ context, hence irq_enter()/irq_exit(). */
		local_irq_disable();
		virq = irq_find_mapping(opal_event_irqchip.domain, hwirq);
		if (virq) {
			irq_enter();
			generic_handle_irq(virq);
			irq_exit();
		}
		local_irq_enable();

		cond_resched();
	}
	last_outstanding_events = 0;

	/* Re-poll: more events may have been raised while we dispatched. */
	if (opal_poll_events(&events) != OPAL_SUCCESS)
		return;
	e = be64_to_cpu(events) & opal_event_irqchip.mask;
	if (e)
		goto again;
}
static int altera_gpio_remove(struct platform_device *pdev) { unsigned int irq, i; int status; struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev); status = gpiochip_remove(&altera_gc->mmchip.gc); if (status < 0) return status; if (altera_gc->irq) { irq_dispose_mapping(altera_gc->hwirq); for (i = 0; i < altera_gc->mmchip.gc.ngpio; i++) { irq = irq_find_mapping(altera_gc->irq, i); if (irq > 0) irq_dispose_mapping(irq); } irq_domain_remove(altera_gc->irq); } irq_set_handler_data(altera_gc->hwirq, NULL); irq_set_chained_handler(altera_gc->hwirq, NULL); devm_kfree(&pdev->dev, altera_gc); return -EIO; }
/*
 * Chained IRQ handler for the Intel MID GPIO controller.
 *
 * Scans each 32-pin register bank's edge-detect status (GEDR),
 * acknowledging and dispatching every pending pin until the bank reads
 * clean, then EOIs the parent interrupt.
 */
static void intel_mid_irq_handler(struct irq_desc *desc)
{
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
	struct intel_mid_gpio *priv = gpiochip_get_data(gc);
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, gpio, mask;
	unsigned long pending;
	void __iomem *gedr;

	/* check GPIO controller to check which pin triggered the interrupt */
	for (base = 0; base < priv->chip.ngpio; base += 32) {
		gedr = gpio_reg(&priv->chip, base, GEDR);
		/* Re-read until clean: new edges may arrive mid-loop. */
		while ((pending = readl(gedr))) {
			gpio = __ffs(pending);
			mask = BIT(gpio);
			/* Clear before handling so we can't lose an edge */
			writel(mask, gedr);
			generic_handle_irq(irq_find_mapping(gc->irqdomain,
							    base + gpio));
		}
	}

	chip->irq_eoi(data);
}
/*
 * Low-level IRQ entry for the VT8500 interrupt controllers: query each
 * active controller's highest-priority register and dispatch the
 * mapped virq.
 */
static asmlinkage void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
{
	u32 i;

	/* Loop through each active controller */
	for (i = 0; i < active_cnt; i++) {
		void __iomem *base = intc[i].base;
		int hwirq = readl_relaxed(base) & 0x3F;
		int virq;

		/*
		 * The Highest Priority register defaults to 63 when idle,
		 * so a reading of 63 is only a real interrupt if the
		 * corresponding status bit confirms it.
		 */
		if (hwirq == 63) {
			u32 stat = readl_relaxed(base + VT8500_ICIS + 4);

			if (!(stat & BIT(31)))
				continue;
		}

		virq = irq_find_mapping(intc[i].domain, hwirq);
		handle_IRQ(virq, regs);
	}
}
/*
 * Mask an S3C interrupt source.  If this source lives under a parent
 * (main) interrupt and every sibling sub-source is now masked, the
 * parent interrupt is masked too, recursively via this same function.
 */
static void s3c_irq_mask(struct irq_data *data)
{
	struct s3c_irq_data *irq_data = irq_data_get_irq_chip_data(data);
	struct s3c_irq_intc *intc = irq_data->intc;
	struct s3c_irq_intc *parent_intc = intc->parent;
	struct s3c_irq_data *parent_data;
	unsigned long mask;
	unsigned int irqno;

	/* Set this source's bit in the controller's mask register. */
	mask = __raw_readl(intc->reg_mask);
	mask |= (1UL << irq_data->offset);
	__raw_writel(mask, intc->reg_mask);

	if (parent_intc) {
		parent_data = &parent_intc->irqs[irq_data->parent_irq];

		/* check to see if we need to mask the parent IRQ
		 * The parent_irq is always in main_intc, so the hwirq
		 * for find_mapping does not need an offset in any case.
		 */
		if ((mask & parent_data->sub_bits) ==
			parent_data->sub_bits) {
			irqno = irq_find_mapping(parent_intc->domain,
					irq_data->parent_irq);
			/* Recurse: mask the parent through its own chip. */
			s3c_irq_mask(irq_get_irq_data(irqno));
		}
	}
}
/*
 * Chained handler for the Exynos IRQ combiner: read this combiner
 * group's status under the controller lock, translate the first
 * pending bit to its combiner hwirq and dispatch the mapped virq.
 * An unmapped interrupt is reported as a bad IRQ.
 */
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	/* Status register access is serialized against other CPUs. */
	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	/* Only the lowest pending bit is handled per invocation. */
	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);

	if (unlikely(!cascade_irq))
		do_bad_IRQ(irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	/* Without domain support, hwirq IS the linux irq number. */
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) { struct crystalcove_gpio *cg = data; unsigned int p0, p1; int pending; int gpio; unsigned int virq; if (regmap_read(cg->regmap, GPIO0IRQ, &p0) || regmap_read(cg->regmap, GPIO1IRQ, &p1)) return IRQ_NONE; regmap_write(cg->regmap, GPIO0IRQ, p0); regmap_write(cg->regmap, GPIO1IRQ, p1); pending = p0 | p1 << 8; for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) { if (pending & BIT(gpio)) { virq = irq_find_mapping(cg->chip.irqdomain, gpio); handle_nested_irq(virq); } } return IRQ_HANDLED; }
int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) { int err; if (!mv88e6352_port_has_serdes(chip, port)) return 0; chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ); if (chip->ports[port].serdes_irq < 0) { dev_err(chip->dev, "Unable to map SERDES irq: %d\n", chip->ports[port].serdes_irq); return chip->ports[port].serdes_irq; } /* Requesting the IRQ will trigger irq callbacks. So we cannot * hold the reg_lock. */ mutex_unlock(&chip->reg_lock); err = request_threaded_irq(chip->ports[port].serdes_irq, NULL, mv88e6352_serdes_thread_fn, IRQF_ONESHOT, "mv88e6xxx-serdes", &chip->ports[port]); mutex_lock(&chip->reg_lock); if (err) { dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n", err); return err; } return mv88e6352_serdes_irq_enable(chip); }
/**
 * amdgpu_irq_dispatch - dispatch irq to IP blocks
 *
 * @adev: amdgpu device pointer
 * @entry: interrupt vector
 *
 * Dispatches the irq to the different IP blocks.  Sources that were
 * exposed through the IRQ domain (virq[] set) are routed through the
 * generic IRQ layer; everything else goes to the registered source's
 * ->process() callback.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry)
{
	unsigned src_id = entry->src_id;
	struct amdgpu_irq_src *src;
	int r;

	/* Guard against a malformed IV entry. */
	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
		return;
	}

	if (adev->irq.virq[src_id]) {
		/* Source was exported as a Linux IRQ; use the domain. */
		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
	} else {
		src = adev->irq.sources[src_id];
		if (!src) {
			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
			return;
		}

		r = src->funcs->process(adev, src, entry);
		if (r)
			DRM_ERROR("error processing interrupt (%d)\n", r);
	}
}
/*
 * Allocate @nr_irqs MSI interrupts starting at @virq in @domain.
 *
 * Fails with -EEXIST if the hwirq is already mapped; otherwise
 * allocates in the parent domain and runs the per-irq msi_init hook.
 * On msi_init failure, previously initialised entries are unwound and
 * the parent allocation is released.
 *
 * BUG FIX: the unwind loop was "for (i--; i > 0; i--)", which stopped
 * before index 0 and therefore never called msi_free() for the first
 * successfully initialised interrupt, leaking its msi_init state.
 * The loop now runs down to and including index 0 ("i >= 0").
 */
static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	/* Refuse to double-map an already-mapped hwirq. */
	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				/* Unwind every entry initialised so far. */
				for (i--; i >= 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}
/* Translate a Nomadik GPIO offset to its Linux IRQ number. */
static int nmk_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct nmk_gpio_chip *nchip;

	nchip = container_of(chip, struct nmk_gpio_chip, chip);
	return irq_find_mapping(nchip->domain, offset);
}
/*
 * Chained demux handler for an S3C sub-interrupt controller: dispatch
 * every pending, unmasked sub-source that belongs to this parent
 * interrupt.
 */
static void s3c_irq_demux(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
	struct s3c_irq_intc *intc = irq_data->intc;
	struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
	unsigned int n, offset, irq;
	unsigned long src, msk;

	/* we're using individual domains for the non-dt case
	 * and one big domain for the dt case where the subintc
	 * starts at hwirq number 32.
	 */
	offset = irq_domain_get_of_node(intc->domain) ? 32 : 0;

	chained_irq_enter(chip, desc);

	/* Pending, unmasked bits limited to this parent's sub-sources. */
	src = __raw_readl(sub_intc->reg_pending);
	msk = __raw_readl(sub_intc->reg_mask);

	src &= ~msk;
	src &= irq_data->sub_bits;

	while (src) {
		n = __ffs(src);
		src &= ~(1 << n);
		irq = irq_find_mapping(sub_intc->domain, offset + n);
		generic_handle_irq(irq);
	}

	chained_irq_exit(chip, desc);
}
/*
 * Cascade handler for the ARC IDU: convert the incoming core IRQ
 * number to an IDU hwirq and dispatch its mapped virq.
 */
static void idu_cascade_isr(unsigned int core_irq, struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	unsigned int hwirq = core_irq - idu_first_irq;

	generic_handle_irq(irq_find_mapping(idu_domain, hwirq));
}
/**
 * acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
 * @gsi: GSI IRQ number
 */
void acpi_unregister_gsi(u32 gsi)
{
	struct irq_domain *d;

	d = irq_find_matching_fwnode(acpi_gsi_domain_id, DOMAIN_BUS_ANY);
	irq_dispose_mapping(irq_find_mapping(d, gsi));
}
/* Translate an SCI GPIO offset to its Linux IRQ number. */
static int sci_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct sci_gpio_chip *sci_gpio = to_sci_gpio(chip);

	return irq_find_mapping(sci_gpio->irq_domain, offset);
}
/*
 * Cascade handler for the ARC IDU: translate the parent's core hwirq
 * into an IDU hwirq and dispatch the corresponding mapped virq.
 */
static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	irq_hw_number_t core_hwirq;

	core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	generic_handle_irq(irq_find_mapping(domain,
					    core_hwirq - idu_first_hwirq));
}
/*
 * Initialize the White Rabbit RTU driver: locate the FPGA IRQ domain,
 * register the misc device, map the RTU register block, hook up the
 * interrupt and enable it.
 *
 * Returns 0 on success, a negative errno otherwise.
 *
 * FIXES vs. the original:
 *  - irq_find_mapping() result was passed to request_irq() unchecked;
 *    it returns 0 when no mapping exists, so we now fail with -EINVAL
 *    instead of requesting IRQ 0.
 *  - "Cant'" typo in the request_irq error message.
 *  - Error unwinding consolidated into a goto chain so the two failure
 *    paths cannot drift apart.
 */
static int __init wr_rtu_init(void)
{
	struct irq_domain *irqdomain;
	int err;
	int irq;

	irqdomain = irq_find_host((struct device_node *)irqdomain_name);
	if (!irqdomain) {
		pr_err("pstat: The IRQ domain %s does not exist\n",
		       irqdomain_name);
		return -EINVAL;
	}

	/* Register the misc device. */
	err = misc_register(&wr_rtu_misc);
	if (err < 0) {
		printk(KERN_ERR "%s: Can't register misc device\n",
		       KBUILD_MODNAME);
		return err;
	}

	/* Map the RTU register block. */
	regs = ioremap(FPGA_BASE_RTU, sizeof(struct RTU_WB));
	if (!regs) {
		err = -ENOMEM;
		goto err_deregister;
	}

	/* Register the interrupt handler with interrupts disabled. */
	wr_rtu_disable_irq();
	irq = irq_find_mapping(irqdomain, WR_RTU_IRQ);
	if (!irq) {
		/* irq_find_mapping() returns 0 when no mapping exists. */
		printk(KERN_ERR "%s: no IRQ mapping in domain %s\n",
		       KBUILD_MODNAME, irqdomain_name);
		err = -EINVAL;
		goto err_unmap;
	}

	err = request_irq(irq, wr_rtu_interrupt, IRQF_SHARED, "wr-rtu",
			  (void *)regs);
	if (err) {
		printk(KERN_ERR "%s: Can't request IRQ, error %i\n",
		       KBUILD_MODNAME, err);
		goto err_unmap;
	}
	wr_rtu_enable_irq();

	/* Init wait queue */
	init_waitqueue_head(&dev.q);

	printk(KERN_INFO "%s: initialized\n", KBUILD_MODNAME);
	return 0;

err_unmap:
	iounmap(regs);
err_deregister:
	misc_deregister(&wr_rtu_misc);
	return err;
}
/*
 * Translate an SSB GPIO number to its Linux IRQ; only native SSB
 * buses carry a GPIO IRQ domain.
 */
static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
	struct ssb_bus *bus = ssb_gpio_get_bus(chip);

	if (bus->bustype != SSB_BUSTYPE_SSB)
		return -EINVAL;

	return irq_find_mapping(bus->irq_domain, gpio);
}
static void mx25_tsadc_irq_handler(struct irq_desc *desc) { struct mx25_tsadc *tsadc = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); u32 status; chained_irq_enter(chip, desc); regmap_read(tsadc->regs, MX25_TSC_TGSR, &status); if (status & MX25_TGSR_GCQ_INT) generic_handle_irq(irq_find_mapping(tsadc->domain, 1)); if (status & MX25_TGSR_TCQ_INT) generic_handle_irq(irq_find_mapping(tsadc->domain, 0)); chained_irq_exit(chip, desc); }
/*
 * Translate a BCMA GPIO number to its Linux IRQ; the GPIO IRQ domain
 * only exists for SoC-hosted buses.
 */
static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
	struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);

	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
		return -EINVAL;

	return irq_find_mapping(cc->irq_domain, gpio);
}
/*
 * Translate a Linux IRQ number back to the SCI GPIO offset.
 *
 * NOTE(review): this assumes the domain's virqs were allocated
 * contiguously starting at the mapping for hwirq 0 — confirm against
 * the domain setup.
 */
static int sci_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
{
	struct sci_gpio_chip *sci_gpio = to_sci_gpio(chip);
	int first_virq = irq_find_mapping(sci_gpio->irq_domain, 0);

	return irq - first_virq;
}
/*
 * Low-level IRQ entry for the SiRFSoC interrupt controller: read the
 * pending hwirq ID from the controller and dispatch its mapped virq.
 */
static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
{
	void __iomem *regbase = sirfsoc_irqdomain->host_data;
	u32 hwirq;

	hwirq = readl_relaxed(regbase + SIRFSOC_INIT_IRQ_ID) & 0xff;
	handle_IRQ(irq_find_mapping(sirfsoc_irqdomain, hwirq), regs);
}
/*
 * Threaded IRQ handler for the MAX77693 PMIC.
 *
 * Reads the top-level interrupt source register, then the per-group
 * status registers for every asserted source, applies the cached
 * masks and dispatches a nested virq for each pending interrupt.
 *
 * NOTE(review): the return values of the three conditional group
 * reads (and the MUIC bulk read) are assigned to ret but never
 * checked — presumably a deliberate best-effort, but worth confirming.
 */
static irqreturn_t max77693_irq_thread(int irq, void *data)
{
	struct max77693_dev *max77693 = data;
	u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
	u8 irq_src;
	int ret;
	int i, cur_irq;

	ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
				&irq_src);
	if (ret < 0) {
		dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
				ret);
		return IRQ_NONE;
	}

	if (irq_src & MAX77693_IRQSRC_CHG)
		/* CHG_INT */
		ret = max77693_read_reg(max77693->regmap,
				MAX77693_CHG_REG_CHG_INT, &irq_reg[CHG_INT]);

	if (irq_src & MAX77693_IRQSRC_TOP)
		/* TOPSYS_INT */
		ret = max77693_read_reg(max77693->regmap,
			MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);

	if (irq_src & MAX77693_IRQSRC_FLASH)
		/* LED_INT */
		ret = max77693_read_reg(max77693->regmap,
				MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);

	if (irq_src & MAX77693_IRQSRC_MUIC)
		/* MUIC INT1 ~ INT3 */
		max77693_bulk_read(max77693->regmap_muic,
			MAX77693_MUIC_REG_INT1, MAX77693_NUM_IRQ_MUIC_REGS,
			&irq_reg[MUIC_INT1]);

	/* Apply masking */
	for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
		/* MUIC groups use direct masks, the rest inverted masks. */
		if (i >= MUIC_INT1 && i <= MUIC_INT3)
			irq_reg[i] &= max77693->irq_masks_cur[i];
		else
			irq_reg[i] &= ~max77693->irq_masks_cur[i];
	}

	/* Report */
	for (i = 0; i < MAX77693_IRQ_NR; i++) {
		if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
			cur_irq = irq_find_mapping(max77693->irq_domain, i);
			if (cur_irq)
				handle_nested_irq(cur_irq);
		}
	}

	return IRQ_HANDLED;
}
/*
 * Chained handler for the Ralink INTC: dispatch the lowest pending
 * status bit, or record a spurious interrupt when nothing is pending.
 */
static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain;
	u32 pending = rt_intc_r32(INTC_REG_STATUS0);

	if (!pending) {
		spurious_interrupt();
		return;
	}

	domain = irq_desc_get_handler_data(desc);
	generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
}