static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip) { unsigned int first_irq; int i; first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base); for (i = first_irq; i < first_irq + nmk_chip->chip.ngpio; i++) { irq_set_chip_and_handler(i, &nmk_gpio_irq_chip, handle_edge_irq); set_irq_flags(i, IRQF_VALID); irq_set_chip_data(i, nmk_chip); irq_set_irq_type(i, IRQ_TYPE_EDGE_FALLING); } irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler); irq_set_handler_data(nmk_chip->parent_irq, nmk_chip); if (nmk_chip->secondary_parent_irq >= 0) { irq_set_chained_handler(nmk_chip->secondary_parent_irq, nmk_gpio_secondary_irq_handler); irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip); } return 0; }
/*
 * Hook the PSC autovector interrupts (levels 3-6) to the shared
 * psc_irq dispatcher.  The handler data encodes the PSC register
 * offset for the level (0x30/0x40/0x50/0x60).
 */
void __init psc_register_interrupts(void)
{
	/*
	 * Set the handler data before chaining the handler: an interrupt
	 * may be delivered the moment the chained handler is installed,
	 * and psc_irq reads the register offset from the handler data.
	 */
	irq_set_handler_data(IRQ_AUTO_3, (void *)0x30);
	irq_set_chained_handler(IRQ_AUTO_3, psc_irq);
	irq_set_handler_data(IRQ_AUTO_4, (void *)0x40);
	irq_set_chained_handler(IRQ_AUTO_4, psc_irq);
	irq_set_handler_data(IRQ_AUTO_5, (void *)0x50);
	irq_set_chained_handler(IRQ_AUTO_5, psc_irq);
	irq_set_handler_data(IRQ_AUTO_6, (void *)0x60);
	irq_set_chained_handler(IRQ_AUTO_6, psc_irq);
}
/*
 * Chain the GPIO bank's parent (and optional secondary parent)
 * interrupt to the nmk demux handlers.
 *
 * Returns 0 (cannot currently fail).
 */
static int nmk_gpio_init_irq(struct nmk_gpio_chip *nmk_chip)
{
	/*
	 * Install the handler data before the chained handler: the parent
	 * interrupt can fire the moment the handler is registered, and the
	 * demux handler dereferences the data.
	 */
	irq_set_handler_data(nmk_chip->parent_irq, nmk_chip);
	irq_set_chained_handler(nmk_chip->parent_irq, nmk_gpio_irq_handler);

	if (nmk_chip->secondary_parent_irq >= 0) {
		irq_set_handler_data(nmk_chip->secondary_parent_irq, nmk_chip);
		irq_set_chained_handler(nmk_chip->secondary_parent_irq,
					nmk_gpio_secondary_irq_handler);
	}

	return 0;
}
/*
 * Configure the ALIVE interrupt block: register a level-triggered
 * handler for each enabled source, then chain the ALIVE parent
 * interrupt to alive_handler (chip and chip data for the parent were
 * registered in gic_init).
 */
static void __init alive_init(void __iomem *base, unsigned int irq_start,
			      u32 irq_sources, u32 resume_sources)
{
	int parent = IRQ_PHY_CLKPWR_ALIVEIRQ + GIC_PHY_OFFSET;
	int count = IRQ_ALIVE_END - IRQ_ALIVE_START;
	int bit = 0;

	printk(KERN_INFO "ALIVE @%p: start %3d, mask 0x%08x (alive %d, num %d)\n",
	       base, irq_start, irq_sources, parent, count);

	/* per-source setup: chip data, level handler, valid/probe flags */
	for (bit = 0; bit < count; bit++) {
		int irq;

		if (!(irq_sources & (1 << bit)))
			continue;

		irq = irq_start + bit;
		irq_set_chip_data(irq, base);
		irq_set_chip_and_handler(irq, &alive_chip, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	/* register the cascade's data, then chain the demux handler */
	irq_set_handler_data(parent, base);
	irq_set_chained_handler(parent, alive_handler);
}
/**
 * s3c_init_vic_timer_irq() - initialise timer irq chained off VIC.
 * @num: Number of timers to initialize
 * @timer_irq: Base IRQ number to be used for the timers.
 *
 * Register the necessary IRQ chaining and support for the timer IRQs
 * chained off the VIC.
 */
void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq)
{
	unsigned int pirq[5] = { IRQ_TIMER0_VIC, IRQ_TIMER1_VIC, IRQ_TIMER2_VIC,
				 IRQ_TIMER3_VIC, IRQ_TIMER4_VIC };
	struct irq_chip_generic *s3c_tgc;
	struct irq_chip_type *ct;
	unsigned int i;

	s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
					 S3C64XX_TINT_CSTAT, handle_level_irq);

	if (!s3c_tgc) {
		pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n",
		       __func__, timer_irq);
		return;
	}

	ct = s3c_tgc->chip_types;
	ct->chip.irq_mask = irq_gc_mask_clr_bit;
	ct->chip.irq_unmask = irq_gc_mask_set_bit;
	ct->chip.irq_ack = s3c_irq_timer_ack;
	irq_setup_generic_chip(s3c_tgc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
	/* Clear the upper bits of the mask_cache */
	s3c_tgc->mask_cache &= 0x1f;

	for (i = 0; i < num; i++, timer_irq++) {
		/*
		 * Set the handler data before chaining the handler: the
		 * VIC line may fire as soon as the chained handler is
		 * installed, and the demux handler reads the target irq
		 * number from the handler data.
		 */
		irq_set_handler_data(pirq[i], (void *)timer_irq);
		irq_set_chained_handler(pirq[i], s3c_irq_demux_vic_timer);
	}
}
static int altera_gpio_remove(struct platform_device *pdev) { unsigned int irq, i; int status; struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev); status = gpiochip_remove(&altera_gc->mmchip.gc); if (status < 0) return status; if (altera_gc->irq) { irq_dispose_mapping(altera_gc->hwirq); for (i = 0; i < altera_gc->mmchip.gc.ngpio; i++) { irq = irq_find_mapping(altera_gc->irq, i); if (irq > 0) irq_dispose_mapping(irq); } irq_domain_remove(altera_gc->irq); } irq_set_handler_data(altera_gc->hwirq, NULL); irq_set_chained_handler(altera_gc->hwirq, NULL); devm_kfree(&pdev->dev, altera_gc); return -EIO; }
/*
 * Tear down the cs75xx GPIO driver: quiesce each bank's interrupt
 * hardware, strip the per-pin irq descriptors, detach the cascade
 * handlers, unregister from gpiolib and park the pin mux.
 *
 * Always returns 0.
 */
static int cs75xx_gpio_remove(struct platform_device *pdev)
{
	int i, j;

	gpio_dbgmsg("Function: %s\n", __func__);

	/* disable irq and deregister to gpiolib */
	for (i = 0; i < GPIO_BANK_NUM; i++) {
		/* disable, unmask and clear all interrupts */
		__raw_writel(0x0, cs75xx_gpio_base[i] + CS75XX_GPIO_IE);
		__raw_writel(~0x0, cs75xx_gpio_base[i] + CS75XX_GPIO_INT);

		/* strip chip, handler and flags from every pin irq of the bank */
		for (j = GPIO_IRQ_BASE + i * GPIO_BANK_SIZE;
		     j < GPIO_IRQ_BASE + (i + 1) * GPIO_BANK_SIZE; j++) {
			irq_set_chip(j, NULL);
			irq_set_handler(j, NULL);
			set_irq_flags(j, 0);
		}

		/* detach the bank's cascade: handler first, then its data */
		irq_set_chained_handler(cs75xx_irq_gpio[i], NULL);
		irq_set_handler_data(cs75xx_irq_gpio[i], NULL);
	}

	/* NOTE(review): BUG_ON on gpiochip_remove() failure is drastic — confirm intent */
	BUG_ON(gpiochip_remove(&cs75xx_gpio_chip));

	for (i = 0; i < GPIO_BANK_NUM; i++)
		__raw_writel(0x0, cs75xx_global_base + CS75XX_GPIO_MUX_0 + i*4);	// disable valid gpio pin

	return 0;
}
/*
 * Wire up chained interrupt handling for one UART block.  A generic
 * irq chip drives the four per-UART sub-interrupts through the
 * UINTM/UINTP registers; the block's parent interrupt is then chained
 * to the s3c_irq_demux_uart demultiplexer.
 */
static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
{
	void __iomem *regs = uirq->regs;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	/* start with every sub-interrupt masked */
	__raw_writel(0xf, regs + S3C64XX_UINTM);

	gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, regs,
				    handle_level_irq);

	if (!gc) {
		pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
		       __func__, uirq->base_irq);
		return;
	}

	ct = gc->chip_types;
	ct->chip.irq_ack = irq_gc_ack_set_bit;
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_mask_ack = irq_gc_mask_and_ack_set;
	ct->chip.irq_disable = irq_gc_mask_and_ack_set;
	ct->regs.ack = S3C64XX_UINTP;
	ct->regs.mask = S3C64XX_UINTM;
	irq_setup_generic_chip(gc, IRQ_MSK(4), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);

	/* data first, then chain the parent to the demux handler */
	irq_set_handler_data(uirq->parent_irq, uirq);
	irq_set_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
}
static int mx25_tsadc_setup_irq(struct platform_device *pdev, struct mx25_tsadc *tsadc) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; int irq; irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(dev, "Failed to get irq\n"); return irq; } tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops, tsadc); if (!tsadc->domain) { dev_err(dev, "Failed to add irq domain\n"); return -ENOMEM; } irq_set_chained_handler(irq, mx25_tsadc_irq_handler); irq_set_handler_data(irq, tsadc); return 0; }
/*
 * Find every platform open-pic controller in the device tree, bring
 * it up and chain its cascade interrupt to cell_mpic_cascade.
 */
static void __init mpic_init_IRQ(void)
{
	struct device_node *node = NULL;

	while ((node = of_find_node_by_name(node, "interrupt-controller"))) {
		struct mpic *mpic;
		unsigned int cascade;

		if (!of_device_is_compatible(node, "CBEA,platform-open-pic"))
			continue;

		/* The MPIC driver will get everything it needs from the
		 * device-tree, just pass 0 to all arguments
		 */
		mpic = mpic_alloc(node, 0, 0, 0, 0, " MPIC ");
		if (mpic == NULL)
			continue;
		mpic_init(mpic);

		cascade = irq_of_parse_and_map(node, 0);
		if (cascade == NO_IRQ)
			continue;

		printk(KERN_INFO "%s : hooking up to IRQ %d\n",
		       node->full_name, cascade);

		/* data in place before the cascade handler is chained */
		irq_set_handler_data(cascade, mpic);
		irq_set_chained_handler(cascade, cell_mpic_cascade);
	}
}
/*
 * Attach one combiner to its upstream interrupt line.  The handler
 * data must be installed successfully before the cascade handler is
 * chained, since the handler dereferences it.
 */
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	int ret;

	ret = irq_set_handler_data(irq, combiner_data);
	if (ret)
		BUG();

	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}
/*
 * Initialise the sh7372 interrupt controllers and hang INTCS off a
 * dummy cascade interrupt that is demultiplexed via the INTEVTSA
 * register.
 */
void __init sh7372_init_irq(void)
{
	void __iomem *intevtsa;
	int cascade;

	intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE);
	intevtsa = intcs_ffd2 + 0x100;
	intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE);

	register_intc_controller(&intca_desc);
	register_intc_controller(&intca_irq_pins_lo_desc);
	register_intc_controller(&intca_irq_pins_hi_desc);
	register_intc_controller(&intcs_desc);

	/* INTCS hangs off a dummy chip so the cascade can be chained */
	cascade = evt2irq(0xf80);
	irq_alloc_desc_at(cascade, numa_node_id());
	irq_set_chip_and_handler_name(cascade, &dummy_irq_chip,
				      handle_level_irq, "level");
	set_irq_flags(cascade, IRQF_VALID); /* yuck */

	/* demultiplex INTCS events by reading INTEVTSA */
	irq_set_handler_data(cascade, (void *)intevtsa);
	irq_set_chained_handler(cascade, intcs_demux);

	/* unmask INTCS in INTAMASK */
	iowrite16(0, intcs_ffd2 + 0x104);
}
/*
 * Chain combiner @combiner_nr onto parent interrupt @irq.  The
 * combiner index is validated and the handler data is installed
 * before the cascade handler goes live.
 */
static void __init combiner_cascade_irq(unsigned int combiner_nr,
					unsigned int irq)
{
	if (combiner_nr >= MAX_COMBINER_NR)
		BUG();

	/* the cascade handler needs its data in place before chaining */
	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]))
		BUG();

	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}
/*
 * Chain GIC instance @gic_nr onto parent interrupt @irq.
 */
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();

	/* handler data must be in place before the cascade is chained */
	if (irq_set_handler_data(irq, &gic_data[gic_nr]))
		BUG();

	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
/*
 * Probe the mpc8xxx GPIO controller: register the memory-mapped
 * gpiochip, then (when an interrupt line is described in the device
 * tree) create the irq domain and chain the parent interrupt to the
 * cascade handler.
 *
 * Returns 0 on success or a negative errno.  A missing interrupt or
 * irq domain is not fatal: the chip still works as plain GPIO.
 */
static int mpc8xxx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;
	const struct of_device_id *id;
	int ret;

	mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
	if (!mpc8xxx_gc)
		return -ENOMEM;

	platform_set_drvdata(pdev, mpc8xxx_gc);

	spin_lock_init(&mpc8xxx_gc->lock);

	mm_gc = &mpc8xxx_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
	gc->ngpio = MPC8XXX_GPIO_PINS;
	gc->direction_input = mpc8xxx_gpio_dir_in;
	/* mpc5121 and mpc8572 have variant register semantics for these ops */
	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio")
		? mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio")
		? mpc8572_gpio_get : mpc8xxx_gpio_get;
	gc->set = mpc8xxx_gpio_set;
	gc->set_multiple = mpc8xxx_gpio_set_multiple;
	gc->to_irq = mpc8xxx_gpio_to_irq;

	ret = of_mm_gpiochip_add(np, mm_gc);
	if (ret)
		return ret;

	/* no parent interrupt described: usable as GPIO only */
	mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0);
	if (mpc8xxx_gc->irqn == NO_IRQ)
		return 0;

	mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
						&mpc8xxx_gpio_irq_ops,
						mpc8xxx_gc);
	/* domain creation failed: degrade to GPIO-only operation */
	if (!mpc8xxx_gc->irq)
		return 0;

	id = of_match_node(mpc8xxx_gpio_ids, np);
	if (id)
		mpc8xxx_gc->of_dev_id_data = id->data;

	/* ack and mask all irqs */
	out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
	out_be32(mm_gc->regs + GPIO_IMR, 0);

	/* cascade data goes in before the chained handler can run */
	irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc);
	irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade);

	return 0;
}
/*
 * Bring up the sh7367 interrupt controllers and chain the INTCS
 * cascade, demultiplexed through the memory-mapped INTEVTSA register.
 */
void __init sh7367_init_irq(void)
{
	void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
	int cascade_irq = evt2irq(0xf80);

	register_intc_controller(&intca_desc);
	register_intc_controller(&intcs_desc);

	/* demux using INTEVTSA; data installed before the chain handler */
	irq_set_handler_data(cascade_irq, (void *)intevtsa);
	irq_set_chained_handler(cascade_irq, intcs_demux);
}
/*
 * Bring up the sh7377 interrupt controllers and chain the INTCS
 * cascade interrupt, demultiplexed via the INTEVTSA register.
 */
void __init sh7377_init_irq(void)
{
	void __iomem *intevtsa = ioremap_nocache(INTEVTSA, PAGE_SIZE);
	int cascade_irq = evt2irq(INTCS_INTVECT);

	register_intc_controller(&intca_desc);
	register_intc_controller(&intca_irq_pins_desc);
	register_intc_controller(&intcs_desc);

	/* handler data first, then chain the INTCS demultiplexer */
	irq_set_handler_data(cascade_irq, (void *)intevtsa);
	irq_set_chained_handler(cascade_irq, intcs_demux);
}
struct pm_irq_chip * pm8xxx_irq_init(struct device *dev, const struct pm8xxx_irq_platform_data *pdata) { struct pm_irq_chip *chip; int devirq, rc; unsigned int pmirq; if (!pdata) { pr_err("No platform data\n"); return ERR_PTR(-EINVAL); } devirq = pdata->devirq; if (devirq < 0) { pr_err("missing devirq\n"); rc = devirq; return ERR_PTR(-EINVAL); } chip = kzalloc(sizeof(struct pm_irq_chip) + sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL); if (!chip) { pr_err("Cannot alloc pm_irq_chip struct\n"); return ERR_PTR(-EINVAL); } chip->dev = dev; chip->devirq = devirq; chip->irq_base = pdata->irq_base; chip->num_irqs = pdata->irq_cdata.nirqs; chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8); chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8); spin_lock_init(&chip->pm_irq_lock); for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) { irq_set_chip_and_handler(chip->irq_base + pmirq, &pm8xxx_irq_chip, handle_level_irq); irq_set_chip_data(chip->irq_base + pmirq, chip); #ifdef CONFIG_ARM set_irq_flags(chip->irq_base + pmirq, IRQF_VALID); #else irq_set_noprobe(chip->irq_base + pmirq); #endif } irq_set_irq_type(devirq, pdata->irq_trigger_flag); irq_set_handler_data(devirq, chip); irq_set_chained_handler(devirq, pm8xxx_irq_handler); set_irq_wake(devirq, 1); return chip; }
/*
 * Set up the external interrupt (EINT) lines.  EINT0..15 each arrive
 * on their own parent interrupt; EINT16..31 share a single parent
 * that is demultiplexed in software.
 *
 * Returns 0 on success, -ENOMEM if the EINT registers cannot be mapped.
 */
static int __init exynos_init_irq_eint(void)
{
	int n;

	exynos_eint_base = ioremap(soc_is_exynos5250() ?
				   EXYNOS5_PA_GPIO1 : EXYNOS4_PA_GPIO2, SZ_4K);

	if (exynos_eint_base == NULL) {
		pr_err("unable to ioremap for EINT base address\n");
		return -ENOMEM;
	}

	for (n = 0; n <= 31; n++) {
		irq_set_chip_and_handler(IRQ_EINT(n), &exynos_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(n), IRQF_VALID);
	}

	irq_set_chained_handler(EXYNOS_IRQ_EINT16_31,
				exynos_irq_demux_eint16_31);

	for (n = 0; n <= 15; n++) {
		unsigned int src_irq;

		eint0_15_data[n] = IRQ_EINT(n);

		src_irq = soc_is_exynos5250() ?
			exynos5_eint0_15_src_int[n] :
			exynos4_eint0_15_src_int[n];
		/* data goes in before the per-line cascade handler */
		irq_set_handler_data(src_irq, &eint0_15_data[n]);
		irq_set_chained_handler(src_irq, exynos_irq_eint0_15);
	}

	return 0;
}
/*
 * Undo mpc8xxx_probe: detach the irq cascade, tear down the irq
 * domain and unregister the memory-mapped gpiochip.
 *
 * Always returns 0.
 */
static int mpc8xxx_remove(struct platform_device *pdev)
{
	struct mpc8xxx_gpio_chip *mpc8xxx_gc = platform_get_drvdata(pdev);

	if (mpc8xxx_gc->irq) {
		/*
		 * Remove the chained handler BEFORE clearing its data so
		 * the cascade can never run against a NULL pointer.
		 */
		irq_set_chained_handler(mpc8xxx_gc->irqn, NULL);
		irq_set_handler_data(mpc8xxx_gc->irqn, NULL);
		irq_domain_remove(mpc8xxx_gc->irq);
	}

	of_mm_gpiochip_remove(&mpc8xxx_gc->mm_gc);

	return 0;
}
/*
 * irq_domain .map callback: configure one GPIO interrupt descriptor.
 * The chip data carries the bank's register block, the handler data
 * the per-pin mask within that bank.
 */
static int davinci_gpio_irq_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	struct davinci_gpio_regs __iomem *regs = gpio2regs(hw);

	irq_set_chip_and_handler_name(irq, &gpio_irqchip, handle_simple_irq,
				      "davinci_gpio");
	irq_set_irq_type(irq, IRQ_TYPE_NONE);
	irq_set_chip_data(irq, (__force void *)regs);
	irq_set_handler_data(irq, (void *)__gpio_mask(hw));

	return 0;
}
/*
 * Default .msi_init for an MSI irq domain: bind the hwirq/chip pair
 * to the new virq and, when the domain info supplies one, install the
 * flow handler plus its optional handler data.
 */
static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);

	/* nothing more to do unless a named flow handler was supplied */
	if (!info->handler || !info->handler_name)
		return 0;

	__irq_set_handler(virq, info->handler, 0, info->handler_name);
	if (info->handler_data)
		irq_set_handler_data(virq, info->handler_data);

	return 0;
}
/*
 * Cascading
 */
static void __init xgold_irq_cascade_irq(unsigned irq, void *chipdata,
					 unsigned index)
{
	struct xgold_irq_chip_data *data = chipdata;

	pr_debug("%s: cascading %d irq\n", __func__, irq);

	if (irq_set_handler_data(irq, data) != 0)
		BUG();

	/* lines owned by VLX/VMM must not be chained by the kernel */
	if (data->virq && data->virq[index])
		pr_debug("%s: Don't chain irq%d as it's VLX/VMM interrupt - index:%d\n",
			 __func__, irq, index);
	else
		irq_set_chained_handler(irq, xgold_irq_handle_cascade_irq);
}
/*
 * Hook every GPIO of @gpiochip up to @irqchip as a level interrupt
 * and chain the bank's parent @irq to the shared gpio_muxed_handler
 * demultiplexer.
 */
void __init gpio_irq_init(int irq, struct gpio_chip *gpiochip,
			  struct irq_chip *irqchip)
{
	int n = gpiochip->to_irq(gpiochip, 0);
	int irqend = n + gpiochip->ngpio;

	/*
	 * Set the handler data before chaining the handler: the parent
	 * interrupt may fire as soon as the chained handler is installed,
	 * and gpio_muxed_handler dereferences the gpio_chip pointer.
	 */
	irq_set_handler_data(irq, gpiochip);
	irq_set_chained_handler(irq, gpio_muxed_handler);

	for (; n < irqend; n++) {
		irq_set_chip_and_handler(n, irqchip, handle_level_irq);
		irq_set_chip_data(n, gpiochip);
		set_irq_flags(n, IRQF_VALID);
	}
}
/*
 * Chain combiner @combiner_nr to its parent @irq.  The number of
 * valid combiners differs between exynos4 and exynos5250.
 */
static void __init combiner_cascade_irq(unsigned int combiner_nr,
					unsigned int irq)
{
	unsigned int limit = soc_is_exynos5250() ?
		EXYNOS5_MAX_COMBINER_NR : EXYNOS4_MAX_COMBINER_NR;

	if (combiner_nr >= limit)
		BUG();

	/* the cascade handler needs its data in place before chaining */
	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]))
		BUG();

	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}
/*
 * Probe the Ralink interrupt controller from the device tree: map its
 * registers, mask everything, create the irq domain and chain the
 * MIPS parent interrupt to the demux handler.
 *
 * Returns 0 on success; unrecoverable setup failures panic.
 */
static int __init intc_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct resource res;
	struct irq_domain *domain;
	int irq;

	if (!of_property_read_u32_array(node, "ralink,intc-registers",
					rt_intc_regs, 6))
		pr_info("intc: using register map from devicetree\n");

	irq = irq_of_parse_and_map(node, 0);
	if (!irq)
		panic("Failed to get INTC IRQ");

	if (of_address_to_resource(node, 0, &res))
		panic("Failed to get intc memory range");

	/*
	 * request_mem_region() returns a pointer, so the old "< 0" check
	 * could never trigger; treat NULL as the failure case.
	 */
	if (!request_mem_region(res.start, resource_size(&res), res.name))
		pr_err("Failed to request intc memory");

	rt_intc_membase = ioremap_nocache(res.start, resource_size(&res));
	if (!rt_intc_membase)
		panic("Failed to remap intc memory");

	/* disable all interrupts */
	rt_intc_w32(~0, INTC_REG_DISABLE);

	/* route all INTC interrupts to MIPS HW0 interrupt */
	rt_intc_w32(0, INTC_REG_TYPE);

	domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
				       RALINK_INTC_IRQ_BASE, 0,
				       &irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add irqdomain");

	rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);

	/*
	 * Set the handler data before chaining the handler: the parent
	 * interrupt may fire the moment the handler is installed, and the
	 * demux handler dereferences the domain pointer.
	 */
	irq_set_handler_data(irq, domain);
	irq_set_chained_handler(irq, ralink_intc_irq_handler);

	/* tell the kernel which irq is used for performance monitoring */
	rt_perfcount_irq = irq_create_mapping(domain, 9);

	return 0;
}
/*
 * Initialise the sh7372 interrupt controllers and chain the INTCS
 * cascade interrupt, demultiplexed via the INTEVTSA register.
 */
void __init sh7372_init_irq(void)
{
	void __iomem *intevtsa;
	int cascade_irq;

	intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE);
	intevtsa = intcs_ffd2 + 0x100;
	intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE);

	register_intc_controller(&intca_desc);
	register_intc_controller(&intca_irq_pins_desc);
	register_intc_controller(&intcs_desc);

	/* handler data first, then chain the INTCS demultiplexer */
	cascade_irq = evt2irq(0xf80);
	irq_set_handler_data(cascade_irq, (void *)intevtsa);
	irq_set_chained_handler(cascade_irq, intcs_demux);
}
/*
 * Undo tc6393xb_attach_irq: unhook the chained parent handler, then
 * strip flags, chip and chip data from every sub-interrupt.
 */
static void tc6393xb_detach_irq(struct platform_device *dev)
{
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	unsigned int n;

	/* detach the cascade first so no sub-irq demux can run */
	irq_set_chained_handler(tc6393xb->irq, NULL);
	irq_set_handler_data(tc6393xb->irq, NULL);

	for (n = tc6393xb->irq_base;
	     n < tc6393xb->irq_base + TC6393XB_NR_IRQS; n++) {
		set_irq_flags(n, 0);
		irq_set_chip(n, NULL);
		irq_set_chip_data(n, NULL);
	}
}
/*
 * Register one shared-irq block: set up its child interrupts and
 * chain the parent interrupt to the shirq demultiplexer.
 */
static void __init spear_shirq_register(struct spear_shirq *shirq)
{
	int i;

	if (shirq->invalid_irq)
		return;

	/*
	 * Install the handler data and configure the child interrupts
	 * BEFORE chaining the parent handler: the parent may fire as soon
	 * as the chained handler is registered, and shirq_handler both
	 * dereferences the data and dispatches to the children.
	 */
	irq_set_handler_data(shirq->irq, shirq);

	for (i = 0; i < shirq->irq_nr; i++) {
		irq_set_chip_and_handler(shirq->irq_base + i,
					 &shirq_chip, handle_simple_irq);
		set_irq_flags(shirq->irq_base + i, IRQF_VALID);
		irq_set_chip_data(shirq->irq_base + i, shirq);
	}

	irq_set_chained_handler(shirq->irq, shirq_handler);
}
static void tc6393xb_attach_irq(struct platform_device *dev) { struct tc6393xb *tc6393xb = platform_get_drvdata(dev); unsigned int irq, irq_base; irq_base = tc6393xb->irq_base; for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) { irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq); irq_set_chip_data(irq, tc6393xb); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING); irq_set_handler_data(tc6393xb->irq, tc6393xb); irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq); }