static int platform_msic_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct intel_msic_gpio_pdata *pdata = dev_get_platdata(dev); struct msic_gpio *mg; int irq = platform_get_irq(pdev, 0); int retval; int i; if (irq < 0) { dev_err(dev, "no IRQ line\n"); return -EINVAL; } if (!pdata || !pdata->gpio_base) { dev_err(dev, "incorrect or missing platform data\n"); return -EINVAL; } mg = kzalloc(sizeof(*mg), GFP_KERNEL); if (!mg) return -ENOMEM; dev_set_drvdata(dev, mg); mg->pdev = pdev; mg->irq = irq; mg->irq_base = pdata->gpio_base + MSIC_GPIO_IRQ_OFFSET; mg->chip.label = "msic_gpio"; mg->chip.direction_input = msic_gpio_direction_input; mg->chip.direction_output = msic_gpio_direction_output; mg->chip.get = msic_gpio_get; mg->chip.set = msic_gpio_set; mg->chip.to_irq = msic_gpio_to_irq; mg->chip.base = pdata->gpio_base; mg->chip.ngpio = MSIC_NUM_GPIO; mg->chip.can_sleep = true; mg->chip.parent = dev; mutex_init(&mg->buslock); retval = gpiochip_add(&mg->chip); if (retval) { dev_err(dev, "Adding MSIC gpio chip failed\n"); goto err; } for (i = 0; i < mg->chip.ngpio; i++) { irq_set_chip_data(i + mg->irq_base, mg); irq_set_chip_and_handler(i + mg->irq_base, &msic_irqchip, handle_simple_irq); } irq_set_chained_handler_and_data(mg->irq, msic_gpio_irq_handler, mg); return 0; err: kfree(mg); return retval; }
/*
 * idu_of_init() - wire up the ARC MCIP IDU (Interrupt Distribution Unit)
 * behind the per-core interrupt controller, based on DeviceTree.
 *
 * Creates a linear irq domain sized by the number of interrupts listed
 * in the DT node, installs idu_cascade_isr() as the chained first-level
 * handler on every parent (core-intc) uplink, and enables the IDU.
 *
 * Always returns 0; absence of IDU hardware is fatal (panic).
 */
static int __init idu_of_init(struct device_node *intc,
			      struct device_node *parent)
{
	struct irq_domain *domain;
	/* Read IDU BCR to confirm nr_irqs */
	int nr_irqs = of_irq_count(intc);
	int i, irq;

	/* DT claims an IDU the hardware doesn't have: unrecoverable */
	if (!idu_detected)
		panic("IDU not detected, but DeviceTree using it");

	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);

	/*
	 * NOTE(review): irq_domain_add_linear() can return NULL; the result
	 * is passed unchecked as handler data below — confirm a failure here
	 * is impossible or add a check.
	 */
	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */
	for (i = 0; i < nr_irqs; i++) {
		/*
		 * Return parent uplink IRQs (towards core intc) 24,25,.....
		 * this step has been done before already
		 * however we need it to get the parent virq and set IDU handler
		 * as first level isr
		 */
		irq = irq_of_parse_and_map(intc, i);

		/* Remember the first uplink virq for later offset math */
		if (!i)
			idu_first_irq = irq;

		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
/*
 * intc_of_init() - set up the Ralink SoC interrupt controller from DT.
 *
 * Maps the INTC register window, masks everything, routes all INTC
 * interrupts to MIPS HW0, registers a legacy irq domain and chains the
 * parent interrupt to ralink_intc_irq_handler(). Also records the virq
 * used for performance monitoring (hw irq 9).
 *
 * Returns 0; unrecoverable setup failures panic.
 */
static int __init intc_of_init(struct device_node *node,
			       struct device_node *parent)
{
	struct resource res;
	struct irq_domain *domain;
	int irq;

	if (!of_property_read_u32_array(node, "ralink,intc-registers",
					rt_intc_regs, 6))
		pr_info("intc: using register map from devicetree\n");

	irq = irq_of_parse_and_map(node, 0);
	if (!irq)
		panic("Failed to get INTC IRQ");

	if (of_address_to_resource(node, 0, &res))
		panic("Failed to get intc memory range");

	/*
	 * request_mem_region() returns a pointer (NULL on failure), so the
	 * previous "< 0" comparison could never fire; test for NULL instead.
	 */
	if (!request_mem_region(res.start, resource_size(&res), res.name))
		pr_err("Failed to request intc memory");

	rt_intc_membase = ioremap_nocache(res.start, resource_size(&res));
	if (!rt_intc_membase)
		panic("Failed to remap intc memory");

	/* disable all interrupts */
	rt_intc_w32(~0, INTC_REG_DISABLE);

	/* route all INTC interrupts to MIPS HW0 interrupt */
	rt_intc_w32(0, INTC_REG_TYPE);

	domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
				       RALINK_INTC_IRQ_BASE, 0,
				       &irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add irqdomain");

	rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);

	irq_set_chained_handler_and_data(irq, ralink_intc_irq_handler, domain);

	/* tell the kernel which irq is used for performance monitoring */
	rt_perfcount_irq = irq_create_mapping(domain, 9);

	return 0;
}
static int jz4740_adc_remove(struct platform_device *pdev) { struct jz4740_adc *adc = platform_get_drvdata(pdev); mfd_remove_devices(&pdev->dev); irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0); kfree(adc->gc); irq_set_chained_handler_and_data(adc->irq, NULL, NULL); iounmap(adc->base); release_mem_region(adc->mem->start, resource_size(adc->mem)); clk_put(adc->clk); return 0; }
/*
 * spear_shirq_register() - hook one shared-IRQ block behind its parent.
 *
 * Installs shirq_handler() as the chained handler on @parent_irq and
 * sets up each of the block's virtual interrupts with the block's
 * irq_chip, a simple-IRQ flow handler and @shirq as chip data.
 * Blocks without an irq_chip are skipped entirely.
 */
static void __init spear_shirq_register(struct spear_shirq *shirq,
					int parent_irq)
{
	int offs;

	if (!shirq->irq_chip)
		return;

	irq_set_chained_handler_and_data(parent_irq, shirq_handler, shirq);

	for (offs = 0; offs < shirq->nr_irqs; offs++) {
		int virq = shirq->virq_base + offs;

		irq_set_chip_and_handler(virq, shirq->irq_chip,
					 handle_simple_irq);
		set_irq_flags(virq, IRQF_VALID);
		irq_set_chip_data(virq, shirq);
	}
}
/*
 * idu_of_init() - probe the ARC MCIP IDU from its BCRs and hook it
 * behind the core interrupt controller.
 *
 * Reads the MCIP/IDU build-config registers to learn how many common
 * interrupts exist, creates a linear irq domain for them, masks each
 * common interrupt, and chains idu_cascade_isr() on every parent
 * (core-intc) uplink virq. Finally enables the IDU.
 *
 * Always returns 0; DT referencing a missing IDU is fatal (panic).
 */
static int __init idu_of_init(struct device_node *intc,
			      struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);
	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	/* Number of common irqs comes from the IDU build-config register */
	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	/*
	 * NOTE(review): irq_domain_add_linear() can return NULL; the result
	 * is passed unchecked as handler data below — verify this cannot
	 * fail here or add a check.
	 */
	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */
	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Return parent uplink IRQs (towards core intc) 24,25,.....
		 * this step has been done before already
		 * however we need it to get the parent virq and set IDU handler
		 * as first level isr
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);

		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
static int jz4740_adc_probe(struct platform_device *pdev) { struct irq_chip_generic *gc; struct irq_chip_type *ct; struct jz4740_adc *adc; struct resource *mem_base; int ret; int irq_base; adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL); if (!adc) { dev_err(&pdev->dev, "Failed to allocate driver structure\n"); return -ENOMEM; } adc->irq = platform_get_irq(pdev, 0); if (adc->irq < 0) { ret = adc->irq; dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); return ret; } irq_base = platform_get_irq(pdev, 1); if (irq_base < 0) { dev_err(&pdev->dev, "Failed to get irq base: %d\n", irq_base); return irq_base; } mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem_base) { dev_err(&pdev->dev, "Failed to get platform mmio resource\n"); return -ENOENT; } /* Only request the shared registers for the MFD driver */ adc->mem = request_mem_region(mem_base->start, JZ_REG_ADC_STATUS, pdev->name); if (!adc->mem) { dev_err(&pdev->dev, "Failed to request mmio memory region\n"); return -EBUSY; } adc->base = ioremap_nocache(adc->mem->start, resource_size(adc->mem)); if (!adc->base) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); goto err_release_mem_region; } adc->clk = clk_get(&pdev->dev, "adc"); if (IS_ERR(adc->clk)) { ret = PTR_ERR(adc->clk); dev_err(&pdev->dev, "Failed to get clock: %d\n", ret); goto err_iounmap; } spin_lock_init(&adc->lock); atomic_set(&adc->clk_ref, 0); platform_set_drvdata(pdev, adc); gc = irq_alloc_generic_chip("INTC", 1, irq_base, adc->base, handle_level_irq); ct = gc->chip_types; ct->regs.mask = JZ_REG_ADC_CTRL; ct->regs.ack = JZ_REG_ADC_STATUS; ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct->chip.irq_ack = irq_gc_ack_set_bit; irq_setup_generic_chip(gc, IRQ_MSK(5), IRQ_GC_INIT_MASK_CACHE, 0, IRQ_NOPROBE | IRQ_LEVEL); adc->gc = gc; irq_set_chained_handler_and_data(adc->irq, jz4740_adc_irq_demux, gc); writeb(0x00, adc->base + JZ_REG_ADC_ENABLE); writeb(0xff, adc->base 
+ JZ_REG_ADC_CTRL); ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells, ARRAY_SIZE(jz4740_adc_cells), mem_base, irq_base, NULL); if (ret < 0) goto err_clk_put; return 0; err_clk_put: clk_put(adc->clk); err_iounmap: iounmap(adc->base); err_release_mem_region: release_mem_region(adc->mem->start, resource_size(adc->mem)); return ret; }
/*
 * goldfish_pic_of_init() - initialize the Goldfish virtual PIC from DT.
 *
 * Maps the parent interrupt and the PIC register window, masks all
 * interrupts in hardware, builds a single generic irq chip over the
 * enable/disable registers, registers a legacy irq domain and finally
 * chains the parent interrupt to goldfish_pic_cascade().
 *
 * Returns 0 on success or a negative errno, unwinding every acquired
 * resource through the goto-cleanup chain on failure.
 */
static int __init goldfish_pic_of_init(struct device_node *of_node,
				       struct device_node *parent)
{
	struct goldfish_pic_data *gfpic;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	unsigned int parent_irq;
	int ret = 0;

	gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
	if (!gfpic) {
		ret = -ENOMEM;
		goto out_err;
	}

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (!parent_irq) {
		pr_err("Failed to map parent IRQ!\n");
		ret = -EINVAL;
		goto out_free;
	}

	gfpic->base = of_iomap(of_node, 0);
	if (!gfpic->base) {
		pr_err("Failed to map base address!\n");
		ret = -ENOMEM;
		goto out_unmap_irq;
	}

	/* Mask interrupts. */
	writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);

	gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
				    handle_level_irq);
	if (!gc) {
		pr_err("Failed to allocate chip structures!\n");
		ret = -ENOMEM;
		goto out_iounmap;
	}

	/* One chip type: separate enable/disable registers, level flow */
	ct = gc->chip_types;
	ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
	ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;

	irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
			       IRQ_NOPROBE | IRQ_LEVEL, 0);

	gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
						  GFPIC_IRQ_BASE, 0,
						  &goldfish_irq_domain_ops,
						  NULL);
	if (!gfpic->irq_domain) {
		pr_err("Failed to add irqdomain!\n");
		ret = -ENOMEM;
		goto out_destroy_generic_chip;
	}

	irq_set_chained_handler_and_data(parent_irq, goldfish_pic_cascade,
					 gfpic);

	pr_info("Successfully registered.\n");
	return 0;

out_destroy_generic_chip:
	irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
				 IRQ_NOPROBE | IRQ_LEVEL, 0);
out_iounmap:
	iounmap(gfpic->base);
out_unmap_irq:
	irq_dispose_mapping(parent_irq);
out_free:
	kfree(gfpic);
out_err:
	pr_err("Failed to initialize! (errno = %d)\n", ret);
	return ret;
}
/*
 * davinci_gpio_irq_setup() - arrange IRQ support for the DaVinci GPIO
 * banks, either as direct ("unbanked") IRQs handled by the AINTC or as
 * banked IRQs demuxed through a chained handler.
 *
 * NOTE(review): several error paths here do not unwind earlier steps
 * (clk left prepared/enabled; irq descs from irq_alloc_descs() not
 * freed when irq_domain_add_legacy() fails) — worth confirming whether
 * probe failure cleanup handles these.
 *
 * Returns 0 on success or a negative errno.
 */
static int davinci_gpio_irq_setup(struct platform_device *pdev)
{
	unsigned gpio, bank;
	int irq;
	struct clk *clk;
	u32 binten = 0;
	unsigned ngpio, bank_irq;
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct davinci_gpio_controller *chips = platform_get_drvdata(pdev);
	struct davinci_gpio_platform_data *pdata = dev->platform_data;
	struct davinci_gpio_regs __iomem *g;
	struct irq_domain *irq_domain = NULL;
	const struct of_device_id *match;
	struct irq_chip *irq_chip;
	gpio_get_irq_chip_cb_t gpio_get_irq_chip;

	/*
	 * Use davinci_gpio_get_irq_chip by default to handle non DT cases
	 */
	gpio_get_irq_chip = davinci_gpio_get_irq_chip;
	match = of_match_device(of_match_ptr(davinci_gpio_ids), dev);
	if (match)
		gpio_get_irq_chip = (gpio_get_irq_chip_cb_t)match->data;

	ngpio = pdata->ngpio;
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -EBUSY;
	}

	bank_irq = res->start;

	if (!bank_irq) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	clk = devm_clk_get(dev, "gpio");
	if (IS_ERR(clk)) {
		printk(KERN_ERR "Error %ld getting gpio clock?\n",
		       PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	clk_prepare_enable(clk);

	/* Banked mode needs its own virq range plus a legacy domain */
	if (!pdata->gpio_unbanked) {
		irq = irq_alloc_descs(-1, 0, ngpio, 0);
		if (irq < 0) {
			dev_err(dev, "Couldn't allocate IRQ numbers\n");
			return irq;
		}

		irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
						   &davinci_gpio_irq_ops,
						   chips);
		if (!irq_domain) {
			dev_err(dev, "Couldn't register an IRQ domain\n");
			return -ENODEV;
		}
	}

	/*
	 * Arrange gpio_to_irq() support, handling either direct IRQs or
	 * banked IRQs.  Having GPIOs in the first GPIO bank use direct
	 * IRQs, while the others use banked IRQs, would need some setup
	 * tweaks to recognize hardware which can do that.
	 */
	for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) {
		chips[bank].chip.to_irq = gpio_to_irq_banked;
		chips[bank].irq_domain = irq_domain;
	}

	/*
	 * AINTC can handle direct/unbanked IRQs for GPIOs, with the GPIO
	 * controller only handling trigger modes.  We currently assume no
	 * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs.
	 */
	if (pdata->gpio_unbanked) {
		/* pass "bank 0" GPIO IRQs to AINTC */
		chips[0].chip.to_irq = gpio_to_irq_unbanked;
		chips[0].gpio_irq = bank_irq;
		chips[0].gpio_unbanked = pdata->gpio_unbanked;
		binten = GENMASK(pdata->gpio_unbanked / 16, 0);

		/* AINTC handles mask/unmask; GPIO handles triggering */
		irq = bank_irq;
		irq_chip = gpio_get_irq_chip(irq);
		irq_chip->name = "GPIO-AINTC";
		irq_chip->irq_set_type = gpio_irq_type_unbanked;

		/* default trigger: both edges */
		g = gpio2regs(0);
		writel_relaxed(~0, &g->set_falling);
		writel_relaxed(~0, &g->set_rising);

		/* set the direct IRQs up to use that irqchip */
		for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) {
			irq_set_chip(irq, irq_chip);
			irq_set_handler_data(irq, &chips[gpio / 32]);
			irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH);
		}

		goto done;
	}

	/*
	 * Or, AINTC can handle IRQs for banks of 16 GPIO IRQs, which we
	 * then chain through our own handler.
	 */
	for (gpio = 0, bank = 0; gpio < ngpio;
	     bank++, bank_irq++, gpio += 16) {
		/* disabled by default, enabled only as needed */
		g = gpio2regs(gpio);
		writel_relaxed(~0, &g->clr_falling);
		writel_relaxed(~0, &g->clr_rising);

		/*
		 * Each chip handles 32 gpios, and each irq bank consists of 16
		 * gpio irqs. Pass the irq bank's corresponding controller to
		 * the chained irq handler.
		 */
		irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
						 &chips[gpio / 32]);

		binten |= BIT(bank);
	}

done:
	/*
	 * BINTEN -- per-bank interrupt enable. genirq would also let these
	 * bits be set/cleared dynamically.
	 */
	writel_relaxed(binten, gpio_base + BINTEN);

	return 0;
}
/*
 * ezx_pcap_probe() - bring up the Motorola PCAP2 PMIC over SPI.
 *
 * Configures the SPI link (32-bit words, optional CS-active-high),
 * creates the "pcapd" workqueue, installs a simple-IRQ chip on the
 * PCAP's irq_base..irq_base+PCAP_NIRQS-1 range, masks and acks all PCAP
 * interrupts, chains pcap_irq_handler() on the SPI interrupt, requests
 * the ADC-done interrupt and finally registers the platform subdevices.
 *
 * NOTE(review): the free_irqchip error path clears the per-irq chips
 * but does not detach the chained handler installed on spi->irq above —
 * verify this is safe if probe fails after that point.
 *
 * Returns 0 on success or a negative errno.
 */
static int ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

	/* platform data is required */
	if (!pdata)
		goto ret;

	pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	mutex_init(&pcap->io_mutex);
	mutex_init(&pcap->adc_mutex);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	spi_set_drvdata(spi, pcap);

	/* setup spi */
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 |
		    (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto ret;

	pcap->spi = spi;

	/* setup irq */
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto ret;
	}

	/* redirect interrupts to AP, except adcdone2 */
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
			       (1 << PCAP_IRQ_ADCDONE2));

	/* setup irq chip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		irq_set_chip_and_handler(i, &pcap_irq_chip,
					 handle_simple_irq);
		irq_set_chip_data(i, pcap);
		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
	}

	/* mask/ack all PCAP interrupts */
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
	irq_set_irq_wake(spi->irq, 1);

	/* ADC: which done-interrupt we use depends on the port config */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
			      PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
			       pcap);
	if (ret)
		goto free_irqchip;

	/* setup subdevs */
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

	/* board specific quirks */
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
free_irqchip:
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
	destroy_workqueue(pcap->workqueue);
ret:
	return ret;
}
/*
 * combiner_cascade_irq() - route one parent interrupt through the
 * combiner's cascade handler, with the combiner's per-chip data
 * attached for the handler to demux against.
 */
static void __init
combiner_cascade_irq(struct combiner_chip_data *combiner_data,
		     unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}
/*
 * orion_bridge_irq_init() - set up the Marvell Orion bridge interrupt
 * controller from DT.
 *
 * Creates a linear irq domain (32 irqs unless overridden by the
 * optional "marvell,#interrupts" property), allocates one generic irq
 * chip over the cause/mask registers, masks and clears everything in
 * hardware and chains the parent interrupt to
 * orion_bridge_irq_handler().
 *
 * NOTE(review): the error paths after irq_domain_add_linear() return
 * without removing the domain, the allocated generic chips, or the
 * requested mem region — confirm whether leak-on-init-failure is
 * acceptable here.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init orion_bridge_irq_init(struct device_node *np,
					struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int ret, irq, nrirqs = 32;

	/* get optional number of interrupts provided */
	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
					     handle_edge_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->name);
		return ret;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
		pr_err("%s: unable to request mem region\n", np->name);
		return -ENOMEM;
	}

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->name);
		return -EINVAL;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = ioremap(r.start, resource_size(&r));
	if (!gc->reg_base) {
		pr_err("%s: unable to map resource\n", np->name);
		return -ENOMEM;
	}

	/* Edge flow: ack by clearing the cause bit, mask via the mask reg */
	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* mask and clear all interrupts */
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);

	irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
					 domain);

	return 0;
}