static int mx25_tsadc_setup_irq(struct platform_device *pdev, struct mx25_tsadc *tsadc) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; int irq; irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(dev, "Failed to get irq\n"); return irq; } tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops, tsadc); if (!tsadc->domain) { dev_err(dev, "Failed to add irq domain\n"); return -ENOMEM; } irq_set_chained_handler(irq, mx25_tsadc_irq_handler); irq_set_handler_data(irq, tsadc); return 0; }
/*
 * sa1100_init_gpio() - early initialization of the SA-1100 GPIO block.
 *
 * Clears any latched edge-detect state, registers the gpio chip and a
 * legacy irq domain covering the 28 GPIO lines (hwirq 0..27 mapped from
 * IRQ_GPIO0 upwards), then chains all GPIO edge-detect parent interrupts
 * into sa1100_gpio_handler().
 */
void __init sa1100_init_gpio(void)
{
	/* clear all GPIO edge detects */
	GFER = 0;
	GRER = 0;
	GEDR = -1;	/* write-1-to-clear: acks every pending edge */

	/* NOTE(review): gpiochip_add() return value is ignored here. */
	gpiochip_add(&sa1100_gpio_chip);

	/* Legacy domain: fixed linux irq base at IRQ_GPIO0. */
	sa1100_gpio_irqdomain = irq_domain_add_simple(NULL,
			28, IRQ_GPIO0,
			&sa1100_gpio_irqdomain_ops, NULL);

	/*
	 * Install handlers for GPIO 0-10 edge detect interrupts
	 */
	irq_set_chained_handler(IRQ_GPIO0_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO1_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO2_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO3_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO4_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO5_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO6_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO7_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO8_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO9_SC, sa1100_gpio_handler);
	irq_set_chained_handler(IRQ_GPIO10_SC, sa1100_gpio_handler);
	/*
	 * Install handler for GPIO 11-27 edge detect interrupts
	 */
	irq_set_chained_handler(IRQ_GPIO11_27, sa1100_gpio_handler);
}
/*
 * omap_generic_init() - generic DT board init: register the interrupt
 * controller's legacy irq domain, set up SDRC and populate the platform
 * devices described in the device tree.
 */
static void __init omap_generic_init(void)
{
	struct device_node *node;

	/* Only register a domain if the DT actually describes the intc. */
	node = of_find_matching_node(NULL, intc_match);
	if (node)
		irq_domain_add_simple(node, 0);

	omap_sdrc_init(NULL, NULL);

	of_platform_populate(NULL, omap_dt_match_table, NULL, NULL);
}
/*
 * imx6q_gpio_add_irq_domain() - register a legacy irq domain for one
 * GPIO bank.
 *
 * NOTE(review): gpio_irq_base is function-static and is decremented by
 * 32 on every invocation, so banks are laid out downwards from
 * MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; the order in which banks are
 * probed therefore determines their irq bases — confirm callers rely
 * on a fixed probe order.
 */
static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
			struct device_node *interrupt_parent)
{
	static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;

	/* each bank occupies a 32-irq window */
	gpio_irq_base -= 32;
	irq_domain_add_simple(np, gpio_irq_base);

	return 0;
}
/*
 * tc3589x_irq_init() - create the irq domain for the chip's internal
 * interrupt sources.
 *
 * Returns 0 on success, -ENOSYS if the domain could not be created.
 */
static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
{
	struct irq_domain *domain;

	domain = irq_domain_add_simple(np, TC3589x_NR_INTERNAL_IRQS,
				       tc3589x->irq_base,
				       &tc3589x_irq_ops, tc3589x);
	tc3589x->domain = domain;

	if (domain)
		return 0;

	dev_err(tc3589x->dev, "Failed to create irqdomain\n");
	return -ENOSYS;
}
/*
 * msm8x60_dt_init() - DT board init: register the GIC legacy irq
 * domain, poke board-specific UART setup on the surf board, and
 * populate platform devices from the device tree.
 */
static void __init msm8x60_dt_init(void)
{
	struct device_node *gic_node;

	gic_node = of_find_matching_node_by_address(NULL, msm_dt_gic_match,
						    MSM8X60_QGIC_DIST_PHYS);
	if (gic_node)
		irq_domain_add_simple(gic_node, GIC_SPI_START);

	if (of_machine_is_compatible("qcom,msm8660-surf")) {
		printk(KERN_INFO "Init surf UART registers\n");
		msm8x60_init_uart12dm();
	}

	of_platform_populate(NULL, of_default_bus_match_table,
			     msm_auxdata_lookup, NULL);
}
/*
 * mcs814x_of_irq_init() - locate, map and register the MCS814x
 * interrupt controller described in the device tree.
 *
 * Panics on a missing or unmappable controller node since the system
 * cannot run without its interrupt controller.
 */
void __init mcs814x_of_irq_init(void)
{
	struct device_node *node;

	node = of_find_matching_node(NULL, mcs814x_intc_ids);
	if (!node)
		panic("unable to find compatible intc node in dtb\n");

	mcs814x_intc_base = of_iomap(node, 0);
	if (!mcs814x_intc_base)
		panic("unable to map intc cpu registers\n");

	irq_domain_add_simple(node, 0);

	of_node_put(node);

	mcs814x_alloc_gc(mcs814x_intc_base, 0, 32);
}
/*
 * sirfsoc_of_irq_init() - locate, map and register the SiRFSoC
 * interrupt controller described in the device tree.
 *
 * Panics if the controller node is absent or cannot be mapped — the
 * system cannot run without it.
 */
void __init sirfsoc_of_irq_init(void)
{
	struct device_node *node;

	node = of_find_matching_node(NULL, intc_ids);
	if (!node)
		panic("unable to find compatible intc node in dtb\n");

	sirfsoc_intc_base = of_iomap(node, 0);
	if (!sirfsoc_intc_base)
		panic("unable to map intc cpu registers\n");

	irq_domain_add_simple(node, 0);

	of_node_put(node);

	sirfsoc_irq_init();
}
/*
 * fpga_irq_init() - register one FPGA interrupt controller instance.
 * @base:	mapped controller registers
 * @name:	irq_chip name for /proc/interrupts
 * @irq_start:	first linux irq for a legacy domain, or 0 for linear
 * @parent_irq:	irq to chain from, or -1 if this is a root controller
 * @valid:	bitmask of implemented interrupt lines
 * @node:	matching device tree node, may be NULL
 *
 * Instances are drawn from the static fpga_irq_devices[] pool indexed
 * by fpga_irq_id.
 */
void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
			  int parent_irq, u32 valid, struct device_node *node)
{
	struct fpga_irq_data *f;
	int i;

	if (fpga_irq_id >= ARRAY_SIZE(fpga_irq_devices)) {
		pr_err("%s: too few FPGA IRQ controllers, increase CONFIG_VERSATILE_FPGA_IRQ_NR\n", __func__);
		return;
	}
	f = &fpga_irq_devices[fpga_irq_id];
	f->base = base;
	f->chip.name = name;
	/* ack and mask are the same register operation on this hardware */
	f->chip.irq_ack = fpga_irq_mask;
	f->chip.irq_mask = fpga_irq_mask;
	f->chip.irq_unmask = fpga_irq_unmask;
	f->valid = valid;

	if (parent_irq != -1) {
		irq_set_handler_data(parent_irq, f);
		irq_set_chained_handler(parent_irq, fpga_irq_handle);
	}

	/* This will also allocate irq descriptors */
	f->domain = irq_domain_add_simple(node, fls(valid), irq_start,
					  &fpga_irqdomain_ops, f);

	/* This will allocate all valid descriptors in the linear case */
	for (i = 0; i < fls(valid); i++)
		if (valid & BIT(i)) {
			/* legacy domains (irq_start != 0) pre-map everything */
			if (!irq_start)
				irq_create_mapping(f->domain, i);
			f->used_irqs++;
		}

	/* no trailing newline: the pr_cont() below completes the line */
	pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
		fpga_irq_id, name, base, f->used_irqs);
	if (parent_irq != -1)
		pr_cont(", parent IRQ: %d\n", parent_irq);
	else
		pr_cont("\n");

	fpga_irq_id++;
}
/**
 * gb_gpio_irqchip_add() - associate an irqchip with a gpio chip
 * @chip: the gpio chip the irqchip is attached to
 * @irqchip: the irqchip to attach
 * @first_irq: base (first) linux irq to allocate from, or 0 for
 *	dynamic assignment
 * @handler: flow handler to use for the gpio irqs
 * @type: default trigger type, IRQ_TYPE_NONE to leave the hardware
 *	default untouched
 *
 * Creates an irq domain translating the chip's local lines to linux
 * irqs and stores it in the controller, passing the gpio chip as chip
 * data so callbacks can recover their state with container_of().  All
 * line mappings are created up front so the irqchip is usable
 * independently of any gpio calls.  The gpio chip must already be
 * initialized and registered.
 *
 * Return: 0 on success, -EINVAL on bad arguments or domain failure.
 */
static int gb_gpio_irqchip_add(struct gpio_chip *chip,
			       struct irq_chip *irqchip,
			       unsigned int first_irq,
			       irq_flow_handler_t handler,
			       unsigned int type)
{
	struct gb_gpio_controller *ggc;
	unsigned int nlines;
	unsigned int line;

	if (!chip || !irqchip)
		return -EINVAL;

	ggc = gpio_chip_to_gb_gpio_controller(chip);

	ggc->irqchip = irqchip;
	ggc->irq_handler = handler;
	ggc->irq_default_type = type;

	nlines = ggc->line_max + 1;
	ggc->irqdomain = irq_domain_add_simple(NULL, nlines, first_irq,
					       &gb_gpio_domain_ops, chip);
	if (!ggc->irqdomain) {
		ggc->irqchip = NULL;
		return -EINVAL;
	}

	/*
	 * Pre-create every mapping; with first_irq == 0 this is what
	 * allocates the descriptors.  Remember the first resulting irq
	 * as the controller's irq base.
	 */
	for (line = 0; line < nlines; line++) {
		unsigned int virq = irq_create_mapping(ggc->irqdomain, line);

		if (line == 0)
			ggc->irq_base = virq;
	}

	return 0;
}
/*
 * tegra_dt_init() - DT board init: register the GIC legacy irq domain,
 * set up clocks and board pinmux, then populate platform devices from
 * the device tree.
 */
static void __init tegra_dt_init(void)
{
	struct device_node *gic_node;

	gic_node = of_find_matching_node_by_address(NULL, tegra_dt_gic_match,
						    TEGRA_ARM_INT_DIST_BASE);
	if (gic_node)
		irq_domain_add_simple(gic_node, INT_GIC_BASE);

	tegra_clk_init_from_table(tegra_dt_clk_init_table);

	if (of_machine_is_compatible("nvidia,harmony"))
		harmony_pinmux_init();
	else if (of_machine_is_compatible("nvidia,seaboard"))
		seaboard_pinmux_init();

	/*
	 * Finished with the static registrations now; fill in the missing
	 * devices
	 */
	of_platform_populate(NULL, tegra_dt_match_table,
			     tegra20_auxdata_lookup, NULL);
}
/*
 * combiner_init() - set up the interrupt combiner block.
 * @combiner_base:	mapped combiner registers
 * @np:			device tree node, may be NULL on non-DT boots
 * @max_nr:		number of combiner groups
 * @irq_base:		first linux irq for a legacy domain, or 0 for linear
 *
 * Allocates per-group state, registers the irq domain and cascades each
 * group's parent interrupt.
 */
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np,
				 unsigned int max_nr,
				 int irq_base)
{
	int i, irq;
	unsigned int nr_irq;
	struct combiner_chip_data *combiner_data;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
	if (!combiner_data) {
		pr_warning("%s: could not allocate combiner data\n", __func__);
		return;
	}

	combiner_irq_domain = irq_domain_add_simple(np, nr_irq, irq_base,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		/* Fix: don't leak the per-group state on domain failure. */
		kfree(combiner_data);
		return;
	}

	for (i = 0; i < max_nr; i++) {
#ifdef CONFIG_OF
		if (np)
			irq = irq_of_parse_and_map(np, i);
		else
#endif
			irq = combiner_lookup_irq(i);

		/* four groups share each 0x10-byte register window */
		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}
/*
 * ft010_of_init_irq() - map and register the Faraday FT010 interrupt
 * controller.
 *
 * Returns 0 on success or a negative errno.
 */
int __init ft010_of_init_irq(struct device_node *node,
			      struct device_node *parent)
{
	struct ft010_irq_data *f = &firq;

	/*
	 * Disable the idle handler by default since it is buggy
	 * For more info see arch/arm/mach-gemini/idle.c
	 */
	cpu_idle_poll_ctrl(true);

	f->base = of_iomap(node, 0);
	WARN(!f->base, "unable to map gemini irq registers\n");
	/*
	 * Fix: the original carried on after the WARN and dereferenced
	 * the NULL mapping in the writel() calls below.
	 */
	if (!f->base)
		return -ENXIO;

	/* Disable all interrupts */
	writel(0, FT010_IRQ_MASK(f->base));
	writel(0, FT010_FIQ_MASK(f->base));

	f->domain = irq_domain_add_simple(node, FT010_NUM_IRQS, 0,
					  &ft010_irqdomain_ops, f);
	/* Fix: check the domain before installing the top-level handler. */
	if (!f->domain) {
		iounmap(f->base);
		return -ENOMEM;
	}

	set_handle_irq(ft010_irqchip_handle_irq);

	return 0;
}
static int irqc_probe(struct platform_device *pdev) { struct renesas_irqc_config *pdata = pdev->dev.platform_data; struct irqc_priv *p; struct resource *io; struct resource *irq; struct irq_chip *irq_chip; const char *name = dev_name(&pdev->dev); int ret; int k; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { dev_err(&pdev->dev, "failed to allocate driver data\n"); ret = -ENOMEM; goto err0; } /* deal with driver instance configuration */ if (pdata) memcpy(&p->config, pdata, sizeof(*pdata)); p->pdev = pdev; platform_set_drvdata(pdev, p); /* get hold of manadatory IOMEM */ io = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!io) { dev_err(&pdev->dev, "not enough IOMEM resources\n"); ret = -EINVAL; goto err1; } /* allow any number of IRQs between 1 and IRQC_IRQ_MAX */ for (k = 0; k < IRQC_IRQ_MAX; k++) { irq = platform_get_resource(pdev, IORESOURCE_IRQ, k); if (!irq) break; p->irq[k].p = p; p->irq[k].requested_irq = irq->start; } p->number_of_irqs = k; if (p->number_of_irqs < 1) { dev_err(&pdev->dev, "not enough IRQ resources\n"); ret = -EINVAL; goto err1; } /* ioremap IOMEM and setup read/write callbacks */ p->iomem = ioremap_nocache(io->start, resource_size(io)); if (!p->iomem) { dev_err(&pdev->dev, "failed to remap IOMEM\n"); ret = -ENXIO; goto err2; } p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ irq_chip = &p->irq_chip; irq_chip->name = name; irq_chip->irq_mask = irqc_irq_disable; irq_chip->irq_unmask = irqc_irq_enable; irq_chip->irq_enable = irqc_irq_enable; irq_chip->irq_disable = irqc_irq_disable; irq_chip->irq_set_type = irqc_irq_set_type; irq_chip->flags = IRQCHIP_SKIP_SET_WAKE; p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, p->number_of_irqs, p->config.irq_base, &irqc_irq_domain_ops, p); if (!p->irq_domain) { ret = -ENXIO; dev_err(&pdev->dev, "cannot initialize irq domain\n"); goto err2; } /* request interrupts one by one */ for (k = 0; k < p->number_of_irqs; k++) { if (request_irq(p->irq[k].requested_irq, irqc_irq_handler, 
0, name, &p->irq[k])) { dev_err(&pdev->dev, "failed to request IRQ\n"); ret = -ENOENT; goto err3; } } dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); /* warn in case of mismatch if irq base is specified */ if (p->config.irq_base) { if (p->config.irq_base != p->irq[0].domain_irq) dev_warn(&pdev->dev, "irq base mismatch (%d/%d)\n", p->config.irq_base, p->irq[0].domain_irq); } return 0; err3: for (; k >= 0; k--) free_irq(p->irq[k - 1].requested_irq, &p->irq[k - 1]); irq_domain_remove(p->irq_domain); err2: iounmap(p->iomem); err1: kfree(p); err0: return ret; }
static int gpio_rcar_probe(struct platform_device *pdev) { struct gpio_rcar_priv *p; struct resource *io, *irq; struct gpio_chip *gpio_chip; struct irq_chip *irq_chip; const char *name = dev_name(&pdev->dev); int ret; p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); if (!p) { dev_err(&pdev->dev, "failed to allocate driver data\n"); ret = -ENOMEM; goto err0; } p->pdev = pdev; spin_lock_init(&p->lock); /* Get device configuration from DT node or platform data. */ gpio_rcar_parse_pdata(p); platform_set_drvdata(pdev, p); io = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!io || !irq) { dev_err(&pdev->dev, "missing IRQ or IOMEM\n"); ret = -EINVAL; goto err0; } p->base = devm_ioremap_nocache(&pdev->dev, io->start, resource_size(io)); if (!p->base) { dev_err(&pdev->dev, "failed to remap I/O memory\n"); ret = -ENXIO; goto err0; } gpio_chip = &p->gpio_chip; gpio_chip->request = gpio_rcar_request; gpio_chip->free = gpio_rcar_free; gpio_chip->direction_input = gpio_rcar_direction_input; gpio_chip->get = gpio_rcar_get; gpio_chip->direction_output = gpio_rcar_direction_output; gpio_chip->set = gpio_rcar_set; gpio_chip->to_irq = gpio_rcar_to_irq; gpio_chip->label = name; gpio_chip->dev = &pdev->dev; gpio_chip->owner = THIS_MODULE; gpio_chip->base = p->config.gpio_base; gpio_chip->ngpio = p->config.number_of_pins; irq_chip = &p->irq_chip; irq_chip->name = name; irq_chip->irq_mask = gpio_rcar_irq_disable; irq_chip->irq_unmask = gpio_rcar_irq_enable; irq_chip->irq_enable = gpio_rcar_irq_enable; irq_chip->irq_disable = gpio_rcar_irq_disable; irq_chip->irq_set_type = gpio_rcar_irq_set_type; irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED; p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, p->config.number_of_pins, p->config.irq_base, &gpio_rcar_irq_domain_ops, p); if (!p->irq_domain) { ret = -ENXIO; dev_err(&pdev->dev, "cannot initialize irq domain\n"); goto err1; } if 
(devm_request_irq(&pdev->dev, irq->start, gpio_rcar_irq_handler, IRQF_SHARED, name, p)) { dev_err(&pdev->dev, "failed to request IRQ\n"); ret = -ENOENT; goto err1; } ret = gpiochip_add(gpio_chip); if (ret) { dev_err(&pdev->dev, "failed to add GPIO controller\n"); goto err1; } dev_info(&pdev->dev, "driving %d GPIOs\n", p->config.number_of_pins); /* warn in case of mismatch if irq base is specified */ if (p->config.irq_base) { ret = irq_find_mapping(p->irq_domain, 0); if (p->config.irq_base != ret) dev_warn(&pdev->dev, "irq base mismatch (%u/%u)\n", p->config.irq_base, ret); } if (p->config.pctl_name) { ret = gpiochip_add_pin_range(gpio_chip, p->config.pctl_name, 0, gpio_chip->base, gpio_chip->ngpio); if (ret < 0) dev_warn(&pdev->dev, "failed to add pin range\n"); } return 0; err1: irq_domain_remove(p->irq_domain); err0: return ret; }
/*
 * em_gio_probe() - probe one Emma Mobile GPIO block: map both register
 * banks, read configuration from platform data or DT, register the
 * gpio chip, irqchip and irq domain, and request the two bank
 * interrupts.
 *
 * Returns 0 on success or a negative errno.  All resources except the
 * irq domain are device-managed.
 */
static int em_gio_probe(struct platform_device *pdev)
{
	struct gpio_em_config pdata_dt;
	struct gpio_em_config *pdata = dev_get_platdata(&pdev->dev);
	struct em_gio_priv *p;
	struct resource *io[2], *irq[2];
	struct gpio_chip *gpio_chip;
	irq_chip_no_const *irq_chip;
	const char *name = dev_name(&pdev->dev);
	int ret;

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto err0;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);
	spin_lock_init(&p->sense_lock);

	/* Two register banks and two parent interrupts are mandatory. */
	io[0] = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	io[1] = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	irq[0] = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	irq[1] = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (!io[0] || !io[1] || !irq[0] || !irq[1]) {
		dev_err(&pdev->dev, "missing IRQ or IOMEM\n");
		ret = -EINVAL;
		goto err0;
	}

	p->base0 = devm_ioremap_nocache(&pdev->dev, io[0]->start,
					resource_size(io[0]));
	if (!p->base0) {
		dev_err(&pdev->dev, "failed to remap low I/O memory\n");
		ret = -ENXIO;
		goto err0;
	}

	p->base1 = devm_ioremap_nocache(&pdev->dev, io[1]->start,
					resource_size(io[1]));
	if (!p->base1) {
		dev_err(&pdev->dev, "failed to remap high I/O memory\n");
		ret = -ENXIO;
		goto err0;
	}

	/* No platform data: build a minimal config from the DT node. */
	if (!pdata) {
		memset(&pdata_dt, 0, sizeof(pdata_dt));
		pdata = &pdata_dt;
		if (of_property_read_u32(pdev->dev.of_node, "ngpios",
					 &pdata->number_of_pins)) {
			dev_err(&pdev->dev, "Missing ngpios OF property\n");
			ret = -EINVAL;
			goto err0;
		}
		/* -1 requests dynamic gpio base assignment */
		pdata->gpio_base = -1;
	}

	gpio_chip = &p->gpio_chip;
	gpio_chip->of_node = pdev->dev.of_node;
	gpio_chip->direction_input = em_gio_direction_input;
	gpio_chip->get = em_gio_get;
	gpio_chip->direction_output = em_gio_direction_output;
	gpio_chip->set = em_gio_set;
	gpio_chip->to_irq = em_gio_to_irq;
	gpio_chip->request = em_gio_request;
	gpio_chip->free = em_gio_free;
	gpio_chip->label = name;
	gpio_chip->dev = &pdev->dev;
	gpio_chip->owner = THIS_MODULE;
	gpio_chip->base = pdata->gpio_base;
	gpio_chip->ngpio = pdata->number_of_pins;

	irq_chip = &p->irq_chip;
	irq_chip->name = name;
	irq_chip->irq_mask = em_gio_irq_disable;
	irq_chip->irq_unmask = em_gio_irq_enable;
	irq_chip->irq_set_type = em_gio_irq_set_type;
	irq_chip->irq_request_resources = em_gio_irq_reqres;
	irq_chip->irq_release_resources = em_gio_irq_relres;
	irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;

	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
					      pdata->number_of_pins,
					      pdata->irq_base,
					      &em_gio_irq_domain_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "cannot initialize irq domain\n");
		goto err0;
	}

	if (devm_request_irq(&pdev->dev, irq[0]->start, em_gio_irq_handler,
			     0, name, p)) {
		dev_err(&pdev->dev, "failed to request low IRQ\n");
		ret = -ENOENT;
		goto err1;
	}

	if (devm_request_irq(&pdev->dev, irq[1]->start, em_gio_irq_handler,
			     0, name, p)) {
		dev_err(&pdev->dev, "failed to request high IRQ\n");
		ret = -ENOENT;
		goto err1;
	}

	ret = gpiochip_add(gpio_chip);
	if (ret) {
		dev_err(&pdev->dev, "failed to add GPIO controller\n");
		goto err1;
	}

	if (pdata->pctl_name) {
		ret = gpiochip_add_pin_range(gpio_chip, pdata->pctl_name, 0,
					     gpio_chip->base,
					     gpio_chip->ngpio);
		if (ret < 0)
			dev_warn(&pdev->dev, "failed to add pin range\n");
	}
	return 0;

err1:
	/* the domain is the only resource not managed by devm */
	irq_domain_remove(p->irq_domain);
err0:
	return ret;
}
/*
 * lnw_gpio_probe() - probe a Langwell GPIO PCI function.
 *
 * BAR1 holds two u32 values (irq base, gpio base) that are read once
 * and then unmapped; BAR0 holds the live GPIO registers.  Registers the
 * gpio chip and irq domain, then chains the PCI interrupt into
 * lnw_irq_handler().
 *
 * Returns 0 on success or a negative errno.
 */
static int lnw_gpio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *id)
{
	void *base;
	resource_size_t start, len;
	struct lnw_gpio *lnw;
	u32 gpio_base;
	u32 irq_base;
	int retval;
	int ngpio = id->driver_data;

	retval = pci_enable_device(pdev);
	if (retval)
		return retval;

	retval = pci_request_regions(pdev, "langwell_gpio");
	if (retval) {
		dev_err(&pdev->dev, "error requesting resources\n");
		goto err_pci_req_region;
	}
	/* get the gpio_base from bar1 */
	start = pci_resource_start(pdev, 1);
	len = pci_resource_len(pdev, 1);
	base = ioremap_nocache(start, len);
	if (!base) {
		dev_err(&pdev->dev, "error mapping bar1\n");
		retval = -EFAULT;
		goto err_ioremap;
	}
	/* first u32 is the irq base, second the gpio base */
	irq_base = *(u32 *)base;
	gpio_base = *((u32 *)base + 1);
	/* release the IO mapping, since we already get the info from bar1 */
	iounmap(base);
	/* get the register base from bar0 */
	start = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	base = devm_ioremap_nocache(&pdev->dev, start, len);
	if (!base) {
		dev_err(&pdev->dev, "error mapping bar0\n");
		retval = -EFAULT;
		goto err_ioremap;
	}

	lnw = devm_kzalloc(&pdev->dev, sizeof(*lnw), GFP_KERNEL);
	if (!lnw) {
		dev_err(&pdev->dev, "can't allocate langwell_gpio chip data\n");
		retval = -ENOMEM;
		goto err_ioremap;
	}

	lnw->reg_base = base;
	lnw->chip.label = dev_name(&pdev->dev);
	lnw->chip.request = lnw_gpio_request;
	lnw->chip.direction_input = lnw_gpio_direction_input;
	lnw->chip.direction_output = lnw_gpio_direction_output;
	lnw->chip.get = lnw_gpio_get;
	lnw->chip.set = lnw_gpio_set;
	lnw->chip.to_irq = lnw_gpio_to_irq;
	lnw->chip.base = gpio_base;
	lnw->chip.ngpio = ngpio;
	lnw->chip.can_sleep = 0;
	lnw->pdev = pdev;

	lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
					    &lnw_gpio_irq_ops, lnw);
	if (!lnw->domain) {
		retval = -ENOMEM;
		goto err_ioremap;
	}

	pci_set_drvdata(pdev, lnw);
	retval = gpiochip_add(&lnw->chip);
	if (retval) {
		dev_err(&pdev->dev, "langwell gpiochip_add error %d\n",
			retval);
		goto err_ioremap;
	}

	lnw_irq_init_hw(lnw);

	irq_set_handler_data(pdev->irq, lnw);
	irq_set_chained_handler(pdev->irq, lnw_irq_handler);

	spin_lock_init(&lnw->lock);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;

err_ioremap:
	pci_release_regions(pdev);
err_pci_req_region:
	pci_disable_device(pdev);
	return retval;
}
/*
 * pl061_probe() - probe an ARM PL061 GPIO controller on the AMBA bus.
 *
 * Sets up the gpio chip, registers it, then installs the chained
 * parent interrupt handler and the per-line irq domain (legacy when
 * platform data supplies an irq base, otherwise linear).  Initial pin
 * directions/values are applied from platform data when present.
 *
 * Returns 0 on success or a negative errno.
 */
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl061_platform_data *pdata = dev_get_platdata(dev);
	struct pl061_gpio *chip;
	int ret, irq, i, irq_base;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	if (pdata) {
		chip->gc.base = pdata->gpio_base;
		irq_base = pdata->irq_base;
		if (irq_base <= 0) {
			dev_err(&adev->dev, "invalid IRQ base in pdata\n");
			return -ENODEV;
		}
	} else {
		/* no pdata: dynamic gpio base, linear irq domain */
		chip->gc.base = -1;
		irq_base = 0;
	}

	chip->base = devm_ioremap_resource(dev, &adev->res);
	if (IS_ERR(chip->base))
		return PTR_ERR(chip->base);

	spin_lock_init(&chip->lock);

	chip->gc.request = pl061_gpio_request;
	chip->gc.free = pl061_gpio_free;
	chip->gc.direction_input = pl061_direction_input;
	chip->gc.direction_output = pl061_direction_output;
	chip->gc.get = pl061_get_value;
	chip->gc.set = pl061_set_value;
	chip->gc.to_irq = pl061_to_irq;
	chip->gc.ngpio = PL061_GPIO_NR;
	chip->gc.label = dev_name(dev);
	chip->gc.dev = dev;
	chip->gc.owner = THIS_MODULE;

	ret = gpiochip_add(&chip->gc);
	if (ret)
		return ret;

	/*
	 * irq_chip support
	 */
	writeb(0, chip->base + GPIOIE); /* disable irqs */
	irq = adev->irq[0];
	if (irq < 0) {
		dev_err(&adev->dev, "invalid IRQ\n");
		return -ENODEV;
	}

	irq_set_chained_handler(irq, pl061_irq_handler);
	irq_set_handler_data(irq, chip);

	chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
					     irq_base, &pl061_domain_ops,
					     chip);
	if (!chip->domain) {
		dev_err(&adev->dev, "no irq domain\n");
		return -ENODEV;
	}

	/* apply initial directions/values from platform data */
	for (i = 0; i < PL061_GPIO_NR; i++) {
		if (pdata) {
			if (pdata->directions & (1 << i))
				pl061_direction_output(&chip->gc, i,
						pdata->values & (1 << i));
			else
				pl061_direction_input(&chip->gc, i);
		}
	}

	amba_set_drvdata(adev, chip);
	dev_info(&adev->dev, "PL061 GPIO chip @%pa registered\n",
		 &adev->res.start);

	return 0;
}
/*
 * intel_gpio_probe() - probe an Intel MID GPIO PCI function.
 *
 * BAR1 holds two u32 values (irq base, gpio base) that are read once
 * and then unmapped; BAR0 holds the live GPIO registers.  Registers
 * the irq domain and gpio chip, then chains the PCI interrupt into
 * intel_mid_irq_handler().  All resources are managed (pcim_*/devm_*).
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_gpio_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	void __iomem *base;
	struct intel_mid_gpio *priv;
	u32 gpio_base;
	u32 irq_base;
	int retval;
	struct intel_mid_gpio_ddata *ddata =
				(struct intel_mid_gpio_ddata *)id->driver_data;

	retval = pcim_enable_device(pdev);
	if (retval)
		return retval;

	/* map BAR0 (registers) and BAR1 (config words) together */
	retval = pcim_iomap_regions(pdev, 1 << 0 | 1 << 1, pci_name(pdev));
	if (retval) {
		dev_err(&pdev->dev, "I/O memory mapping error\n");
		return retval;
	}

	base = pcim_iomap_table(pdev)[1];

	irq_base = readl(base);
	gpio_base = readl(sizeof(u32) + base);

	/* release the IO mapping, since we already get the info from bar1 */
	pcim_iounmap_regions(pdev, 1 << 1);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "can't allocate chip data\n");
		return -ENOMEM;
	}

	priv->reg_base = pcim_iomap_table(pdev)[0];
	priv->chip.label = dev_name(&pdev->dev);
	priv->chip.dev = &pdev->dev;
	priv->chip.request = intel_gpio_request;
	priv->chip.direction_input = intel_gpio_direction_input;
	priv->chip.direction_output = intel_gpio_direction_output;
	priv->chip.get = intel_gpio_get;
	priv->chip.set = intel_gpio_set;
	priv->chip.to_irq = intel_gpio_to_irq;
	priv->chip.base = gpio_base;
	priv->chip.ngpio = ddata->ngpio;
	priv->chip.can_sleep = false;
	priv->pdev = pdev;

	spin_lock_init(&priv->lock);

	priv->domain = irq_domain_add_simple(pdev->dev.of_node, ddata->ngpio,
					     irq_base, &intel_gpio_irq_ops,
					     priv);
	if (!priv->domain)
		return -ENOMEM;

	pci_set_drvdata(pdev, priv);
	retval = gpiochip_add(&priv->chip);
	if (retval) {
		dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
		return retval;
	}

	intel_mid_irq_init_hw(priv);

	irq_set_handler_data(pdev->irq, priv);
	irq_set_chained_handler(pdev->irq, intel_mid_irq_handler);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}
/*
 * irqc_probe() - probe one R-Car IRQC instance (clock/runtime-PM
 * variant): gather IRQ resources, map the registers, register the irq
 * domain and request each parent interrupt.
 *
 * Returns 0 on success or a negative errno; resources acquired so far
 * are released on the error paths.
 */
static int irqc_probe(struct platform_device *pdev)
{
	struct irqc_priv *p;
	struct resource *io;
	struct resource *irq;
	struct irq_chip *irq_chip;
	const char *name = dev_name(&pdev->dev);
	int ret;
	int k;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		ret = -ENOMEM;
		goto err0;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	/* the clock is optional; run without it if absent */
	p->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(p->clk)) {
		dev_warn(&pdev->dev, "unable to get clock\n");
		p->clk = NULL;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* get hold of manadatory IOMEM */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io) {
		dev_err(&pdev->dev, "not enough IOMEM resources\n");
		ret = -EINVAL;
		goto err1;
	}

	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
	for (k = 0; k < IRQC_IRQ_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;
		p->irq[k].p = p;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(&pdev->dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err1;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	p->iomem = ioremap_nocache(io->start, resource_size(io));
	if (!p->iomem) {
		dev_err(&pdev->dev, "failed to remap IOMEM\n");
		ret = -ENXIO;
		/*
		 * NOTE(review): this jumps to err2, which calls
		 * iounmap(p->iomem) with p->iomem == NULL — harmless on
		 * most arches but arguably should target err1; verify.
		 */
		goto err2;
	}

	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */

	irq_chip = &p->irq_chip;
	irq_chip->name = name;
	irq_chip->irq_mask = irqc_irq_disable;
	irq_chip->irq_unmask = irqc_irq_enable;
	irq_chip->irq_set_type = irqc_irq_set_type;
	irq_chip->irq_set_wake = irqc_irq_set_wake;
	irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;

	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
					      p->number_of_irqs, 0,
					      &irqc_irq_domain_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "cannot initialize irq domain\n");
		goto err2;
	}

	/* request interrupts one by one */
	for (k = 0; k < p->number_of_irqs; k++) {
		if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
				0, name, &p->irq[k])) {
			dev_err(&pdev->dev, "failed to request IRQ\n");
			ret = -ENOENT;
			goto err3;
		}
	}

	dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);

	return 0;

err3:
	/* free only the irqs successfully requested (0 .. k-1) */
	while (--k >= 0)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

	irq_domain_remove(p->irq_domain);
err2:
	iounmap(p->iomem);
err1:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
err0:
	return ret;
}
/*
 * pl061_probe() - probe an ARM PL061 GPIO controller (hwspinlock
 * variant).
 *
 * Maps the register window, optionally requests a hardware spinlock
 * ("gpio,hwspinlock" DT property), hooks request()/free() only when a
 * "gpio-ranges" pinctrl mapping exists, then registers the gpio chip,
 * the chained parent handler and the per-line irq domain.
 *
 * Returns 0 on success or a negative errno.
 */
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl061_platform_data *pdata = dev->platform_data;
	struct pl061_gpio *chip;
	int ret, irq, i, irq_base;
	struct device_node *np = dev->of_node;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	if (pdata) {
		chip->gc.base = pdata->gpio_base;
		irq_base = pdata->irq_base;
		if (irq_base <= 0)
			return -ENODEV;
	} else {
		/* no pdata: gpio base from DT, linear irq domain */
		chip->gc.base = pl061_parse_gpio_base(dev);
		irq_base = 0;
	}

	if (!devm_request_mem_region(dev, adev->res.start,
				     resource_size(&adev->res), "pl061"))
		return -EBUSY;

	chip->base = devm_ioremap(dev, adev->res.start,
				  resource_size(&adev->res));
	if (!chip->base)
		return -ENOMEM;

	spin_lock_init(&chip->lock);

	if (of_get_property(np, "gpio,hwspinlock", NULL)) {
		gpio_hwlock = hwspin_lock_request_specific(GPIO_HWLOCK_ID);
		if (gpio_hwlock == NULL)
			return -EBUSY;
	}

	/* Hook the request()/free() for pinctrl operation */
	if (of_get_property(dev->of_node, "gpio-ranges", NULL)) {
		chip->gc.request = pl061_gpio_request;
		chip->gc.free = pl061_gpio_free;
	}

	chip->gc.direction_input = pl061_direction_input;
	chip->gc.direction_output = pl061_direction_output;
	chip->gc.get = pl061_get_value;
	chip->gc.set = pl061_set_value;
	chip->gc.to_irq = pl061_to_irq;
	chip->gc.ngpio = PL061_GPIO_NR;
	chip->gc.label = dev_name(dev);
	chip->gc.dev = dev;
	chip->gc.owner = THIS_MODULE;

	ret = gpiochip_add(&chip->gc);
	if (ret)
		return ret;

	/*
	 * irq_chip support
	 */
	writeb(0, chip->base + GPIOIE); /* disable irqs */
	irq = adev->irq[0];
	if (irq < 0)
		return -ENODEV;

	irq_set_chained_handler(irq, pl061_irq_handler);
	irq_set_handler_data(irq, chip);

	chip->domain = irq_domain_add_simple(adev->dev.of_node, PL061_GPIO_NR,
					     irq_base, &pl061_domain_ops,
					     chip);
	if (!chip->domain)
		return -ENODEV;

	/* apply initial directions/values from platform data */
	for (i = 0; i < PL061_GPIO_NR; i++) {
		if (pdata) {
			if (pdata->directions & (1 << i))
				pl061_direction_output(&chip->gc, i,
						pdata->values & (1 << i));
			else
				pl061_direction_input(&chip->gc, i);
		}
	}

	amba_set_drvdata(adev, chip);

	return 0;
}
/*
 * intc_irqpin_probe() - probe one Renesas INTC external-IRQ-pin block.
 *
 * Gathers register banks (some optional) and parent interrupts,
 * detects whether all pins share one parent line, selects matching
 * enable/disable strategies, then registers the irq domain and
 * requests the parent interrupt(s).
 *
 * Returns 0 on success or a negative errno.
 */
static int intc_irqpin_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct renesas_intc_irqpin_config *pdata = dev->platform_data;
	const struct of_device_id *of_id;
	struct intc_irqpin_priv *p;
	struct intc_irqpin_iomem *i;
	struct resource *io[INTC_IRQPIN_REG_NR];
	struct resource *irq;
	struct irq_chip *irq_chip;
	void (*enable_fn)(struct irq_data *d);
	void (*disable_fn)(struct irq_data *d);
	const char *name = dev_name(dev);
	int ref_irq;
	int ret;
	int k;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	/* deal with driver instance configuration */
	if (pdata) {
		memcpy(&p->config, pdata, sizeof(*pdata));
	} else {
		of_property_read_u32(dev->of_node, "sense-bitfield-width",
				     &p->config.sense_bitfield_width);
		p->config.control_parent =
			of_property_read_bool(dev->of_node, "control-parent");
	}
	if (!p->config.sense_bitfield_width)
		p->config.sense_bitfield_width = 4; /* default to 4 bits */

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	/* the clock is optional; run without it if absent */
	p->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(p->clk)) {
		dev_warn(dev, "unable to get clock\n");
		p->clk = NULL;
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* get hold of register banks (only the first few are mandatory) */
	memset(io, 0, sizeof(io));
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
		if (!io[k] && k < INTC_IRQPIN_REG_NR_MANDATORY) {
			dev_err(dev, "not enough IOMEM resources\n");
			ret = -EINVAL;
			goto err0;
		}
	}

	/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
	for (k = 0; k < INTC_IRQPIN_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;
		p->irq[k].p = p;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err0;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		i = &p->iomem[k];

		/* handle optional registers */
		if (!io[k])
			continue;

		/* register width is inferred from the resource size */
		switch (resource_size(io[k])) {
		case 1:
			i->width = 8;
			i->read = intc_irqpin_read8;
			i->write = intc_irqpin_write8;
			break;
		case 4:
			i->width = 32;
			i->read = intc_irqpin_read32;
			i->write = intc_irqpin_write32;
			break;
		default:
			dev_err(dev, "IOMEM size mismatch\n");
			ret = -EINVAL;
			goto err0;
		}

		i->iomem = devm_ioremap_nocache(dev, io[k]->start,
						resource_size(io[k]));
		if (!i->iomem) {
			dev_err(dev, "failed to remap IOMEM\n");
			ret = -ENXIO;
			goto err0;
		}
	}

	/* configure "individual IRQ mode" where needed */
	of_id = of_match_device(intc_irqpin_dt_ids, dev);
	if (of_id && of_id->data) {
		const struct intc_irqpin_irlm_config *irlm_config =
			of_id->data;

		if (io[INTC_IRQPIN_REG_IRLM])
			intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
						      irlm_config->irlm_bit,
						      1, 1);
		else
			dev_warn(dev, "unable to select IRLM mode\n");
	}

	/* mask all interrupts using priority */
	for (k = 0; k < p->number_of_irqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 1);

	/* clear all pending interrupts */
	intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);

	/* scan for shared interrupt lines */
	ref_irq = p->irq[0].requested_irq;
	p->shared_irqs = true;
	for (k = 1; k < p->number_of_irqs; k++) {
		if (ref_irq != p->irq[k].requested_irq) {
			p->shared_irqs = false;
			break;
		}
	}

	/* use more severe masking method if requested */
	if (p->config.control_parent) {
		enable_fn = intc_irqpin_irq_enable_force;
		disable_fn = intc_irqpin_irq_disable_force;
	} else if (!p->shared_irqs) {
		enable_fn = intc_irqpin_irq_enable;
		disable_fn = intc_irqpin_irq_disable;
	} else {
		enable_fn = intc_irqpin_shared_irq_enable;
		disable_fn = intc_irqpin_shared_irq_disable;
	}

	irq_chip = &p->irq_chip;
	irq_chip->name = name;
	irq_chip->irq_mask = disable_fn;
	irq_chip->irq_unmask = enable_fn;
	irq_chip->irq_set_type = intc_irqpin_irq_set_type;
	irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
	irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;

	p->irq_domain = irq_domain_add_simple(dev->of_node,
					      p->number_of_irqs,
					      p->config.irq_base,
					      &intc_irqpin_irq_domain_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(dev, "cannot initialize irq domain\n");
		goto err0;
	}

	if (p->shared_irqs) {
		/* request one shared interrupt */
		if (devm_request_irq(dev, p->irq[0].requested_irq,
				intc_irqpin_shared_irq_handler,
				IRQF_SHARED, name, p)) {
			dev_err(dev, "failed to request low IRQ\n");
			ret = -ENOENT;
			goto err1;
		}
	} else {
		/* request interrupts one by one */
		for (k = 0; k < p->number_of_irqs; k++) {
			if (devm_request_irq(dev, p->irq[k].requested_irq,
					     intc_irqpin_irq_handler, 0, name,
					     &p->irq[k])) {
				dev_err(dev, "failed to request low IRQ\n");
				ret = -ENOENT;
				goto err1;
			}
		}
	}

	/* unmask all interrupts on prio level */
	for (k = 0; k < p->number_of_irqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 0);

	dev_info(dev, "driving %d irqs\n", p->number_of_irqs);

	/* warn in case of mismatch if irq base is specified */
	if (p->config.irq_base) {
		if (p->config.irq_base != p->irq[0].domain_irq)
			dev_warn(dev, "irq base mismatch (%d/%d)\n",
				 p->config.irq_base, p->irq[0].domain_irq);
	}

	return 0;

err1:
	irq_domain_remove(p->irq_domain);
err0:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	return ret;
}
static int hi6401_irq_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct hi6401_irq *irq = NULL; enum of_gpio_flags flags; unsigned int virq; int ret = 0; int i; irq = devm_kzalloc(dev, sizeof(*irq), GFP_KERNEL); if (!irq) { dev_err(dev, "cannot allocate hi6401_irq device info\n"); return -ENOMEM; } platform_set_drvdata(pdev, irq); /* get resources */ irq->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!irq->res) { dev_err(dev, "platform_get_resource err\n"); goto err_exit; } if (!devm_request_mem_region(dev, irq->res->start, resource_size(irq->res), pdev->name)) { dev_err(dev, "cannot claim register memory\n"); goto err_exit; } irq->reg_base_addr = devm_ioremap(dev, irq->res->start, resource_size(irq->res)); if (!irq->reg_base_addr) { dev_err(dev, "cannot map register memory\n"); goto ioremap_err; } /* get pinctrl */ irq->pctrl = devm_pinctrl_get(dev); if (IS_ERR(irq->pctrl)) { dev_err(dev, "could not get pinctrl\n"); goto codec_ssi_get_err; } ret = codec_ssi_iomux_default(irq->pctrl); if (0 != ret) goto codec_ssi_iomux_err; /* get codec ssi clk */ irq->codec_ssi_clk = devm_clk_get(dev, "clk_codecssi"); if (IS_ERR(irq->codec_ssi_clk)) { pr_err("clk_get: codecssi clk not found!\n"); ret = PTR_ERR(irq->codec_ssi_clk); goto codec_ssi_clk_err; } ret = clk_prepare_enable(irq->codec_ssi_clk); if (0 != ret) { pr_err("codec_ssi_clk :clk prepare enable failed !\n"); goto codec_ssi_clk_enable_err; } /* get pmu audio clk */ irq->pmu_audio_clk = devm_clk_get(dev, "clk_pmuaudioclk"); if (IS_ERR(irq->pmu_audio_clk)) { pr_err("_clk_get: pmu_audio_clk not found!\n"); ret = PTR_ERR(irq->pmu_audio_clk); goto pmu_audio_clk_err; } ret = clk_prepare_enable(irq->pmu_audio_clk); if (0 != ret) { pr_err("pmu_audio_clk :clk prepare enable failed !\n"); goto pmu_audio_clk_enable_err; } spin_lock_init(&irq->lock); spin_lock_init(&irq->rw_lock); mutex_init(&irq->sr_mutex); mutex_init(&irq->pll_mutex); 
wake_lock_init(&irq->wake_lock, WAKE_LOCK_SUSPEND, "hi6401-irq"); irq->dev = dev; /* clear IRQ status */ hi6401_irq_write(irq, HI6401_REG_IRQ_0, 0xFF); hi6401_irq_write(irq, HI6401_REG_IRQ_1, 0xFF); /* mask all irqs */ hi6401_irq_write(irq, HI6401_REG_IRQM_0, 0xFF); hi6401_irq_write(irq, HI6401_REG_IRQM_1, 0xFF); irq->gpio = of_get_gpio_flags(np, 0, &flags); if (0 > irq->gpio) { dev_err(dev, "get gpio flags error\n"); ret = irq->gpio; goto get_gpio_err; } if (!gpio_is_valid(irq->gpio)) { dev_err(dev, "gpio is invalid\n"); ret = -EINVAL; goto get_gpio_err; } ret = gpio_request_one(irq->gpio, GPIOF_IN, "hi6401_irq"); if (0 > ret) { dev_err(dev, "failed to request gpio%d\n", irq->gpio); goto get_gpio_err; } irq->irq = gpio_to_irq(irq->gpio); irq->domain = irq_domain_add_simple(np, HI6401_MAX_IRQS, 0, &hi6401_domain_ops, irq); if (!irq->domain) { dev_err(dev, "irq domain error\n"); ret = -ENODEV; goto gpio_err; } for (i = 0; i < HI6401_MAX_IRQS; i++) { virq = irq_create_mapping(irq->domain, i); if (virq == NO_IRQ) { dev_err(dev, "Failed mapping hwirq\n"); ret = -ENOSPC; goto gpio_err; } irq->irqs[i] = virq; } ret = request_irq(irq->irq, hi6401_irq_handler, IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, "hi6401_irq", irq); if (0 > ret) { dev_err(dev, "could not claim irq %d\n", ret); ret = -ENODEV; goto gpio_err; } irq->hi6401_irq_delay_wq = create_singlethread_workqueue("hi6401_irq_delay_wq"); if (!(irq->hi6401_irq_delay_wq)) { pr_err("%s(%u) : workqueue create failed", __FUNCTION__,__LINE__); ret = -ENOMEM; goto irq_delay_wq_err; } INIT_DELAYED_WORK(&irq->hi6401_irq_delay_work, hi6401_irq_work_func); irq->pll_delay_wq = create_singlethread_workqueue("pll_delay_wq"); if (!(irq->pll_delay_wq)) { pr_err("%s : pll_delay_wq create failed", __FUNCTION__); ret = -ENOMEM; goto pll_delay_wq_err; } INIT_DELAYED_WORK(&irq->pll_delay_work, hi6401_pll_work_func); g_dump_buf = (char*)kmalloc(sizeof(char)*Hi6401_SIZE_MAX, GFP_KERNEL); if (!g_dump_buf) { pr_err("%s : couldn't malloc 
buffer.\n",__FUNCTION__); ret = -ENOMEM; goto g_dump_buf_kmalloc_err; } memset(g_dump_buf, 0, Hi6401_SIZE_MAX); /* populate sub nodes */ of_platform_populate(np, of_hi6401_irq_child_match_tbl, NULL, dev); if (!hi6401_client) { hi6401_client = dsm_register_client(&dsm_hi6401); } return 0; g_dump_buf_kmalloc_err: if(irq->pll_delay_wq) { cancel_delayed_work(&irq->pll_delay_work); flush_workqueue(irq->pll_delay_wq); destroy_workqueue(irq->pll_delay_wq); } pll_delay_wq_err: if(irq->hi6401_irq_delay_wq) { cancel_delayed_work(&irq->hi6401_irq_delay_work); flush_workqueue(irq->hi6401_irq_delay_wq); destroy_workqueue(irq->hi6401_irq_delay_wq); } irq_delay_wq_err: free_irq(irq->irq, irq); gpio_err: gpio_free(irq->gpio); get_gpio_err: clk_disable_unprepare(irq->pmu_audio_clk); pmu_audio_clk_enable_err: devm_clk_put(dev, irq->pmu_audio_clk); pmu_audio_clk_err: clk_disable_unprepare(irq->codec_ssi_clk); codec_ssi_clk_enable_err: devm_clk_put(dev, irq->codec_ssi_clk); codec_ssi_clk_err: codec_ssi_iomux_idle(irq->pctrl); codec_ssi_iomux_err: pinctrl_put(irq->pctrl); codec_ssi_get_err: devm_iounmap(dev, irq->reg_base_addr); ioremap_err: devm_release_mem_region(dev, irq->res->start, resource_size(irq->res)); err_exit: devm_kfree(dev, irq); return ret; }