/* Release the Linux irq descriptors backing this GPIO block. */
static void em_gio_irq_domain_cleanup(struct em_gio_priv *p)
{
	struct gpio_em_config *config = p->pdev->dev.platform_data;

	irq_free_descs(p->irq_base, config->number_of_pins);
	/* FIXME: irq domain wants to be freed! */
}
__devinit int ce4100_gpio_irq_setup(struct intelce_gpio_chip *c, struct pci_dev *pdev) { int i; int irq; int ret; c->irq_base = irq_alloc_descs(-1, 0, CE4100_PUB_GPIOS_PER_BANK, -1); if (c->irq_base < 0) return c->irq_base; /* mask + ACK all interrupt sources */ intelce_gpio_mmio_write32(0, c->reg_base + CE4100_PUB_GPIO_INT_EN); intelce_gpio_mmio_write32(0xFFF, c->reg_base + CE4100_PUB_GPIO_INT_STAT); ret = request_irq(pdev->irq, ce4100_gpio_irq_handler, IRQF_SHARED, "ce4100_gpio", c); if (ret) goto out_free_desc; /* * This gpio irq controller latches level irqs. Testing shows that if * we unmask & ACK the IRQ before the source of the interrupt is gone * then the interrupt is active again. */ irq = c->irq_base; for (i=0; i < c->chip.ngpio; i++) { irq_set_chip_and_handler_name(irq, &ce4100_irq_chip, handle_fasteoi_irq, "gpio_irq"); irq_set_chip_data(irq, c); irq++; } return 0; out_free_desc: irq_free_descs(c->irq_base, CE4100_PUB_GPIOS_PER_BANK); return ret; }
/*
 * Allocate the irq descriptor range requested by platform data and hook it
 * up to a legacy irq domain.  Returns 0 on success, -ENXIO on any failure.
 */
static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
{
	struct platform_device *pdev = p->pdev;
	struct gpio_em_config *config = pdev->dev.platform_data;

	p->irq_base = irq_alloc_descs(config->irq_base, 0,
				      config->number_of_pins, numa_node_id());
	if (IS_ERR_VALUE(p->irq_base)) {
		dev_err(&pdev->dev, "cannot get irq_desc\n");
		return -ENXIO;
	}

	pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
		 config->gpio_base, config->number_of_pins, p->irq_base);

	p->irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
					      config->number_of_pins,
					      p->irq_base, 0,
					      &em_gio_irq_domain_ops, p);
	if (p->irq_domain)
		return 0;

	/* domain creation failed: give the descriptors back */
	irq_free_descs(p->irq_base, config->number_of_pins);
	return -ENXIO;
}
static int sch_gpio_remove(struct platform_device *pdev) { int err = 0; struct resource *res; struct sch_gpio *chip = platform_get_drvdata(pdev); if (gpio_ba) { if (info != NULL) { uio_unregister_device(info); kfree(info); } sch_gpio_resume_irqs_deinit(chip, sch_gpio_resume.ngpio); sch_gpio_core_irqs_deinit(chip, sch_gpio_core.ngpio); if (irq_num > 0) free_irq(irq_num, chip); platform_device_unregister(&qrk_gpio_restrict_pdev); irq_free_descs(chip->irq_base_resume, sch_gpio_resume.ngpio); irq_free_descs(chip->irq_base_core, sch_gpio_core.ngpio); gpiochip_remove(&sch_gpio_resume); gpiochip_remove(&sch_gpio_core); res = platform_get_resource(pdev, IORESOURCE_IO, 0); release_region(res->start, resource_size(res)); gpio_ba = 0; } sch_gpio_resume_restore_state(&(chip->initial_resume)); sch_gpio_core_restore_state(&(chip->initial_core)); kfree(chip); chip_ptr = 0; return err; }
/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 *
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	unsigned int irq = from;
	int remaining;

	/* Mark each irq unusable and tear down its arch state. */
	for (remaining = cnt; remaining > 0; remaining--, irq++) {
		irq_set_status_flags(irq, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(irq);
	}
	/* Then give the whole descriptor range back. */
	irq_free_descs(from, cnt);
}
/*
 * Map the shared-irq register block, allocate one contiguous virq range
 * covering every block and expose it through a legacy irq domain, then
 * register each block with its parent interrupt.
 */
static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
		struct device_node *np)
{
	void __iomem *base;
	struct irq_domain *domain;
	int i, virq_base, parent_irq;
	int nr_irqs = 0, hwirq = 0;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%s: failed to map shirq registers\n", __func__);
		return -ENXIO;
	}

	/* Total interrupts handled across all blocks. */
	for (i = 0; i < block_nr; i++)
		nr_irqs += shirq_blocks[i]->nr_irqs;

	virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
	if (IS_ERR_VALUE(virq_base)) {
		pr_err("%s: irq desc alloc failed\n", __func__);
		goto err_unmap;
	}

	domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
				       &irq_domain_simple_ops, NULL);
	if (WARN_ON(!domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		goto err_free_desc;
	}

	for (i = 0; i < block_nr; i++) {
		struct spear_shirq *blk = shirq_blocks[i];

		blk->base = base;
		blk->virq_base = irq_find_mapping(domain, hwirq);

		parent_irq = irq_of_parse_and_map(np, i);
		spear_shirq_register(blk, parent_irq);

		hwirq += blk->nr_irqs;
	}

	return 0;

err_free_desc:
	irq_free_descs(virq_base, nr_irqs);
err_unmap:
	iounmap(base);
	return -ENXIO;
}
/*
 * Undo sunxi_gpio_probe(): unregister the gpio chip, release the shared
 * interrupt and virq descriptors (if irq support was enabled), unmap the
 * PIO registers and free the driver context.
 */
static int __devexit sunxi_gpio_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct sunxi_gpio_chip *sunxi_chip = platform_get_drvdata(pdev);

	pr_info("sunxi_gpio driver exit\n");

	ret = gpiochip_remove(&sunxi_chip->chip);
	if (ret < 0)
		pr_err("%s(): gpiochip_remove() failed, ret=%d\n",
		       __func__, ret);

	if (sunxi_chip->irq_base >= 0)
		free_irq(GPIO_IRQ_NO, sunxi_chip);

	sunxi_gpio_irq_remove(sunxi_chip);

	/*
	 * Fix: irq_base is -1 when descriptor allocation failed (or EINT
	 * support is absent); the old code called irq_free_descs() from
	 * that negative base unconditionally.  Mirror the guard used on
	 * the probe() error path.
	 */
	if (sunxi_chip->irq_base >= 0)
		irq_free_descs(sunxi_chip->irq_base, EINT_NUM);

	iounmap(sunxi_chip->gaddr);
	kfree(sunxi_chip->chip.names);
	kfree(sunxi_chip);

	return 0;
}
/** * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware * @cnt: number of interrupts to allocate * @node: node on which to allocate * * Returns an interrupt number > 0 or 0, if the allocation fails. */ unsigned int irq_alloc_hwirqs(int cnt, int node) { int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL); if (irq < 0) return 0; for (i = irq; cnt > 0; i++, cnt--) { if (arch_setup_hwirq(i, node)) goto err; irq_clear_status_flags(i, _IRQ_NOREQUEST); } return irq; err: for (i--; i >= irq; i--) { irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); arch_teardown_hwirq(i); } irq_free_descs(irq, cnt); return 0; }
/*
 * Probe: parse the [gpio_para] section of script.bin, map the PIO register
 * block, optionally wire up EINT interrupt support and register the gpio
 * chip.  Returns 0 on success or a negative errno.
 */
static int __devinit sunxi_gpio_probe(struct platform_device *pdev)
{
	int i;
	int err = 0;
	int names_size = 0;
	int gpio_used = 0;
	int gpio_num = 0;
	struct sunxi_gpio_data *gpio_i = NULL;
	struct sunxi_gpio_data *gpio_data = NULL;
	struct sunxi_gpio_chip *sunxi_chip = NULL;
	char **pnames = NULL;

	/* parse script.bin for [gpio_para] section gpio_used/gpio_num/gpio_pin_x */
	pr_info("sunxi_gpio driver init ver %s\n", SUNXI_GPIO_VER);

	err = script_parser_fetch("gpio_para", "gpio_used", &gpio_used,
				  sizeof(gpio_used)/sizeof(int));
	if (err) {
		/* Not error - just info */
		pr_info("%s can't find script.bin '[gpio_para]' 'gpio_used'\n",
			__func__);
		return err;
	}
	if (!gpio_used) {
		pr_info("%s gpio_used is false. Skip gpio initialization\n",
			__func__);
		return 0;
	}

	err = script_parser_fetch("gpio_para", "gpio_num", &gpio_num,
				  sizeof(gpio_num)/sizeof(int));
	if (err) {
		pr_err("%s script_parser_fetch '[gpio_para]' 'gpio_num' err\n",
		       __func__);
		return err;
	}
	if (!gpio_num) {
		pr_info("%s gpio_num is none. Skip gpio initialization\n",
			__func__);
		return 0;
	}

	/* Chip struct plus per-pin data array in one allocation. */
	sunxi_chip = kzalloc(sizeof(struct sunxi_gpio_chip) +
			     sizeof(struct sunxi_gpio_data) * gpio_num,
			     GFP_KERNEL);
	if (!sunxi_chip) {
		pr_err("%s kzalloc failed\n", __func__);
		return -ENOMEM;
	}
	gpio_data = (void *)sunxi_chip + sizeof(struct sunxi_gpio_chip);

	/*
	 * Pointer array plus fixed-size name strings in one chunk, to avoid
	 * 1+gpio_num kzalloc calls.
	 *
	 * Fix: the old code filled pnames[] *before* checking the
	 * allocation, dereferencing NULL on OOM.  Check first.
	 */
	names_size = sizeof(*pnames) * gpio_num +
		     sizeof(char) * MAX_GPIO_NAMELEN * gpio_num;
	pnames = kzalloc(names_size, GFP_KERNEL);
	if (!pnames) {
		pr_err("%s kzalloc failed\n", __func__);
		err = -ENOMEM;
		goto exit;
	}
	for (i = 0; i < gpio_num; i++)
		pnames[i] = (void *)pnames + sizeof(*pnames) * gpio_num +
			    i * MAX_GPIO_NAMELEN;

	/* Parse gpio_para/pin script data */
	gpio_i = gpio_data;
	for (i = 0; i < gpio_num; i++) {
		sprintf(gpio_i->pin_name, "gpio_pin_%d", i+1);
		err = script_parser_fetch("gpio_para", gpio_i->pin_name,
					  (int *)&gpio_i->info,
					  sizeof(script_gpio_set_t));
		if (err) {
			/*
			 * NOTE(review): a parse error only stops the loop;
			 * probing continues with the pins read so far and
			 * err is later overwritten.  Kept as-is (apparently
			 * best-effort) — confirm this is intended.
			 */
			pr_err("%s script_parser_fetch '[gpio_para]' '%s' err\n",
			       __func__, gpio_i->pin_name);
			break;
		}
		gpio_i++;
	}

	sunxi_chip->gaddr = ioremap(PIO_BASE_ADDRESS, PIO_RANGE_SIZE);
	if (!sunxi_chip->gaddr) {
		/*
		 * Fix: nothing is mapped yet, so jump past the iounmap —
		 * the old code jumped to the unmap label and called
		 * iounmap(NULL).
		 */
		pr_err("Can't request gpio registers memory\n");
		err = -EIO;
		goto exit;
	}

	sunxi_chip->dev = &pdev->dev;
	sunxi_chip->data = gpio_data;
	sunxi_chip->chip = template_chip;
	sunxi_chip->chip.ngpio = gpio_num;
	sunxi_chip->chip.dev = &pdev->dev;
	sunxi_chip->chip.label = "A1X_GPIO";
	sunxi_chip->chip.base = 1;
	sunxi_chip->chip.names = (const char *const *)pnames;
	sunxi_chip->irq_base = -1;	/* -1 == irq support disabled */

	/* configure EINTs for the detected SoC */
	sunxi_gpio_eint_probe();

	/* This needs additional system irq numbers (NR_IRQ=NR_IRQ+EINT_NUM) */
	if (EINT_NUM > 0) {
		sunxi_chip->irq_base = irq_alloc_descs(-1, 0, EINT_NUM, 0);
		if (sunxi_chip->irq_base < 0) {
			err = sunxi_chip->irq_base;
			pr_err("Couldn't allocate virq numbers err %d. "
			       "GPIO irq support disabled\n", err);
		}
	} else {
		pr_info("GPIO irq support disabled in this platform\n");
	}

	spin_lock_init(&sunxi_chip->irq_lock);
	sunxi_gpio_irq_init(sunxi_chip);

	if (sunxi_chip->irq_base >= 0) {
		err = request_irq(GPIO_IRQ_NO, sunxi_gpio_irq_handler,
				  IRQF_SHARED, "sunxi-gpio", sunxi_chip);
		if (err) {
			pr_err("Can't request irq %d\n", GPIO_IRQ_NO);
			goto irqchip;
		}
	}

	err = gpiochip_add(&sunxi_chip->chip);
	if (err < 0)
		goto irqhdl;

	platform_set_drvdata(pdev, sunxi_chip);
	return 0;

irqhdl:
	if (sunxi_chip->irq_base >= 0)
		free_irq(GPIO_IRQ_NO, sunxi_chip);
irqchip:
	sunxi_gpio_irq_remove(sunxi_chip);
	if (sunxi_chip->irq_base >= 0)
		irq_free_descs(sunxi_chip->irq_base, EINT_NUM);
	iounmap(sunxi_chip->gaddr);
exit:
	kfree(sunxi_chip);
	kfree(pnames);
	return err;
}
/**
 * irq_sim_fini - Deinitialize the interrupt simulator: free the interrupt
 *                descriptors and allocated memory.
 *
 * @sim:        The interrupt simulator to tear down.
 */
void irq_sim_fini(struct irq_sim *sim)
{
	/* Wait for any pending irq_work to finish before freeing state. */
	irq_work_sync(&sim->work_ctx.work);
	/* Give back the descriptor range reserved at init time. */
	irq_free_descs(sim->irq_base, sim->irq_count);
	kfree(sim->irqs);
}
static int sch_gpio_probe(struct platform_device *pdev) { struct resource *res; struct sch_gpio *chip; int err, id; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; chip_ptr = chip; sch_gpio_core_save_state(&(chip->initial_core)); sch_gpio_resume_save_state(&(chip->initial_resume)); id = pdev->id; if (!id) return -ENODEV; /* Get UIO memory */ info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) return -EBUSY; if (!request_region(res->start, resource_size(res), pdev->name)) return -EBUSY; gpio_ba = res->start; irq_num = RESOURCE_IRQ; switch (id) { case PCI_DEVICE_ID_INTEL_SCH_LPC: sch_gpio_core.base = 0; sch_gpio_core.ngpio = 10; sch_gpio_resume.base = 10; sch_gpio_resume.ngpio = 4; /* * GPIO[6:0] enabled by default * GPIO7 is configured by the CMC as SLPIOVR * Enable GPIO[9:8] core powered gpios explicitly */ outb(0x3, gpio_ba + CGEN + 1); /* * SUS_GPIO[2:0] enabled by default * Enable SUS_GPIO3 resume powered gpio explicitly */ outb(0x8, gpio_ba + RGEN); break; case PCI_DEVICE_ID_INTEL_ITC_LPC: sch_gpio_core.base = 0; sch_gpio_core.ngpio = 5; sch_gpio_resume.base = 5; sch_gpio_resume.ngpio = 9; break; case PCI_DEVICE_ID_INTEL_CENTERTON_ILB: sch_gpio_core.base = 0; sch_gpio_core.ngpio = 21; sch_gpio_resume.base = 21; sch_gpio_resume.ngpio = 9; break; case PCI_DEVICE_ID_INTEL_QUARK_ILB: sch_gpio_core.base = 0; sch_gpio_core.ngpio = 2; sch_gpio_resume.base = 2; sch_gpio_resume.ngpio = 6; break; default: err = -ENODEV; goto err_sch_gpio_core; } sch_gpio_core.dev = &pdev->dev; sch_gpio_resume.dev = &pdev->dev; err = gpiochip_add(&sch_gpio_core); if (err < 0) goto err_sch_gpio_core; err = gpiochip_add(&sch_gpio_resume); if (err < 0) goto err_sch_gpio_resume; chip->irq_base_core = irq_alloc_descs(-1, 0, sch_gpio_core.ngpio, NUMA_NO_NODE); if (chip->irq_base_core < 0) { dev_err(&pdev->dev, "failure adding GPIO core IRQ descs\n"); chip->irq_base_core = -1; 
goto err_sch_intr_core; } chip->irq_base_resume = irq_alloc_descs(-1, 0, sch_gpio_resume.ngpio, NUMA_NO_NODE); if (chip->irq_base_resume < 0) { dev_err(&pdev->dev, "failure adding GPIO resume IRQ descs\n"); chip->irq_base_resume = -1; goto err_sch_intr_resume; } platform_set_drvdata(pdev, chip); err = platform_device_register(&qrk_gpio_restrict_pdev); if (err < 0) goto err_sch_gpio_device_register; /* disable interrupts */ sch_gpio_core_irq_disable_all(chip, sch_gpio_core.ngpio); sch_gpio_resume_irq_disable_all(chip, sch_gpio_resume.ngpio); err = request_irq(irq_num, sch_gpio_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip); if (err != 0) { dev_err(&pdev->dev, "%s request_irq failed\n", __func__); goto err_sch_request_irq; } sch_gpio_core_irqs_init(chip, sch_gpio_core.ngpio); sch_gpio_resume_irqs_init(chip, sch_gpio_resume.ngpio); /* UIO */ info->port[0].name = "gpio_regs"; info->port[0].start = res->start; info->port[0].size = resource_size(res); info->port[0].porttype = UIO_PORT_X86; info->name = "sch_gpio"; info->version = "0.0.1"; if (uio_register_device(&pdev->dev, info)) goto err_sch_uio_register; pr_info("%s UIO port addr 0x%04x size %lu porttype %d\n", __func__, (unsigned int)info->port[0].start, info->port[0].size, info->port[0].porttype); return 0; err_sch_uio_register: free_irq(irq_num, chip); err_sch_request_irq: platform_device_unregister(&qrk_gpio_restrict_pdev); err_sch_gpio_device_register: irq_free_descs(chip->irq_base_resume, sch_gpio_resume.ngpio); err_sch_intr_resume: irq_free_descs(chip->irq_base_core, sch_gpio_core.ngpio); err_sch_intr_core: gpiochip_remove(&sch_gpio_resume); err_sch_gpio_resume: gpiochip_remove(&sch_gpio_core); err_sch_gpio_core: release_region(res->start, resource_size(res)); gpio_ba = 0; sch_gpio_resume_restore_state(&(chip->initial_resume)); sch_gpio_core_restore_state(&(chip->initial_core)); kfree(chip); chip_ptr = 0; if (info != NULL) kfree(info); return err; }
/** * irq_reserve_ipi() - Setup an IPI to destination cpumask * @domain: IPI domain * @dest: cpumask of cpus which can receive the IPI * * Allocate a virq that can be used to send IPI to any CPU in dest mask. * * On success it'll return linux irq number and error code on failure */ int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest) { unsigned int nr_irqs, offset; struct irq_data *data; int virq, i; if (!domain ||!irq_domain_is_ipi(domain)) { pr_warn("Reservation on a non IPI domain\n"); return -EINVAL; } if (!cpumask_subset(dest, cpu_possible_mask)) { pr_warn("Reservation is not in possible_cpu_mask\n"); return -EINVAL; } nr_irqs = cpumask_weight(dest); if (!nr_irqs) { pr_warn("Reservation for empty destination mask\n"); return -EINVAL; } if (irq_domain_is_ipi_single(domain)) { /* * If the underlying implementation uses a single HW irq on * all cpus then we only need a single Linux irq number for * it. We have no restrictions vs. the destination mask. The * underlying implementation can deal with holes nicely. */ nr_irqs = 1; offset = 0; } else { unsigned int next; /* * The IPI requires a seperate HW irq on each CPU. We require * that the destination mask is consecutive. If an * implementation needs to support holes, it can reserve * several IPI ranges. */ offset = cpumask_first(dest); /* * Find a hole and if found look for another set bit after the * hole. For now we don't support this scenario. 
*/ next = cpumask_next_zero(offset, dest); if (next < nr_cpu_ids) next = cpumask_next(next, dest); if (next < nr_cpu_ids) { pr_warn("Destination mask has holes\n"); return -EINVAL; } } virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE); if (virq <= 0) { pr_warn("Can't reserve IPI, failed to alloc descs\n"); return -ENOMEM; } virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE, (void *) dest, true); if (virq <= 0) { pr_warn("Can't reserve IPI, failed to alloc hw irqs\n"); goto free_descs; } for (i = 0; i < nr_irqs; i++) { data = irq_get_irq_data(virq + i); cpumask_copy(data->common->affinity, dest); data->common->ipi_offset = offset; irq_set_status_flags(virq + i, IRQ_NO_BALANCING); } return virq; free_descs: irq_free_descs(virq, nr_irqs); return -EBUSY; }