/*
 * Initialise the Xen PV console (hvc) during early boot.
 *
 * Binds the console interrupt source — a VIRQ in dom0, an event channel
 * in domU — and registers an hvc console instance with the bound irq.
 * Returns 0 on success, -ENODEV when no PV console exists, or the
 * error from hvc_alloc().
 */
static int __init xen_hvc_init(void)
{
	struct hvc_struct *hp;
	struct hv_ops *ops;

	/* The PV console only exists in paravirtualised domains. */
	if (!xen_pv_domain())
		return -ENODEV;

	if (xen_initial_domain()) {
		/* dom0: console notifications arrive on VIRQ_CONSOLE. */
		ops = &dom0_hvc_ops;
		xencons_irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
	} else {
		/* domU: the toolstack supplies an event channel in start_info. */
		if (!xen_start_info->console.domU.evtchn)
			return -ENODEV;
		ops = &domU_hvc_ops;
		xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn);
	}
	if (xencons_irq < 0)
		xencons_irq = 0; /* NO_IRQ */
	else
		/* Keep the console irq out of autoprobing. */
		irq_set_noprobe(xencons_irq);

	hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
	if (IS_ERR(hp))
		/* NOTE(review): a successfully bound xencons_irq is not
		 * unbound on this failure path — possible irq leak;
		 * confirm against bind_*_to_irq() ownership rules. */
		return PTR_ERR(hp);

	hvc = hp;

	/* NOTE(review): domU.mfn is read even in the dom0 case — presumably
	 * the start_info layout is shared; confirm this is intentional. */
	console_pfn = mfn_to_pfn(xen_start_info->console.domU.mfn);

	return 0;
}
static int pca953x_irq_setup(struct pca953x_chip *chip, const struct i2c_device_id *id) { struct i2c_client *client = chip->client; struct pca953x_platform_data *pdata = client->dev.platform_data; int ret; if (pdata->irq_base != -1 && (id->driver_data & PCA953X_INT)) { int lvl; ret = pca953x_read_reg(chip, PCA953X_INPUT, &chip->irq_stat); if (ret) goto out_failed; /* * There is no way to know which GPIO line generated the * interrupt. We have to rely on the previous read for * this purpose. */ chip->irq_stat &= chip->reg_direction; chip->irq_base = pdata->irq_base; mutex_init(&chip->irq_lock); for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) { int irq = lvl + chip->irq_base; irq_set_chip_data(irq, chip); irq_set_chip_and_handler(irq, &pca953x_irq_chip, handle_edge_irq); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif } ret = request_threaded_irq(client->irq, NULL, pca953x_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, dev_name(&client->dev), chip); if (ret) { dev_err(&client->dev, "failed to request irq %d\n", client->irq); goto out_failed; } chip->gpio_chip.to_irq = pca953x_gpio_to_irq; } return 0; out_failed: chip->irq_base = -1; return ret; }
/*
 * irq_domain map callback: attach a freshly mapped virq to the SA1100
 * GPIO irqchip with edge-triggered flow handling, and keep the line
 * out of interrupt autoprobing.
 */
static int sa1100_gpio_irqdomain_map(struct irq_domain *d, unsigned int irq,
				     irq_hw_number_t hwirq)
{
	irq_set_noprobe(irq);
	irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip, handle_edge_irq);

	return 0;
}
/*
 * Make an irq requestable by drivers while keeping it out of
 * autoprobing.  On ARM the genirq core marks an irq_chip's interrupts
 * IRQ_NOREQUEST until set_irq_flags(..., IRQF_VALID) clears that (and
 * sets noprobe as a side effect); other architectures only need the
 * explicit noprobe marking.
 */
static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
	set_irq_flags(irq, IRQF_VALID);
#else
	irq_set_noprobe(irq);
#endif
}
/* Make an irq requestable by drivers while keeping it out of autoprobe. */
static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE. */
	set_irq_flags(irq, IRQF_VALID);
#else
	/* Same effect on other architectures: just exclude from autoprobe. */
	irq_set_noprobe(irq);
#endif
}
/*
 * Set up interrupt support for the ADP5588 GPIO expander: clear any
 * latched status, register one virtual irq per GPIO (level-flow,
 * nested-thread handlers since I2C access sleeps), request the chip's
 * summary interrupt, then enable interrupt generation in hardware.
 * Returns 0 on success or a negative errno; on failure irq_base is
 * reset to 0.
 */
static int adp5588_irq_setup(struct adp5588_gpio *dev)
{
	struct i2c_client *client = dev->client;
	/* NOTE(review): pdata is dereferenced below without a NULL check —
	 * dev_get_platdata() can return NULL; confirm callers guarantee it. */
	struct adp5588_gpio_platform_data *pdata =
			dev_get_platdata(&client->dev);
	unsigned gpio;
	int ret;

	adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC);
	adp5588_gpio_write(client, INT_STAT, -1);	/* status is W1C */
	adp5588_gpio_read_intstat(client, dev->irq_stat); /* read to clear */

	dev->irq_base = pdata->irq_base;
	mutex_init(&dev->irq_lock);

	for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) {
		int irq = gpio + dev->irq_base;

		irq_set_chip_data(irq, dev);
		irq_set_chip_and_handler(irq, &adp5588_irq_chip,
					 handle_level_irq);
		irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
		/*
		 * ARM needs us to explicitly flag the IRQ as VALID,
		 * once we do so, it will also set the noprobe.
		 */
		set_irq_flags(irq, IRQF_VALID);
#else
		irq_set_noprobe(irq);
#endif
	}

	ret = request_threaded_irq(client->irq, NULL, adp5588_irq_handler,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(&client->dev), dev);
	if (ret) {
		dev_err(&client->dev, "failed to request irq %d\n",
			client->irq);
		goto out;
	}

	dev->gpio_chip.to_irq = adp5588_gpio_to_irq;
	/* Only now enable interrupt generation on the chip itself. */
	adp5588_gpio_write(client, CFG,
		ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_GPI_INT);

	return 0;

out:
	dev->irq_base = 0;
	return ret;
}
struct pm_irq_chip * pm8xxx_irq_init(struct device *dev, const struct pm8xxx_irq_platform_data *pdata) { struct pm_irq_chip *chip; int devirq, rc; unsigned int pmirq; if (!pdata) { pr_err("No platform data\n"); return ERR_PTR(-EINVAL); } devirq = pdata->devirq; if (devirq < 0) { pr_err("missing devirq\n"); rc = devirq; return ERR_PTR(-EINVAL); } chip = kzalloc(sizeof(struct pm_irq_chip) + sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL); if (!chip) { pr_err("Cannot alloc pm_irq_chip struct\n"); return ERR_PTR(-EINVAL); } chip->dev = dev; chip->devirq = devirq; chip->irq_base = pdata->irq_base; chip->num_irqs = pdata->irq_cdata.nirqs; chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8); chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8); spin_lock_init(&chip->pm_irq_lock); for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) { irq_set_chip_and_handler(chip->irq_base + pmirq, &pm8xxx_irq_chip, handle_level_irq); irq_set_chip_data(chip->irq_base + pmirq, chip); #ifdef CONFIG_ARM set_irq_flags(chip->irq_base + pmirq, IRQF_VALID); #else irq_set_noprobe(chip->irq_base + pmirq); #endif } irq_set_irq_type(devirq, pdata->irq_trigger_flag); irq_set_handler_data(devirq, chip); irq_set_chained_handler(devirq, pm8xxx_irq_handler); set_irq_wake(devirq, 1); return chip; }
/*
 * irq_domain map callback for the Arizona boot/pseudo irqs: wire the
 * virq to the Arizona irqchip with simple flow handling.  Handlers run
 * in a nested thread (regmap I/O over I2C/SPI sleeps) and the line is
 * excluded from autoprobing.
 */
static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
			   irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *chip_data = h->host_data;

	irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
	irq_set_chip_data(virq, chip_data);
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);

	return 0;
}
/*
 * irq_domain map callback: attach a virq to this regmap irqchip
 * instance.  The flow handler is left to the core; handlers are run
 * from a nested thread because regmap bus access can sleep, and the
 * line is kept out of autoprobing.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *chip_data = h->host_data;

	irq_set_chip(virq, &chip_data->irq_chip);
	irq_set_chip_data(virq, chip_data);
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);

	return 0;
}
/*
 * Set up interrupt support for the MAX732x GPIO expander, if the chip
 * variant has an INT line and platform data supplies an irq_base.
 * Only input-configured pins get a virtual irq (edge flow, nested
 * thread since I2C access sleeps).  Returns 0 on success (including
 * the no-irq case) or a negative errno; on failure irq_base is reset.
 */
static int max732x_irq_setup(struct max732x_chip *chip,
			     const struct i2c_device_id *id)
{
	struct i2c_client *client = chip->client;
	/* NOTE(review): pdata is dereferenced without a NULL check —
	 * confirm callers only reach here with platform data present. */
	struct max732x_platform_data *pdata = client->dev.platform_data;
	/* Upper 32 bits of the feature word encode the irq capability. */
	int has_irq = max732x_features[id->driver_data] >> 32;
	int ret;

	if (pdata->irq_base && has_irq != INT_NONE) {
		int lvl;

		chip->irq_base = pdata->irq_base;
		chip->irq_features = has_irq;
		mutex_init(&chip->irq_lock);

		for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
			int irq = lvl + chip->irq_base;

			/* Only input pins can generate interrupts. */
			if (!(chip->dir_input & (1 << lvl)))
				continue;

			irq_set_chip_data(irq, chip);
			irq_set_chip_and_handler(irq, &max732x_irq_chip,
						 handle_edge_irq);
			irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
			set_irq_flags(irq, IRQF_VALID);
#else
			irq_set_noprobe(irq);
#endif
		}

		ret = request_threaded_irq(client->irq, NULL,
					   max732x_irq_handler,
					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					   dev_name(&client->dev), chip);
		if (ret) {
			dev_err(&client->dev, "failed to request irq %d\n",
				client->irq);
			goto out_failed;
		}

		chip->gpio_chip.to_irq = max732x_gpio_to_irq;
	}

	return 0;

out_failed:
	chip->irq_base = 0;
	return ret;
}
/*
 * irq_domain map callback for the MCP2210: attach the device irqchip
 * to a newly mapped virq.  Handlers run from a nested thread (USB I/O
 * sleeps); on ARM the irq must additionally be flagged valid.
 */
static int mcp2210_irq_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip(irq, &mcp2210_irq_chip);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_nested_thread(irq, true);
#ifdef CONFIG_ARM
	set_irq_flags(irq, IRQF_VALID);
#else
	irq_set_noprobe(irq);
#endif

	return 0;
}
/*
 * irq_domain map callback for the MAX8997 PMIC: bind a virq to the
 * MAX8997 irqchip with edge flow handling.  Handlers run nested in the
 * parent's thread (I2C access sleeps); on ARM flag the irq valid,
 * elsewhere just keep it out of autoprobe.
 */
static int max8997_irq_domain_map(struct irq_domain *d, unsigned int irq,
				  irq_hw_number_t hw)
{
	struct max8997_dev *chip_data = d->host_data;

	irq_set_chip_and_handler(irq, &max8997_irq_chip, handle_edge_irq);
	irq_set_chip_data(irq, chip_data);
	irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
	set_irq_flags(irq, IRQF_VALID);
#else
	irq_set_noprobe(irq);
#endif

	return 0;
}
/*
 * irq_domain map callback for the TC3589x: the real demux happens in
 * the threaded handler, so a dummy irqchip with edge flow handling is
 * sufficient here.  Handlers are nested (I2C access sleeps).
 */
static int tc3589x_irq_map(struct irq_domain *d, unsigned int virq,
			   irq_hw_number_t hwirq)
{
	struct tc3589x *chip_data = d->host_data;

	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_edge_irq);
	irq_set_chip_data(virq, chip_data);
	irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}
/**
 * gb_gpio_irq_map() - map an IRQ onto the Greybus GPIO irqchip
 * @domain: the irqdomain used by this irqchip
 * @irq: the global irq number used by this GB gpio irqchip irq
 * @hwirq: the local IRQ/GPIO line offset on this GB gpio
 *
 * Associates @irq with the controller's irqchip and flow handler,
 * stores the controller as chip data, and excludes the line from
 * autoprobing.  A trigger type is only programmed when the controller
 * specifies one.
 */
static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	struct gpio_chip *chip = domain->host_data;
	struct gb_gpio_controller *ggc =
		gpio_chip_to_gb_gpio_controller(chip);

	irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
	irq_set_chip_data(irq, ggc);
	irq_set_noprobe(irq);

	/*
	 * IRQ_TYPE_NONE as the default means "leave the hardware
	 * untouched" — skip the type setup in that case.
	 */
	if (ggc->irq_default_type != IRQ_TYPE_NONE)
		irq_set_irq_type(irq, ggc->irq_default_type);

	return 0;
}
/*
 * irq_domain map callback for QPNP interrupts: validate the hardware
 * irq number, record the virq in the radix revmap, allocate and
 * initialise the per-irq data, then register the virq with genirq as
 * a level interrupt.  Returns 0 on success or a negative errno.
 *
 * Fix: irq_hw_number_t is unsigned, so the original "hwirq < 0" test
 * was always false (dead code that compilers warn about); the
 * upper-bound check alone is the real range test.
 */
static int qpnpint_irq_domain_map(struct irq_domain *d,
				  unsigned int virq, irq_hw_number_t hwirq)
{
	struct q_chip_data *chip_d = d->host_data;
	struct q_irq_data *irq_d;
	int rc;

	pr_debug("hwirq = %lu\n", hwirq);

	if (hwirq >= QPNPINT_NR_IRQS) {
		pr_err("hwirq %lu out of bounds\n", hwirq);
		return -EINVAL;
	}

	irq_radix_revmap_insert(d, virq, hwirq);

	irq_d = qpnpint_alloc_irq_data(chip_d, hwirq);
	if (IS_ERR(irq_d)) {
		pr_err("failed to alloc irq data for hwirq %lu\n", hwirq);
		return PTR_ERR(irq_d);
	}

	rc = qpnpint_init_irq_data(chip_d, irq_d, hwirq);
	if (rc) {
		pr_err("failed to init irq data for hwirq %lu\n", hwirq);
		goto map_err;
	}

	irq_set_chip_and_handler(virq, &qpnpint_chip, handle_level_irq);
	irq_set_chip_data(virq, irq_d);
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif
	return 0;

map_err:
	qpnpint_free_irq_data(irq_d);
	return rc;
}
/*
 * Register the TC3589x internal interrupts with genirq.  The actual
 * source demux happens in the threaded handler, so a dummy irqchip
 * with edge flow handling suffices; handlers are nested because I2C
 * access sleeps.  Always returns 0.
 */
static int tc3589x_irq_init(struct tc3589x *tc3589x)
{
	int offset;

	for (offset = 0; offset < TC3589x_NR_INTERNAL_IRQS; offset++) {
		int irq = tc3589x->irq_base + offset;

		irq_set_chip_data(irq, tc3589x);
		irq_set_chip_and_handler(irq, &dummy_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
		set_irq_flags(irq, IRQF_VALID);
#else
		irq_set_noprobe(irq);
#endif
	}

	return 0;
}
/*
 * irq_domain map callback for the WM8994 edge-triggered interrupt:
 * bind the virq to the edge irqchip with a nested-thread handler
 * (regmap I/O sleeps).  On ARM the irq must be explicitly flagged
 * valid, which also marks it noprobe; elsewhere set noprobe directly.
 */
static int wm8994_edge_irq_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	struct wm8994 *chip_data = h->host_data;

	irq_set_chip_and_handler(virq, &wm8994_edge_irq_chip,
				 handle_edge_irq);
	irq_set_chip_data(virq, chip_data);
	irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}
static int pmic_irq_init(void) { int cur_irq; int ret; pmic->irq_mask = 0xff; intel_mid_pmic_writeb(MIRQLVL1, pmic->irq_mask); pmic->irq_mask = intel_mid_pmic_readb(MIRQLVL1); pmic->irq_base = irq_alloc_descs(VV_PMIC_IRQBASE, 0, PMIC_IRQ_NUM, 0); if (pmic->irq_base < 0) { dev_warn(pmic->dev, "Failed to allocate IRQs: %d\n", pmic->irq_base); pmic->irq_base = 0; return -EINVAL; } /* Register them with genirq */ for (cur_irq = pmic->irq_base; cur_irq < PMIC_IRQ_NUM + pmic->irq_base; cur_irq++) { irq_set_chip_data(cur_irq, pmic); irq_set_chip_and_handler(cur_irq, &pmic_irq_chip, handle_edge_irq); irq_set_nested_thread(cur_irq, 1); irq_set_noprobe(cur_irq); } ret = request_threaded_irq(pmic->irq, pmic_irq_isr, pmic_irq_thread, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "intel_mid_pmic", pmic); if (ret != 0) { dev_err(pmic->dev, "Failed to request IRQ %d: %d\n", pmic->irq, ret); return ret; } ret = enable_irq_wake(pmic->irq); if (ret != 0) { dev_warn(pmic->dev, "Can't enable PMIC IRQ as wake source: %d\n", ret); } return 0; }
/*
 * irq_domain map callback: attach a virq to this regmap irqchip
 * instance.  Handlers run from a nested thread (regmap bus access can
 * sleep).  On ARM the irq must be explicitly flagged valid, which also
 * marks it noprobe; other architectures set noprobe directly.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *chip_data = h->host_data;

	irq_set_chip(virq, &chip_data->irq_chip);
	irq_set_chip_data(virq, chip_data);
	irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}
/******************************************************************************
 * probe & remove
 */

/*
 * Discover which MCP2210 pins are configured as interrupt sources,
 * allocate a block of irq descriptors for them, and start the polling
 * commands that emulate interrupts over USB.  Returns 0 on success
 * (including when no pin has an irq) or a negative errno.
 */
int mcp2210_irq_probe(struct mcp2210_device *dev)
{
	uint i;
	int ret;

	mcp2210_info();

	mutex_init(&dev->irq_lock);
	dev->nr_irqs = 0;
	dev->poll_intr = 0;
	dev->poll_gpio = 0;

	/* First pass: count irq-capable pins and build the reverse map. */
	for (i = 0; i < MCP2210_NUM_PINS; ++i) {
		const struct mcp2210_pin_config *pin = &dev->config->pins[i];

		if (pin->mode == MCP2210_PIN_SPI || !pin->has_irq)
			continue;

		++dev->nr_irqs;
		BUG_ON(dev->irq_revmap[i]);
		dev->irq_revmap[i] = pin->irq;

		if (pin->mode == MCP2210_PIN_DEDICATED)
			dev->poll_intr = 1;
		else if (pin->mode == MCP2210_PIN_GPIO) {
			dev->poll_gpio = 1;
			dev->irq_type[i] = pin->irq_type;
		}
	}

	if (!dev->nr_irqs)
		return 0;

	ret = irq_alloc_descs(-1, 0, dev->nr_irqs, 0);
	if (ret < 0) {
		/* CONFIG_SPARSE_IRQ needed? */
		mcp2210_err("Failed to allocate %u irq descriptors: %d",
			    dev->nr_irqs, ret);
		return ret;
	}

	dev->irq_base = ret;
	for (i = 0; i < dev->nr_irqs; ++i) {
		int virq = dev->irq_base + i;

		dev->irq_descs[i] = irq_to_desc(virq);
		BUG_ON(!dev->irq_descs[i]);

		irq_set_chip_data(virq, dev);
		irq_set_chip(virq, &mcp2210_irq_chip);
#if defined(CONFIG_ARM) && LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)
		/* NOTE(review): flags of 0 (not IRQF_VALID) — presumably
		 * intentional for this out-of-tree driver; confirm. */
		set_irq_flags(virq, 0);
#else
		irq_set_noprobe(virq);
#endif
	}

#ifdef CONFIG_MCP2210_GPIO
	if (dev->poll_gpio) {
		/* Kick off periodic GPIO state polling. */
		ctl_cmd_init(dev, &dev->cmd_poll_gpio,
			     MCP2210_CMD_GET_PIN_VALUE, 0, NULL, 0, false);
		dev->cmd_poll_gpio.head.complete = complete_poll;
		mcp2210_add_cmd(&dev->cmd_poll_gpio.head, false);
	}
#endif /* CONFIG_MCP2210_GPIO */

	if (dev->poll_intr) {
		/* read and then reset */
		ctl_cmd_init(dev, &dev->cmd_poll_intr,
			     MCP2210_CMD_GET_INTERRUPTS, 0, NULL, 0, false);
		dev->cmd_poll_intr.head.complete = complete_poll;
		mcp2210_add_cmd(&dev->cmd_poll_intr.head, false);
	}

	dev->is_irq_probed = 1;
	dev->suppress_poll_warn = 0;

	return 0;
}
/*
 * Initialise MAX14577 interrupt handling: translate the irq GPIO to an
 * irq, unmask the individual sources in hardware, register the virtual
 * irqs with genirq (edge flow, nested thread — I2C access sleeps), and
 * request the chip interrupt as a threaded handler.
 * Returns 0 on success (also when no irq is configured) or a negative
 * errno.
 */
int max14577_irq_init(struct max14577_dev *max14577)
{
	struct max14577_platform_data *pdata = max14577->pdata;
	struct i2c_client *i2c = max14577->i2c;
	int i;
	int cur_irq;
	int ret;

	if (!pdata->irq_gpio) {
		pr_warn("%s:%s No interrupt specified.\n",
				MFD_DEV_NAME, __func__);
		pdata->irq_base = 0;
		return 0;
	}

	if (!pdata->irq_base) {
		pr_err("%s:%s No interrupt base specified.\n",
				MFD_DEV_NAME, __func__);
		return 0;
	}

	mutex_init(&max14577->irq_lock);

	max14577->irq = gpio_to_irq(pdata->irq_gpio);
	/* Claim the GPIO only long enough to set its direction. */
	ret = gpio_request(pdata->irq_gpio, "max14577_irq");
	if (ret) {
		pr_err("%s:%s failed requesting gpio(%d)\n",
				MFD_DEV_NAME, __func__, pdata->irq_gpio);
		return ret;
	}
	gpio_direction_input(pdata->irq_gpio);
	gpio_free(pdata->irq_gpio);

	pr_info("%s:%s\n", MFD_DEV_NAME, __func__);

#ifdef CONFIG_MFD_MAX77836
	/* MAX77836 INTSRC for MUIC UNMASK */
	max14577_write_reg(max14577->i2c_pmic,
			MAX77836_PMIC_REG_INTSRC_MASK, 0xF7);
#endif

	/* Mask individual interrupt sources */
	for (i = 0; i < MAX14577_IRQ_REGS_NUM; i++) {
		/* IRQ 0:MASK 1:NOT MASK */
		max14577->irq_masks_cur[i] = 0x00;
		max14577->irq_masks_cache[i] = 0x00;

		if (IS_ERR_OR_NULL(i2c))
			continue;
		if (max14577_mask_reg[i] == MAX14577_REG_INVALID)
			continue;

		max14577_write_reg(i2c, max14577_mask_reg[i],
				max14577->irq_masks_cur[i]);
	}

	/* Register with genirq */
	for (i = 0; i < MAX14577_IRQ_NUM; i++) {
		cur_irq = i + pdata->irq_base;
		irq_set_chip_data(cur_irq, max14577);
		irq_set_chip_and_handler(cur_irq, &max14577_irq_chip,
				handle_edge_irq);
		irq_set_nested_thread(cur_irq, true);
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	ret = request_threaded_irq(max14577->irq, NULL, max14577_irq_thread,
			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
			"max14577-irq", max14577);
	if (ret) {
		pr_err("%s:%s Failed to request IRQ(%d) ret(%d)\n",
				MFD_DEV_NAME, __func__, max14577->irq, ret);
		return ret;
	}

	return 0;
}
/*
 * Initialise MAX77828 interrupt handling: translate the irq GPIO to an
 * irq, program each group's hardware mask (MUIC groups are unmasked
 * with 0x00, all others masked with 0xff), register the virtual irqs
 * with genirq (level flow, nested thread — I2C access sleeps), and
 * request the chip interrupt as a threaded handler.
 * Returns 0 on success (also when no irq is configured) or a negative
 * errno.
 */
int max77828_irq_init(struct max77828_dev *max77828)
{
	int i;
	int cur_irq;
	int ret;

	if (!max77828->irq_gpio) {
		dev_warn(max77828->dev, "No interrupt specified.\n");
		max77828->irq_base = 0;
		return 0;
	}

	if (!max77828->irq_base) {
		dev_err(max77828->dev, "No interrupt base specified.\n");
		return 0;
	}

	mutex_init(&max77828->irqlock);

	max77828->irq = gpio_to_irq(max77828->irq_gpio);
	pr_info("%s:%s irq=%d, irq->gpio=%d\n", MFD_DEV_NAME, __func__,
			max77828->irq, max77828->irq_gpio);

	/* Claim the GPIO only long enough to set its direction. */
	ret = gpio_request(max77828->irq_gpio, "if_pmic_irq");
	if (ret) {
		dev_err(max77828->dev, "%s: failed requesting gpio %d\n",
				__func__, max77828->irq_gpio);
		return ret;
	}
	gpio_direction_input(max77828->irq_gpio);
	gpio_free(max77828->irq_gpio);

	/* Mask individual interrupt sources */
	for (i = 0; i < MAX77828_IRQ_GROUP_NR; i++) {
		struct i2c_client *i2c;
		/* MUIC IRQ  0:MASK 1:NOT MASK */
		/* Other IRQ 1:MASK 0:NOT MASK */
		if (i >= MUIC_INT1 && i <= MUIC_INT3) {
			max77828->irq_masks_cur[i] = 0x00;
			max77828->irq_masks_cache[i] = 0x00;
		} else {
			max77828->irq_masks_cur[i] = 0xff;
			max77828->irq_masks_cache[i] = 0xff;
		}
		i2c = get_i2c(max77828, i);

		if (IS_ERR_OR_NULL(i2c))
			continue;
		if (max77828_mask_reg[i] == MAX77828_REG_INVALID)
			continue;
		if (i >= MUIC_INT1 && i <= MUIC_INT3)
			max77828_write_reg(i2c, max77828_mask_reg[i], 0x00);
		else
			max77828_write_reg(i2c, max77828_mask_reg[i], 0xff);
	}

	/* Register with genirq */
	for (i = 0; i < MAX77828_IRQ_NR; i++) {
		cur_irq = i + max77828->irq_base;
		irq_set_chip_data(cur_irq, max77828);
		irq_set_chip_and_handler(cur_irq, &max77828_irq_chip,
				handle_level_irq);
		irq_set_nested_thread(cur_irq, 1);
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	ret = request_threaded_irq(max77828->irq, NULL, max77828_irq_thread,
			IRQF_TRIGGER_LOW | IRQF_ONESHOT,
			"max77828-irq", max77828);
	if (ret) {
		dev_err(max77828->dev, "Failed to request IRQ %d: %d\n",
				max77828->irq, ret);
		return ret;
	}

	return 0;
}
/*
 * SPI probe for the Motorola EZX PCAP ASIC: configure the SPI link,
 * set up the PCAP irqchip (one virtual irq per PCAP source, demuxed by
 * a chained handler on the SPI irq), request the ADC-done interrupt,
 * and register the configured subdevices.  Returns 0 on success or a
 * negative errno with all acquired resources released.
 */
static int ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

	/* platform data is required */
	if (!pdata)
		goto ret;

	pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	mutex_init(&pcap->io_mutex);
	mutex_init(&pcap->adc_mutex);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	spi_set_drvdata(spi, pcap);

	/* setup spi */
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto ret;

	pcap->spi = spi;

	/* setup irq */
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto ret;
	}

	/* redirect interrupts to AP, except adcdone2 */
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
					(1 << PCAP_IRQ_ADCDONE2));

	/* setup irq chip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		irq_set_chip_and_handler(i, &pcap_irq_chip,
					 handle_simple_irq);
		irq_set_chip_data(i, pcap);
#ifdef CONFIG_ARM
		set_irq_flags(i, IRQF_VALID);
#else
		irq_set_noprobe(i);
#endif
	}

	/* mask/ack all PCAP interrupts */
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	/* Chain the PCAP demux handler off the SPI irq (wake capable). */
	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_handler_data(spi->irq, pcap);
	irq_set_chained_handler(spi->irq, pcap_irq_handler);
	irq_set_irq_wake(spi->irq, 1);

	/* ADC */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
				PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
			       pcap);
	if (ret)
		goto free_irqchip;

	/* setup subdevs */
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

	/* board specific quirks */
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
free_irqchip:
	/* Detach the per-source irqchip; then fall through to tear down
	 * the workqueue. */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
	destroy_workqueue(pcap->workqueue);
ret:
	return ret;
}
/*
 * Initialise interrupt handling for an Arizona-class codec: pick the
 * AOD and main regmap irqchip tables for the device type, configure
 * irq polarity, allocate the two chained virtual irqs, add the regmap
 * irqchips behind them, and request the boot-done / control-interface
 * error / primary interrupts.  Returns 0 on success or a negative
 * errno with previously requested irqs and irqchips released.
 */
int arizona_irq_init(struct arizona *arizona)
{
	int flags = IRQF_ONESHOT;
	int ret, i;
	struct regmap_irq_chip *aod, *irq;
	/* Rev-0 silicon of both parts cannot report ctrlif errors. */
	bool ctrlif_error = true;
	int irq_base;

	switch (arizona->type) {
#ifdef CONFIG_MFD_WM5102
	case WM5102:
		aod = &wm5102_aod;
		irq = &wm5102_irq;

		switch (arizona->rev) {
		case 0:
			ctrlif_error = false;
			break;
		default:
			break;
		}
		break;
#endif
#ifdef CONFIG_MFD_WM5110
	case WM5110:
		aod = &wm5110_aod;
		irq = &wm5110_irq;

		switch (arizona->rev) {
		case 0:
			ctrlif_error = false;
			break;
		default:
			break;
		}
		break;
#endif
	default:
		BUG_ON("Unknown Arizona class device" == NULL);
		return -EINVAL;
	}

	if (arizona->pdata.irq_active_high) {
		ret = regmap_update_bits(arizona->regmap, ARIZONA_IRQ_CTRL_1,
					 ARIZONA_IRQ_POL, 0);
		if (ret != 0) {
			dev_err(arizona->dev,
				"Couldn't set IRQ polarity: %d\n", ret);
			goto err;
		}

		flags |= IRQF_TRIGGER_HIGH;
	} else {
		flags |= IRQF_TRIGGER_LOW;
	}

	/* set virtual IRQs */
	if (arizona->pdata.irq_base > 0) {
		arizona->virq[0] = arizona->pdata.irq_base;
		arizona->virq[1] = arizona->pdata.irq_base + 1;
	} else {
		dev_err(arizona->dev, "No irq_base specified\n");
		return -EINVAL;
	}

	ret = irq_alloc_descs(arizona->pdata.irq_base, 0,
			      ARRAY_SIZE(arizona->virq), 0);
	if (ret < 0) {
		dev_err(arizona->dev, "Failed to allocate IRQs: %d\n", ret);
		return ret;
	}

	/* NOTE(review): the descriptors allocated above are not freed on
	 * the error paths below — possible leak; confirm teardown. */
	for (i = 0; i < ARRAY_SIZE(arizona->virq); i++) {
		irq_set_chip_data(arizona->virq[i], arizona);
		irq_set_chip_and_handler(arizona->virq[i], &arizona_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(arizona->virq[i], 1);

		/* ARM needs us to explicitly flag the IRQ as valid and
		 * will set them noprobe when we do so.
		 */
#ifdef CONFIG_ARM
		set_irq_flags(arizona->virq[i], IRQF_VALID);
#else
		irq_set_noprobe(arizona->virq[i]);
#endif
	}

	/* AOD irqchip sits directly after the two chained virqs. */
	irq_base = arizona->pdata.irq_base + 2;
	ret = regmap_add_irq_chip(arizona->regmap, arizona->virq[0],
				  IRQF_ONESHOT, irq_base, aod,
				  &arizona->aod_irq_chip);
	if (ret != 0) {
		dev_err(arizona->dev, "Failed to add AOD IRQs: %d\n", ret);
		goto err_domain;
	}

	irq_base = arizona->pdata.irq_base + 2 + ARIZONA_NUM_IRQ;
	ret = regmap_add_irq_chip(arizona->regmap, arizona->virq[1],
				  IRQF_ONESHOT, irq_base, irq,
				  &arizona->irq_chip);
	if (ret != 0) {
		dev_err(arizona->dev, "Failed to add IRQs: %d\n", ret);
		goto err_aod;
	}

	/* Make sure the boot done IRQ is unmasked for resumes */
	i = arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE);
	ret = request_threaded_irq(i, NULL, arizona_boot_done, IRQF_ONESHOT,
				   "Boot done", arizona);
	if (ret != 0) {
		dev_err(arizona->dev, "Failed to request boot done %d: %d\n",
			arizona->irq, ret);
		goto err_boot_done;
	}

	/* Handle control interface errors in the core */
	if (ctrlif_error) {
		i = arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR);
		ret = request_threaded_irq(i, NULL, arizona_ctrlif_err,
					   IRQF_ONESHOT,
					   "Control interface error", arizona);
		if (ret != 0) {
			dev_err(arizona->dev,
				"Failed to request CTRLIF_ERR %d: %d\n",
				arizona->irq, ret);
			goto err_ctrlif;
		}
	}

	ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread,
				   flags, "arizona", arizona);

	if (ret != 0) {
		dev_err(arizona->dev, "Failed to request IRQ %d: %d\n",
			arizona->irq, ret);
		goto err_main_irq;
	}

	return 0;

err_main_irq:
	free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), arizona);
err_ctrlif:
	free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
err_boot_done:
	regmap_del_irq_chip(arizona->virq[1], arizona->irq_chip);
err_aod:
	regmap_del_irq_chip(arizona->virq[0], arizona->aod_irq_chip);
err_domain:
err:
	return ret;
}
/** * regmap_add_irq_chip(): Use standard regmap IRQ controller handling * * map: The regmap for the device. * irq: The IRQ the device uses to signal interrupts * irq_flags: The IRQF_ flags to use for the primary interrupt. * chip: Configuration for the interrupt controller. * data: Runtime data structure for the controller, allocated on success * * Returns 0 on success or an errno on failure. * * In order for this to be efficient the chip really should use a * register cache. The chip driver is responsible for restoring the * register values used by the IRQ controller over suspend and resume. */ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data) { struct regmap_irq_chip_data *d; int cur_irq, i; int ret = -ENOMEM; irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); if (irq_base < 0) { dev_warn(map->dev, "Failed to allocate IRQs: %d\n", irq_base); return irq_base; } d = kzalloc(sizeof(*d), GFP_KERNEL); if (!d) return -ENOMEM; *data = d; *data = d; d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, GFP_KERNEL); if (!d->status_buf) goto err_alloc; d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs, GFP_KERNEL); if (!d->status_reg_buf) goto err_alloc; d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, GFP_KERNEL); if (!d->mask_buf) goto err_alloc; d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs, GFP_KERNEL); if (!d->mask_buf_def) goto err_alloc; d->map = map; d->chip = chip; d->irq_base = irq_base; mutex_init(&d->lock); for (i = 0; i < chip->num_irqs; i++) d->mask_buf_def[chip->irqs[i].reg_offset] |= chip->irqs[i].mask; /* Mask all the interrupts by default */ for (i = 0; i < chip->num_regs; i++) { d->mask_buf[i] = d->mask_buf_def[i]; if (chip->mask_invert) ret = regmap_write(map, chip->mask_base + i, ~d->mask_buf[i]); else ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]); if (ret != 0) { 
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", chip->mask_base + i, ret); goto err_alloc; } /* need to wait between 32K register for 88PM805 */ #ifdef CONFIG_MFD_88PM805 if (!strcmp("88pm805", chip->name)) msleep(1); #endif } /* Register them with genirq */ for (cur_irq = irq_base; cur_irq < chip->num_irqs + irq_base; cur_irq++) { irq_set_chip_data(cur_irq, d); irq_set_chip_and_handler(cur_irq, ®map_irq_chip, handle_edge_irq); irq_set_nested_thread(cur_irq, 1); /* ARM needs us to explicitly flag the IRQ as valid * and will set them noprobe when we do so. */ #ifdef CONFIG_ARM set_irq_flags(cur_irq, IRQF_VALID); #else irq_set_noprobe(cur_irq); #endif } ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, chip->name, d); if (ret != 0) { dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret); goto err_alloc; } return 0; err_alloc: kfree(d->mask_buf_def); kfree(d->mask_buf); kfree(d->status_reg_buf); kfree(d->status_buf); kfree(d); return ret; }
/*
 * Initialise Samsung SEC PMIC interrupt handling: select the per-model
 * mask register layout and irq count, mask every source in hardware,
 * register the virtual irqs with genirq (edge flow, nested thread —
 * bus access sleeps), and request the main and optional power-on/off
 * (ono) interrupts as threaded handlers.  Returns 0 on success (also
 * when no irq is configured) or a negative errno.
 */
int sec_irq_init(struct sec_pmic_dev *sec_pmic)
{
	int i, reg_int1m, reg_irq_nr;
	int cur_irq;
	int ret = 0;
	int type = sec_pmic->device_type;

	if (!sec_pmic->irq) {
		dev_warn(sec_pmic->dev,
			 "No interrupt specified, no interrupts\n");
		sec_pmic->irq_base = 0;
		return 0;
	}

	if (!sec_pmic->irq_base) {
		dev_err(sec_pmic->dev,
			"No interrupt base specified, no interrupts\n");
		return 0;
	}

	mutex_init(&sec_pmic->irqlock);

	/* Per-model first mask register and number of irq sources. */
	switch (type) {
	case S5M8763X:
		reg_int1m = S5M8763_REG_IRQM1;
		reg_irq_nr = S5M8763_IRQ_NR;
		break;
	case S5M8767X:
		reg_int1m = S5M8767_REG_INT1M;
		reg_irq_nr = S5M8767_IRQ_NR;
		break;
	case S2MPS11X:
		reg_int1m = S2MPS11_REG_INT1M;
		reg_irq_nr = S2MPS11_IRQ_NR;
		break;
	default:
		dev_err(sec_pmic->dev, "Unknown device type %d\n",
			sec_pmic->device_type);
		return -EINVAL;
	}

	/* Mask everything in hardware and mirror that in the caches. */
	for (i = 0; i < NUM_IRQ_REGS - 1; i++) {
		sec_pmic->irq_masks_cur[i] = 0xff;
		sec_pmic->irq_masks_cache[i] = 0xff;
		sec_reg_write(sec_pmic, reg_int1m + i, 0xff);
	}
	for (i = 0; i < reg_irq_nr; i++) {
		cur_irq = i + sec_pmic->irq_base;
		ret = irq_set_chip_data(cur_irq, sec_pmic);
		if (ret) {
			dev_err(sec_pmic->dev,
				"Failed to irq_set_chip_data %d: %d\n",
				sec_pmic->irq, ret);
			return ret;
		}
		irq_set_chip_and_handler(cur_irq, &sec_pmic_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(cur_irq, 1);
#ifdef CONFIG_ARM
		set_irq_flags(cur_irq, IRQF_VALID);
#else
		irq_set_noprobe(cur_irq);
#endif
	}

	ret = request_threaded_irq(sec_pmic->irq, NULL,
				   sec_pmic_irq_thread,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   "sec-pmic-irq", sec_pmic);
	if (ret) {
		dev_err(sec_pmic->dev, "Failed to request IRQ %d: %d\n",
			sec_pmic->irq, ret);
		return ret;
	}

	if (!sec_pmic->ono)
		return 0;

	/* Optional power-on/off key interrupt, both edges. */
	ret = request_threaded_irq(sec_pmic->ono, NULL,
				   sec_pmic_irq_thread,
				   IRQF_TRIGGER_FALLING |
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "sec-pmic-ono", sec_pmic);
	if (ret) {
		dev_err(sec_pmic->dev, "Failed to request IRQ %d: %d\n",
			sec_pmic->ono, ret);
		return ret;
	}

	return 0;
}
/*
 * pm8821_irq_init - set up the PM8821 PMIC virtual interrupt chip.
 *
 * Allocates the pm_irq_chip (with one trailing config byte per irq),
 * validates the master-register layout, registers each virtual irq
 * with genirq as a level interrupt, and requests the secondary summary
 * interrupt (kept enabled as a wake source) when one is provided.
 *
 * Returns the chip on success, or ERR_PTR(-EINVAL) for bad platform
 * data / ERR_PTR(-ENOMEM) on allocation failure / ERR_PTR(rc) when
 * requesting the summary irq fails.
 *
 * Fixes: the dead "rc = devirq" store is removed, and kzalloc failure
 * now reports -ENOMEM instead of -EINVAL.
 */
struct pm_irq_chip * __devinit pm8821_irq_init(struct device *dev,
				const struct pm8xxx_irq_platform_data *pdata)
{
	struct pm_irq_chip *chip;
	int devirq, rc, blocks, masters;
	unsigned int pmirq;

	if (!pdata) {
		pr_err("No platform data\n");
		return ERR_PTR(-EINVAL);
	}

	devirq = pdata->devirq;
	if (devirq < 0) {
		pr_err("missing devirq\n");
		return ERR_PTR(-EINVAL);
	}

	/* One status byte per irq is appended to the structure. */
	chip = kzalloc(sizeof(struct pm_irq_chip)
			+ sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL);
	if (!chip) {
		pr_err("Cannot alloc pm_irq_chip struct\n");
		return ERR_PTR(-ENOMEM);
	}

	chip->dev = dev;
	chip->devirq = devirq;
	chip->irq_base = pdata->irq_base;
	chip->num_irqs = pdata->irq_cdata.nirqs;
	chip->base_addr = pdata->irq_cdata.base_addr;
	blocks = DIV_ROUND_UP(pdata->irq_cdata.nirqs, 8);
	masters = DIV_ROUND_UP(blocks, PM8821_BLOCKS_PER_MASTER);
	chip->masters[0] = chip->base_addr + SSBI_REG_ADDR_IRQ_MASTER0;
	chip->masters[1] = chip->base_addr + SSBI_REG_ADDR_IRQ_MASTER1;

	/* The PM8821 hardware has exactly two irq masters. */
	if (masters != PM8821_TOTAL_IRQ_MASTERS) {
		pr_err("Unequal number of masters, passed: %d, "
		       "should have been: %d\n", masters,
		       PM8821_TOTAL_IRQ_MASTERS);
		kfree(chip);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_init(&chip->pm_irq_lock);

	for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) {
		irq_set_chip_and_handler(chip->irq_base + pmirq,
					 &pm_irq_chip, handle_level_irq);
		irq_set_chip_data(chip->irq_base + pmirq, chip);
#ifdef CONFIG_ARM
		set_irq_flags(chip->irq_base + pmirq, IRQF_VALID);
#else
		irq_set_noprobe(chip->irq_base + pmirq);
#endif
	}

	if (devirq != 0) {
		rc = request_irq(devirq, pm8821_irq_handler,
				 pdata->irq_trigger_flag,
				 "pm8821_sec_irq", chip);
		if (rc) {
			pr_err("failed to request_irq for %d rc=%d\n",
			       devirq, rc);
			kfree(chip);
			return ERR_PTR(rc);
		} else
			irq_set_irq_wake(devirq, 1);
	}

	return chip;
}
/*
 * Initialise interrupt handling for an Arizona-class codec (later
 * variant): select the AOD and main regmap irqchip tables, derive the
 * primary irq trigger flags from platform data or the interrupt
 * controller, allocate two chained virtual irqs, add the regmap
 * irqchips behind them, optionally reroute the primary irq through a
 * GPIO to work around broken pinmuxing, and request the boot-done /
 * control-interface error / primary interrupts.  Returns 0 on success
 * or a negative errno with previously acquired irqs released.
 */
int arizona_irq_init(struct arizona *arizona)
{
	int flags = IRQF_ONESHOT;
	int ret, i;
	struct regmap_irq_chip *aod, *irq;
	bool ctrlif_error = true;
	int irq_base;
	struct irq_data *irq_data;

	switch (arizona->type) {
#ifdef CONFIG_MFD_WM5102
	case WM5102:
		aod = &wm5102_aod;
		irq = &wm5102_irq;

		ctrlif_error = false;
		break;
#endif
#ifdef CONFIG_MFD_WM5110
	case WM5110:
		aod = &wm5110_aod;
		irq = &wm5110_irq;

		ctrlif_error = false;
		break;
#endif
	default:
		BUG_ON("Unknown Arizona class device" == NULL);
		return -EINVAL;
	}

	/* Disable all wake sources by default */
	regmap_write(arizona->regmap, ARIZONA_WAKE_CONTROL, 0);

	/* Read the flags from the interrupt controller if not specified */
	if (!arizona->pdata.irq_flags) {
		irq_data = irq_get_irq_data(arizona->irq);
		if (!irq_data) {
			dev_err(arizona->dev, "Invalid IRQ: %d\n",
				arizona->irq);
			return -EINVAL;
		}

		arizona->pdata.irq_flags = irqd_get_trigger_type(irq_data);
		switch (arizona->pdata.irq_flags) {
		case IRQF_TRIGGER_LOW:
		case IRQF_TRIGGER_HIGH:
		case IRQF_TRIGGER_RISING:
		case IRQF_TRIGGER_FALLING:
			break;

		case IRQ_TYPE_NONE:
		default:
			/* Device default */
			arizona->pdata.irq_flags = IRQF_TRIGGER_LOW;
			break;
		}
	}

	/* Active-high triggering requires flipping the chip's polarity. */
	if (arizona->pdata.irq_flags & (IRQF_TRIGGER_HIGH |
					IRQF_TRIGGER_RISING)) {
		ret = regmap_update_bits(arizona->regmap, ARIZONA_IRQ_CTRL_1,
					 ARIZONA_IRQ_POL, 0);
		if (ret != 0) {
			dev_err(arizona->dev,
				"Couldn't set IRQ polarity: %d\n", ret);
			goto err;
		}
	}

	flags |= arizona->pdata.irq_flags;

	/* set up virtual IRQs */
	irq_base = irq_alloc_descs(arizona->pdata.irq_base, 0,
				   ARRAY_SIZE(arizona->virq), 0);
	if (irq_base < 0) {
		dev_warn(arizona->dev, "Failed to allocate IRQs: %d\n",
			 irq_base);
		return irq_base;
	}

	arizona->virq[0] = irq_base;
	arizona->virq[1] = irq_base + 1;
	irq_base += 2;

	/* NOTE(review): the descriptors allocated above are not freed on
	 * the error paths below — possible leak; confirm teardown. */
	for (i = 0; i < ARRAY_SIZE(arizona->virq); i++) {
		irq_set_chip_and_handler(arizona->virq[i], &arizona_irq_chip,
					 handle_edge_irq);
		irq_set_nested_thread(arizona->virq[i], 1);

		/* ARM needs us to explicitly flag the IRQ as valid
		 * and will set them noprobe when we do so.
		 */
#ifdef CONFIG_ARM
		set_irq_flags(arizona->virq[i], IRQF_VALID);
#else
		irq_set_noprobe(arizona->virq[i]);
#endif
	}

	ret = regmap_add_irq_chip(arizona->regmap, arizona->virq[0],
				  IRQF_ONESHOT, irq_base, aod,
				  &arizona->aod_irq_chip);
	if (ret != 0) {
		dev_err(arizona->dev, "Failed to add AOD IRQs: %d\n", ret);
		goto err_domain;
	}

	ret = regmap_add_irq_chip(arizona->regmap, arizona->virq[1],
				  IRQF_ONESHOT, irq_base + ARIZONA_NUM_IRQ,
				  irq, &arizona->irq_chip);
	if (ret != 0) {
		dev_err(arizona->dev, "Failed to add IRQs: %d\n", ret);
		goto err_aod;
	}

	/* Make sure the boot done IRQ is unmasked for resumes */
	i = arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE);
	ret = request_threaded_irq(i, NULL, arizona_boot_done, IRQF_ONESHOT,
				   "Boot done", arizona);
	if (ret != 0) {
		dev_err(arizona->dev, "Failed to request boot done %d: %d\n",
			arizona->irq, ret);
		goto err_boot_done;
	}

	/* Handle control interface errors in the core */
	if (ctrlif_error) {
		i = arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR);
		ret = request_threaded_irq(i, NULL, arizona_ctrlif_err,
					   IRQF_ONESHOT,
					   "Control interface error", arizona);
		if (ret != 0) {
			dev_err(arizona->dev,
				"Failed to request CTRLIF_ERR %d: %d\n",
				arizona->irq, ret);
			goto err_ctrlif;
		}
	}

	/* Used to emulate edge trigger and to work around broken pinmux */
	if (arizona->pdata.irq_gpio) {
		if (gpio_to_irq(arizona->pdata.irq_gpio) != arizona->irq) {
			dev_warn(arizona->dev, "IRQ %d is not GPIO %d (%d)\n",
				 arizona->irq, arizona->pdata.irq_gpio,
				 gpio_to_irq(arizona->pdata.irq_gpio));
			arizona->irq = gpio_to_irq(arizona->pdata.irq_gpio);
		}

		ret = gpio_request_one(arizona->pdata.irq_gpio,
				       GPIOF_IN, "arizona IRQ");
		if (ret != 0) {
			dev_err(arizona->dev,
				"Failed to request IRQ GPIO %d:: %d\n",
				arizona->pdata.irq_gpio, ret);
			arizona->pdata.irq_gpio = 0;
		}
	}

	ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread,
				   flags, "arizona", arizona);

	if (ret != 0) {
		dev_err(arizona->dev,
			"Failed to request primary IRQ %d: %d\n",
			arizona->irq, ret);
		goto err_main_irq;
	}

	return 0;

err_main_irq:
	free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), arizona);
err_ctrlif:
	free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
err_boot_done:
	regmap_del_irq_chip(arizona->virq[1], arizona->irq_chip);
err_aod:
	regmap_del_irq_chip(arizona->virq[0], arizona->aod_irq_chip);
err_domain:
err:
	return ret;
}
int max77693_irq_init(struct max77693_dev *max77693) { int i; int cur_irq; int ret; u8 i2c_data; if (!max77693->irq_gpio) { dev_warn(max77693->dev, "No interrupt specified.\n"); max77693->irq_base = 0; return 0; } if (!max77693->irq_base) { dev_err(max77693->dev, "No interrupt base specified.\n"); return 0; } mutex_init(&max77693->irqlock); max77693->irq = gpio_to_irq(max77693->irq_gpio); ret = gpio_request(max77693->irq_gpio, "if_pmic_irq"); if (ret) { dev_err(max77693->dev, "%s: failed requesting gpio %d\n", __func__, max77693->irq_gpio); return ret; } gpio_direction_input(max77693->irq_gpio); gpio_free(max77693->irq_gpio); /* Mask individual interrupt sources */ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) { struct i2c_client *i2c; /* MUIC IRQ 0:MASK 1:NOT MASK */ /* Other IRQ 1:MASK 0:NOT MASK */ if (i >= MUIC_INT1 && i <= MUIC_INT3) { max77693->irq_masks_cur[i] = 0x00; max77693->irq_masks_cache[i] = 0x00; } else { max77693->irq_masks_cur[i] = 0xff; max77693->irq_masks_cache[i] = 0xff; } i2c = get_i2c(max77693, i); if (IS_ERR_OR_NULL(i2c)) continue; if (max77693_mask_reg[i] == MAX77693_REG_INVALID) continue; if (i >= MUIC_INT1 && i <= MUIC_INT3) max77693_write_reg(i2c, max77693_mask_reg[i], 0x00); else max77693_write_reg(i2c, max77693_mask_reg[i], 0xff); } /* Register with genirq */ for (i = 0; i < MAX77693_IRQ_NR; i++) { cur_irq = i + max77693->irq_base; irq_set_chip_data(cur_irq, max77693); irq_set_chip_and_handler(cur_irq, &max77693_irq_chip, handle_edge_irq); irq_set_nested_thread(cur_irq, 1); #ifdef CONFIG_ARM set_irq_flags(cur_irq, IRQF_VALID); #else irq_set_noprobe(cur_irq); #endif } /* Unmask max77693 interrupt */ ret = max77693_read_reg(max77693->i2c, MAX77693_PMIC_REG_INTSRC_MASK, &i2c_data); if (ret) { dev_err(max77693->dev, "%s: fail to read muic reg\n", __func__); return ret; } i2c_data &= ~(MAX77693_IRQSRC_CHG); /* Unmask charger interrupt */ i2c_data &= ~(MAX77693_IRQSRC_MUIC); /* Unmask muic interrupt */ max77693_write_reg(max77693->i2c, 
MAX77693_PMIC_REG_INTSRC_MASK, i2c_data); ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "max77693-irq", max77693); if (ret) { dev_err(max77693->dev, "Failed to request IRQ %d: %d\n", max77693->irq, ret); return ret; } return 0; }
/*
 * pm8xxx_irq_init - set up the PM8xxx PMIC interrupt controller
 * @dev:   parent device
 * @pdata: platform data with the summary IRQ (devirq), virq base and
 *         per-chip IRQ configuration data
 *
 * Allocates the pm_irq_chip state (with trailing per-block config
 * bytes), registers a level-triggered genirq chip for every PMIC
 * interrupt and requests the summary interrupt from the SoC.  A failure
 * to request the summary IRQ is deliberately non-fatal: it is logged
 * and the chip is still returned.
 *
 * Returns the chip pointer (owned by the caller) or an ERR_PTR() on
 * failure.
 */
struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev,
				const struct pm8xxx_irq_platform_data *pdata)
{
#ifndef CONFIG_MACH_ACER_A9
	struct pm_irq_chip *chip;
#endif
	int devirq, rc;
	unsigned int pmirq;

	if (!pdata) {
		pr_err("No platform data\n");
		return ERR_PTR(-EINVAL);
	}

	devirq = pdata->devirq;
	if (devirq < 0) {
		pr_err("missing devirq\n");
		return ERR_PTR(-EINVAL);
	}

	/* Trailing u8 per IRQ holds the cached per-IRQ configuration. */
	chip = kzalloc(sizeof(struct pm_irq_chip) +
			sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL);
	if (!chip) {
		pr_err("Cannot alloc pm_irq_chip struct\n");
		/* Allocation failure is -ENOMEM, not -EINVAL. */
		return ERR_PTR(-ENOMEM);
	}

	chip->dev = dev;
	chip->devirq = devirq;
	chip->irq_base = pdata->irq_base;
	chip->num_irqs = pdata->irq_cdata.nirqs;
	chip->base_addr = pdata->irq_cdata.base_addr;
	chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
	chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
	spin_lock_init(&chip->pm_irq_lock);

	for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) {
		irq_set_chip_and_handler(chip->irq_base + pmirq,
					 &pm8xxx_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(chip->irq_base + pmirq, chip);
#ifdef CONFIG_ARM
		set_irq_flags(chip->irq_base + pmirq, IRQF_VALID);
#else
		irq_set_noprobe(chip->irq_base + pmirq);
#endif
	}

	if (devirq != 0) {
		rc = request_irq(devirq, pm8xxx_irq_handler,
				 pdata->irq_trigger_flag,
				 "pm8xxx_usr_irq", chip);
		if (rc) {
			/* Non-fatal: keep going without the summary IRQ. */
			pr_err("failed to request_irq for %d rc=%d\n",
			       devirq, rc);
		} else {
			irq_set_irq_wake(devirq, 1);
		}
	}

#ifdef CONFIG_MACH_ACER_A9
	register_syscore_ops(&pm8xxx_irq_syscore_ops);
#endif

	return chip;
}