static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct gpio_rcar_priv *p = gpiochip_get_data(gc); unsigned int hwirq = irqd_to_hwirq(d); dev_dbg(p->dev, "sense irq = %d, type = %d\n", hwirq, type); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_LEVEL_HIGH: gpio_rcar_config_interrupt_input_mode(p, hwirq, true, true, false); break; case IRQ_TYPE_LEVEL_LOW: gpio_rcar_config_interrupt_input_mode(p, hwirq, false, true, false); break; case IRQ_TYPE_EDGE_RISING: gpio_rcar_config_interrupt_input_mode(p, hwirq, true, false, false); break; case IRQ_TYPE_EDGE_FALLING: gpio_rcar_config_interrupt_input_mode(p, hwirq, false, false, false); break; case IRQ_TYPE_EDGE_BOTH: if (!p->has_both_edge_trigger) return -EINVAL; gpio_rcar_config_interrupt_input_mode(p, hwirq, true, false, true); break; default: return -EINVAL; } return 0; }
static void wcd9xxx_irq_sync_unlock(struct irq_data *data) { struct wcd9xxx_core_resource *wcd9xxx_res = irq_data_get_irq_chip_data(data); int i; if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) > WCD9XXX_MAX_IRQ_REGS) || (ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) > WCD9XXX_MAX_IRQ_REGS)) { pr_err("%s: Array Size out of bound\n", __func__); return; } if (!wcd9xxx_res->codec_reg_write) { pr_err("%s: Codec reg write callback function not defined\n", __func__); return; } for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) { /* If there's been a change in the mask write it back * to the hardware. */ if (wcd9xxx_res->irq_masks_cur[i] != wcd9xxx_res->irq_masks_cache[i]) { wcd9xxx_res->irq_masks_cache[i] = wcd9xxx_res->irq_masks_cur[i]; wcd9xxx_res->codec_reg_write(wcd9xxx_res, WCD9XXX_A_INTR_MASK0 + i, wcd9xxx_res->irq_masks_cur[i]); } } mutex_unlock(&wcd9xxx_res->irq_lock); }
/*
 * qpnpint_irq_mask() - irq_chip .irq_mask handler for QPNP interrupts.
 *
 * Clears this interrupt's bit in the cached per-peripheral enable mask
 * and in the hardware EN_CLR register via SPMI.  When the last enabled
 * interrupt of the peripheral transitions to masked, the SPMI arbiter is
 * asked to ignore the whole peripheral.
 */
static void qpnpint_irq_mask(struct irq_data *d)
{
	struct q_irq_data *irq_d = irq_data_get_irq_chip_data(d);
	struct q_chip_data *chip_d = irq_d->chip_d;
	struct q_perip_data *per_d = irq_d->per_d;
	int rc;
	/* Snapshot so the 1 -> 0 transition can be detected below. */
	uint8_t prev_int_en = per_d->int_en;

	pr_debug("hwirq %lu irq: %d\n", d->hwirq, d->irq);

	/* Without an arbiter callback there is nothing we can program. */
	if (!chip_d->cb) {
		pr_warn_ratelimited("No arbiter on bus=%u slave=%u offset=%u\n",
				    chip_d->bus_nr, irq_d->spmi_slave,
				    irq_d->spmi_offset);
		return;
	}

	per_d->int_en &= ~irq_d->mask_shift;

	if (prev_int_en && !(per_d->int_en)) {
		/*
		 * no interrupt on this peripheral is enabled
		 * ask the arbiter to ignore this peripheral
		 */
		qpnpint_arbiter_op(d, irq_d, chip_d->cb->mask);
	}

	/* Clear the enable bit in hardware (write-one-to-clear register). */
	rc = qpnpint_spmi_write(irq_d, QPNPINT_REG_EN_CLR,
				(u8 *)&irq_d->mask_shift, 1);
	if (rc) {
		pr_err_ratelimited("spmi failure on irq %d\n", d->irq);
		return;
	}

	pr_debug("done hwirq %lu irq: %d\n", d->hwirq, d->irq);
}
/*
 * octeon_irq_ciu_enable() - enable a CIU interrupt on its target cpu.
 *
 * Sets the interrupt's bit in the per-cpu enable-mask mirror and writes
 * the whole updated mask to the matching CIU enable register, under the
 * per-bank raw spinlock.  cd.s.line selects between the two register
 * banks (EN0 vs EN1), each protected by its own lock and mirror.
 */
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	/* chip_data packs line/bit into a union via its pointer member. */
	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		/*
		 * NOTE(review): register index coreid*2 here vs coreid*2+1
		 * below reflects the CIU register layout -- confirm against
		 * the CVMX CIU documentation.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
/* irq_chip .irq_unmask: re-enable delivery of a simulated interrupt. */
static void irq_sim_irqunmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *ctx = irq_data_get_irq_chip_data(data);

	ctx->enabled = true;
}
/* irq_chip .irq_bus_lock: serialize slow-bus accesses to the PMIC GPIO. */
static void pmic_bus_lock(struct irq_data *data)
{
	struct pmic_gpio *chip = irq_data_get_irq_chip_data(data);

	mutex_lock(&chip->buslock);
}
/* irq_chip .irq_bus_lock: take the Crystal Cove GPIO bus mutex. */
static void crystalcove_bus_lock(struct irq_data *data)
{
	struct crystalcove_gpio *gpio = irq_data_get_irq_chip_data(data);

	mutex_lock(&gpio->buslock);
}
/* Suspend hook: restrict the enabled set to wakeup-capable interrupts. */
void jz4740_irq_suspend(struct irq_data *data)
{
	struct irq_chip_generic *chip = irq_data_get_irq_chip_data(data);

	jz4740_irq_set_mask(chip, chip->wake_active);
}
/* irq_chip .irq_bus_lock: guard cached register state during updates. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *chip_data = irq_data_get_irq_chip_data(data);

	mutex_lock(&chip_data->lock);
}
/* irq_chip .irq_release_resources: drop the IRQ reservation on the GPIO. */
static void em_gio_irq_relres(struct irq_data *d)
{
	struct em_gio_priv *priv = irq_data_get_irq_chip_data(d);

	gpiochip_unlock_as_irq(&priv->gpio_chip, irqd_to_hwirq(d));
}
/* irq_chip .irq_unmask: clear this interrupt's bit in the cached mask. */
static void mcp2210_irq_unmask(struct irq_data *data)
{
	struct mcp2210_device *mcp = irq_data_get_irq_chip_data(data);

	mcp->irq_mask &= ~data->mask;
}
/*
 * regmap_irq_sync_unlock() - irq_chip .irq_bus_sync_unlock handler.
 *
 * Flushes cached mask/wake/type register changes to the hardware,
 * propagates any net change in wakeup references to the parent irq,
 * and finally releases the mutex taken by regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;

	/* Resume the device first if register access needs it powered. */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		/* Chips without a mask register have nothing to sync. */
		if (!d->chip->mask_base)
			continue;
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert)
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		/* reg computed unconditionally; only used when wake_buf set. */
		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
						d->mask_buf_def[i],
						~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}
	}

	/* Sync trigger-type registers, skipping registers with no bits. */
	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_update_bits(d->map, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
					d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
/*
 * mcp2210_irq_unmask() - irq_chip .irq_unmask handler (debug-only variant).
 *
 * NOTE(review): this handler only emits debug output and never modifies
 * dev->irq_mask, so unmasking appears to be a no-op here.  `dev` also
 * looks unused unless mcp2210_debug() is a macro that references it in
 * scope -- confirm the mask bit is cleared elsewhere or that this stub
 * is intentional.
 */
static void mcp2210_irq_unmask(struct irq_data *data)
{
	struct mcp2210_device *dev = irq_data_get_irq_chip_data(data);

	mcp2210_debug();
}
/*
 * Acknowledge an interrupt before handling it, so that a new assertion
 * arriving while we process will raise another irq.  Interrupts whose
 * chip data is IS_HW_CLEARED are cleared by hardware and need no action.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
	unsigned long chip_data = (unsigned long)irq_data_get_irq_chip_data(d);

	if (chip_data == IS_HW_CLEARED)
		return;

	clear_irqs(1UL << d->irq);
}
/* irq_chip .irq_unmask: set the IER bit for this external interrupt. */
static void eic_unmask_irq(struct irq_data *d)
{
	struct eic *eic = irq_data_get_irq_chip_data(d);
	unsigned int line = d->irq - eic->first_irq;

	eic_writel(eic, IER, 1 << line);
}
/* Read the current hardware status of the given PM8821 interrupt line. */
static int pm8821_irq_read_line(struct irq_data *d)
{
	return pm8821_get_irq_stat(irq_data_get_irq_chip_data(d), d->irq);
}
/* irq_chip .irq_eoi: signal end-of-interrupt to the LV1 hypervisor. */
static void ps3_chip_eoi(struct irq_data *d)
{
	const struct ps3_private *priv = irq_data_get_irq_chip_data(d);

	lv1_end_of_interrupt_ext(priv->ppe_id, priv->thread_id, d->irq);
}
/* irq_chip .irq_release_resources: release the GPIO's IRQ reservation. */
static void pl061_irq_relres(struct irq_data *d)
{
	struct pl061_gpio *pl061 = irq_data_get_irq_chip_data(d);

	gpio_unlock_as_irq(&pl061->gc, irqd_to_hwirq(d));
}
/* irq_chip .irq_enable: set this line's bit in the interrupt-enable reg. */
static void em_gio_irq_enable(struct irq_data *d)
{
	struct em_gio_priv *priv = irq_data_get_irq_chip_data(d);

	em_gio_write(priv, GIO_IEN, BIT(irqd_to_hwirq(d)));
}
/* irq_chip .irq_bus_lock: protect the RK808 irq register cache. */
static void rk808_irq_lock(struct irq_data *data)
{
	struct rk808 *rk = irq_data_get_irq_chip_data(data);

	mutex_lock(&rk->irq_lock);
}
/*
 * regmap_irq_sync_unlock() - irq_chip .irq_bus_sync_unlock handler.
 *
 * Flushes cached mask/wake/type register changes to the hardware,
 * acks interrupts that were masked while pending (when configured),
 * propagates any net change in wakeup references to the parent irq,
 * and finally releases the mutex taken by regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	/* Resume the device first if register access needs it powered. */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_update_bits(d->map,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_update_bits(d->map, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		/* reg computed unconditionally; only used when wake_buf set. */
		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
						d->mask_buf_def[i],
						~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by writing 0 to the bit */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Sync trigger-type registers, skipping registers with no bits. */
	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_update_bits(d->map, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
					d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
/* irq_chip .irq_bus_lock: protect the TPS6586x irq state. */
static void tps6586x_irq_lock(struct irq_data *data)
{
	struct tps6586x *tps = irq_data_get_irq_chip_data(data);

	mutex_lock(&tps->irq_lock);
}
/* Resume hook: restore the interrupt mask cached before suspend. */
void jz4740_irq_resume(struct irq_data *data)
{
	struct irq_chip_generic *chip = irq_data_get_irq_chip_data(data);

	jz4740_irq_set_mask(chip, chip->mask_cache);
}
/* irq_chip .irq_set_wake: forward wake requests to the chip's main irq. */
static int arizona_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct arizona *priv = irq_data_get_irq_chip_data(data);

	return irq_set_irq_wake(priv->irq, on);
}
/* irq_chip .irq_bus_lock: taken before mask updates, released in sync_unlock. */
static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *res = irq_data_get_irq_chip_data(data);

	mutex_lock(&res->irq_lock);
}
/* irq_chip .irq_bus_lock: take the MSIC GPIO bus mutex. */
static void msic_bus_lock(struct irq_data *data)
{
	struct msic_gpio *gpio = irq_data_get_irq_chip_data(data);

	mutex_lock(&gpio->buslock);
}
/* irq_chip .irq_unmask: enable this hwirq via the PMC IER register. */
static void pmc_irq_unmask(struct irq_data *d)
{
	struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);

	pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq);
}
/* irq_chip .irq_enable: chip data carries the enable value directly. */
static void intc_enable(struct irq_data *data)
{
	unsigned long handle = (unsigned long)irq_data_get_irq_chip_data(data);

	_intc_enable(data, handle);
}
/* irq_chip .irq_bus_lock: protect the ADP5588 irq register cache. */
static void adp5588_irq_bus_lock(struct irq_data *d)
{
	struct adp5588_gpio *adp = irq_data_get_irq_chip_data(d);

	mutex_lock(&adp->irq_lock);
}
/* Resume hook: re-enable the interrupts recorded in the saved mask. */
static void pmc_irq_resume(struct irq_data *d)
{
	struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);

	pmc_write(pmc, AT91_PMC_IER, pmc->imr);
}