static void armctrl_unmask_irq(unsigned int irq) #endif { static const unsigned int enables[4] = { IO_ADDRESS(ARM_IRQ_ENBL1), IO_ADDRESS(ARM_IRQ_ENBL2), IO_ADDRESS(ARM_IRQ_ENBL3), 0 }; if(d->irq >= FIQ_START) { unsigned int data = (unsigned int)irq_get_chip_data(d->irq) - FIQ_START; writel(0x80 | data, __io(IO_ADDRESS(ARM_IRQ_FAST))); } else { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38) unsigned int data = (unsigned int)irq_get_chip_data(d->irq); #else unsigned int data = (unsigned int)get_irq_chip_data(irq); #endif writel(1 << (data & 0x1f), __io(enables[(data >> 5) & 0x3])); } }
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) { unsigned long irq_flags; struct msm_gpio_chip *msm_chip = irq_get_chip_data(d->irq); unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); unsigned val, mask = BIT(offset); spin_lock_irqsave(&msm_chip->lock, irq_flags); val = __raw_readl(msm_chip->regs.int_edge); if (flow_type & IRQ_TYPE_EDGE_BOTH) { __raw_writel(val | mask, msm_chip->regs.int_edge); __irq_set_handler_locked(d->irq, handle_edge_irq); } else { __raw_writel(val & ~mask, msm_chip->regs.int_edge); __irq_set_handler_locked(d->irq, handle_level_irq); } if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { msm_chip->both_edge_detect |= mask; msm_gpio_update_both_edge_detect(msm_chip); } else { msm_chip->both_edge_detect &= ~mask; val = __raw_readl(msm_chip->regs.int_pos); if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) __raw_writel(val | mask, msm_chip->regs.int_pos); else __raw_writel(val & ~mask, msm_chip->regs.int_pos); } mb(); spin_unlock_irqrestore(&msm_chip->lock, irq_flags); return 0; }
int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet, unsigned int *virq) { int result; struct ps3_private *pd; result = ps3_virq_setup(cpu, outlet, virq); if (result) { pr_debug("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__); goto fail_setup; } pd = irq_get_chip_data(*virq); /* Binds outlet to cpu + virq. */ result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq, outlet, 0); if (result) { pr_info("%s:%d: lv1_connect_irq_plug_ext failed: %s\n", __func__, __LINE__, ps3_result(result)); result = -EPERM; goto fail_connect; } return result; fail_connect: ps3_virq_destroy(*virq); fail_setup: return result; }
/* Unmask an interrupt line by setting its bit in the mask register. */
static void intc_unmask_irq(struct irq_data *d)
{
	void __iomem *base = irq_get_chip_data(d->irq);
	unsigned int line = d->irq & 31;
	unsigned int imr = readl(base + INTC_IMR);

	writel(imr | (1 << line), base + INTC_IMR);
}
static int intc_set_type(struct irq_data *d, unsigned int flow_type) { unsigned int irq = d->irq; void __iomem *base = irq_get_chip_data(irq); unsigned int type = 0, val; irq &= 31; switch (flow_type) { case IRQ_TYPE_LEVEL_HIGH: type = ACTIVE_HIGH; break; case IRQ_TYPE_LEVEL_LOW: type = ACTIVE_LOW; break; case IRQ_TYPE_EDGE_RISING: type = RISING_EDGE_TRIGGER; break; case IRQ_TYPE_EDGE_FALLING: type = FALLING_EDGE_TRIGGER; break; case IRQ_TYPE_EDGE_BOTH: type = EITHER_EDGE_TRIGGER; break; default: return -EINVAL; } val = readl(base + INTC_ISTCR(irq)); type &= 0x7; val &= ~(0x7 << ((irq & 0x7) << 2)); val |= type << ((irq & 0x7) << 2); writel(val, base + INTC_ISTCR(irq)); return 0; }
/*
 * Build a remapped MSI message for the given irq.  The message encodes the
 * IRTE index (interrupt-remapping format) rather than a raw destination;
 * the source-id comes from the PCI device when present, otherwise from the
 * HPET block.
 */
static void intel_compose_msi_msg(struct pci_dev *pdev, unsigned int irq,
				  unsigned int dest, struct msi_msg *msg,
				  u8 hpet_id)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	struct irte irte;
	u16 sub_handle = 0;
	int index = map_irq_to_irte_handle(irq, &sub_handle);

	BUG_ON(index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request. */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(index) |
			  MSI_ADDR_IR_INDEX2(index);
}
static void max77665_irq_sync_unlock(struct irq_data *data) { struct max77665_dev *max77665 = irq_get_chip_data(data->irq); int i; for (i = 0; i < MAX77665_IRQ_GROUP_NR; i++) { u8 mask_reg = max77665_mask_reg[i]; struct i2c_client *i2c = get_i2c(max77665, i); if (mask_reg == MAX77665_REG_INVALID || IS_ERR_OR_NULL(i2c)) continue; max77665->irq_masks_cache[i] = max77665->irq_masks_cur[i]; if (max77665->irq_masks_cur[i] != 0xff) { u8 reg_data; max77665_read_reg(i2c, MAX77665_PMIC_REG_INTSRC_MASK, ®_data); reg_data &= ~(1<<i); max77665_write_reg(i2c, MAX77665_PMIC_REG_INTSRC_MASK,reg_data); } else { u8 reg_data; max77665_read_reg(i2c, MAX77665_PMIC_REG_INTSRC_MASK, ®_data); reg_data |= (1<<i); max77665_write_reg(i2c, MAX77665_PMIC_REG_INTSRC_MASK,reg_data); } max77665_write_reg(i2c, max77665_mask_reg[i], max77665->irq_masks_cur[i]); } mutex_unlock(&max77665->irqlock); }
/* Read the sense state of a CPCAP interrupt identified by its virq. */
int cpcap_sense_virq(struct regmap *regmap, int virq)
{
	struct regmap_irq_chip_data *chip_data = irq_get_chip_data(virq);

	/* Translate virq back to the chip-relative hwirq number. */
	return cpcap_sense_irq(regmap,
			       virq - regmap_irq_chip_get_base(chip_data));
}
/* Clear the cached mask bit; hardware is updated on sync_unlock. */
static void max8997_irq_unmask(struct irq_data *data)
{
	struct max8997_dev *dev = irq_get_chip_data(data->irq);
	const struct max8997_irq_data *info =
		irq_to_max8997_irq(dev, data->irq);

	dev->irq_masks_cur[info->group] &= ~info->mask;
}
/* Acknowledge a pending interrupt and clear any soft trigger for it. */
static void intc_ack_irq(struct irq_data *d)
{
	void __iomem *base = irq_get_chip_data(d->irq);
	unsigned int bit = 1 << (d->irq & 31);

	writel(bit, base + INTC_ICR);
	/* moreover, clear the soft-triggered, in case it was the reason */
	writel(bit, base + INTC_SICR(d->irq));
}
/* Recover the davinci GPIO register block stored as the irq's chip data. */
static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
{
	return (__force struct davinci_gpio_regs __iomem *)
		irq_get_chip_data(irq);
}
void lnw_gpio_set_alt(int gpio, int alt) { struct lnw_gpio *lnw; u32 __iomem *mem; int reg; int bit; u32 offset; u32 value; unsigned long flags; /* use this trick to get memio */ lnw = irq_get_chip_data(gpio_to_irq(gpio)); if (!lnw) { pr_err("langwell_gpio: can not find pin %d\n", gpio); return; } if (gpio < lnw->chip.base || gpio >= lnw->chip.base + lnw->chip.ngpio) { dev_err(lnw->chip.dev, "langwell_gpio: wrong pin %d to config alt\n", gpio); return; } #if 0 if (lnw->irq_base + gpio - lnw->chip.base != gpio_to_irq(gpio)) { dev_err(lnw->chip.dev, "langwell_gpio: wrong chip data for pin %d\n", gpio); return; } #endif gpio -= lnw->chip.base; if (lnw->type != TANGIER_GPIO) { reg = gpio / 16; bit = gpio % 16; mem = gpio_reg(&lnw->chip, 0, GAFR); spin_lock_irqsave(&lnw->lock, flags); value = readl(mem + reg); value &= ~(3 << (bit * 2)); value |= (alt & 3) << (bit * 2); writel(value, mem + reg); spin_unlock_irqrestore(&lnw->lock, flags); dev_dbg(lnw->chip.dev, "ALT: writing 0x%x to %p\n", value, mem + reg); } else { offset = lnw->get_flis_offset(gpio); if (WARN(offset == -EINVAL, "invalid pin %d\n", gpio)) return; if (!is_merr_i2c_flis(offset)) spin_lock_irqsave(&lnw->lock, flags); value = get_flis_value(offset); value &= ~7; value |= (alt & 7); set_flis_value(value, offset); if (!is_merr_i2c_flis(offset)) spin_unlock_irqrestore(&lnw->lock, flags); } }
/*
 * Cascade handler for the Axon MSIC: drain MSIs from the hardware FIFO
 * and dispatch each one as a linux irq.
 *
 * The read offset is software state (msic->read_offset); the write offset
 * comes from the MSIC_WRITE_OFFSET_REG dcr.  Entries whose chip data does
 * not match this msic are treated as stale DMA and retried (up to 100
 * times) before being skipped.
 */
static void axon_msi_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_desc_get_handler_data(desc);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx = msic->read_offset / sizeof(__le32);
		msi = le32_to_cpu(msic->fifo_virt[idx]);
		/* Only the low 16 bits carry the irq number. */
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			/* Poison the consumed slot so stale reads are caught. */
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			/* Retry same slot: do not advance read_offset. */
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		/* Gave up on a stale entry: skip it to avoid livelock. */
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}
/*
 * Default (weak) MSI teardown: delegate to the controller stored in the
 * irq chip data, if it provides a teardown hook.
 */
void __weak arch_teardown_msi_irq(unsigned int irq)
{
	struct msi_controller *ctrl = irq_get_chip_data(irq);

	if (ctrl && ctrl->teardown_irq)
		ctrl->teardown_irq(ctrl, irq);
}
/* Take the bus lock for the following irq_chip operations. */
static void max77xxx_irq_lock(struct irq_data *data)
{
	struct max77xxx_dev *dev = irq_get_chip_data(data->irq);

	if (debug_mask & MAX77XXX_DEBUG_IRQ_MASK)
		pr_info("%s\n", __func__);

	mutex_lock(&dev->irqlock);
}
/*
 * Enable an interrupt source in the ARM controller.  FIQ-range irqs are
 * routed through the FIQ source-select register; everything else sets a
 * bit in one of the per-bank enable registers.
 */
static void armctrl_unmask_irq(struct irq_data *d)
{
	/* One enable register per bank of 32 sources; slot 3 unused. */
	static const unsigned int enables[4] = {
		ARM_IRQ_ENBL1, ARM_IRQ_ENBL2, ARM_IRQ_ENBL3, 0
	};
	unsigned int data = (unsigned int)irq_get_chip_data(d->irq);

	if (d->irq >= FIQ_START) {
		/* Bit 7 enables the FIQ, low bits select the source. */
		writel(0x80 | (data - FIQ_START),
		       __io_address(ARM_IRQ_FAST));
	} else {
		writel(1 << (data & 0x1f),
		       __io_address(enables[(data >> 5) & 0x3]));
	}
}
/* Acknowledge a GPIO interrupt by clearing its detect status bit. */
static void msm_gpio_irq_ack(struct irq_data *d)
{
	struct msm_gpio_chip *chip = irq_get_chip_data(d->irq);
	unsigned offset = d->irq - gpio_to_irq(chip->chip.base);
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	msm_gpio_clear_detect_status(chip, offset);
	spin_unlock_irqrestore(&chip->lock, flags);
}
/* Release the per-irq data when a virq is unmapped from the domain. */
void qpnpint_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	struct q_irq_data *irq_d = irq_get_chip_data(virq);

	/* Missing chip data means the mapping was never completed. */
	if (!WARN_ON(!irq_d))
		qpnpint_free_irq_data(irq_d);
}
/*
 * Apply a batch of pin configurations.
 *
 * If any pin is switching to ALT_C, a "glitch" protocol is used: every
 * affected pin's SLPM bit is cleared via nmk_gpio_glitch_slpm_init()
 * before reconfiguring, and restored afterwards — all under the global
 * nmk_gpio_slpm_lock so concurrent configs can't interleave.
 *
 * Returns 0 on success, -EINVAL if a pin's chip cannot be found (pins
 * processed before the failure remain configured).
 */
static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
{
	/* Scratch SLPM image; safe as a static because it is only touched
	 * while holding nmk_gpio_slpm_lock. */
	static unsigned int slpm[NUM_BANKS];
	unsigned long flags;
	bool glitch = false;
	int ret = 0;
	int i;

	/* Does any pin in the batch go to ALT_C? */
	for (i = 0; i < num; i++) {
		if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C) {
			glitch = true;
			break;
		}
	}

	spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);

	if (glitch) {
		/* Start from all-ones, then clear the bit of each ALT_C pin. */
		memset(slpm, 0xff, sizeof(slpm));

		for (i = 0; i < num; i++) {
			int pin = PIN_NUM(cfgs[i]);
			int offset = pin % NMK_GPIO_PER_CHIP;

			if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C)
				slpm[pin / NMK_GPIO_PER_CHIP] &= ~BIT(offset);
		}

		nmk_gpio_glitch_slpm_init(slpm);
	}

	for (i = 0; i < num; i++) {
		struct nmk_gpio_chip *nmk_chip;
		int pin = PIN_NUM(cfgs[i]);

		/* Chip pointer lives in the irq chip data for the pin's irq. */
		nmk_chip = irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(pin));
		if (!nmk_chip) {
			ret = -EINVAL;
			break;
		}

		clk_enable(nmk_chip->clk);
		/* Per-chip lock nests inside the global slpm lock. */
		spin_lock(&nmk_chip->lock);
		__nmk_config_pin(nmk_chip, pin - nmk_chip->chip.base,
				 cfgs[i], sleep, glitch ? slpm : NULL);
		spin_unlock(&nmk_chip->lock);
		clk_disable(nmk_chip->clk);
	}

	if (glitch)
		nmk_gpio_glitch_slpm_restore(slpm);

	spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);

	return ret;
}
/*
 * Unmask a max77828 interrupt in the cached mask state (flushed to
 * hardware by the sync_unlock handler).
 *
 * NOTE(review): unlike the sibling max77665/max77693/max77803 unmask
 * handlers in this file — which OR the bit only for MUIC groups and
 * clear it otherwise — this one ORs the bit in for every group.  That
 * may be intentional (all max77828 mask registers could share the MUIC
 * polarity), but confirm against the datasheet before touching it.
 */
static void max77828_irq_unmask(struct irq_data *data)
{
	struct max77828_dev *max77828 = irq_get_chip_data(data->irq);
	const struct max77828_irq_data *irq_data =
		irq_to_max77828_irq(max77828, data->irq);

	/* Ignore irqs mapped to an out-of-range group. */
	if (irq_data->group >= MAX77828_IRQ_GROUP_NR)
		return;

	max77828->irq_masks_cur[irq_data->group] |= irq_data->mask;
}
/*
 * Update the cached mask state to unmask an interrupt.  The MUIC_INT1
 * group uses the opposite bit sense from the other groups — presumably
 * its hardware mask register polarity differs (verify against datasheet).
 */
static void max77665_irq_unmask(struct irq_data *data)
{
	struct max77665_dev *dev = irq_get_chip_data(data->irq);
	const struct max77665_irq_data *info =
		irq_to_max77665_irq(dev, data->irq);

	if (info->group == MUIC_INT1)
		dev->irq_masks_cur[info->group] |= info->mask;
	else
		dev->irq_masks_cur[info->group] &= ~info->mask;
}
static void max77693_irq_unmask(struct irq_data *data) { struct max77693_dev *max77693 = irq_get_chip_data(data->irq); const struct max77693_irq_data *irq_data = irq_to_max77693_irq(max77693, data->irq); if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3) max77693->irq_masks_cur[irq_data->group] |= irq_data->mask; else max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask; }
/* Clear the cached mask bit for this irq; hardware sync happens later. */
static void max77xxx_irq_unmask(struct irq_data *data)
{
	struct max77xxx_dev *dev = irq_get_chip_data(data->irq);
	const struct max77xxx_irq_data *info = to_max77xxx_irq(data->irq);

	dev->irq_masks_cur[info->group] &= ~info->mask;

	if (debug_mask & MAX77XXX_DEBUG_IRQ_MASK)
		pr_info("%s: group=%d, cur=0x%x\n", __func__,
			info->group, dev->irq_masks_cur[info->group]);
}
/* Tear down a virq: detach the chip data and dispose of the mapping. */
static int ps3_virq_destroy(unsigned int virq)
{
	const struct ps3_private *priv = irq_get_chip_data(virq);

	pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
		__LINE__, priv->ppe_id, priv->thread_id, virq);

	irq_set_chip_data(virq, NULL);
	irq_dispose_mapping(virq);

	pr_debug("%s:%d <-\n", __func__, __LINE__);
	return 0;
}
static void max77803_irq_unmask(struct irq_data *data) { struct max77803_dev *max77803 = irq_get_chip_data(data->irq); const struct max77803_irq_data *irq_data = irq_to_max77803_irq(max77803, data->irq); if (irq_data->group >= MAX77803_IRQ_GROUP_NR) return; if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3) max77803->irq_masks_cur[irq_data->group] |= irq_data->mask; else max77803->irq_masks_cur[irq_data->group] &= ~irq_data->mask; }
/* Mask an interrupt in the cached state; hardware is updated on sync. */
static void max14577_irq_mask(struct irq_data *data)
{
	struct max14577_dev *dev = irq_get_chip_data(data->irq);
	const struct max14577_irq_data *info =
		irq_to_max14577_irq(dev, data->irq);

	/* Ignore unknown irqs and out-of-range groups. */
	if (!info || info->group >= MAX14577_IRQ_REGS_NUM)
		return;

	dev->irq_masks_cur[info->group] &= ~info->mask;
}
/**
 * nmk_gpio_set_mode() - set the mux mode of a gpio pin
 * @gpio: pin number
 * @gpio_mode: one of NMK_GPIO_ALT_GPIO, NMK_GPIO_ALT_A,
 *	       NMK_GPIO_ALT_B, and NMK_GPIO_ALT_C
 *
 * Sets the mode of the specified pin to one of the alternate functions
 * or plain GPIO.  Returns -EINVAL if the pin's chip cannot be found.
 */
int nmk_gpio_set_mode(int gpio, int gpio_mode)
{
	unsigned long flags;
	struct nmk_gpio_chip *nmk_chip =
		irq_get_chip_data(NOMADIK_GPIO_TO_IRQ(gpio));

	if (!nmk_chip)
		return -EINVAL;

	spin_lock_irqsave(&nmk_chip->lock, flags);
	__nmk_gpio_set_mode(nmk_chip, gpio - nmk_chip->chip.base, gpio_mode);
	spin_unlock_irqrestore(&nmk_chip->lock, flags);

	return 0;
}
/* Enable a GPIO interrupt, dropping any stale latched level status first. */
static void msm_gpio_irq_unmask(struct irq_data *d)
{
	struct msm_gpio_chip *chip = irq_get_chip_data(d->irq);
	unsigned offset = d->irq - gpio_to_irq(chip->chip.base);
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	/* Level triggered interrupts are also latched. */
	if (!(__raw_readl(chip->regs.int_edge) & BIT(offset)))
		msm_gpio_clear_detect_status(chip, offset);
	chip->int_enable[0] |= BIT(offset);
	__raw_writel(chip->int_enable[0], chip->regs.int_en);
	mb();
	spin_unlock_irqrestore(&chip->lock, flags);
}
/*
 * Propagate a child's wake request to the parent interrupt line.
 * Returns -ENXIO if wakeup is requested but disabled on the device.
 */
static int max77xxx_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct max77xxx_dev *dev = irq_get_chip_data(data->irq);

	if (device_may_wakeup(dev->dev))
		return on ? enable_irq_wake(dev->irq)
			  : disable_irq_wake(dev->irq);

	if (on) {
		dev_warn(dev->dev,
			 "Child requested wakeup but wakeup disabled\n");
		return -ENXIO;
	}

	return 0;
}
/* Record whether this GPIO interrupt should stay armed during suspend. */
static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct msm_gpio_chip *chip = irq_get_chip_data(d->irq);
	unsigned offset = d->irq - gpio_to_irq(chip->chip.base);
	unsigned long flags;

	spin_lock_irqsave(&chip->lock, flags);
	if (on)
		chip->int_enable[1] |= BIT(offset);
	else
		chip->int_enable[1] &= ~BIT(offset);
	spin_unlock_irqrestore(&chip->lock, flags);

	return 0;
}