static void pm8901_irq_unmask(struct irq_data *d) { int master, irq_bit; struct pm8901_chip *chip = irq_data_get_irq_handler_data(d); u8 block, config, old_irqs_allowed, old_blocks_allowed; unsigned int irq = d->irq; irq -= chip->pdata.irq_base; block = irq / 8; master = block / 8; irq_bit = irq % 8; old_irqs_allowed = chip->irqs_allowed[block]; chip->irqs_allowed[block] |= 1 << irq_bit; if (!old_irqs_allowed) { master = block / 8; old_blocks_allowed = chip->blocks_allowed[master]; chip->blocks_allowed[master] |= 1 << (block % 8); if (!old_blocks_allowed) chip->masters_allowed |= 1 << master; } config = PM8901_IRQF_WRITE | chip->config[irq]; pm8901_config_irq(chip, &block, &config); }
/*
 * XLP8XX/4XX/3XX/2XX:
 * The MSI-X interrupt handling is different from MSI, there are 32 MSI-X
 * interrupts generated by the PIC and each of these correspond to a MSI-X
 * vector (0-31) that can be assigned.
 *
 * We divide the MSI-X vectors to 8 per link and do a per-link allocation
 *
 * XLP9XX:
 * 32 MSI-X vectors are available per link, and the interrupts are not routed
 * thru the PIC. PIC ack not needed.
 *
 * Enable and disable done using standard MSI functions.
 */
static void xlp_msix_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md;
	int link, msixvec;
	uint32_t status_reg, bit;

	msixvec = nlm_irq_msixvec(d->irq);
	link = nlm_irq_msixlink(msixvec);
	/* Mask first so the vector cannot re-fire while we ack */
	mask_msi_irq(d);
	md = irq_data_get_irq_handler_data(d);

	/* Ack MSI on bridge */
	if (cpu_is_xlp9xx()) {
		/* XLP9xx: per-link status register, per-link bit position */
		status_reg = PCIE_9XX_MSIX_STATUSX(link);
		bit = msixvec % XLP_MSIXVEC_PER_LINK;
	} else {
		/* Older XLP: single shared status register, global bit */
		status_reg = PCIE_MSIX_STATUS;
		bit = msixvec;
	}
	nlm_write_reg(md->lnkbase, status_reg, 1u << bit);

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
	/* XLP9xx MSI-X is not routed thru the PIC, so no PIC ack there */
	if (!cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_MSIX_INDEX(msixvec));
}
static void gpio_irq_disable(struct irq_data *d) { struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); u32 mask = (u32) irq_data_get_irq_handler_data(d); __raw_writel(mask, &g->clr_falling); __raw_writel(mask, &g->clr_rising); }
/*
 * Validate the requested trigger type for a banked DaVinci GPIO IRQ.
 * Only edge triggers are supported; the actual edge programming happens
 * in gpio_irq_enable() based on the stored trigger type.
 *
 * Fix: the original computed "g" (register base) and "mask" (pin bit)
 * but never used either; both dead locals are removed.
 *
 * Returns 0 on success, -EINVAL for level or other unsupported triggers.
 */
static int gpio_irq_type(struct irq_data *d, unsigned trigger)
{
	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		return -EINVAL;

	return 0;
}
/*
 * Acknowledge a PM8901 IRQ: write the cached per-IRQ configuration back
 * to its block with the CLR bit set so the latched status is cleared.
 */
static void pm8901_irq_ack(struct irq_data *d)
{
	struct pm8901_chip *chip = irq_data_get_irq_handler_data(d);
	/* chip-local IRQ index (8 IRQs per block) */
	unsigned int pmirq = d->irq - chip->pdata.irq_base;
	u8 block = pmirq / 8;
	u8 config = PM8901_IRQF_WRITE | chip->config[pmirq] | PM8901_IRQF_CLR;

	pm8901_config_irq(chip, &block, &config);
}
static void gpio_irq_enable(struct irq_data *d) { struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); u32 mask = (u32) irq_data_get_irq_handler_data(d); unsigned status = irqd_get_trigger_type(d); status &= IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING; if (!status) status = IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING; if (status & IRQ_TYPE_EDGE_FALLING) __raw_writel(mask, &g->set_falling); if (status & IRQ_TYPE_EDGE_RISING) __raw_writel(mask, &g->set_rising); }
static void xlp_msi_mask_ack(struct irq_data *d) { struct xlp_msi_data *md = irq_data_get_irq_handler_data(d); int link, vec; link = nlm_irq_msilink(d->irq); vec = nlm_irq_msivec(d->irq); xlp_msi_disable(d); /* Ack MSI on bridge */ if (cpu_is_xlp9xx()) nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec); else nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec); }
/*
 * Re-enable a PIC interrupt: run the optional board-specific ack hook
 * stored as the IRQ's handler data, then ack the interrupt routing
 * table entry at the PIC so it can fire again.
 */
static void xlp_pic_unmask(struct irq_data *d)
{
	void (*extra_ack)(void *);
	void *hook = irq_data_get_irq_handler_data(d);
	int irt = nlm_irq_to_irt(d->irq);

	if (irt == -1)
		return;

	/* optional per-IRQ extra ack callback, if one was registered */
	if (hook) {
		extra_ack = hook;
		extra_ack(d);
	}

	/* Ack is a single write, no need to lock */
	nlm_pic_ack(nlm_pic_base, irt);
}
static void xlp_msi_disable(struct irq_data *d) { struct xlp_msi_data *md = irq_data_get_irq_handler_data(d); unsigned long flags; int vec; vec = nlm_irq_msivec(d->irq); spin_lock_irqsave(&md->msi_lock, flags); md->msi_enabled_mask &= ~(1u << vec); if (cpu_is_xlp9xx()) nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN, md->msi_enabled_mask); else nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); spin_unlock_irqrestore(&md->msi_lock, flags); }
/*
 * Chained handler for the Baytrail GPIO controller: scan the interrupt
 * status registers in 32-pin banks, dispatch each pending pin to its
 * mapped virtual IRQ, and guard against interrupt floods from
 * misconfigured triggers by disabling the offending pin.
 */
static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct byt_gpio *vg = irq_data_get_irq_handler_data(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, pin, mask;
	void __iomem *reg;
	u32 pending;
	unsigned virq;
	int looplimit = 0;

	/* check from GPIO controller which pin triggered the interrupt */
	for (base = 0; base < vg->chip.ngpio; base += 32) {
		reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);

		while ((pending = readl(reg))) {
			pin = __ffs(pending);
			mask = BIT(pin);
			/* Clear before handling so we can't lose an edge */
			writel(mask, reg);

			virq = irq_find_mapping(vg->domain, base + pin);
			generic_handle_irq(virq);

			/* In case bios or user sets triggering incorrectly a pin
			 * might remain in "interrupt triggered" state.
			 */
			if (looplimit++ > 32) {
				dev_err(&vg->pdev->dev,
					"Gpio %d interrupt flood, disabling\n",
					base + pin);

				/* strip all trigger bits so the pin stops firing */
				reg = byt_gpio_reg(&vg->chip, base + pin,
						   BYT_CONF0_REG);
				mask = readl(reg);
				mask &= ~(BYT_TRIG_NEG | BYT_TRIG_POS |
					  BYT_TRIG_LVL);
				writel(mask, reg);
				mask = readl(reg); /* flush */
				break;
			}
		}
	}
	chip->irq_eoi(data);
}
static void sun4m_unmask_irq(struct irq_data *data) { struct sun4m_handler_data *handler_data; int cpu = smp_processor_id(); handler_data = irq_data_get_irq_handler_data(data); if (handler_data->mask) { unsigned long flags; local_irq_save(flags); if (handler_data->percpu) { sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear); } else { sbus_writel(handler_data->mask, &sun4m_irq_global->mask_clear); } local_irq_restore(flags); } }
static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) { struct davinci_gpio_controller *d; struct davinci_gpio_regs __iomem *g; u32 mask; d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data); g = (struct davinci_gpio_regs __iomem *)d->regs; mask = __gpio_mask(data->irq - d->gpio_irq); if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) return -EINVAL; writel_relaxed(mask, (trigger & IRQ_TYPE_EDGE_FALLING) ? &g->set_falling : &g->clr_falling); writel_relaxed(mask, (trigger & IRQ_TYPE_EDGE_RISING) ? &g->set_rising : &g->clr_rising); return 0; }
/*
 * Track wakeup capability for a PM8901 IRQ: toggle the per-IRQ
 * wake_enable flag and keep count_wakeable (the number of wake-enabled
 * IRQs) in sync.  Redundant requests (already in the requested state)
 * are silently ignored so the counter stays balanced.
 *
 * Always returns 0.
 */
static int pm8901_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct pm8901_chip *chip = irq_data_get_irq_handler_data(d);
	unsigned int pmirq = d->irq - chip->pdata.irq_base;

	if (on && !chip->wake_enable[pmirq]) {
		chip->wake_enable[pmirq] = 1;
		chip->count_wakeable++;
	} else if (!on && chip->wake_enable[pmirq]) {
		chip->wake_enable[pmirq] = 0;
		chip->count_wakeable--;
	}

	return 0;
}
static int pm8901_irq_set_type(struct irq_data *d, unsigned int flow_type) { int master, irq_bit; struct pm8901_chip *chip = irq_data_get_irq_handler_data(d); u8 block, config; unsigned int irq = d->irq; irq -= chip->pdata.irq_base; if (irq > chip->pm_max_irq) { chip->pm_max_irq = irq; chip->pm_max_blocks = chip->pm_max_irq / 8 + 1; chip->pm_max_masters = chip->pm_max_blocks / 8 + 1; } block = irq / 8; master = block / 8; irq_bit = irq % 8; chip->config[irq] = (irq_bit << PM8901_IRQF_BITS_SHIFT) | PM8901_IRQF_MASK_RE | PM8901_IRQF_MASK_FE; if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { if (flow_type & IRQF_TRIGGER_RISING) chip->config[irq] &= ~PM8901_IRQF_MASK_RE; if (flow_type & IRQF_TRIGGER_FALLING) chip->config[irq] &= ~PM8901_IRQF_MASK_FE; } else { chip->config[irq] |= PM8901_IRQF_LVL_SEL; if (flow_type & IRQF_TRIGGER_HIGH) chip->config[irq] &= ~PM8901_IRQF_MASK_RE; else chip->config[irq] &= ~PM8901_IRQF_MASK_FE; } config = PM8901_IRQF_WRITE | chip->config[irq] | PM8901_IRQF_CLR; return pm8901_config_irq(chip, &block, &config); }
/*
 * Mask and fully acknowledge an XLP MSI: disable the vector, clear its
 * status on the PCIe bridge, then ack the CPU eirr bit and (per-link)
 * PIC entry.  The ordering — disable, bridge ack, eirr/PIC ack — is
 * what keeps the interrupt from re-firing mid-acknowledge.
 */
static void xlp_msi_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	int link, vec;

	link = nlm_irq_msilink(d->irq);
	vec = nlm_irq_msivec(d->irq);
	xlp_msi_disable(d);

	/* Ack MSI on bridge */
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
	/* PIC IRT index differs between XLP9xx and earlier parts */
	if (cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_9XX_IRT_PCIE_LINK_INDEX(link));
	else
		nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
}
/*
 * Chained handler for MSIC GPIO interrupts: read each byte-wide level
 * IRQ status register and dispatch every set bit to its Linux IRQ.
 * GPIOs are grouped 8 per status register (BITS_PER_BYTE).
 */
static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct intel_msic *msic = pdev_to_intel_msic(mg->pdev);
	int i;
	int bitnr;
	u8 pin;
	unsigned long pending = 0;

	/* one status register per group of 8 GPIOs */
	for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) {
		intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin);
		pending = pin;

		if (pending) {
			/* dispatch each pending pin within this group */
			for_each_set_bit(bitnr, &pending, BITS_PER_BYTE)
				generic_handle_irq(mg->irq_base +
						   (i * BITS_PER_BYTE) + bitnr);
		}
	}
	chip->irq_eoi(data);
}
/*
 * Chained handler for the Langwell GPIO controller: walk the edge-detect
 * status registers in 32-pin banks and dispatch each pending pin to its
 * mapped virtual IRQ.
 */
static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct lnw_gpio *lnw = irq_data_get_irq_handler_data(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, gpio, mask;
	unsigned long pending;
	void __iomem *gedr;

	/* check GPIO controller to check which pin triggered the interrupt */
	for (base = 0; base < lnw->chip.ngpio; base += 32) {
		gedr = gpio_reg(&lnw->chip, base, GEDR);
		while ((pending = readl(gedr))) {
			gpio = __ffs(pending);
			mask = BIT(gpio);
			/* Clear before handling so we can't lose an edge */
			writel(mask, gedr);
			generic_handle_irq(irq_find_mapping(lnw->domain,
							    base + gpio));
		}
	}
	chip->irq_eoi(data);
}
/*
 * Mask a PM8901 IRQ: clear the IRQ from the per-block bookkeeping bitmap
 * and, if the block/master became empty, from the block and master maps
 * too; then write the cached config with both edge-mask bits set so the
 * chip stops generating the interrupt.  Mirrors pm8901_irq_unmask().
 */
static void pm8901_irq_mask(struct irq_data *d)
{
	int master, irq_bit;
	struct pm8901_chip *chip = irq_data_get_irq_handler_data(d);
	u8 block, config;
	unsigned int irq = d->irq;

	/* Convert the Linux IRQ number to the chip-local IRQ index */
	irq -= chip->pdata.irq_base;
	block = irq / 8;	/* 8 IRQs per block */
	master = block / 8;	/* 8 blocks per master */
	irq_bit = irq % 8;

	chip->irqs_allowed[block] &= ~(1 << irq_bit);
	if (!chip->irqs_allowed[block]) {
		/* Last IRQ disabled in this block: propagate upward */
		chip->blocks_allowed[master] &= ~(1 << (block % 8));

		if (!chip->blocks_allowed[master])
			chip->masters_allowed &= ~(1 << master);
	}

	/* Both MASK bits set = neither edge can trigger */
	config = PM8901_IRQF_WRITE | chip->config[irq] |
		PM8901_IRQF_MASK_FE | PM8901_IRQF_MASK_RE;
	pm8901_config_irq(chip, &block, &config);
}
/*
 * MSI write_msg callback for HPET: forward the composed MSI message to
 * the HPET device stored as the IRQ's handler data.
 */
static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	void *hdev = irq_data_get_irq_handler_data(data);

	hpet_msi_write(hdev, msg);
}