static void __init __vic_set_irq_chip(void)
{
	struct irq_chip *d = NULL;
	int i = 0, num = 2;

	for (i = 0; i < num; i++) {
		d = irq_get_chip(i * 32);	/* VIC.0, 1 */
		if (d) {
			if (!d->irq_enable)
				d->irq_enable = __vic_irq_enable;
			if (!d->irq_disable)
				d->irq_disable = __vic_irq_disable;
		}
	}
#if 0
	d = irq_get_chip(IRQ_GIC_PPI_START);
	if (d) {
		if (!d->irq_enable)
			d->irq_enable = __gic_irq_enable;
		if (!d->irq_disable)
			d->irq_disable = __gic_irq_disable;
	}
#endif
}
/* static int pod_gpio_irq_type(unsigned int _irq, unsigned int type) old kernels ? */
static int pod_gpio_irq_type(struct irq_data *data, unsigned int type)
{
	/* unsigned int irq = _irq; old kernels ? */
	unsigned int irq = data->irq;
	struct irq_chip *chip = irq_get_chip(irq);
	struct pod_gpio_chip *cg_chip = container_of(chip,
			struct pod_gpio_chip, interrupt_chip);
	u16 edge_config;

	edge_config = cg_read_reg(cg_chip, GPIO_INTERRUPT_EDGE_TYPE);

	/* Only falling and rising edge are supported by pod_gpio */
	if (type == IRQ_TYPE_EDGE_RISING) {
		cg_write_reg(cg_chip, GPIO_INTERRUPT_EDGE_TYPE,
			     edge_config | (1 << (irq - IRQ_GPIO_POD(0))));
	} else if (type == IRQ_TYPE_EDGE_FALLING) {
		cg_write_reg(cg_chip, GPIO_INTERRUPT_EDGE_TYPE,
			     edge_config & ~(1 << (irq - IRQ_GPIO_POD(0))));
	} else {
		dev_err(&cg_chip->pdev->dev,
			"The selected edge type is not supported by the driver.\n");
		return -EINVAL;
	}

	return 0;
}
static void shirq_handler(unsigned irq, struct irq_desc *desc)
{
	u32 i, j, val, mask, tmp;
	struct irq_chip *chip;
	struct spear_shirq *shirq = irq_get_handler_data(irq);

	chip = irq_get_chip(irq);
	chip->irq_ack(&desc->irq_data);

	mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
	while ((val = readl(shirq->base + shirq->regs.status_reg) & mask)) {
		val >>= shirq->irq_bit_off;
		for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
			if (!(j & val))
				continue;

			generic_handle_irq(shirq->irq_base + i);

			/* clear interrupt */
			if (shirq->regs.clear_reg == -1)
				continue;

			tmp = readl(shirq->base + shirq->regs.clear_reg);
			if (shirq->regs.reset_to_clear)
				tmp &= ~(j << shirq->irq_bit_off);
			else
				tmp |= (j << shirq->irq_bit_off);
			writel(tmp, shirq->base + shirq->regs.clear_reg);
		}
	}
	chip->irq_unmask(&desc->irq_data);
}
static struct irq_chip *keystone_gpio_get_irq_chip(unsigned int irq)
{
	static struct irq_chip gpio_unbanked;

	gpio_unbanked = *irq_get_chip(irq);
	return &gpio_unbanked;
}
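/*
 * A minimal usage sketch for the helper above, assuming the static
 * copy it returns is then customized for unbanked GPIO IRQs the way
 * the related DaVinci GPIO code does. The names
 * keystone_setup_unbanked_irq and gpio_irq_type_unbanked are
 * illustrative, not from the original. The point of copying the
 * parent chip is that overrides here never touch the parent
 * controller's real irq_chip.
 */
static void keystone_setup_unbanked_irq(unsigned int parent_irq)
{
	struct irq_chip *chip = keystone_gpio_get_irq_chip(parent_irq);

	chip->name = "GPIO-unbanked";			/* cosmetic rename */
	chip->irq_set_type = gpio_irq_type_unbanked;	/* hypothetical helper */
}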
static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	generic_handle_irq((int)desc->irq_data.handler_data);
	chained_irq_exit(chip, desc);
}
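/*
 * A sketch of how the demux above would be wired up, assuming the
 * s3c24xx-era convention of stashing the child IRQ number directly in
 * the parent's handler_data pointer (which is what the (int) cast
 * above reads back). The function name and the s3c_irq_timer chip are
 * assumptions based on surrounding platform code, not taken from this
 * snippet.
 */
static void __init example_init_vic_timer_irq(unsigned int parent_irq,
					      unsigned int timer_irq)
{
	irq_set_chained_handler(parent_irq, s3c_irq_demux_vic_timer);
	irq_set_handler_data(parent_irq, (void *)timer_irq);

	irq_set_chip_and_handler(timer_irq, &s3c_irq_timer, handle_level_irq);
	set_irq_flags(timer_irq, IRQF_VALID);
}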
void __init tegra_init_irq(void)
{
	struct irq_chip *gic;
	unsigned int i;
	int irq;

	tegra_init_legacy_irq();

	gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE),
		 IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));

	gic = irq_get_chip(29);
	tegra_gic_unmask_irq = gic->irq_unmask;
	tegra_gic_mask_irq = gic->irq_mask;
	tegra_gic_ack_irq = gic->irq_ack;
#ifdef CONFIG_SMP
	tegra_irq.irq_set_affinity = gic->irq_set_affinity;
#endif

	for (i = 0; i < INT_MAIN_NR; i++) {
		irq = INT_PRI_BASE + i;
		irq_set_chip_and_handler(irq, &tegra_irq, handle_level_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
}
static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
				   u32 status)
{
	struct nmk_gpio_chip *nmk_chip;
	struct irq_chip *host_chip = irq_get_chip(irq);
	unsigned int first_irq;

	if (host_chip->irq_mask_ack)
		host_chip->irq_mask_ack(&desc->irq_data);
	else {
		host_chip->irq_mask(&desc->irq_data);
		if (host_chip->irq_ack)
			host_chip->irq_ack(&desc->irq_data);
	}

	nmk_chip = irq_get_handler_data(irq);
	first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
	while (status) {
		int bit = __ffs(status);

		generic_handle_irq(first_irq + bit);
		status &= ~BIT(bit);
	}

	host_chip->irq_unmask(&desc->irq_data);
}
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);
	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);

	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = gic_irq + chip_data->irq_offset;
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
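/*
 * Registration sketch for the cascade handler above, modeled on the
 * mainline gic_cascade_irq() of the same era; gic_data is assumed to
 * be the driver's per-controller array. The handler data must be set
 * before the chained handler is installed so the handler never sees a
 * NULL chip_data.
 */
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}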
static void exynos4_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	exynos4_irq_demux_eint(IRQ_EINT(16));
	exynos4_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_platform->irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	struct irq_chip *irq_chip = NULL;
	int i;

	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));

	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	irq_chip->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
static void pod_gpio_ack_irq(struct irq_data *data)
{
	struct irq_chip *chip = irq_get_chip(data->irq);
	struct pod_gpio_chip *cg_chip = container_of(chip,
			struct pod_gpio_chip, interrupt_chip);

	cg_write_reg(cg_chip, GPIO_INTERRUPT_STATUS,
		     (1 << (data->irq - IRQ_GPIO_POD(0))));
}
static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	generic_handle_irq(*irq_data);
	chained_irq_exit(chip, desc);
}
static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	int eint_irq;

	chained_irq_enter(chip, desc);
	eint_irq = irq_find_mapping(irq_domain, *irq_data);
	generic_handle_irq(eint_irq);
	chained_irq_exit(chip, desc);
}
void sn_irq_init(void)
{
	int i;

	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;

	for (i = 0; i < NR_IRQS; i++) {
		if (irq_get_chip(i) == &no_irq_chip)
			irq_set_chip(i, &irq_type_sn);
	}
}
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
void __init hpsim_irq_init(void)
{
	int i;

	for_each_active_irq(i) {
		struct irq_chip *chip = irq_get_chip(i);

		if (chip == &no_irq_chip)
			irq_set_chip(i, &irq_type_hp_sim);
	}
}
static void intc_irqpin_irq_disable_force(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;

	/* disable interrupt through parent interrupt controller,
	 * assumes non-shared interrupt with 1:1 mapping
	 * needed for busted IRQs on some SoCs like sh73a0
	 */
	irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));

	intc_irqpin_irq_disable(d);
}
static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	u32 a16_23, a24_31;

	chained_irq_enter(chip, desc);
	a16_23 = exynos_irq_demux_eint(IRQ_EINT(16));
	a24_31 = exynos_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);

	if (!a16_23 && !a24_31)
		do_bad_IRQ(irq, desc);
}
static void pod_gpio_mask_irq(struct irq_data *data)
{
	struct irq_chip *chip = irq_get_chip(data->irq);
	struct pod_gpio_chip *cg_chip = container_of(chip,
			struct pod_gpio_chip, interrupt_chip);
	u16 enable_interrupt_config;

	enable_interrupt_config = cg_read_reg(cg_chip, GPIO_ENABLE_INTERRUPT);
	cg_write_reg(cg_chip, GPIO_ENABLE_INTERRUPT,
		     enable_interrupt_config &
		     ~(1 << (data->irq - IRQ_GPIO_POD(0))));
}
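/*
 * A minimal sketch of how the pod_gpio callbacks above could be tied
 * together, assuming per-device chip setup. The container_of() in
 * each callback requires that the struct irq_chip it receives is the
 * interrupt_chip member embedded in struct pod_gpio_chip, so the chip
 * must be filled in per device rather than shared. Field values and
 * the helper name are assumptions; the matching irq_unmask callback
 * is not shown in these snippets.
 */
static void pod_gpio_init_irq_chip(struct pod_gpio_chip *cg_chip)
{
	cg_chip->interrupt_chip.name = "pod-gpio";
	cg_chip->interrupt_chip.irq_ack = pod_gpio_ack_irq;
	cg_chip->interrupt_chip.irq_mask = pod_gpio_mask_irq;
	cg_chip->interrupt_chip.irq_set_type = pod_gpio_irq_type;
}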
static void debug_force_irq(struct fiq_debugger_state *state)
{
	unsigned int irq = state->signal_irq;

	if (WARN_ON(!debug_have_fiq(state)))
		return;

	if (state->pdata->force_irq) {
		state->pdata->force_irq(state->pdev, irq);
	} else {
		struct irq_chip *chip = irq_get_chip(irq);

		if (chip && chip->irq_retrigger)
			chip->irq_retrigger(irq_get_irq_data(irq));
	}
}
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}
static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
				   u32 status)
{
	struct nmk_gpio_chip *nmk_chip;
	struct irq_chip *host_chip = irq_get_chip(irq);
	unsigned int first_irq;

	chained_irq_enter(host_chip, desc);

	nmk_chip = irq_get_handler_data(irq);
	first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
	while (status) {
		int bit = __ffs(status);

		generic_handle_irq(first_irq + bit);
		status &= ~BIT(bit);
	}

	chained_irq_exit(host_chip, desc);
}
static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
				   u32 status)
{
	struct nmk_gpio_chip *nmk_chip;
	struct irq_chip *host_chip = irq_get_chip(irq);
	unsigned int first_irq;

	chained_irq_enter(host_chip, desc);

	nmk_chip = irq_get_handler_data(irq);
	first_irq = nmk_chip->domain->revmap_data.legacy.first_irq;
	while (status) {
		int bit = __ffs(status);

		generic_handle_irq(first_irq + bit);
		status &= ~BIT(bit);
	}

	chained_irq_exit(host_chip, desc);
}
/* Note, we make use of the fact that the parent IRQs, IRQ_UART[0..3]
 * are consecutive when looking up the interrupt in the demux routines.
 */
static void s3c_irq_demux_uart(unsigned int irq, struct irq_desc *desc)
{
	struct s3c_uart_irq *uirq = desc->irq_data.handler_data;
	u32 pend = __raw_readl(uirq->regs + S3C64XX_UINTP);
	int base = uirq->base_irq;
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);

	if (pend & (1 << 0))
		generic_handle_irq(base);
	if (pend & (1 << 1))
		generic_handle_irq(base + 1);
	if (pend & (1 << 2))
		generic_handle_irq(base + 2);
	if (pend & (1 << 3))
		generic_handle_irq(base + 3);

	chained_irq_exit(chip, desc);
}
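/*
 * Wiring sketch for the UART demux above, assuming the s3c64xx
 * convention of registering one parent VIC IRQ per UART and stashing
 * the s3c_uart_irq descriptor in handler_data (which is where the
 * demux reads uirq from). The function name is illustrative, and the
 * parent_irq field is assumed to exist in struct s3c_uart_irq.
 */
static void __init example_init_uart_irq(struct s3c_uart_irq *uirq)
{
	irq_set_handler_data(uirq->parent_irq, uirq);
	irq_set_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
}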
void __init s5p_init_irq(u32 *vic, u32 num_vic)
{
	struct irq_chip *chip;
	int irq;

#ifdef CONFIG_ARM_VIC
	/* initialize the VICs */
	for (irq = 0; irq < num_vic; irq++)
		vic_init(VA_VIC(irq), VIC_BASE(irq), vic[irq], 0);
#endif

	s3c_init_vic_timer_irq(5, IRQ_TIMER0);

	s3c_init_uart_irqs(uart_irqs, ARRAY_SIZE(uart_irqs));

	/* Register wakeup sources. */
	for (irq = 0; irq < ARRAY_SIZE(wakeup_source); irq++) {
		chip = irq_get_chip(wakeup_source[irq]);
		chip->irq_set_wake = s3c_irq_wake;
	}
}
static void imx_msi_handler(unsigned int irq, struct irq_desc *desc)
{
	int i, j;
	unsigned int status;
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int base_irq = IRQ_IMX_MSI_0;

	chained_irq_enter(chip, desc);

	for (i = 0; i < 8; i++) {
		status = imx_pcie_msi_pending(i);
		while (status) {
			j = __fls(status);
			generic_handle_irq(base_irq + j);
			status &= ~(1 << j);
		}
		base_irq += 32;
	}

	if (intd_active) {
		pr_info("%s intd\n", __func__);
		generic_handle_irq(MXC_INT_PCIE_0B);
	}

	chained_irq_exit(chip, desc);
}
static irqreturn_t deferred_fiq(int irq, void *dev_id)
{
	int gpio, irq_num, fiq_count;
	struct irq_chip *irq_chip;

	irq_chip = irq_get_chip(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));

	/*
	 * For each handled GPIO interrupt, keep calling its interrupt handler
	 * until the IRQ counter catches the FIQ incremented interrupt counter.
	 */
	for (gpio = AMS_DELTA_GPIO_PIN_KEYBRD_CLK;
	     gpio <= AMS_DELTA_GPIO_PIN_HOOK_SWITCH; gpio++) {
		irq_num = gpio_to_irq(gpio);
		fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];

		if (irq_counter[gpio] < fiq_count &&
		    gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
			struct irq_data *d = irq_get_irq_data(irq_num);

			/*
			 * handle_simple_irq() that OMAP GPIO edge
			 * interrupts default to since commit 80ac93c27441
			 * requires interrupt already acked and unmasked.
			 */
			if (irq_chip) {
				if (irq_chip->irq_ack)
					irq_chip->irq_ack(d);
				if (irq_chip->irq_unmask)
					irq_chip->irq_unmask(d);
			}
		}
		for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
			generic_handle_irq(irq_num);
	}
	return IRQ_HANDLED;
}
/*
 * xgold cascade handler entry
 */
void xgold_irq_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct xgold_irq_chip_data *data = NULL;
	struct irq_domain *domain = NULL;
	struct irq_chip *chip = irq_get_chip(irq);
	uint32_t casc_irq = 0;
	int32_t domain_irq = 0;

	data = irq_get_handler_data(irq);
	if (!data || !chip)
		return;

	domain = data->domain;

	chained_irq_enter(chip, desc);

	domain_irq = xgold_irq_find_mapping(irq, data->type);
	if (domain_irq >= 0)
		casc_irq = irq_find_mapping(data->domain, domain_irq);

	if (data->handle_entry)
		data->handle_entry(data);

	if (casc_irq > 0)
		generic_handle_irq(casc_irq);

	if (data->handle_exit)
		data->handle_exit(data);

	chained_irq_exit(chip, desc);
}
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_data.irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	struct irq_chip *irq_chip, *err_chip;
	int i;

	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));

	err_chip = irq_get_chip(msm_rpm_data.irq_err);
	if (!err_chip) {
		irq_chip->irq_unmask(irq_get_irq_data(irq));
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	err_chip->irq_mask(irq_get_irq_data(msm_rpm_data.irq_err));

	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
				target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();

#if defined(CONFIG_PANTECH_DEBUG)
#if defined(CONFIG_PANTECH_DEBUG_RPM_LOG) /* p14291_121102 */
	pantech_debug_rpm_log(1, req->id, req->value);
#endif
#endif

	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	err_chip->irq_unmask(irq_get_irq_data(msm_rpm_data.irq_err));
	irq_chip->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}