/*
 * Issue an RPM request in polled (no-IRQ) mode: the ack interrupt is
 * masked and completion is busy-waited instead.
 *
 * Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx, uint32_t *sel_masks,
	struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_platform->irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack;
	uint32_t sel_masks_ack[MSM_RPM_SEL_MASK_SIZE];
	int i;

	/* Stash the request so the busy-wait path can fill in ack results. */
	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;	/* polled, no completion */

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);

	/* Mask the ack interrupt; completion is polled below instead. */
	get_irq_chip(irq)->irq_mask(irq_get_irq_data(irq));

	/* Drain any request already in flight before taking over. */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}

	msm_rpm_request = &msm_rpm_request_poll_mode;

	/* Write each id/value pair into the RPM request page. */
	for (i = 0; i < count; i++) {
		BUG_ON(req[i].id > MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ, req[i].id, req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		MSM_RPM_CTRL_REQ_SEL_0, sel_masks, MSM_RPM_SEL_MASK_SIZE);
	msm_rpm_write(MSM_RPM_PAGE_CTRL, MSM_RPM_CTRL_REQ_CTX_0, ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	dsb();
	msm_rpm_send_req_interrupt();

	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	get_irq_chip(irq)->irq_unmask(irq_get_irq_data(irq));
	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* The ack must echo the requested context/selectors, modulo the
	 * REJECTED bit which encodes the result. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED))
		? -ENOSPC : 0;
}
/*
 * Initialize the Tegra interrupt hierarchy: quiesce the legacy ICTLRs,
 * bring up the GIC, then install a wrapper irq_chip that reuses the
 * GIC's low-level operations for the primary interrupt range.
 */
void __init tegra_init_irq(void)
{
	struct irq_chip *gic;
	unsigned int i;

	/* Mask all sources at each legacy controller and zero the
	 * IEP class register (presumably selects IRQ vs FIQ routing
	 * - TODO confirm against the TRM). */
	for (i=0; i<PPI_NR; i++) {
		writel(~0, ictlr_to_virt(i) + ICTLR_CPU_IER_CLR);
		writel(0, ictlr_to_virt(i) + ICTLR_CPU_IEP_CLASS);
	}

	gic_dist_init(0, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE), 29);
	gic_cpu_init(0, IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));

	/* Borrow the GIC chip's ops (via irq 29) for Tegra's wrapper chip. */
	gic = get_irq_chip(29);
	gic_unmask_irq = gic->unmask;
	gic_mask_irq = gic->mask;
	tegra_irq.ack = gic->ack;
#ifdef CONFIG_SMP
	tegra_irq.set_affinity = gic->set_affinity;
#endif

	/* Install the wrapper chip as a level-triggered handler on the
	 * primary interrupt range. */
	for (i=INT_PRI_BASE; i<INT_SYNCPT_THRESH_BASE; i++) {
		set_irq_chip(i, &tegra_irq);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	syncpt_init_irq();
}
/*
 * Setup the local clock events for a CPU.
 *
 * Calibrates the TWD rate, fills in the clock_event_device, registers
 * it, and unmasks its per-CPU interrupt at the local controller.
 */
void __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
	unsigned long flags;

	twd_calibrate_rate();

	clk->name = "local_timer";
	/* C3STOP: this timer stops in deep CPU idle states. */
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_C3STOP;
	clk->rating = 350;
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;
	clk->shift = 20;
	/* Derive the ns<->ticks multiplier from the calibrated rate. */
	clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);

	clockevents_register_device(clk);

	/* Make sure our local interrupt controller has this enabled */
	local_irq_save(flags);
	irq_to_desc(clk->irq)->status |= IRQ_NOPROBE;
	get_irq_chip(clk->irq)->unmask(clk->irq);
	local_irq_restore(flags);
}
/*
 * Setup the local clock events for a CPU.
 *
 * Initializes the calling CPU's per-cpu clock_event_device, unmasks
 * the local timer interrupt, then registers the device.
 */
void __cpuinit local_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
	unsigned long flags;

	twd_calibrate_rate();

	clk->name = "local_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	clk->rating = 350;
	clk->set_mode = local_timer_set_mode;
	clk->set_next_event = local_timer_set_next_event;
	clk->irq = IRQ_LOCALTIMER;
	/* Per-CPU device: only deliverable to this CPU. */
	clk->cpumask = cpumask_of(cpu);
	clk->shift = 20;
	/* Derive the ns<->ticks multiplier from the calibrated rate. */
	clk->mult = div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift);
	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);

	/* Make sure our local interrupt controller has this enabled */
	local_irq_save(flags);
	get_irq_chip(IRQ_LOCALTIMER)->unmask(IRQ_LOCALTIMER);
	local_irq_restore(flags);

	clockevents_register_device(clk);
}
/*
 * Chained handler for a secondary GIC cascaded behind another
 * controller: ack the parent, read the secondary GIC's INTACK to find
 * the pending source, dispatch it, then unmask the parent again.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = get_irq_data(irq);
	struct irq_chip *chip = get_irq_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	/* primary controller ack'ing */
	chip->ack(irq);

	spin_lock(&irq_controller_lock);
	status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
	spin_unlock(&irq_controller_lock);

	/* Low 10 bits of INTACK hold the interrupt ID. */
	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)	/* spurious ID: nothing pending */
		goto out;

	/* Translate the hardware ID into the Linux irq number space. */
	cascade_irq = gic_irq + chip_data->irq_offset;
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	/* primary controller unmasking */
	chip->unmask(irq);
}
/*
 * Common S5P interrupt bring-up: initialize the given VICs, wire up
 * the PWM timer and UART interrupts, and (under CONFIG_PM) install
 * the wakeup-source set_wake hook on each wakeup-capable irq_chip.
 */
void __init s5p_init_irq(u32 *vic, u32 num_vic)
{
	struct irq_chip *chip;
	int irq;

	/* initialize the VICs */
	for (irq = 0; irq < num_vic; irq++)
		vic_init(VA_VIC(irq), VIC_BASE(irq), vic[irq], 0);

	/* Route each PWM timer interrupt from its VIC line. */
	s3c_init_vic_timer_irq(IRQ_TIMER0_VIC, IRQ_TIMER0);
	s3c_init_vic_timer_irq(IRQ_TIMER1_VIC, IRQ_TIMER1);
	s3c_init_vic_timer_irq(IRQ_TIMER2_VIC, IRQ_TIMER2);
	s3c_init_vic_timer_irq(IRQ_TIMER3_VIC, IRQ_TIMER3);
	s3c_init_vic_timer_irq(IRQ_TIMER4_VIC, IRQ_TIMER4);

	s3c_init_uart_irqs(uart_irqs, ARRAY_SIZE(uart_irqs));

#ifdef CONFIG_PM
	/* Register wakeup source. */
	for (irq = 0; irq < ARRAY_SIZE(wakeup_source); irq++) {
		chip = get_irq_chip(wakeup_source[irq]);
		chip->set_wake = s3c_irq_wake;
	}
#endif
}
/*
 * Chained handler for external interrupts 16-31: ack the parent,
 * demux both 8-bit banks, and report a bad IRQ if neither bank had
 * a pending source, then unmask the parent again.
 */
static void s5pv310_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = get_irq_chip(irq);
	u32 served_lo;
	u32 served_hi;

	/* Ack at the parent controller first, if it supports it. */
	if (chip->ack)
		chip->ack(irq);

	served_lo = s5pv310_irq_demux_eint(irq, IRQ_EINT(16));
	served_hi = s5pv310_irq_demux_eint(irq, IRQ_EINT(24));

	/* Neither bank had anything pending: treat as spurious. */
	if (!(served_lo || served_hi))
		do_bad_IRQ(irq, desc);

	chip->unmask(irq);
}
/*
 * Switch an interrupt between IRQ and FIQ mode via the MXC chip's
 * set_irq_fiq hook.
 *
 * Returns the hook's result, or -ENOSYS when the irq has no chip or
 * the chip provides no set_irq_fiq operation.
 */
int mxc_set_irq_fiq(unsigned int irq, unsigned int type)
{
	struct irq_chip *generic_chip = get_irq_chip(irq);

	if (generic_chip) {
		/* Recover the MXC wrapper embedding this irq_chip. */
		struct mxc_irq_chip *mxc_chip =
			container_of(generic_chip, struct mxc_irq_chip, base);

		if (mxc_chip->set_irq_fiq)
			return mxc_chip->set_irq_fiq(irq, type);
	}

	return -ENOSYS;
}
/*
 * Set the hardware priority of an interrupt via the MXC chip's
 * set_priority hook.
 *
 * Returns the hook's result, or -ENOSYS when the irq has no chip or
 * the chip provides no set_priority operation.
 */
int imx_irq_set_priority(unsigned char irq, unsigned char prio)
{
	struct irq_chip *generic_chip = get_irq_chip(irq);

	if (generic_chip) {
		/* Recover the MXC wrapper embedding this irq_chip. */
		struct mxc_irq_chip *mxc_chip =
			container_of(generic_chip, struct mxc_irq_chip, base);

		if (mxc_chip->set_priority)
			return mxc_chip->set_priority(irq, prio);
	}

	return -ENOSYS;
}
/*
 * S5PV310 interrupt bring-up: initialize the (internal or external)
 * GIC, cascade the interrupt combiners off their SPI lines, run the
 * common S5P init, and hook the RTC alarm's wakeup control.
 */
void __init s5pv310_init_irq(void)
{
	int irq;

#ifdef CONFIG_USE_EXT_GIC
	gic_cpu_base_addr = S5P_VA_EXTGIC_CPU;
	gic_dist_init(0, S5P_VA_EXTGIC_DIST, IRQ_SPI(0));
	gic_cpu_init(0, S5P_VA_EXTGIC_CPU);
#else
	gic_cpu_base_addr = S5P_VA_GIC_CPU;
	gic_dist_init(0, S5P_VA_GIC_DIST, IRQ_SPI(0));
	gic_cpu_init(0, S5P_VA_GIC_CPU);
#endif

	for (irq = 0; irq < MAX_COMBINER_NR; irq++) {
#ifdef CONFIG_CPU_S5PV310_EVT1
		/* From SPI(0) to SPI(39) and SPI(51), SPI(53)
		 * are connected to the interrupt combiner. These irqs
		 * should be initialized to support cascade interrupt.
		 */
		if ((irq >= 40) && !(irq == 51) && !(irq == 53))
			continue;
#endif
#ifdef CONFIG_USE_EXT_GIC
		combiner_init(irq, (void __iomem *)S5P_VA_EXTCOMBINER(irq),
				COMBINER_IRQ(irq, 0));
#else
		combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
				COMBINER_IRQ(irq, 0));
#endif
		combiner_cascade_irq(irq, IRQ_SPI(irq));
	}

	/* The parameters of s5p_init_irq() are for VIC init.
	 * Theses parameters should be NULL and 0 because S5PV310
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);

	/* Set s3c_irq_wake as set_wake() of GIC irq_chip */
	get_irq_chip(IRQ_RTC_ALARM)->set_wake = s3c_irq_wake;
}
static void s5pv310_irq_eint0_15(unsigned int irq, struct irq_desc *desc) { u32 i; struct irq_chip *chip = get_irq_chip(irq); if (chip->ack) chip->ack(irq); for (i = 0; i <= 15; i++) { if (irq == s5pv310_get_irq_nr(i)) { generic_handle_irq(IRQ_EINT(i)); goto out; } } do_bad_IRQ(irq, desc); out: chip->unmask(irq); }
/*
 * Chained handler for a Nomadik GPIO bank: repeatedly read the bank's
 * interrupt-status register and dispatch each pending pin as its own
 * Linux irq until no bits remain set.
 */
static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct nmk_gpio_chip *nmk_chip;
	struct irq_chip *host_chip;
	unsigned int gpio_irq;
	u32 pending;
	unsigned int first_irq;

	nmk_chip = get_irq_data(irq);
	first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
	/* Re-read IS each pass so pins that fire while dispatching are
	 * serviced in the same invocation. */
	while ( (pending = readl(nmk_chip->addr + NMK_GPIO_IS)) ) {
		gpio_irq = first_irq + __ffs(pending);
		generic_handle_irq(gpio_irq);
	}

	/* Deliberately compiled out: acking the parent would disable it
	 * on this controller (see comment below). */
	if (0) {/* don't ack parent irq, as ack == disable */
		host_chip = get_irq_chip(irq);
		host_chip->ack(irq);
	}
}
/* Map an irq number back to the ipr_desc embedding its irq_chip. */
static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
{
	return container_of(get_irq_chip(irq), struct ipr_desc, chip);
}
void __init mmp3_init_gic(void) { struct irq_chip *chip; /* disable global irq of ICU for MP1, MP2, MM*/ __raw_writel(0x1, MMP3_ICU_GBL_IRQ1_MSK); __raw_writel(0x1, MMP3_ICU_GBL_IRQ2_MSK); __raw_writel(0x1, MMP3_ICU_GBL_IRQ3_MSK); __raw_writel(0x1, MMP3_ICU_GBL_IRQ4_MSK); __raw_writel(0x1, MMP3_ICU_GBL_IRQ5_MSK); __raw_writel(0x1, MMP3_ICU_GBL_IRQ6_MSK); init_mux_irq(&pmic_icu_chip_data, IRQ_MMP3_PMIC_BASE, 4); init_mux_irq(&rtc_icu_chip_data, IRQ_MMP3_RTC_BASE, 2); init_mux_irq(&hsi3_icu_chip_data, IRQ_MMP3_HSI3_BASE, 3); init_mux_irq(&gpu_icu_chip_data, IRQ_MMP3_GPU_BASE, 3); init_mux_irq(&twsi_icu_chip_data, IRQ_MMP3_TWSI_BASE, 5); init_mux_irq(&hsi2_icu_chip_data, IRQ_MMP3_HSI2_BASE, 2); init_mux_irq(&dxo_icu_chip_data, IRQ_MMP3_DXO_BASE, 2); init_mux_irq(&misc1_icu_chip_data, IRQ_MMP3_MISC1_BASE, 31); init_mux_irq(&ci_icu_chip_data, IRQ_MMP3_CI_BASE, 2); init_mux_irq(&ssp_icu_chip_data, IRQ_MMP3_SSP_BASE, 2); init_mux_irq(&hsi1_icu_chip_data, IRQ_MMP3_HSI1_BASE, 4); init_mux_irq(&misc2_icu_chip_data, IRQ_MMP3_MISC2_BASE, 20); init_mux_irq(&hsi0_icu_chip_data, IRQ_MMP3_HSI0_BASE, 5); chip = get_irq_chip(IRQ_MMP3_PMIC_MUX); set_irq_chained_handler(IRQ_MMP3_PMIC_MUX, pmic_irq_demux); chip->unmask(IRQ_MMP3_PMIC_MUX); chip = get_irq_chip(IRQ_MMP3_RTC_MUX); set_irq_chained_handler(IRQ_MMP3_RTC_MUX, rtc_irq_demux); chip->unmask(IRQ_MMP3_RTC_MUX); chip = get_irq_chip(IRQ_MMP3_HSI3_MUX); set_irq_chained_handler(IRQ_MMP3_HSI3_MUX, hsi3_irq_demux); chip->unmask(IRQ_MMP3_HSI3_MUX); chip = get_irq_chip(IRQ_MMP3_GPU_MUX); set_irq_chained_handler(IRQ_MMP3_GPU_MUX, gpu_irq_demux); chip->unmask(IRQ_MMP3_GPU_MUX); chip = get_irq_chip(IRQ_MMP3_TWSI_MUX); set_irq_chained_handler(IRQ_MMP3_TWSI_MUX, twsi_irq_demux); chip->unmask(IRQ_MMP3_TWSI_MUX); chip = get_irq_chip(IRQ_MMP3_HSI2_MUX); set_irq_chained_handler(IRQ_MMP3_HSI2_MUX, hsi2_irq_demux); chip->unmask(IRQ_MMP3_HSI2_MUX); chip = get_irq_chip(IRQ_MMP3_DXO_MUX); set_irq_chained_handler(IRQ_MMP3_DXO_MUX, dxo_irq_demux); 
chip->unmask(IRQ_MMP3_DXO_MUX); chip = get_irq_chip(IRQ_MMP3_MISC1_MUX); set_irq_chained_handler(IRQ_MMP3_MISC1_MUX, misc1_irq_demux); chip->unmask(IRQ_MMP3_MISC1_MUX); chip = get_irq_chip(IRQ_MMP3_CI_MUX); set_irq_chained_handler(IRQ_MMP3_CI_MUX, ci_irq_demux); chip->unmask(IRQ_MMP3_CI_MUX); chip = get_irq_chip(IRQ_MMP3_SSP_MUX); set_irq_chained_handler(IRQ_MMP3_SSP_MUX, ssp_irq_demux); chip->unmask(IRQ_MMP3_SSP_MUX); chip = get_irq_chip(IRQ_MMP3_HSI1_MUX); set_irq_chained_handler(IRQ_MMP3_HSI1_MUX, hsi1_irq_demux); chip->unmask(IRQ_MMP3_HSI1_MUX); chip = get_irq_chip(IRQ_MMP3_MISC2_MUX); set_irq_chained_handler(IRQ_MMP3_MISC2_MUX, misc2_irq_demux); chip->unmask(IRQ_MMP3_MISC2_MUX); chip = get_irq_chip(IRQ_MMP3_HSI0_MUX); set_irq_chained_handler(IRQ_MMP3_HSI0_MUX, hsi0_irq_demux); chip->unmask(IRQ_MMP3_HSI0_MUX); }
/*
 * Map an irq number back to the ipr_desc embedding its irq_chip.
 *
 * Hand-rolled container_of(): subtract the offset of the embedded
 * 'chip' member from the chip pointer. Return the proper pointer type
 * directly instead of laundering it through void * (the cast chain
 * previously discarded type information for no benefit).
 */
static inline struct ipr_desc *get_ipr_desc(unsigned int irq)
{
	struct irq_chip *chip = get_irq_chip(irq);

	return (struct ipr_desc *)
		((char *)chip - offsetof(struct ipr_desc, chip));
}
/*
 * Note that this 'irq' is the real repesentative irq for GPIO
 *
 * To reduce the register access, use the valid group cache.
 * first check the valid groups. if failed, scan all groups fully.
 */
static void samsung_irq_gpio_handler(unsigned int irq, struct irq_desc *desc)
{
	struct samsung_irq_gpio *gpio;
	int group, n, offset;
	int start, end, pend, mask, handled = 0, action = 0;
	struct irq_chip *chip = get_irq_chip(irq);

	gpio = get_irq_data(irq);
	start = gpio->start;
	end = gpio->nr_groups;
	/* NOTE(review): the loops below use 'group <= end', visiting
	 * nr_groups + 1 groups; looks like an off-by-one unless
	 * nr_groups is actually the last group's index - confirm. */

	/* primary controller ack'ing */
	if (chip->ack)
		chip->ack(irq);

	/* Check the valid group first */
	for (group = 0; group <= end; group++) {
		if (!test_bit(group, &gpio->valid_groups))
			continue;

		offset = REG_OFFSET(group); /* 4 bytes offset */
		pend = __raw_readl(gpio->base + PEND_OFFSET + offset);
		if (!pend)
			continue;

		/* Only service sources whose mask bit is clear. */
		mask = __raw_readl(gpio->base + MASK_OFFSET + offset);
		pend &= ~mask;
		if (!pend)
			continue;

		/* Dispatch each pending bit, highest set bit first. */
		while (pend) {
			n = fls(pend) - 1;
			generic_handle_irq(IRQ_GPIO_GROUP(start + group) + n);
			pend &= ~BIT(n);
			++action;
		}
		handled = 1;
	}

	if (handled)
		goto out;

	/* Okay we can't find a proper handler. Scan fully */
	for (group = 0; group <= end; group++) {
		offset = REG_OFFSET(group); /* 4 bytes offset */
		pend = __raw_readl(gpio->base + PEND_OFFSET + offset);
		if (!pend)
			continue;

		mask = __raw_readl(gpio->base + MASK_OFFSET + offset);
		pend &= ~mask;

		while (pend) {
			n = fls(pend) - 1;
			generic_handle_irq(IRQ_GPIO_GROUP(start + group) + n);
			pend &= ~BIT(n);
			++action;
		}

		/* It found the valid group */
		set_bit(group, &gpio->valid_groups);
	}

out:
	/* Nothing was dispatched by either pass: spurious interrupt. */
	if (!action)
		do_bad_IRQ(irq, desc);

	/* primary controller unmasking */
	chip->unmask(irq);
}