Example #1
0
/*
 * Set up the IP27 interrupt layer: core MIPS CPU interrupts, the HUB
 * interrupt controller, and the two HUB pending lines, which are
 * banked per CPU and dispatched through chained flow handlers.
 */
void __init arch_init_irq(void)
{
	const unsigned int pend0 = IP27_HUB_PEND0_IRQ;
	const unsigned int pend1 = IP27_HUB_PEND1_IRQ;

	mips_cpu_irq_init();
	ip27_hub_irq_init();

	/* Both pending registers are per-CPU descriptors. */
	irq_set_percpu_devid(pend0);
	irq_set_percpu_devid(pend1);

	/* Hand each pending line to its demultiplexing flow handler. */
	irq_set_chained_handler(pend0, ip27_do_irq_mask0);
	irq_set_chained_handler(pend1, ip27_do_irq_mask1);
}
Example #2
0
/*
 * Register and enable a per-cpu interrupt line.
 *
 * Registration happens exactly once, on the boot CPU (cpu == 0);
 * every CPU -- boot CPU included -- then enables its own private
 * copy of the line.
 */
void arc_request_percpu_irq(int irq, int cpu,
                            irqreturn_t (*isr)(int irq, void *dev),
                            const char *irq_nm,
                            void *percpu_dev)
{
    if (cpu == 0) {
        /*
         * These 2 calls are essential to making percpu IRQ APIs work
         * Ideally these details could be hidden in irq chip map function
         * but the issue is IPIs IRQs being static (non-DT) and platform
         * specific, so we can't identify them there.
         */
        irq_set_percpu_devid(irq);
        irq_modify_status(irq, IRQ_NOAUTOEN, 0);  /* @irq, @clr, @set */

        if (request_percpu_irq(irq, isr, irq_nm, percpu_dev))
            panic("Percpu IRQ request failed for %d\n", irq);
    }

    /* All CPUs, including the boot CPU, enable their banked copy. */
    enable_percpu_irq(irq, 0);
}
Example #3
0
/*
 * Map @hwirq into the intc domain and configure the resulting virq as a
 * per-cpu-devid interrupt handled by @chip, left disabled (NOAUTOEN) so
 * each CPU must enable its own copy.
 */
static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
{
	int irq = irq_create_mapping(intc.domain, hwirq);

	/*
	 * irq_create_mapping() returns 0 when no virq could be allocated;
	 * configuring virq 0 would stomp on the invalid-IRQ descriptor.
	 */
	if (!irq) {
		pr_err("failed to map hwirq %d\n", hwirq);
		return;
	}

	irq_set_percpu_devid(irq);
	irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
}
Example #4
0
File: smp.c Project: Aqueti/kernels
/*
 * irq_domain map callback for the CustomIPI domain: accept only hwirqs
 * inside [IPI_CUSTOM_FIRST, IPI_CUSTOM_LAST] and set them up as
 * per-cpu-devid interrupts that stay disabled until each CPU enables
 * its own copy (NOAUTOEN).
 *
 * Returns 0 on success, -EINVAL for an out-of-range hwirq.
 */
static int custom_ipi_domain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hw)
{
	bool supported = (hw >= IPI_CUSTOM_FIRST) && (hw <= IPI_CUSTOM_LAST);

	if (!supported) {
		pr_err("hwirq-%u is not in supported range for CustomIPI IRQ domain\n",
		       (uint)hw);
		return -EINVAL;
	}

	irq_set_percpu_devid(irq);
	irq_set_chip_and_handler(irq, &custom_ipi_chip, handle_custom_ipi_irq);
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	return 0;
}
Example #5
0
/*
 * irq_domain map callback: the timer (and, on SMP, the IPI) lines are
 * banked per core and get the percpu chip/handler; everything else is
 * a normal fasteoi interrupt.
 */
static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hw)
{
	bool percpu = (hw == NPS_TIMER0_IRQ);

#ifdef CONFIG_SMP
	percpu = percpu || (hw == NPS_IPI_IRQ);
#endif

	if (percpu) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &nps400_irq_chip_percpu,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &nps400_irq_chip_fasteoi,
					 handle_fasteoi_irq);
	}

	return 0;
}
/*
 * irq_domain map callback for the ARCv2 core intc.
 *
 * Core intc IRQs [16, 23] (Timers, WDT, IPI, PCT) are statically
 * assigned and always private per core; external IRQs get ordinary
 * level handling.
 */
static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
			 irq_hw_number_t hw)
{
	if (hw >= FIRST_EXT_IRQ) {
		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
		return 0;
	}

	/*
	 * A subsequent request_percpu_irq() fails if percpu_devid is
	 * not set. That in turns sets NOAUTOEN, meaning each core needs
	 * to call enable_percpu_irq()
	 */
	irq_set_percpu_devid(irq);
	irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);

	return 0;
}
/*
 * One-time GIC Distributor setup, run on the boot CPU.
 *
 * Sequence: disable the Distributor, program trigger type / CPU target /
 * priority for the shared (SPI) interrupts, mask them, register Linux
 * handlers for the PPI/SPI range, mark all global interrupts non-secure,
 * then re-enable the Distributor for secure and non-secure groups.
 *
 * NOTE(review): the config/target loops bound at (MT_NR_SPI + 32) while
 * the priority/disable loops bound at NR_MT_IRQ_LINE -- presumably these
 * agree; confirm against the platform IRQ definitions.
 */
static void mt_gic_dist_init(void)
{
    unsigned int i;
    u32 cpumask = 1 << smp_processor_id();

    /* Replicate the boot CPU's target bit into all four byte lanes. */
    cpumask |= cpumask << 8;
    cpumask |= cpumask << 16;

    /* Forwarding must be off while the Distributor is reprogrammed. */
    writel(0, GIC_DIST_BASE + GIC_DIST_CTRL);

    /*
     * Set all global interrupts to be level triggered, active low.
     */
    for (i = 32; i < (MT_NR_SPI + 32); i += 16) {
        writel(0, GIC_DIST_BASE + GIC_DIST_CONFIG + i * 4 / 16);
    }

    /*
     * Set all global interrupts to this CPU only.
     */
    for (i = 32; i < (MT_NR_SPI + 32); i += 4) {
        writel(cpumask, GIC_DIST_BASE + GIC_DIST_TARGET + i * 4 / 4);
    }

    /*
     * Set priority on all global interrupts.
     */
    for (i = 32; i < NR_MT_IRQ_LINE; i += 4) {
        writel(0xA0A0A0A0, GIC_DIST_BASE + GIC_DIST_PRI + i * 4 / 4);
    }

    /*
     * Disable all global interrupts.
     */
    for (i = 32; i < NR_MT_IRQ_LINE; i += 32) {
        writel(0xFFFFFFFF, GIC_DIST_BASE + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); 
    }

    /*
     * Setup the Linux IRQ subsystem.  The private timer and watchdog
     * PPIs are banked per CPU and stay NOAUTOEN; all other lines are
     * plain level interrupts, available for probing.
     */
    for (i = GIC_PPI_OFFSET; i < NR_MT_IRQ_LINE; i++) {
        if(i == GIC_PPI_PRIVATE_TIMER || i == GIC_PPI_WATCHDOG_TIMER) {
            irq_set_percpu_devid(i);
            irq_set_chip_and_handler(i, &mt_irq_chip, handle_percpu_devid_irq);
            set_irq_flags(i, IRQF_VALID | IRQF_NOAUTOEN);
        } else {
            irq_set_chip_and_handler(i, &mt_irq_chip, handle_level_irq);
            set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
        }
    }
#ifdef CONFIG_FIQ_DEBUGGER
    /* The FIQ debugger SGI gets an ordinary level handler. */
    irq_set_chip_and_handler(FIQ_DBG_SGI, &mt_irq_chip, handle_level_irq);
    set_irq_flags(FIQ_DBG_SGI, IRQF_VALID | IRQF_PROBE);
#endif

    /*
     * set all global interrupts as non-secure interrupts
     */
    for (i = 32; i < NR_IRQS; i += 32)
    {
        writel(0xFFFFFFFF, GIC_ICDISR + 4 * (i / 32));
    }

    /*
     * enable secure and non-secure interrupts on Distributor
     */
    writel(3, GIC_DIST_BASE + GIC_DIST_CTRL);
}
Example #8
0
/*
 * Boot-time setup of one GIC Distributor.
 *
 * Disables forwarding, reads the supported interrupt count from
 * GIC_DIST_CTR, configures trigger type / target CPU / priority for all
 * shared (global) interrupts and masks them, registers the Linux
 * handlers -- PPIs as per-cpu-devid NOAUTOEN lines, the rest as fasteoi
 * -- and finally re-enables the Distributor.
 *
 * @gic:       per-controller state; dist_base and irq_offset are read,
 *             gic_irqs is written here.
 * @irq_start: first Linux irq number served by this controller; used to
 *             derive how many PPIs the primary GIC exposes.
 */
static void __init gic_dist_init(struct gic_chip_data *gic,
	unsigned int irq_start)
{
	unsigned int gic_irqs, irq_limit, i;
	u32 cpumask;
	void __iomem *base = gic->dist_base;
	u32 cpu = 0;
	u32 nrppis = 0, ppi_base = 0;

#ifdef CONFIG_SMP
	cpu = cpu_logical_map(smp_processor_id());
#endif

	/* Replicate this CPU's target bit into all four byte lanes. */
	cpumask = 1 << cpu;
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Forwarding must be off while the Distributor is reprogrammed. */
	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;

	gic->gic_irqs = gic_irqs;

	/*
	 * Nobody would be insane enough to use PPIs on a secondary
	 * GIC, right?
	 */
	if (gic == &gic_data[0]) {
		nrppis = (32 - irq_start) & 31;

		/* The GIC only supports up to 16 PPIs. */
		if (nrppis > 16)
			BUG();

		ppi_base = gic->irq_offset + 32 - nrppis;
	}

	pr_info("Configuring GIC with %d sources (%d PPIs)\n",
		gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + gic_irqs;
	if (WARN_ON(irq_limit > NR_IRQS))
		irq_limit = NR_IRQS;

	/*
	 * Setup the Linux IRQ subsystem.  PPIs are banked per CPU, so
	 * they get the per-cpu-devid handler and stay NOAUTOEN until
	 * each CPU enables its own copy.
	 */
	for (i = 0; i < nrppis; i++) {
		int ppi = i + ppi_base;

		irq_set_percpu_devid(ppi);
		irq_set_chip_and_handler(ppi, &gic_chip,
					 handle_percpu_devid_irq);
		irq_set_chip_data(ppi, gic);
		set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
	}

	/* Remaining (global) interrupts are ordinary fasteoi lines. */
	for (i = irq_start + nrppis; i < irq_limit; i++) {
		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
		irq_set_chip_data(i, gic);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	/* Re-enable forwarding now that everything is configured. */
	writel_relaxed(1, base + GIC_DIST_CTRL);
}