Example #1
void ack_brcm_irq(unsigned int irq)
{
    unsigned long flags;

    spin_lock_irqsave(&brcm_irqlock, flags);
    __disable_ack_brcm_irq(irq);

#if defined(CONFIG_SMP)
    if (irq == INTERRUPT_ID_SOFTWARE_0) {
        int this_cpu = smp_processor_id();
        int other_cpu = !this_cpu;
        per_cpu(ipi_pending, this_cpu) = 0;
        clear_c0_cause(1<<CAUSEB_IP0);
        if (per_cpu(ipi_pending, other_cpu)) {
            set_c0_cause(1<<CAUSEB_IP0);
        }
    }
#else
    if (irq == INTERRUPT_ID_SOFTWARE_0) {
        clear_c0_cause(1<<CAUSEB_IP0);
    }
#endif

    if (irq == INTERRUPT_ID_SOFTWARE_1) {
        clear_c0_cause(1<<CAUSEB_IP1);
    }

    spin_unlock_irqrestore(&brcm_irqlock, flags);
}
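Every example below goes through clear_c0_cause(), one of the read-modify-write accessors that arch/mips/include/asm/mipsregs.h generates for the CP0 Cause register via __BUILD_SET_C0(cause). A minimal sketch of the generated pair (simplified; the real accessors are macro-generated and also cover change_c0_cause()):

static inline unsigned int set_c0_cause(unsigned int set)
{
	unsigned int old = read_c0_cause();

	write_c0_cause(old | set);	/* raise the requested IP bits */
	return old;
}

static inline unsigned int clear_c0_cause(unsigned int clear)
{
	unsigned int old = read_c0_cause();

	write_c0_cause(old & ~clear);	/* drop the requested IP bits */
	return old;
}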
Example #2
void __init mips_cpu_irq_init(void)
{
	int irq_base = MIPS_CPU_IRQ_BASE;
	int i;

	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	/*
	 * Only MT is using the software interrupts currently, so we just
	 * leave them uninitialized for other processors.
	 */
	if (cpu_has_mipsmt)
		for (i = irq_base; i < irq_base + 2; i++)
			set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller,
						 handle_percpu_irq);

#if defined(CONFIG_BMIPS4380) || defined(CONFIG_BMIPS5000)
	/* set up SW IRQs for SMP */
	for (i = irq_base; i < irq_base + 8; i++)
#else
	for (i = irq_base + 2; i < irq_base + 8; i++)
#endif
		set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
					 handle_percpu_irq);
}
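Once the chip and handler are wired up as above, a driver can claim one of the eight CPU IRQs in the usual way. A hypothetical consumer (the handler and the "my-sw0" name are illustrative, not kernel code):

static irqreturn_t my_sw0_handler(int irq, void *dev_id)
{
	/* Cause.IP0 was already cleared by the chip's ack */
	return IRQ_HANDLED;
}

static int __init my_sw0_setup(void)
{
	/* software interrupt 0 is the first IRQ of the CPU block */
	return request_irq(MIPS_CPU_IRQ_BASE + 0, my_sw0_handler,
			   IRQF_PERCPU, "my-sw0", NULL);
}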
Example #3
void __init arch_init_irq(void)
{
	unsigned int i;

	set_irq_priority();

	/* clear interrupt counter for VPE0 and VPE1 */
	if (isRT6855A)
		tc_outl(CR_INTC_ITR, (1 << 18) | (1 << 10));

	/* Disable all hardware interrupts */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	/* Initialize IRQ action handlers */
	for (i = 0; i < NR_IRQS; i++) {
#ifdef CONFIG_MIPS_TC3262
		/*
		 * Only MT is using the software interrupts currently, so we just
		 * leave them uninitialized for other processors.
		 */
		if (cpu_has_mipsmt) {
			if ((i == SI_SWINT1_INT0) || (i == SI_SWINT1_INT1) ||
				(i == SI_SWINT_INT0) || (i == SI_SWINT_INT1)) { 
				set_irq_chip(i, &mips_mt_cpu_irq_controller);
				continue;
			}
		}

		if ((i == SI_TIMER_INT) || (i == SI_TIMER1_INT))
			set_irq_chip_and_handler(i, &tc3162_irq_chip,
					 handle_percpu_irq);
		else
			set_irq_chip_and_handler(i, &tc3162_irq_chip,
					 handle_level_irq);
#else
		set_irq_chip_and_handler(i, &tc3162_irq_chip,
					 handle_level_irq);
#endif
	}

#ifdef CONFIG_MIPS_TC3262
	if (cpu_has_veic || cpu_has_vint) {
		write_c0_status((read_c0_status() & ~ST0_IM) |
				(STATUSF_IP0 | STATUSF_IP1));

		/* register irq dispatch functions */
		for (i = 0; i < NR_IRQS; i++)
			set_vi_handler(i, irq_dispatch_tab[i]);
	} else {
		change_c0_status(ST0_IM, ALLINTS);
	}
#else
	/* Enable all interrupts */
	change_c0_status(ST0_IM, ALLINTS);
#endif
#ifdef CONFIG_MIPS_MT_SMP
	vsmp_int_init();
#endif
}
Example #4
/*
 * While we ack the interrupt, interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(unsigned int irq)
{
	unsigned int vpflags = dvpe();
	clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
	evpe(vpflags);
	mask_mips_mt_irq(irq);
}
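The dvpe()/evpe() bracket here (and in most MT examples below) exists because Cause is per-VPE state and clear_c0_cause() is a non-atomic read-modify-write; halting the other VPEs keeps a cross-VPE interrupt from racing the update. The pattern as an annotated sketch (hypothetical helper name):

static inline void ack_cpu_sw_irq(unsigned int n)	/* n = 0 or 1 */
{
	unsigned int vpflags = dvpe();	/* halt the other VPEs, save state */

	clear_c0_cause(0x100 << n);	/* Cause RMW can no longer be raced */
	evpe(vpflags);			/* restore previous multi-VPE state */
}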
Example #5
irqreturn_t smtc_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int vpflags;

	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare (read_c0_count() - 1);

		vpflags = dvpe();
		clear_c0_cause(0x100 << 7);	/* Cause.IP7: CP0 timer interrupt */
		evpe(vpflags);

		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system.   One TC of each VPE will take
		 * the actual timer interrupt.  The others will get
		 * timer broadcast IPIs. We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		} else {
			write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
			local_timer_interrupt(irq, dev_id, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}

	return IRQ_HANDLED;
}
Example #6
static void octeon_unmask_irq(unsigned int irq)
{
    unsigned long flags;
    spin_lock_irqsave(&octeon_irq_lock, flags);
    if (irq < 8)
    {
        /* Core local interrupts, irq 0-7 */
        clear_c0_cause(0x100 << irq);
        set_c0_status(0x100 << irq);
    }
    else if (irq<72)
    {
        /* Interrupts from the CIU, irq 8-71 */
        const uint64_t coreid = octeon_get_core_num();
        uint64_t bit = (irq - 8) & 0x3f;    /* Bit 0-63 of EN0 */
        uint64_t en0 = octeon_read_csr(OCTEON_CIU_INTX_EN0(coreid*2));
        en0 |= 1ull<<bit;
        octeon_write_csr(OCTEON_CIU_INTX_EN0(coreid*2), en0);
    }
    else if (irq<88)
    {
        /* Interrupts from the master 8259, irq 80-87 */
        outb(inb(0x21) & ~(1<<(irq-80)), 0x21);
    }
    else if (irq<96)
    {
        /* Interrupts from the slave 8259, irq 88-95 */
        outb(inb(0xa1) & ~(1<<(irq-88)), 0xa1);
    }
    spin_unlock_irqrestore(&octeon_irq_lock, flags);
}
Example #7
void play_dead(void)
{
    idle_task_exit();
    cpu_play_dead = 1;

    /*
     * Wakeup is on SW0 or SW1; disable everything else
     * Use BEV !IV (BRCM_WARM_RESTART_VEC) to avoid the regular Linux
     * IRQ handlers; this clears ST0_IE and returns immediately.
     */
    clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
    change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
                     IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
    irq_disable_hazard();

    /*
     * wait for SW interrupt from brcmstb_boot_secondary(), then jump
     * back to start_secondary()
     */
    do {
        __asm__ __volatile__(
            "	wait\n"
            "	nop\n"
            : : : "memory");

    } while (cpu_play_dead);

    __asm__ __volatile__(
        "	j	brcmstb_tp1_reentry\n"
        : : : "memory");

}
Example #8
/*
 * While we ack the interrupt, interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for mips_cpu_irq_end.
 */
static void mips_cpu_irq_ack(unsigned int irq)
{
	/* Only necessary for soft interrupts */
	clear_c0_cause(1 << (irq - mips_cpu_irq_base + 8));

	mask_mips_irq(irq);
}
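The shift 1 << (irq - mips_cpu_irq_base + 8) is the same Cause.IP mapping that the other examples spell 0x100 << (irq - base): the eight interrupt-pending bits occupy Cause bits 8..15, with the two software interrupts IP0/IP1 at bits 8 and 9. Two hypothetical macros, for comparison only:

#define CAUSE_IP_PLUS8(n)	(1 << ((n) + 8))	/* spelling used above     */
#define CAUSE_IP_0X100(n)	(0x100 << (n))		/* spelling used elsewhere */
/* both expand to the same mask for n = 0..7, since 0x100 == 1 << 8 */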
Example #9
/*
 * While we ack the interrupt, interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(unsigned int irq)
{
	unsigned int vpflags = dvpe();
	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
	evpe(vpflags);
	mask_mips_mt_irq(irq);
}
Example #10
void __init mips_cpu_irq_init(int irq_base)
{
	int i;

	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	/*
	 * Only MT is using the software interrupts currently, so we just
	 * leave them uninitialized for other processors.
	 */
	if (cpu_has_mipsmt)
		for (i = irq_base; i < irq_base + 2; i++) {
			irq_desc[i].status = IRQ_DISABLED;
			irq_desc[i].action = NULL;
			irq_desc[i].depth = 1;
			irq_desc[i].chip = &mips_mt_cpu_irq_controller;
		}

	for (i = irq_base + 2; i < irq_base + 8; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &mips_cpu_irq_controller;
	}

	mips_cpu_irq_base = irq_base;
}
Example #11
/*
 * While we ack the interrupt, interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
	unsigned int vpflags = dvpe();
	clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
	evpe(vpflags);
	mask_mips_irq(d);
}
Example #12
static void brcmstb_ack_ipi(unsigned int action)
{
    unsigned long flags;
    spin_lock_irqsave(&ipi_lock, flags);
    clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
    irq_enable_hazard();
    spin_unlock_irqrestore(&ipi_lock, flags);
}
Example #13
static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
	unsigned int vpflags = dvpe();

	clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
	evpe(vpflags);
	unmask_mips_irq(d);
	return 0;
}
Example #14
static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
	unsigned int vpflags = dvpe();

	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
	evpe(vpflags);
	mips_mt_cpu_irq_enable(irq);

	return 0;
}
Example #15
static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
	unsigned int vpflags = dvpe();

	clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
	evpe(vpflags);
	unmask_mips_mt_irq(irq);

	return 0;
}
Example #16
/*
 * While we ack the interrupt, interrupts are disabled and thus we don't need
 * to deal with concurrency issues.  Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(unsigned int irq)
{
	unsigned int vpflags = dvpe();
	int cpu_irq = 0;

	if ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1))  
		cpu_irq = 1;

	clear_c0_cause(0x100 << cpu_irq);
	evpe(vpflags);
	mask_mips_mt_irq(irq);
}
Example #17
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
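The bit < 2 test encodes an architectural rule: only Cause.IP0 and Cause.IP1, the software interrupts, have pending bits that software can write, so only they need an explicit clear_c0_cause(); IP2..IP7 follow the external interrupt lines and deassert at the source. A hypothetical predicate capturing the rule (bool via <linux/types.h>):

static inline bool cause_ip_is_software(unsigned int bit)
{
	return bit < 2;	/* IP0/IP1 writable, IP2..IP7 read-only */
}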
Example #18
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
	struct irq_domain *domain;

	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
				       &mips_cpu_intc_irq_domain_ops, NULL);
	if (!domain)
		panic("Failed to add irqdomain for MIPS CPU");
}
Example #19
static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
Example #20
void ack_brcm_irq(unsigned int irq)
{
	unsigned long flags;

	if (irq == MIPS_TIMER_INT)
		return;

	if (irq <= MIPS_TIMER_INT)
	{
		spin_lock_irqsave(&brcm_irqlock, flags);
		clear_c0_cause(1 << (CAUSEB_IP0 + irq - INTERRUPT_ID_SOFTWARE_0));
		spin_unlock_irqrestore(&brcm_irqlock, flags);
	}
	else
	{
		disable_periph_irq(irq);
	}
}
Example #21
void __init mips_cpu_irq_init(void)
{
	int irq_base = MIPS_CPU_IRQ_BASE;
	int i;

	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	/* Software interrupts are used for MT/CMT IPI */
	for (i = irq_base; i < irq_base + 2; i++)
		irq_set_chip_and_handler(i, cpu_has_mipsmt ?
					 &mips_mt_cpu_irq_controller :
					 &mips_cpu_irq_controller,
					 handle_percpu_irq);

	for (i = irq_base + 2; i < irq_base + 8; i++)
		irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
					 handle_percpu_irq);
}
Example #22
static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
	unsigned int vpflags = dvpe();
	int cpu_irq = 0;

	if ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1))  
		cpu_irq = 1;

	VPint(CR_INTC_IMR) |=  (1 << (irq-1));
	if (irq == SI_SWINT_INT0)
		VPint(CR_INTC_IMR) |=  (1 << (SI_SWINT1_INT0-1));
	else if (irq == SI_SWINT_INT1)
		VPint(CR_INTC_IMR) |=  (1 << (SI_SWINT1_INT1-1));

	clear_c0_cause(0x100 << cpu_irq);
	evpe(vpflags);
	unmask_mips_mt_irq(irq);

	return 0;
}
Example #23
void __init mips_cpu_irq_init(void)
{
	int irq_base = MIPS_CPU_IRQ_BASE;
	int i;

	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	/* Software interrupts are used for MT/CMT IPI */
	for (i = irq_base; i < irq_base + 2; i++)
		irq_set_chip_and_handler(i, cpu_has_mipsmt ?
					 &mips_mt_cpu_irq_controller :
					 &mips_cpu_irq_controller,
					 handle_percpu_irq);

	for (i = irq_base + 2; i < irq_base + 8; i++)
		irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
					 handle_percpu_irq);
}
Example #24
void __init mips_cpu_irq_init(void)
{
	int irq_base = MIPS_CPU_IRQ_BASE;
	int i;

	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	/*
	 * Only MT is using the software interrupts currently, so we just
	 * leave them uninitialized for other processors.
	 */
	if (cpu_has_mipsmt)
		for (i = irq_base; i < irq_base + 2; i++)
			set_irq_chip(i, &mips_mt_cpu_irq_controller);

	for (i = irq_base + 2; i < irq_base + 8; i++)
		set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
					 handle_percpu_irq);
}
Example #25
irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();

	/*
	 * CPU 0 handles the global timer interrupt job
	 * resets count/compare registers to trigger next timer int.
	 */
#ifndef CONFIG_MIPS_MT_SMTC
	if (cpu == 0) {
		timer_interrupt(irq, dev_id);
	} else {
		/* Everyone else needs to reset the timer int here as
		   ll_local_timer_interrupt doesn't */
		/*
		 * FIXME: need to cope with counter underflow.
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPU's
		 */
		write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
	}
#else /* SMTC */
	/*
	 *  In SMTC system, one Count/Compare set exists per VPE.
	 *  Which TC within a VPE gets the interrupt is essentially
	 *  random - we only know that it shouldn't be one with
	 *  IXMT set. Whichever TC gets the interrupt needs to
	 *  send special interprocessor interrupts to the other
	 *  TCs to make sure that they schedule, etc.
	 *
	 *  That code is specific to the SMTC kernel, not to
	 *  the simulation platform, so it's invoked from
	 *  the general MIPS timer_interrupt routine.
	 *
	 * We have a problem in that the interrupt vector code
	 * had to turn off the timer IM bit to avoid redundant
	 * entries, but we may never get to mips_cpu_irq_end
	 * to turn it back on again if the scheduler gets
	 * involved.  So we clear the pending timer here,
	 * and re-enable the mask...
	 */

	int vpflags = dvpe();
	write_c0_compare (read_c0_count() - 1);
	clear_c0_cause(0x100 << cp0_compare_irq);
	set_c0_status(0x100 << cp0_compare_irq);
	irq_enable_hazard();
	evpe(vpflags);

	if (cpu_data[cpu].vpe_id == 0)
		timer_interrupt(irq, dev_id);
	else
		write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
	smtc_timer_broadcast(cpu_data[cpu].vpe_id);

#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * every CPU should do profiling and process accounting
	 */
	local_timer_interrupt (irq, dev_id);

	return IRQ_HANDLED;
#else
	return timer_interrupt (irq, dev_id);
#endif
}
Example #26
static inline void unmask_mips_irq(unsigned int irq)
{
	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
	set_c0_status(0x100 << (irq - mips_cpu_irq_base));
}
Example #27
File: time.c Project: ivucica/linux
irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 *  In an SMTC system, one Count/Compare set exists per VPE.
	 *  Which TC within a VPE gets the interrupt is essentially
	 *  random - we only know that it shouldn't be one with
	 *  IXMT set. Whichever TC gets the interrupt needs to
	 *  send special interprocessor interrupts to the other
	 *  TCs to make sure that they schedule, etc.
	 *
	 *  That code is specific to the SMTC kernel, not to
	 * a particular platform, so it's invoked from
	 *  the general MIPS timer_interrupt routine.
	 */

	int vpflags;

	/*
	 * We could be here due to timer interrupt,
	 * perf counter overflow, or both.
	 */
	if (read_c0_cause() & (1 << 26))
		perf_irq();

	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare (read_c0_count() - 1);
		/*
		 * DVPE is necessary so long as cross-VPE interrupts
		 * are done via read-modify-write of Cause register.
		 */
		vpflags = dvpe();
		clear_c0_cause(CPUCTR_IMASKBIT);
		evpe(vpflags);
		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system.   One TC of each VPE will take
		 * the actual timer interrupt.  The others will get
		 * timer broadcast IPIs. We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
			scroll_display_message();
		} else {
			write_c0_compare(read_c0_count() +
			                 (mips_hpt_frequency/HZ));
			local_timer_interrupt(irq, dev_id);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}
#else /* CONFIG_MIPS_MT_SMTC */
	int r2 = cpu_has_mips_r2;

	if (cpu == 0) {
		/*
		 * CPU 0 handles the global timer interrupt job and process
		 * accounting resets count/compare registers to trigger next
		 * timer int.
		 */
		if (!r2 || (read_c0_cause() & (1 << 26)))
			if (perf_irq())
				goto out;

		/* we keep interrupt disabled all the time */
		if (!r2 || (read_c0_cause() & (1 << 30)))
			timer_interrupt(irq, NULL);

		scroll_display_message();
	} else {
		/* Everyone else needs to reset the timer int here as
		   ll_local_timer_interrupt doesn't */
		/*
		 * FIXME: need to cope with counter underflow.
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPU's
		 */
		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));

		/*
		 * Other CPUs should do profiling and process accounting
		 */
		local_timer_interrupt(irq, dev_id);
	}
out:
#endif /* CONFIG_MIPS_MT_SMTC */
	return IRQ_HANDLED;
}
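A recurring idiom in the timer examples above is write_c0_compare(read_c0_count() - 1): moving Compare to just behind Count pushes the next match nearly a full counter period away, de-asserting the timer interrupt, and on MIPS32R2 cores the Compare write itself clears the pending bit. The idiom as a hypothetical helper:

static inline void deassert_c0_timer(void)
{
	/* next Count == Compare match is now ~2^32 ticks away; on R2
	 * cores this write also clears the pending timer interrupt */
	write_c0_compare(read_c0_count() - 1);
}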