Example 1
static inline void disable_cpu_irq(void *unused, int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem &= ~eirr_bit;
	set_eiem(cpu_eiem);
	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}
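
All of these handlers manipulate the PA-RISC external-interrupt registers: EIEM (the enable mask, control register 15, written via set_eiem()) and EIRR (the request register, control register 23, accessed via mfctl()/mtctl()). As a reading aid, here is a minimal sketch of how EIEM_MASK() and the related constants are typically defined; the exact values are assumptions modeled on arch/parisc/include/asm/irq.h and may differ between kernel versions.

/* Assumed definitions, modeled on arch/parisc/include/asm/irq.h. */
#define CPU_IRQ_BASE	16			/* first CPU-internal IRQ number */
#define TIMER_IRQ	(CPU_IRQ_BASE + 0)
#define IPI_IRQ		(CPU_IRQ_BASE + 1)
#define CPU_IRQ_MAX	(CPU_IRQ_BASE + (BITS_PER_LONG - 1))

/* IRQ numbers map to EIEM/EIRR bits MSB-first: CPU_IRQ_BASE owns the
 * top bit of the word, CPU_IRQ_MAX the bottom bit. */
#define EIEM_MASK(irq)	(1UL << (CPU_IRQ_MAX - (irq)))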
Example 2
static void enable_cpu_irq(void *unused, int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
	cpu_eiem |= eirr_bit;
	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
	set_eiem(cpu_eiem);
}
Example 3
void cpu_end_irq(unsigned int irq)
{
	unsigned long mask = EIEM_MASK(irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
Example 4
void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
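
For context, handlers with this struct irq_data signature are registered through an irq_chip in later kernels. A rough sketch of the wiring, assuming mask/unmask handlers named cpu_mask_irq()/cpu_unmask_irq() and an irq_data-based port of cpu_ack_irq() from Example 6:

/* Sketch only: the field selection and the mask/unmask handler names
 * are assumptions modeled on the parisc cpu_interrupt_type irq_chip. */
static struct irq_chip cpu_interrupt_type = {
	.name       = "CPU",
	.irq_mask   = cpu_mask_irq,	/* assumed */
	.irq_unmask = cpu_unmask_irq,	/* assumed */
	.irq_ack    = cpu_ack_irq,	/* Example 6, ported to take irq_data */
	.irq_eoi    = cpu_eoi_irq,	/* Example 4 above */
};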
Example 5
static inline void unmask_cpu_irq(void *unused, int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);
	cpu_eiem |= eirr_bit;
	/* NOTE: sending an IPI will cause do_cpu_irq_mask() to
	** handle *any* unmasked pending interrupts.
	** ie We don't need to check for pending interrupts here.
	*/
	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
	set_eiem(cpu_eiem);
}
Example 6
void cpu_ack_irq(unsigned int irq)
{
	unsigned long mask = EIEM_MASK(irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
	mtctl(mask, 23);
}
Example 7
void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
	claim_cpu_irqs();
#ifdef CONFIG_SMP
	if (!cpu_eiem)
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}
Example 8
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	desc = irq_to_desc(irq);
	cpumask_copy(&dest, desc->irq_data.affinity);
	if (irqd_is_per_cpu(&desc->irq_data) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			   per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
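
The eirr_to_irq() helper used above is not shown in these examples. A plausible sketch, assuming the MSB-first bit layout implied by EIEM_MASK() (this is a hypothetical reconstruction, not verified kernel source):

/* Hypothetical: inverts EIEM_MASK(irq) == 1UL << (CPU_IRQ_MAX - irq),
 * so the highest set EIRR bit yields the lowest pending IRQ number. */
static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);	/* 1-based position of highest set bit */

	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}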
Example 9
void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
	claim_cpu_irqs();
#ifdef CONFIG_SMP
	if (!cpu_eiem)
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}
Example 10
/*
 * This routine handles various exception codes.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static spinlock_t terminate_lock = SPIN_LOCK_UNLOCKED;

	set_eiem(0);
	__cli();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all switch paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;
	}

	show_stack((unsigned long *)regs->gr[30]);

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
			msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the 
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);
	
	/* Gutter the processor... */
	for (;;)
		;
}
Example 11
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	cpumask_copy(&dest, irq_desc[irq].affinity);
	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			   per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	__do_IRQ(irq);

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
Example 12
static void cpu_set_eiem(void *info)
{
	set_eiem((unsigned long) info);
}
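
This is the callback that Examples 1, 2 and 5 broadcast with smp_call_function() so every CPU loads the updated global mask. Those examples use the old four-argument form; later kernels dropped the nonatomic flag, leaving three arguments, so an updated caller would look roughly like this:

	/* Propagate the new mask to all other CPUs and wait for them,
	 * then load it locally (modern three-argument form). */
	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1);
	set_eiem(cpu_eiem);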