Example #1
void bcm1480_unmask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints, hl_spacing;

	spin_lock_irqsave(&bcm1480_imr_lock, flags);
	hl_spacing = 0;
	if ((irq >= BCM1480_NR_IRQS_HALF) && (irq <= BCM1480_NR_IRQS)) {
		/* Upper-half IRQs live in the _L register, one
		 * BCM1480_IMR_HL_SPACING past the _H register. */
		hl_spacing = BCM1480_IMR_HL_SPACING;
		irq -= BCM1480_NR_IRQS_HALF;
	}
	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	cur_ints &= ~(((u64) 1) << irq);	/* a clear bit in the IMR enables the interrupt */
	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}
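
The unmask path folds one flat IRQ number onto a pair of 64-bit mask registers: numbers in the upper half select the _L register, BCM1480_IMR_HL_SPACING past the _H register. Below is a minimal standalone sketch of that fold, using made-up DEMO_ constants in place of the real values from the SiByte headers:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real constants live in the SiByte headers. */
#define DEMO_NR_IRQS_HALF	64
#define DEMO_HL_SPACING		0x1000

/* Fold an IRQ number onto (register offset, bit index). */
static void demo_fold(int irq, uint64_t *offset, int *bit)
{
	*offset = 0;
	if (irq >= DEMO_NR_IRQS_HALF) {		/* upper-half IRQ: use the _L register */
		*offset = DEMO_HL_SPACING;
		irq -= DEMO_NR_IRQS_HALF;
	}
	*bit = irq;
}

int main(void)
{
	uint64_t off;
	int bit;

	demo_fold(5, &off, &bit);		/* high half: offset 0, bit 5 */
	printf("irq 5  -> offset %#llx, bit %d\n", (unsigned long long)off, bit);
	demo_fold(70, &off, &bit);		/* low half: offset 0x1000, bit 6 */
	printf("irq 70 -> offset %#llx, bit %d\n", (unsigned long long)off, bit);
	return 0;
}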
Example #2
static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
{
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;
	unsigned int irq_dirty;

	i = first_cpu(mask);
	if (next_cpu(i, mask) < NR_CPUS) {	/* more than one CPU in the mask? */
		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
		return;
	}

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&desc->lock, flags);
	spin_lock(&bcm1480_imr_lock);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k = 0; k < 2; k++) {	/* loop through the high and low interrupt mask registers */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));	/* clear bit = currently enabled */
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	spin_unlock(&bcm1480_imr_lock);
	spin_unlock_irqrestore(&desc->lock, flags);
}
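
The single-CPU check is subtle because next_cpu() signals "no further CPU" by returning NR_CPUS itself, which is why the multiple-CPU test must be a strict '< NR_CPUS' (as fixed above); the original '<= NR_CPUS' was always true and rejected every mask. A toy model of that convention, where demo_next_cpu() and DEMO_NR_CPUS are illustrative stand-ins rather than kernel API:

#include <stdint.h>
#include <stdio.h>

#define DEMO_NR_CPUS 8

/* Mirrors find_next_bit() semantics: returns DEMO_NR_CPUS when no
 * further bit is set. */
static int demo_next_cpu(int n, uint8_t mask)
{
	for (n = n + 1; n < DEMO_NR_CPUS; n++)
		if (mask & (1 << n))
			return n;
	return DEMO_NR_CPUS;
}

int main(void)
{
	uint8_t one_cpu = 1 << 2;			/* only CPU 2 set */
	uint8_t two_cpus = (1 << 2) | (1 << 5);

	printf("one cpu:  next = %d\n", demo_next_cpu(2, one_cpu));	/* 8: nothing found */
	printf("two cpus: next = %d\n", demo_next_cpu(2, two_cpus));	/* 5: a second CPU */
	return 0;
}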
Example #3
static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	unsigned int irq_dirty, irq = d->irq;
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	unsigned long flags;

	i = cpumask_first(mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k = 0; k < 2; k++) {	/* loop through the high and low interrupt mask registers */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));	/* clear bit = currently enabled */
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);

	return 0;
}
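
Both versions of bcm1480_set_affinity() perform the same swizzle: if the interrupt was enabled on the old CPU (a clear IMR bit), mask it there first, then clear the bit on the new CPU. The sketch below replays that sequence on two plain uint64_t words standing in for the per-CPU mask registers; demo_move_irq() and imr[] are hypothetical names for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Two fake per-CPU mask words; as in the IMR, a set bit means "masked". */
static uint64_t imr[2] = { ~0ULL, ~0ULL };

static void demo_move_irq(int irq, int old_cpu, int new_cpu)
{
	uint64_t bit = 1ULL << irq;
	int int_on = !(imr[old_cpu] & bit);	/* enabled on the old CPU? */

	if (int_on) {
		imr[old_cpu] |= bit;		/* mask on the old CPU first */
		imr[new_cpu] &= ~bit;		/* then unmask on the new CPU */
	}
}

int main(void)
{
	imr[0] &= ~(1ULL << 3);			/* enable irq 3 on cpu 0 */
	demo_move_irq(3, 0, 1);
	printf("cpu0 masked: %d, cpu1 masked: %d\n",
	       !!(imr[0] & (1ULL << 3)), !!(imr[1] & (1ULL << 3)));
	return 0;
}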
Example #4
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
{
	unsigned int pending;

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	pending = read_c0_cause() & read_c0_status();

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	if (pending & CAUSEF_IP7)	/* Cpu performance counter interrupt */
		sbprof_cpu_intr(exception_epc(regs));
	else
#endif

	if (pending & CAUSEF_IP4)
		bcm1480_timer_interrupt(regs);

#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		bcm1480_mailbox_interrupt(regs);
#endif

#ifdef CONFIG_KGDB
	else if (pending & CAUSEF_IP6)
		bcm1480_kgdb_interrupt(regs);		/* KGDB (uart 1) */
#endif

	else if (pending & CAUSEF_IP2) {
		unsigned long long mask_h, mask_l;
		unsigned long base;

		/*
		 * Default...we've hit an IP[2] interrupt, which means we've
		 * got to check the 1480 interrupt registers to figure out what
		 * to do.  Need to detect which CPU we're on, now that
		 * smp_affinity is supported.
		 */
		base = A_BCM1480_IMR_MAPPER(smp_processor_id());
		mask_h = __raw_readq(
			IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
		mask_l = __raw_readq(
			IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));

		if (mask_h) {
			if (mask_h ^ 1)		/* more than just the low-half summary bit? */
				do_IRQ(fls64(mask_h) - 1, regs);
			else if (mask_l)
				do_IRQ(63 + fls64(mask_l), regs);
		}
	}
}
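
plat_irq_dispatch() keeps only the lines that are both asserted (cause) and unmasked (status), then tests them in a fixed priority order: IP7, IP4, IP3, IP6, IP2. A compact user-space sketch of that ordering; the DEMO_CAUSEF_ values mirror the MIPS cause-register IP bit layout but are defined here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CAUSEF_IP2 (1 << 10)
#define DEMO_CAUSEF_IP3 (1 << 11)
#define DEMO_CAUSEF_IP4 (1 << 12)
#define DEMO_CAUSEF_IP6 (1 << 14)
#define DEMO_CAUSEF_IP7 (1 << 15)

static const char *demo_dispatch(uint32_t cause, uint32_t status)
{
	uint32_t pending = cause & status;	/* asserted AND unmasked */

	if (pending & DEMO_CAUSEF_IP7) return "perf counter";
	if (pending & DEMO_CAUSEF_IP4) return "timer";
	if (pending & DEMO_CAUSEF_IP3) return "mailbox";
	if (pending & DEMO_CAUSEF_IP6) return "kgdb";
	if (pending & DEMO_CAUSEF_IP2) return "imr decode";
	return "none";
}

int main(void)
{
	/* IP4 and IP2 both pending, everything unmasked: the timer wins. */
	printf("%s\n", demo_dispatch(DEMO_CAUSEF_IP4 | DEMO_CAUSEF_IP2, 0xff00));
	return 0;
}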
Example #5
static inline void dispatch_ip2(void)
{
	unsigned long long mask_h, mask_l;
	unsigned int cpu = smp_processor_id();
	unsigned long base;

	/*
	 * Default...we've hit an IP[2] interrupt, which means we've got to
	 * check the 1480 interrupt registers to figure out what to do.  Need
	 * to detect which CPU we're on, now that smp_affinity is supported.
	 */
	base = A_BCM1480_IMR_MAPPER(cpu);
	mask_h = __raw_readq(
		IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H));
	mask_l = __raw_readq(
		IOADDR(base + R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L));

	if (mask_h) {
		if (mask_h ^ 1)
			do_IRQ(fls64(mask_h) - 1);
		else if (mask_l)
			do_IRQ(63 + fls64(mask_l));
	}
}
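
The IP[2] decode depends on fls64() returning the 1-based index of the highest set bit (0 for a zero argument), and on bit 0 of the high status word acting only as a "look in the low word" summary, which is what the mask_h ^ 1 test detects. A standalone sketch; demo_fls64() approximates the kernel's fls64() with __builtin_clzll:

#include <stdint.h>
#include <stdio.h>

static int demo_fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

/* Returns the pending IRQ number, or -1 when nothing is pending.
 * IRQs 64..127 come from the low status word. */
static int demo_decode(uint64_t mask_h, uint64_t mask_l)
{
	if (!mask_h)
		return -1;
	if (mask_h ^ 1)			/* a real high-half bit is set */
		return demo_fls64(mask_h) - 1;
	if (mask_l)			/* only the low-half summary bit */
		return 63 + demo_fls64(mask_l);
	return -1;
}

int main(void)
{
	printf("%d\n", demo_decode(1ULL << 10, 0));	/* -> 10 */
	printf("%d\n", demo_decode(1, 1ULL << 6));	/* -> 70 */
	printf("%d\n", demo_decode(0, 0));		/* -> -1 */
	return 0;
}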