/*
 * Idle loop for an offlined secondary CPU (Broadcom STB SMP).
 *
 * Tears down the idle task, then parks the CPU in a low-power "wait"
 * loop with all interrupt sources except software interrupts SW0/SW1
 * masked.  The CPU is woken by a software interrupt raised from
 * brcmstb_boot_secondary(), which clears cpu_play_dead; we then jump
 * to the warm-restart entry point to rejoin start_secondary().
 *
 * NOTE(review): never returns to the caller — control leaves via the
 * final jump to brcmstb_tp1_reentry.
 */
void play_dead(void)
{
	idle_task_exit();
	cpu_play_dead = 1;

	/*
	 * Wakeup is on SW0 or SW1; disable everything else
	 * Use BEV !IV (BRCM_WARM_RESTART_VEC) to avoid the regular Linux
	 * IRQ handlers; this clears ST0_IE and returns immediately.
	 */
	clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
	/* Keep only SW0/SW1 interrupt enables; set BEV, keep IE set */
	change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
		IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
	irq_disable_hazard();

	/*
	 * wait for SW interrupt from brcmstb_boot_secondary(), then jump
	 * back to start_secondary()
	 */
	do {
		/* "wait" stalls the pipeline until an interrupt is pending */
		__asm__ __volatile__(
		" wait\n"
		" nop\n"
		: : : "memory");
	} while (cpu_play_dead);

	/* Re-enter the kernel via the warm-restart trampoline */
	__asm__ __volatile__(
	" j brcmstb_tp1_reentry\n"
	: : : "memory");
}
/*
 * Probe whether the CP0 count/compare timer raises a usable interrupt.
 *
 * Returns 1 if writing Compare ahead of Count makes the timer IRQ
 * pend, and acking it (writing Compare) clears it again — i.e. the
 * hardware behaves like a real count/compare timer.  Returns 0 on any
 * deviation (IRQ stuck pending, or never asserted after expiry).
 */
static int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?	 Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		irq_disable_hazard();
		if (c0_compare_int_pending())
			return 0;	/* can't be cleared: unusable */
	}

	/*
	 * Arm Compare far enough in the future that it hasn't already
	 * passed by the time the write lands; back off exponentially.
	 */
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		irq_disable_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	/* Busy-wait until Count passes the armed Compare value */
	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry	*/

	if (!c0_compare_int_pending())
		return 0;	/* expired but no IRQ asserted */

	/* Ack it and make sure the pending bit really drops */
	write_c0_compare(read_c0_count());
	irq_disable_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}
/*
 * Mask a CPU software interrupt on an MT-capable core.
 *
 * Clearing an IM bit in CP0 Status must not race other VPEs, so all
 * VPEs are disabled (dvpe) around the update and restored afterwards.
 */
static inline void mask_mips_mt_irq(unsigned int irq)
{
	unsigned int vpflags = dvpe();
	/* SWINT1 lines map to IM bit 1, everything else to IM bit 0 */
	int cpu_irq = ((irq == SI_SWINT1_INT1) || (irq == SI_SWINT_INT1)) ? 1 : 0;

	clear_c0_status(0x100 << cpu_irq);
	irq_disable_hazard();
	evpe(vpflags);
}
/*
 * Mask a Loongson CPU interrupt line: clear the corresponding IM bit
 * in CP0 Status, then apply a UART routing workaround if needed.
 */
static inline void mask_loongson_irq(struct irq_data *d)
{
	clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
	irq_disable_hazard();

	/* Workaround: UART IRQ may deliver to any core */
	if (d->irq == LOONGSON_UART_IRQ) {
		int cpu = smp_processor_id();
		/* Locate this CPU within the multi-node topology */
		int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
		int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
		/*
		 * Interrupt-router registers live at per-node MMIO bases;
		 * the register macro's address is OR'ed with the node's
		 * base from smp_group[].
		 */
		u64 intenclr_addr = smp_group[node_id] |
			(u64)(&LOONGSON_INT_ROUTER_INTENCLR);
		u64 introuter_lpc_addr = smp_group[node_id] |
			(u64)(&LOONGSON_INT_ROUTER_LPC);

		/* Disable router enable bit 10 (presumably the UART source
		 * — TODO confirm against the chipset manual) */
		*(volatile u32 *)intenclr_addr = 1 << 10;
		/* Re-route the LPC interrupt to this core only */
		*(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
	}
}
/*
 * Shut down a BCM63xx external interrupt: mask it at the external
 * interrupt controller, then clear its IM bit in CP0 Status.
 */
static void bcm63xx_external_irq_shutdown(unsigned int irq)
{
	unsigned int status_mask = 0x100 << (irq - IRQ_MIPS_BASE);

	bcm63xx_external_irq_mask(irq);
	clear_c0_status(status_mask);
	irq_disable_hazard();
}
/*
 * Mask a MIPS CPU interrupt by clearing its IM bit in CP0 Status.
 * IM0 lives at bit 8, hence the 0x100 base for the shift.
 */
static inline void mask_mips_irq(unsigned int irq)
{
	const unsigned int im_bit = 0x100 << (irq - mips_cpu_irq_base);

	clear_c0_status(im_bit);
	irq_disable_hazard();
}
/*
 * Route interrupts to ISR(s).
 *
 * This function is entered with the IE disabled. It can be
 * re-entered as soon as the IE is re-enabled in function
 * handle_IRQ_event().
 */
void BCMFASTPATH plat_irq_dispatch(struct pt_regs *regs)
{
	u32 pending, ipvec;
	uint32 flags = 0;	/* intstatus bits of shared-IRQ cores */
	int irq;

	/* Disable MIPS IRQs with pending interrupts */
	pending = read_c0_cause() & CAUSEF_IP;
	pending &= read_c0_status();
	clear_c0_status(pending);
	irq_disable_hazard();

	/* Handle MIPS timer interrupt. Re-enable MIPS IRQ7
	 * immediately after servicing the interrupt so that
	 * we can take this kind of interrupt again later
	 * while servicing other interrupts.
	 */
	if (pending & CAUSEF_IP7) {
		do_IRQ(7);
		pending &= ~CAUSEF_IP7;
		set_c0_status(STATUSF_IP7);
		irq_enable_hazard();
	}

	/* Build bitvec for pending interrupts. Start with
	 * MIPS IRQ2 and add linux IRQs to higher bits to
	 * make the interrupt processing uniform.
	 */
	ipvec = pending >> CAUSEB_IP2;
	if (pending & CAUSEF_IP2) {
		/* IP2 is shared: read the backplane flag/status register
		 * to find which core(s) actually raised it. Only one of
		 * ccsbr / mips_corereg is expected to be set. */
		if (ccsbr)
			flags = R_REG(NULL, &ccsbr->sbflagst);
		/* Read intstatus */
		if (mips_corereg)
			flags = R_REG(NULL, &((mips74kregs_t *)mips_corereg)->intstatus);
		flags &= shints;
		ipvec |= flags << SBMIPS_VIRTIRQ_BASE;
	}

#ifdef CONFIG_HND_BMIPS3300_PROF
	/* Handle MIPS core interrupt. Re-enable the MIPS IRQ that
	 * MIPS core is assigned to immediately after servicing the
	 * interrupt so that we can take this kind of interrupt again
	 * later while servicing other interrupts.
	 *
	 * mipsirq < 0 indicates MIPS core IRQ # is unknown.
	 */
	if (mipsirq >= 0 && (ipvec & (1 << mipsirq))) {
		/* MIPS core raised the interrupt on the shared MIPS IRQ2.
		 * Make sure MIPS core is the only interrupt source before
		 * re-enabling the IRQ.
		 */
		if (mipsirq >= SBMIPS_VIRTIRQ_BASE) {
			if (flags == (1 << (mipsirq-SBMIPS_VIRTIRQ_BASE))) {
				irq = mipsirq + 2;
				do_IRQ(irq);
				ipvec &= ~(1 << mipsirq);
				pending &= ~CAUSEF_IP2;
				set_c0_status(STATUSF_IP2);
				irq_enable_hazard();
			}
		}
		/* MIPS core raised the interrupt on a dedicated MIPS IRQ.
		 * Re-enable the IRQ immediately.
		 */
		else {
			irq = mipsirq + 2;
			do_IRQ(irq);
			ipvec &= ~(1 << mipsirq);
			pending &= ~CR_IP(irq);
			set_c0_status(SR_IM(irq));
			irq_enable_hazard();
		}
	}
#endif	/* CONFIG_HND_BMIPS3300_PROF */

	/* Shared interrupt bits are shifted to respective bit positions in
	 * ipvec above. IP2 (bit 0) is of no significance, hence shifting the
	 * bit map by 1 to the right.
	 */
	ipvec >>= 1;

	/* Handle all other interrupts. Re-enable disabled MIPS IRQs
	 * after processing all pending interrupts.
	 */
	for (irq = 3; ipvec != 0; irq++) {
		if (ipvec & 1)
			do_IRQ(irq);
		ipvec >>= 1;
	}
	/* Restore the IM bits masked off at entry (minus any already
	 * re-enabled above) in one shot. */
	set_c0_status(pending);
	irq_enable_hazard();

#if 0
	/* Process any pending softirqs (tasklets, softirqs ...) */
	local_irq_save(flags);
	if (local_softirq_pending() && !in_interrupt())
		__do_softirq();
	local_irq_restore(flags);
#endif
}
/*
 * Disable a CPU interrupt line by clearing its IM mask bit
 * in the CP0 Status register.
 */
static INLINE void disable_brcm_irq(unsigned int irq)
{
	unsigned int im_mask = SR_IM(irq);

	clear_c0_status(im_mask);
	irq_disable_hazard();
}
/*
 * Mask a MIPS CPU interrupt: clear the Status register IM bit for
 * this line (IM0 is bit 8, hence the 0x100 base).
 */
static inline void mask_mips_irq(unsigned int irq)
{
	unsigned int line = irq - MIPS_CPU_IRQ_BASE;

	clear_c0_status(0x100 << line);
	irq_disable_hazard();
}
/*
 * irq_chip mask callback: clear the CP0 Status IM bit that
 * corresponds to this irq_data's interrupt line.
 */
static inline void mask_mips_irq(struct irq_data *d)
{
	unsigned int im_mask = 0x100 << (d->irq - MIPS_CPU_IRQ_BASE);

	clear_c0_status(im_mask);
	irq_disable_hazard();
}
/*
 * irq_chip shutdown callback for a BCM63xx external interrupt:
 * mask it at the external controller, then clear the matching
 * IM bit in CP0 Status.
 */
static void bcm63xx_external_irq_shutdown(struct irq_data *d)
{
	unsigned int line = d->irq - IRQ_MIPS_BASE;

	bcm63xx_external_irq_mask(d);
	clear_c0_status(0x100 << line);
	irq_disable_hazard();
}