/*
 * Run pending softirqs on interrupt exit: when softirq handling is
 * forced into threads, defer to the per-cpu ksoftirqd thread;
 * otherwise process them inline via __do_softirq().
 */
static inline void invoke_softirq(void)
{
	if (force_irqthreads)
		wakeup_softirqd();
	else
		__do_softirq();
}
/*
 * Invoke pending softirqs at interrupt exit.
 *
 * If ksoftirqd is already running on this CPU, leave the work to it
 * and return immediately. Otherwise, unless softirq handling is forced
 * into threads, run the softirqs now — choosing the stack based on the
 * architecture's irq-stack configuration (see the per-branch comments
 * below). In the threaded case, just wake ksoftirqd.
 */
static inline void invoke_softirq(void)
{
	if (ksoftirqd_running())
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}
/*
 * Invoke pending softirqs at interrupt exit.
 *
 * When the architecture guarantees interrupts are disabled on entry to
 * irq_exit() (__ARCH_IRQ_EXIT_IRQS_DISABLED), __do_softirq() can be
 * called directly; otherwise dispatch through do_dpc_irq().
 * NOTE(review): do_dpc_irq() is defined elsewhere — presumably it
 * performs the interrupt masking __do_softirq() requires; confirm
 * against its definition.
 */
static inline void invoke_softirq(void)
{
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
	__do_softirq();
#else
	do_dpc_irq();
#endif
}
/*
 * Run pending softirqs from process context.
 *
 * Fix: guard against being entered from hard/soft interrupt context,
 * matching the in_interrupt() check in the other do_softirq() variant
 * in this file — softirqs are handled on irq exit there, and nesting
 * __do_softirq() from irq context must be avoided. Interrupts are
 * disabled around the pending-mask check so a softirq raised in
 * between cannot race with the decision to process.
 */
asmlinkage void do_softirq(void)
{
	unsigned long flags;

	/* Softirqs are run on irq exit; never nest from irq context. */
	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		__do_softirq();

	local_irq_restore(flags);
}
/*
 * Per-cpu ksoftirqd worker body: with interrupts disabled, run any
 * pending softirqs, report a quiescent state to RCU, then re-enable
 * interrupts and offer a reschedule point.
 */
static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();

	/* Nothing pending: just restore interrupts and go back to sleep. */
	if (!local_softirq_pending()) {
		local_irq_enable();
		return;
	}

	__do_softirq();
	rcu_note_context_switch(cpu);
	local_irq_enable();
	cond_resched();
}
/*
 * Invoke pending softirqs at interrupt exit.
 *
 * When softirq handling is not forced into threads, run the softirqs
 * inline: directly if the architecture enters irq_exit() with
 * interrupts already disabled, otherwise via do_softirq(). In the
 * threaded case, wake ksoftirqd inside a __local_bh_disable()/
 * __local_bh_enable() pair.
 * NOTE(review): presumably the bh-disable bracket keeps the softirq
 * accounting consistent while ksoftirqd is being woken — confirm
 * against the __local_bh_* definitions.
 */
static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
		__do_softirq();
#else
		do_softirq();
#endif
	} else {
		__local_bh_disable((unsigned long)__builtin_return_address(0),
				   SOFTIRQ_OFFSET);
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
}
/*
 * Per-cpu ksoftirqd worker body: with interrupts disabled, run any
 * pending softirqs, then re-enable interrupts and pass through an
 * RCU-aware reschedule point.
 */
static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();

	/* Nothing pending: just restore interrupts and return. */
	if (!local_softirq_pending()) {
		local_irq_enable();
		return;
	}

	/*
	 * We can safely run softirq on inline stack, as we are not deep
	 * in the task stack here.
	 */
	__do_softirq();
	local_irq_enable();
	cond_resched_rcu_qs();
}
/*
 * Run pending softirqs from process context.
 *
 * Bail out when called from interrupt context (softirqs are handled on
 * irq exit there). Interrupts are disabled across the pending check so
 * the decision to process cannot race with a newly raised softirq.
 */
asmlinkage void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (local_softirq_pending())
		__do_softirq();
	local_irq_restore(flags);
}
/*
 * Route interrupts to ISR(s).
 *
 * This function is entered with the IE disabled. It can be
 * re-entered as soon as the IE is re-enabled in function
 * handle_IRQ_event().
 */
void BCMFASTPATH
plat_irq_dispatch(struct pt_regs *regs)
{
	u32 pending, ipvec;
	uint32 flags = 0;
	int irq;

	/* Disable MIPS IRQs with pending interrupts */
	pending = read_c0_cause() & CAUSEF_IP;
	pending &= read_c0_status();
	clear_c0_status(pending);
	irq_disable_hazard();

	/* Handle MIPS timer interrupt. Re-enable MIPS IRQ7
	 * immediately after servicing the interrupt so that
	 * we can take this kind of interrupt again later
	 * while servicing other interrupts.
	 */
	if (pending & CAUSEF_IP7) {
		do_IRQ(7);
		pending &= ~CAUSEF_IP7;
		set_c0_status(STATUSF_IP7);
		irq_enable_hazard();
	}

	/* Build bitvec for pending interrupts. Start with
	 * MIPS IRQ2 and add linux IRQs to higher bits to
	 * make the interrupt processing uniform.
	 */
	ipvec = pending >> CAUSEB_IP2;
	if (pending & CAUSEF_IP2) {
		/* NOTE(review): when both ccsbr and mips_corereg are set,
		 * the second read overwrites the first value of flags —
		 * confirm only one of the two is ever non-NULL.
		 */
		if (ccsbr)
			flags = R_REG(NULL, &ccsbr->sbflagst);
		/* Read intstatus */
		if (mips_corereg)
			flags = R_REG(NULL, &((mips74kregs_t *)mips_corereg)->intstatus);
		/* Keep only the shared-interrupt lines of interest. */
		flags &= shints;
		ipvec |= flags << SBMIPS_VIRTIRQ_BASE;
	}

#ifdef CONFIG_HND_BMIPS3300_PROF
	/* Handle MIPS core interrupt. Re-enable the MIPS IRQ that
	 * MIPS core is assigned to immediately after servicing the
	 * interrupt so that we can take this kind of interrupt again
	 * later while servicing other interrupts.
	 *
	 * mipsirq < 0 indicates MIPS core IRQ # is unknown.
	 */
	if (mipsirq >= 0 && (ipvec & (1 << mipsirq))) {
		/* MIPS core raised the interrupt on the shared MIPS IRQ2.
		 * Make sure MIPS core is the only interrupt source before
		 * re-enabling the IRQ.
		 */
		if (mipsirq >= SBMIPS_VIRTIRQ_BASE) {
			if (flags == (1 << (mipsirq-SBMIPS_VIRTIRQ_BASE))) {
				irq = mipsirq + 2;
				do_IRQ(irq);
				ipvec &= ~(1 << mipsirq);
				pending &= ~CAUSEF_IP2;
				set_c0_status(STATUSF_IP2);
				irq_enable_hazard();
			}
		}
		/* MIPS core raised the interrupt on a dedicated MIPS IRQ.
		 * Re-enable the IRQ immediately.
		 */
		else {
			irq = mipsirq + 2;
			do_IRQ(irq);
			ipvec &= ~(1 << mipsirq);
			pending &= ~CR_IP(irq);
			set_c0_status(SR_IM(irq));
			irq_enable_hazard();
		}
	}
#endif	/* CONFIG_HND_BMIPS3300_PROF */

	/* Shared interrupt bits are shifted to respective bit positions in
	 * ipvec above. IP2 (bit 0) is of no significance, hence shifting the
	 * bit map by 1 to the right.
	 */
	ipvec >>= 1;

	/* Handle all other interrupts. Re-enable disabled MIPS IRQs
	 * after processing all pending interrupts.
	 */
	for (irq = 3; ipvec != 0; irq++) {
		if (ipvec & 1)
			do_IRQ(irq);
		ipvec >>= 1;
	}

	/* Restore the MIPS IRQ lines masked off at entry. */
	set_c0_status(pending);
	irq_enable_hazard();

#if 0
	/* Process any pending softirqs (tasklets, softirqs ...) */
	local_irq_save(flags);
	if (local_softirq_pending() && !in_interrupt())
		__do_softirq();
	local_irq_restore(flags);
#endif
}
/*
 * Run all pending softirqs.
 * NOTE(review): the 0 argument appears to be a mask of softirqs to
 * skip (process_pending_softirqs() below passes a nonzero mask to
 * exclude the scheduler softirq) — confirm against __do_softirq()'s
 * definition.
 */
asmlinkage void do_softirq(void)
{
	__do_softirq(0);
}
/*
 * Service pending softirqs from a voluntary yield point.
 *
 * Must be called from non-irq context with interrupts enabled
 * (asserted below). The SCHEDULE_SOFTIRQ bit is passed as the mask to
 * __do_softirq() so the scheduler softirq is not run here.
 */
void process_pending_softirqs(void)
{
	ASSERT(!in_irq() && local_irq_is_enabled());

	/* Do not enter scheduler as it can preempt the calling context. */
	__do_softirq(1ul<<SCHEDULE_SOFTIRQ);
}