/*
 * do_softirq - run all pending softirqs inline until none remain.
 *
 * Looks like a LinSched (scheduler simulator) variant: only
 * SCHED_SOFTIRQ and HRTIMER_SOFTIRQ may be raised; any other pending
 * bit trips the BUG_ON below.  NOTE(review): assumption based on the
 * linsched_cpu_softirq_raised name — confirm against the project.
 *
 * Bottom halves are marked disabled and hard interrupts are masked for
 * the whole run; the outer while() re-reads the pending mask so
 * softirqs raised by the handlers themselves are also drained before
 * returning.
 */
void do_softirq(void)
{
	unsigned pending = local_softirq_pending();
	/* Bitmask of the only softirq vectors this build supports. */
	unsigned allowed = 1 << SCHED_SOFTIRQ | 1 << HRTIMER_SOFTIRQ;
	struct softirq_action *h;
	unsigned long flags;

	__local_bh_disable(SOFTIRQ_OFFSET);
	local_irq_save(flags);

	/* Keep draining until no softirq is pending (handlers may re-raise). */
	while ((pending = local_softirq_pending())) {
		/*
		 * For now we only support SCHED_SOFTIRQ
		 * and HRTIMER_SOFTIRQ
		 */
		BUG_ON((~allowed) & pending);

		/* This CPU is about to service everything it raised. */
		cpumask_clear_cpu(smp_processor_id(),
				  &linsched_cpu_softirq_raised);

		/* Clear the pending mask before walking the snapshot in 'pending'. */
		set_softirq_pending(0);

		h = softirq_vec;
		do {
			if (pending & 1)
				h->action(h);
			h++;
			pending >>= 1;
		} while (pending);
	}

	local_irq_restore(flags);
	__local_bh_enable(SOFTIRQ_OFFSET);
}
/*
 * invoke_softirq - kick off softirq processing at hard-irq exit.
 *
 * When softirq handling has not been forced into threads
 * (!force_irqthreads), softirqs run inline: __do_softirq() is called
 * directly on architectures that guarantee irqs are already disabled
 * at irq exit (__ARCH_IRQ_EXIT_IRQS_DISABLED), otherwise through the
 * do_softirq() wrapper.
 *
 * With forced irq threading, processing is deferred to the per-cpu
 * softirq thread instead: bottom halves are marked disabled around the
 * wakeup so nothing runs the softirqs in this (irq-exit) context.
 */
static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
		__do_softirq();
#else
		do_softirq();
#endif
	} else {
		/*
		 * Disable bh processing here so the woken thread, not this
		 * context, ends up executing the pending softirqs.
		 */
		__local_bh_disable((unsigned long)__builtin_return_address(0),
				   SOFTIRQ_OFFSET);
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
}
/*
 * __do_softirq - core softirq dispatch loop (older kernel variant:
 * one-argument __local_bh_disable, rcu_bh_qsctr_inc).
 *
 * Must be entered with hard interrupts disabled; irqs are re-enabled
 * only while the handlers run.  The pending mask is snapshotted and
 * cleared before enabling irqs so newly raised softirqs are picked up
 * by the restart pass.  At most MAX_SOFTIRQ_RESTART passes are made;
 * anything still pending after that is handed to ksoftirqd via
 * wakeup_softirqd() to bound irq-exit latency.
 */
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	/* Walk the snapshot, calling each raised vector's handler. */
	do {
		if (pending & 1) {
			kstat_incr_softirqs_this_cpu(h - softirq_vec);
			h->action(h);
			/* Report an RCU-bh quiescent state for this CPU. */
			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	/* Handlers may have raised new softirqs; loop a bounded number of times. */
	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	/* Out of restarts: defer remaining work to the softirq thread. */
	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();
	account_system_vtime(current);
	_local_bh_enable();
}
/* Called at irq disabled */ asmlinkage void __do_dpc_irq(void) { struct dpc_irq_action *h; u32 pending; int max_restart = MAX_SOFTIRQ_RESTART; int cpu; /* Get mask */ pending = local_dpc_irq_pending(); __local_bh_disable((unsigned long)__builtin_return_address(0), SOFTIRQ_OFFSET); cpu = smp_processor_id(); restart: /* Reset the pending bit mask before enabling irqs */ set_dpc_irq_pending(0); local_irq_enable(); /* Handle it */ h = dpc_irq_vec; do { if (pending & 1) { unsigned int vec_nr = h - dpc_irq_vec; int prev_count = hal_preempt_count(); h->action(h); if (unlikely(prev_count != hal_preempt_count())) { hal_preempt_count() = prev_count; } //TODO: RCU } h++; pending >>= 1; } while (pending); local_irq_disable(); pending = local_dpc_irq_pending(); if (pending && --max_restart) goto restart; if (pending) wakeup_dpc_thread(); __local_bh_enable(SOFTIRQ_OFFSET); }
/*
 * __do_softirq - core softirq dispatch loop (time-limited variant).
 *
 * Must be entered with hard interrupts disabled; irqs are enabled only
 * while handlers run.  Processing restarts as long as new softirqs are
 * raised, bounded three ways: a wall-clock budget (MAX_SOFTIRQ_TIME
 * past entry), a pending reschedule (need_resched()), and a hard
 * MAX_SOFTIRQ_RESTART pass count — the counter covers the case where
 * jiffies does not advance.  Whatever remains is deferred to
 * ksoftirqd via wakeup_softirqd().
 */
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
			   SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);

			/*
			 * A buggy handler that leaks a preempt-count
			 * imbalance gets reported and repaired so the
			 * remaining handlers still run correctly.
			 */
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p"
				       "with preempt_count %08x,"
				       " exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count_set(prev_count);
			}
			/* Report an RCU-bh quiescent state for this CPU. */
			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		/* Restart only within budget, without a resched request,
		 * and within the hard pass limit. */
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	/* Restore the borrowed task's PF_MEMALLOC state. */
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
/*
 * local_bh_disable - mark bottom halves disabled on this CPU.
 *
 * Records the caller's return address for instrumentation and bumps
 * the preempt count by SOFTIRQ_DISABLE_OFFSET via __local_bh_disable().
 */
void local_bh_disable(void)
{
	unsigned long caller_ip = (unsigned long)__builtin_return_address(0);

	__local_bh_disable(caller_ip, SOFTIRQ_DISABLE_OFFSET);
}
/*
 * local_bh_disable - mark bottom halves disabled on this CPU
 * (single-argument __local_bh_disable variant).
 *
 * Passes the caller's return address down for instrumentation.
 */
void local_bh_disable(void)
{
	unsigned long caller_ip;

	caller_ip = (unsigned long)__builtin_return_address(0);
	__local_bh_disable(caller_ip);
}
asmlinkage void __do_softirq(void) { struct softirq_action *h; __u32 pending; unsigned long end = jiffies + MAX_SOFTIRQ_TIME; int cpu; pending = local_softirq_pending(); account_system_vtime(current); __local_bh_disable((unsigned long)__builtin_return_address(0), SOFTIRQ_OFFSET); lockdep_softirq_enter(); cpu = smp_processor_id(); restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); local_irq_enable(); h = softirq_vec; do { if (pending & 1) { unsigned int vec_nr = h - softirq_vec; int prev_count = preempt_count(); kstat_incr_softirqs_this_cpu(vec_nr); trace_softirq_entry(vec_nr); h->action(h); trace_softirq_exit(vec_nr); if (unlikely(prev_count != preempt_count())) { printk(KERN_ERR "huh, entered softirq %u %s %p" "with preempt_count %08x," " exited with %08x?\n", vec_nr, softirq_to_name[vec_nr], h->action, prev_count, preempt_count()); preempt_count_set(prev_count); } rcu_bh_qs(cpu); } h++; pending >>= 1; } while (pending); local_irq_disable(); pending = local_softirq_pending(); if (pending) { if (time_before(jiffies, end) && !need_resched()) goto restart; wakeup_softirqd(); } lockdep_softirq_exit(); account_system_vtime(current); __local_bh_enable(SOFTIRQ_OFFSET); }
/*
 * local_bh_disable - mark bottom halves disabled on this CPU.
 *
 * Passes the caller's instruction pointer (_RET_IP_) down for
 * instrumentation and bumps the count by SOFTIRQ_DISABLE_OFFSET.
 */
void local_bh_disable(void)
{
	unsigned long ip = _RET_IP_;

	__local_bh_disable(ip, SOFTIRQ_DISABLE_OFFSET);
}