/*
 * Re-enable bottom-half (softirq) processing.
 *
 * Drops the SOFTIRQ count but keeps one level of preemption disabled
 * while any pending softirqs are run, so softirq processing itself
 * cannot be preempted.  Must not be called from hard-irq context or
 * with interrupts disabled (both are WARNed on below).
 */
void local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned long flags;

	WARN_ON_ONCE(in_irq());
#endif
	WARN_ON_ONCE(irqs_disabled());

#ifdef CONFIG_TRACE_IRQFLAGS
	/* keep the irq-flags tracer consistent while we fiddle counts */
	local_irq_save(flags);
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	/* run pending softirqs now, unless nested inside interrupt context */
	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	/* drop the last preempt level that was held across do_softirq() */
	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_restore(flags);
#endif
	/* preemption may now be possible again */
	preempt_check_resched();
}
/*
 * Re-enable bottom halves, accounting the enable to @ip for irq-flags
 * tracing, and subtracting @cnt from the preempt count.
 *
 * One preempt level is retained across the softirq run (cnt - 1 is
 * subtracted first, the final level only after do_softirq()), so softirq
 * processing cannot be preempted.  Must not be called from hard-irq
 * context or with interrupts disabled.
 */
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
/* leave me here to see if it catches a weird crash */ static void ramster_check_irq_counts(void) { static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt; int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt; cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT; if (cur_hardirq_cnt > last_hardirq_cnt) { last_hardirq_cnt = cur_hardirq_cnt; if (!(last_hardirq_cnt&(last_hardirq_cnt-1))) pr_err("RAMSTER TESTING RRP hardirq_count=%d\n", last_hardirq_cnt); } cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT; if (cur_softirq_cnt > last_softirq_cnt) { last_softirq_cnt = cur_softirq_cnt; if (!(last_softirq_cnt&(last_softirq_cnt-1))) pr_err("RAMSTER TESTING RRP softirq_count=%d\n", last_softirq_cnt); } cur_preempt_cnt = preempt_count() & PREEMPT_MASK; if (cur_preempt_cnt > last_preempt_cnt) { last_preempt_cnt = cur_preempt_cnt; if (!(last_preempt_cnt&(last_preempt_cnt-1))) pr_err("RAMSTER TESTING RRP preempt_count=%d\n", last_preempt_cnt); } }
/*
 * Disable bottom halves, adding @cnt to the preempt count and crediting
 * the disable to @ip for irq-flags tracing.
 *
 * The count is bumped with interrupts disabled so the softirq-off
 * tracepoint fires atomically with the state change.  Must not be
 * called from hard-irq context.
 */
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	/* first preempt-disable level: record where preemption went off */
	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) { unsigned long saved_preempt_count = preempt_count(); int expected_failure = 0; #if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_DEBUG_RT_MUTEXES) int saved_lock_count = current->lock_count; #endif WARN_ON(irqs_disabled()); testcase_fn(); /* * Filter out expected failures: */ #ifndef CONFIG_PROVE_LOCKING if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected) expected_failure = 1; #endif if (debug_locks != expected) { if (expected_failure) { expected_testcase_failures++; printk("failed|"); } else { unexpected_testcase_failures++; printk("FAILED|"); dump_stack(); } } else { testcase_successes++; printk(" ok |"); } testcase_total++; if (debug_locks_verbose) printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n", lockclass_mask, debug_locks, expected); /* * Some tests (e.g. double-unlock) might corrupt the preemption * count, so restore it: */ preempt_count() = saved_preempt_count; #ifdef CONFIG_TRACE_IRQFLAGS if (softirq_count()) current->softirqs_enabled = 0; else current->softirqs_enabled = 1; #endif reset_locks(); #if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_DEBUG_RT_MUTEXES) current->lock_count = saved_lock_count; #endif }
/*
 * Re-enable bottom halves, crediting the enable to @ip for irq-flags
 * tracing.  Keeps one preempt level held while pending softirqs are
 * processed, then drops it.  Must not be called from hard-irq context
 * or with interrupts disabled.
 */
static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
/*
 * Disable bottom halves by adding @cnt to the preempt count, crediting
 * the disable to @ip for irq-flags tracing.  The increment is done with
 * interrupts disabled so tracing sees an atomic transition.  Must not
 * be called from hard-irq context.
 */
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	add_preempt_count_notrace(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	/* first preempt-disable level: emit the deferred preempt-off trace */
	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
/*
 * Drop @cnt from the preempt count without running pending softirqs.
 * Caller must have interrupts disabled.  Fires the softirqs-on trace
 * when the softirq part of the count reaches zero after this drop.
 */
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	/* about to clear the last softirq-disable level? */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	/* must not run from hard-irq context, and irqs must be off */
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	/* about to clear the last softirq-disable level? */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}
/*
 * Drop @cnt from the preempt count without running pending softirqs.
 * Must not be called from hard-irq context; caller must have interrupts
 * disabled.  Fires the softirqs-on trace when @cnt is the last
 * softirq-disable level.
 */
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on(_RET_IP_);
	sub_preempt_count(cnt);
}
/*
 * Drop @cnt from the preempt count without running pending softirqs.
 * Must not be called from hard-irq context; caller must have interrupts
 * disabled.  Fires the softirqs-on trace when @cnt is the last
 * softirq-disable level.
 */
static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	/* with irq-flags tracing, also assert we are not in hard-irq context */
	WARN_ON_ONCE(in_irq());
#endif
	WARN_ON_ONCE(!irqs_disabled());

	/* about to clear the last softirq-disable level? */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}
/*
 * Drop one SOFTIRQ_OFFSET level from the preempt count.  Unlike
 * local_bh_enable() this never processes pending softirqs, and it must
 * never be the outermost enable (see the nesting assertion below).
 */
void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
	sub_preempt_count(SOFTIRQ_OFFSET);
}
/*
 * Disable bottom halves by adding SOFTIRQ_OFFSET to the preempt count,
 * crediting the disable to @ip for irq-flags tracing.  The increment is
 * done with interrupts disabled so tracing sees an atomic transition.
 * Must not be called from hard-irq context.
 */
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) { unsigned long saved_preempt_count = preempt_count(); int expected_failure = 0; WARN_ON(irqs_disabled()); testcase_fn(); #ifndef CONFIG_PROVE_LOCKING if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected) expected_failure = 1; if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected) expected_failure = 1; #endif if (debug_locks != expected) { if (expected_failure) { expected_testcase_failures++; printk("failed|"); } else { unexpected_testcase_failures++; printk("FAILED|"); dump_stack(); } } else { testcase_successes++; printk(" ok |"); } testcase_total++; if (debug_locks_verbose) printk(" lockclass mask: %x, debug_locks: %d, expected: %d\n", lockclass_mask, debug_locks, expected); preempt_count() = saved_preempt_count; #ifdef CONFIG_TRACE_IRQFLAGS if (softirq_count()) current->softirqs_enabled = 0; else current->softirqs_enabled = 1; #endif reset_locks(); }
/*
 * Run pending softirqs, switching to the dedicated interrupt stack via
 * call_softirq().  A no-op when already in interrupt context (softirqs
 * will be handled on the way out instead).  Runs with local interrupts
 * disabled around the pending check and the handler call.
 */
asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	pending = local_softirq_pending();
	/* Switch to interrupt stack */
	if (pending) {
		call_softirq();
		/* softirq count must be zero again on return */
		WARN_ON_ONCE(softirq_count());
	}
	local_irq_restore(flags);
}
static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) { unsigned long saved_preempt_count = preempt_count(); WARN_ON(irqs_disabled()); testcase_fn(); /* * Filter out expected failures: */ #ifndef CONFIG_PROVE_LOCKING if (expected == FAILURE && debug_locks) { expected_testcase_failures++; pr_cont("failed|"); } else #endif if (debug_locks != expected) { unexpected_testcase_failures++; pr_cont("FAILED|"); dump_stack(); } else { testcase_successes++; pr_cont(" ok |"); } testcase_total++; if (debug_locks_verbose) pr_cont(" lockclass mask: %x, debug_locks: %d, expected: %d\n", lockclass_mask, debug_locks, expected); /* * Some tests (e.g. double-unlock) might corrupt the preemption * count, so restore it: */ preempt_count_set(saved_preempt_count); #ifdef CONFIG_TRACE_IRQFLAGS if (softirq_count()) current->softirqs_enabled = 0; else current->softirqs_enabled = 1; #endif reset_locks(); }