static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!__raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			__raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!__raw_spin_trylock(&kernel_flag));
	}
}
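For context, __lock_kernel() above is the slow path of the Big Kernel Lock. Below is a minimal sketch of the caller-facing wrappers, assuming the historical lib/kernel_lock.c layout where each task tracks BKL recursion in current->lock_depth and a __unlock_kernel() helper drops the spinlock and re-enables preemption; the exact field handling varies between kernel versions.

void __lockfunc lock_kernel(void)
{
	int depth = current->lock_depth + 1;

	/* Only take the underlying spinlock on the outermost acquisition. */
	if (likely(!depth))
		__lock_kernel();
	current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	BUG_ON(current->lock_depth < 0);

	/* Drop the spinlock only when the recursion count returns to -1. */
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();
}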
void _raw_spin_lock(spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}
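The debug_spin_lock_before()/debug_spin_lock_after() helpers used here record ownership so the checker can catch recursive locking. A sketch of what they look like in lib/spinlock_debug.c-style code, assuming the lock carries magic, owner and owner_cpu debug fields and a SPIN_BUG_ON() reporting macro:

static inline void debug_spin_lock_before(spinlock_t *lock)
{
	/* Catch use of an uninitialised or corrupted lock. */
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	/* Catch the same task or CPU taking the lock recursively. */
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(spinlock_t *lock)
{
	/* Record the new owner for later recursion/unlock checks. */
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}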
static void __spin_lock_debug(spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				task_pid_nr(current), lock);
			dump_stack();
#ifdef CONFIG_SMP
			trigger_all_cpu_backtrace();
#endif
		}
	}
}
unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	/*
	 * notify the hw-branch tracer so it may disable tracing and add the
	 * last trace to the trace buffer - the earlier this happens, the more
	 * useful the trace.
	 */
	trace_hw_branch_oops();

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
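oops_begin() is paired with an oops_end() that unwinds the nesting count and releases die_lock only when the outermost oops finishes. A rough sketch of that release path, assuming the same die_lock/die_nest_count/die_owner globals and the arch/x86 dumpstack helpers of this era; the kexec handling is omitted here:

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}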
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	local_irq_enable();

	while (!__raw_spin_trylock(&kernel_flag)) {
		if (test_thread_flag(TIF_NEED_RESCHED))
			return -EAGAIN;
		cpu_relax();
	}

	local_irq_disable();
	preempt_disable();
	return 0;
}
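The comment above describes an acquire/release pair, but only the reacquire half is shown. A hedged sketch of the release half the scheduler calls before a task blocks, assuming kernel_flag is dropped with the matching raw unlock and preemption is re-enabled without forcing an immediate reschedule:

/*
 * Release the underlying lock on behalf of the scheduler; the task
 * keeps its lock_depth, so __reacquire_kernel_lock() takes the lock
 * back when the task is scheduled in again.
 */
void __lockfunc __release_kernel_lock(void)
{
	__raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}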
int _raw_spin_trylock(spinlock_t *lock)
{
	int ret = __raw_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
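SPIN_BUG_ON() is the debug checker's reporting macro. A minimal sketch of one possible definition, assuming a spin_bug() helper that prints the offending lock and a backtrace; the names follow lib/spinlock_debug.c conventions but the exact bodies are an assumption here:

static void spin_bug(spinlock_t *lock, const char *msg)
{
	/* Report the offending lock, the current task and a backtrace. */
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define SPIN_BUG_ON(cond, lock, msg) \
	do { if (unlikely(cond)) spin_bug(lock, msg); } while (0)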
static void __spin_lock_debug(spinlock_t *lock)
{
	int print_once = 1;
	u64 i;

	for (;;) {
		for (i = 0; i < loops_per_jiffy * HZ; i++) {
			if (__raw_spin_trylock(&lock->raw_lock))
				return;
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
					"%s/%d, %p (%s)\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock, print_tainted());
			dump_stack();
		}
	}
}
unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
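In this later raw_spinlock_t naming scheme, the __raw_spin_trylock() delegated to above is the generic inline that layers preemption and lockdep handling on top of the architecture trylock. A sketch of that layer, assuming the do_raw_spin_trylock() and spin_acquire() helpers from the spinlock_api_smp.h headers of that era:

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		/* Tell lockdep the lock was taken via a successful trylock. */
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	/* Trylock failed: restore preemption and report failure. */
	preempt_enable();
	return 0;
}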