/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPU's.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
	/*
	 * depth == 0 (i.e. lock_depth was -1) means this task does not
	 * already hold the BKL; any positive depth is a recursive
	 * acquisition by the same task.
	 */
	int depth = current->lock_depth + 1;

	/* Emit the tracepoint with the caller's location before locking. */
	trace_lock_kernel(func, file, line);

	if (likely(!depth)) {
		/*
		 * Outermost acquisition: acquiring may block/spin, so
		 * check we are in a sleepable context, then take the
		 * actual lock. Recursive callers skip this — they
		 * already hold it.
		 */
		might_sleep();
		__lock_kernel();
	}
	/*
	 * Update the per-task depth only after the lock is held
	 * (mirrors the "set up lock_depth _after_" ordering used by
	 * the semaphore-based variant of this function).
	 */
	current->lock_depth = depth;
}
/*
 * Getting the big kernel semaphore.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
	/*
	 * depth == 0 (i.e. lock_depth was -1) means this task does not
	 * already hold the BKL; any positive depth is a recursive
	 * acquisition by the same task.
	 */
	int depth = current->lock_depth + 1;

	/* Emit the tracepoint with the caller's location before locking. */
	trace_lock_kernel(func, file, line);

	if (likely(!depth)) {
		/* Outermost acquisition only; may sleep on the mutex. */
		might_sleep();
		/*
		 * No recursion worries - we set up lock_depth _after_
		 * taking the mutex, so a recursive caller can never see
		 * depth == 0 while the lock is not yet held.
		 */
		mutex_lock(&kernel_sem);
#ifdef CONFIG_DEBUG_RT_MUTEXES
		/* Debug aid: remember where the BKL was last taken. */
		current->last_kernel_lock = __builtin_return_address(0);
#endif
	}
	current->lock_depth = depth;
}