/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	int ret = 0, saved_lock_depth = -1;
	struct rt_mutex_waiter waiter;
	unsigned long flags;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock_irqsave(&lock->wait_lock, flags);
	init_lists(lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	/*
	 * We drop the BKL here before we go into the wait loop to avoid a
	 * possible deadlock in the scheduler.
	 */
	if (unlikely(current->lock_depth >= 0))
		saved_lock_depth = rt_release_bkl(lock, flags);

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_MODE_ABS);

	for (;;) {
		unsigned long saved_flags;

		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock, flags);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task))
				continue;
			if (unlikely(ret))
				break;
		}

		saved_flags = current->flags & PF_NOSCHED;
		current->flags &= ~PF_NOSCHED;

		spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock_irq(&lock->wait_lock);

		current->flags |= saved_flags;
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter, flags);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	/* Must we reacquire the BKL? */
	if (unlikely(saved_lock_depth >= 0))
		rt_reacquire_bkl(saved_lock_depth);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
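For context, this slow path only runs when the lockless fast path fails to cmpxchg the owner field from NULL to current. A minimal sketch of the dispatching wrapper, roughly following the mainline rt_mutex_fastlock() of the same era; the exact signature in this tree may differ:

/*
 * Fast-path dispatcher (sketch, not necessarily this tree's exact code):
 * try a lockless cmpxchg of the owner field; fall back to the slow path
 * on contention or when deadlock detection is requested.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current)))
		return 0;
	else
		return slowfn(lock, state, NULL, detect_deadlock);
}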
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	int ret = 0, saved_lock_depth = -1;
	struct rt_mutex_waiter waiter;
	unsigned long flags;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	init_lists(lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock,
				      flags, 0);
	/*
	 * We drop the BKL here before we go into the wait loop to avoid a
	 * possible deadlock in the scheduler.
	 *
	 * Note: This must be done after we call task_blocks_on_rt_mutex
	 *	 because rt_release_bkl() releases the wait_lock and will
	 *	 cause a race between setting the mark waiters flag in
	 *	 the owner field and adding this task to the wait list. Those
	 *	 two must be done within the protection of the wait_lock.
	 */
	if (unlikely(current->lock_depth >= 0))
		saved_lock_depth = rt_release_bkl(lock, flags);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
					  flags);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter, flags);
	BUG_ON(!plist_node_empty(&waiter.list_entry));

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/* Must we reacquire the BKL? */
	if (unlikely(saved_lock_depth >= 0))
		rt_reacquire_bkl(saved_lock_depth);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
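The second version factors the wait loop out into __rt_mutex_slowlock(), which is not shown here. A sketch of that helper, reconstructed from the inline loop of the first version (the flags parameter and the plain irq-disable locking inside the loop are assumptions; the exact body in this tree may differ):

/*
 * Sketch of the factored-out wait loop, reconstructed from the inline
 * loop in the earlier version above. Called and returns with
 * lock->wait_lock held and interrupts disabled; flags is threaded
 * through from the caller's irqsave section (assumed, unused here).
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter, unsigned long flags)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/* Drop the wait_lock across the actual sleep: */
		raw_spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}

Note how this refactoring lets rt_mutex_slowlock() enqueue the waiter exactly once via task_blocks_on_rt_mutex() before entering the loop, which is what allows the BUG_ON(!plist_node_empty(...)) assertion and the simpler error handling in the second version.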