/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
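/*
 * Hedged usage sketch (not quoted from futex.c): how a FUTEX_REQUEUE_PI
 * style caller might drive rt_mutex_start_proxy_lock() above. The helper
 * name requeue_pi_prepare_sketch() and its argument list are illustrative
 * assumptions; only the rt_mutex_start_proxy_lock() return contract and
 * wake_up_process() are taken as given.
 */
static int requeue_pi_prepare_sketch(struct rt_mutex *pi_mutex,
				     struct rt_mutex_waiter *rt_waiter,
				     struct task_struct *task)
{
	int ret;

	/* Block @task on @pi_mutex, or acquire the lock on its behalf. */
	ret = rt_mutex_start_proxy_lock(pi_mutex, rt_waiter, task, 0);
	if (ret == 1) {
		/* Lock was acquired for @task: the caller must wake it up. */
		wake_up_process(task);
		ret = 0;
	}

	/* 0: task now blocks on the lock; <0: error, caller unwinds. */
	return ret;
}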
/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and
	 * try to acquire the lock.
	 */
	raw_spin_lock(&lock->wait_lock);

	ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
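/*
 * Hedged sketch of the try-lock fast path that would fall back to the
 * slow path above. The cmpxchg-based fast path (rt_mutex_cmpxchg()) is
 * assumed from the usual rt_mutex fast-path conventions, not quoted
 * source; the _sketch suffix marks the helper as illustrative.
 */
static inline int
rt_mutex_fasttrylock_sketch(struct rt_mutex *lock,
			    int (*slowfn)(struct rt_mutex *lock))
{
	/* Uncontended case: atomically claim ownership and report success. */
	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
		return 1;

	/* Contended or debug case: take the wait_lock and retry carefully. */
	return slowfn(lock);
}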
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
				  detect_deadlock);

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	RB_CLEAR_NODE(&waiter.pi_tree_entry);
	RB_CLEAR_NODE(&waiter.tree_entry);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
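/*
 * Hedged sketch of how a slow path like the one above is normally
 * reached: the public lock functions first attempt a single cmpxchg of
 * the owner field from NULL to current and only call the slow path on
 * contention or when deadlock detection is requested. The helper names
 * (rt_mutex_fastlock_sketch, rt_mutex_cmpxchg) are assumptions based on
 * the rt_mutex fast-path conventions, not quoted source.
 */
static inline int
rt_mutex_fastlock_sketch(struct rt_mutex *lock, int state,
			 int detect_deadlock,
			 int (*slowfn)(struct rt_mutex *lock, int state,
				       struct hrtimer_sleeper *timeout,
				       int detect_deadlock))
{
	/* Uncontended, no deadlock detection: take the lock with one cmpxchg. */
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current)))
		return 0;

	/* Contended: fall back to the full slow path with no timeout. */
	return slowfn(lock, state, NULL, detect_deadlock);
}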
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;
	int was_disabled;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		was_disabled = irqs_disabled();
		if (was_disabled)
			local_irq_enable();

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		if (was_disabled)
			local_irq_disable();

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}
/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lock->wait_lock, flags);

	if (likely(rt_mutex_owner(lock) != current)) {

		init_lists(lock);

		ret = try_to_take_rt_mutex(lock);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	int ret = 0, saved_lock_depth = -1;
	struct rt_mutex_waiter waiter;
	unsigned long flags;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock_irqsave(&lock->wait_lock, flags);
	init_lists(lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	/*
	 * We drop the BKL here before we go into the wait loop to avoid a
	 * possible deadlock in the scheduler.
	 */
	if (unlikely(current->lock_depth >= 0))
		saved_lock_depth = rt_release_bkl(lock, flags);

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_MODE_ABS);

	for (;;) {
		unsigned long saved_flags;

		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock, flags);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task))
				continue;

			if (unlikely(ret))
				break;
		}

		saved_flags = current->flags & PF_NOSCHED;
		current->flags &= ~PF_NOSCHED;

		spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock_irq(&lock->wait_lock);

		current->flags |= saved_flags;
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter, flags);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	/* Must we reacquire the BKL? */
	if (unlikely(saved_lock_depth >= 0))
		rt_reacquire_bkl(saved_lock_depth);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path lock function spin_lock style: this variant is very
 * careful not to miss any non-lock wakeups.
 *
 * The wakeup side uses wake_up_process_mutex, which, combined with
 * the xchg code of this function is a transparent sleep/wakeup
 * mechanism nested within any existing sleep/wakeup mechanism. This
 * enables the seamless use of arbitrary (blocking) spinlocks within
 * sleep/wakeup event loops.
 */
static void fastcall noinline __sched
rt_spin_lock_slowlock(struct rt_mutex *lock)
{
	struct rt_mutex_waiter waiter;
	unsigned long saved_state, state, flags;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock_irqsave(&lock->wait_lock, flags);
	init_lists(lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		return;
	}

	BUG_ON(rt_mutex_owner(lock) == current);

	/*
	 * Here we save whatever state the task was in originally,
	 * we'll restore it at the end of the function and we'll take
	 * any intermediate wakeup into account as well, independently
	 * of the lock sleep/wakeup mechanism. When we get a real
	 * wakeup the task->state is TASK_RUNNING and we change
	 * saved_state accordingly. If we did not get a real wakeup
	 * then we return with the saved state.
	 */
	saved_state = xchg(&current->state, TASK_UNINTERRUPTIBLE);

	for (;;) {
		unsigned long saved_flags;
		int saved_lock_depth = current->lock_depth;

		/* Try to acquire the lock */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			task_blocks_on_rt_mutex(lock, &waiter, 0, flags);
			/* Wakeup during boost ? */
			if (unlikely(!waiter.task))
				continue;
		}

		/*
		 * Prevent schedule() to drop BKL, while waiting for
		 * the lock ! We restore lock_depth when we come back.
		 */
		saved_flags = current->flags & PF_NOSCHED;
		current->lock_depth = -1;
		current->flags &= ~PF_NOSCHED;
		spin_unlock_irqrestore(&lock->wait_lock, flags);

		debug_rt_mutex_print_deadlock(&waiter);

		schedule_rt_mutex(lock);

		spin_lock_irqsave(&lock->wait_lock, flags);
		current->flags |= saved_flags;
		current->lock_depth = saved_lock_depth;
		state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
		if (unlikely(state == TASK_RUNNING))
			saved_state = TASK_RUNNING;
	}

	state = xchg(&current->state, saved_state);
	if (unlikely(state == TASK_RUNNING))
		current->state = TASK_RUNNING;

	/*
	 * Extremely rare case, if we got woken up by a non-mutex wakeup,
	 * and we managed to steal the lock despite us not being the
	 * highest-prio waiter (due to SCHED_OTHER changing prio), then we
	 * can end up with a non-NULL waiter.task:
	 */
	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter, flags);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up:
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock_irqrestore(&lock->wait_lock, flags);

	debug_rt_mutex_free_waiter(&waiter);
}
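/*
 * Hedged sketch of the PREEMPT_RT spinlock fast path that would hand off
 * to rt_spin_lock_slowlock() above on contention. rt_mutex_cmpxchg() and
 * the _sketch helper name are assumptions based on the surrounding code,
 * not quoted source.
 */
static inline void
rt_spin_lock_fastlock_sketch(struct rt_mutex *lock,
			     void (*slowfn)(struct rt_mutex *lock))
{
	/* Uncontended: claim ownership with a single cmpxchg and return. */
	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
		return;

	/* Contended: enter the state-preserving spin_lock style slow path. */
	slowfn(lock);
}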
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		the rt_mutex to take
 * @state:		the state the task should block in (TASK_INTERRUPTIBLE
 *			or TASK_UNINTERRUPTIBLE)
 * @timeout:		the pre-initialized and started timer, or NULL for none
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	passed to task_blocks_on_rt_mutex
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter,
		    int detect_deadlock)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter->task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter->task) {
			ret = task_blocks_on_rt_mutex(lock, waiter, current,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter->task)) {
				/*
				 * Reset the return value. We might
				 * have returned with -EDEADLK and the
				 * owner released the lock while we
				 * were walking the pi chain.
				 */
				ret = 0;
				continue;
			}
			if (unlikely(ret))
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		if (waiter->task)
			schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	int ret = 0, saved_lock_depth = -1;
	struct rt_mutex_waiter waiter;
	unsigned long flags;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	init_lists(lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock,
				      flags, 0);

	/*
	 * We drop the BKL here before we go into the wait loop to avoid a
	 * possible deadlock in the scheduler.
	 *
	 * Note: This must be done after we call task_blocks_on_rt_mutex
	 *	 because rt_release_bkl() releases the wait_lock and will
	 *	 cause a race between setting the mark waiters flag in
	 *	 the owner field and adding this task to the wait list. Those
	 *	 two must be done within the protection of the wait_lock.
	 */
	if (unlikely(current->lock_depth >= 0))
		saved_lock_depth = rt_release_bkl(lock, flags);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, flags);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter, flags);
	BUG_ON(!plist_node_empty(&waiter.list_entry));

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/* Must we reacquire the BKL? */
	if (unlikely(saved_lock_depth >= 0))
		rt_reacquire_bkl(saved_lock_depth);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_MODE_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task)) {
				/*
				 * Reset the return value. We might
				 * have returned with -EDEADLK and the
				 * owner released the lock while we
				 * were walking the pi chain.
				 */
				ret = 0;
				continue;
			}
			if (unlikely(ret))
				break;
		}

		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}