/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:		the rt_mutex to be released
 * @proxy_owner:	the task on whose behalf the lock is released
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support.
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}
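Since this is a special API for the PI-futex code, a short sketch of how a
caller might use it may help. This is illustrative only: "example_pi_state"
and "example_free_pi_state" are invented stand-ins, not the real futex code.

/*
 * Illustrative only: a hypothetical PI-futex teardown path. The real
 * futex code differs in detail; the point is that the caller provides
 * the serialization (per the "No locking" rule above) before releasing
 * the rt_mutex on the owner's behalf.
 */
struct example_pi_state {
	struct rt_mutex pi_mutex;	/* lock held on the owner's behalf */
	struct task_struct *owner;	/* task the lock is attributed to */
};

static void example_free_pi_state(struct example_pi_state *pi_state)
{
	/* Caller already holds e.g. the futex hash bucket lock. */
	rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
}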
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
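The fast path is a single compare-and-exchange: if the owner word still
holds current with no waiter state encoded in it, one atomic operation
releases the lock; anything else falls back to the slow path. Below is a
self-contained userspace analogue of that pattern using C11 atomics. All
"toy_*" names are invented for illustration; this is not the kernel API.

#include <stdatomic.h>
#include <stddef.h>

/* Toy stand-in: the owner is just an opaque pointer here. */
struct toy_lock {
	_Atomic(void *) owner;
};

static void toy_slowunlock(struct toy_lock *lock)
{
	/* ...take the wait-list lock, wake the next waiter, etc... */
}

static void toy_fastunlock(struct toy_lock *lock, void *self)
{
	void *expected = self;

	/*
	 * Fast path: the owner word is still exactly us, so a single
	 * compare-exchange releases the lock. If it fails (waiters
	 * queued, extra state set), fall into the slow path.
	 */
	if (atomic_compare_exchange_strong(&lock->owner, &expected, NULL))
		return;
	toy_slowunlock(lock);
}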
static inline void
rt_spin_lock_fastunlock(struct rt_mutex *lock,
			void (*slowfn)(struct rt_mutex *lock))
{
	/* Temporary HACK! */
	if (unlikely(rt_mutex_owner(lock) != current) && current->in_printk)
		/* don't grab locks for printk in atomic */
		return;

	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
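The early return above only makes sense if the lock side skipped the
acquisition under the same printk-in-atomic conditions; otherwise a held
lock would leak. The following is a sketch of what that lock-side
counterpart presumably looks like, reconstructed from the unlock-side
logic rather than quoted from the tree, so treat the exact condition and
the rt_mutex_deadlock_account_lock() call as assumptions.

/*
 * Sketch of the matching lock-side hack: if printk runs in atomic
 * context it cannot sleep, so the lock is never taken, and the unlock
 * path above must correspondingly bail out when it is not the owner.
 */
static inline void
rt_spin_lock_fastlock(struct rt_mutex *lock,
		      void (*slowfn)(struct rt_mutex *lock))
{
	/* Temporary HACK! */
	if (current->in_printk && (in_atomic() || irqs_disabled()))
		/* don't grab locks for printk in atomic */
		return;

	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
		rt_mutex_deadlock_account_lock(lock, current);
	else
		slowfn(lock);
}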
/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	/* savestate == 0: regular mutex path, unlike the spinlock variant */
	wakeup_next_waiter(lock, 0);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
/*
 * Slow path to release a rt_mutex spin_lock style
 */
static void fastcall noinline __sched
rt_spin_lock_slowunlock(struct rt_mutex *lock)
{
	unsigned long flags;

	spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		return;
	}

	wakeup_next_waiter(lock, 1);

	spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Undo pi boosting when necessary: */
	rt_mutex_adjust_prio(current);
}
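Both slow paths share one shape: take wait_lock, clear the owner if nobody
is queued, otherwise wake the top waiter, and only after dropping wait_lock
undo any priority inherited from waiters. A self-contained pthread analogue
of that shape follows; all "toy_*" names are invented, and since pthreads
has no PI boost to undo, that final step is a stub.

#include <pthread.h>
#include <stddef.h>

/*
 * Toy analogue of the rt_mutex slow unlock. wait_lock protects the
 * waiter bookkeeping, like lock->wait_lock above.
 */
struct toy_rtlock {
	pthread_mutex_t wait_lock;	/* analogue of lock->wait_lock */
	pthread_cond_t waiters_cv;	/* wakeup channel for queued waiters */
	int nr_waiters;			/* analogue of rt_mutex_has_waiters() */
	void *owner;			/* current owner, NULL when free */
};

static void toy_undo_boost(void)
{
	/* analogue of rt_mutex_adjust_prio(current); nothing to do here */
}

static void toy_slowunlock(struct toy_rtlock *lock)
{
	pthread_mutex_lock(&lock->wait_lock);

	if (lock->nr_waiters == 0) {
		/* Nobody queued: just clear ownership and return. */
		lock->owner = NULL;
		pthread_mutex_unlock(&lock->wait_lock);
		return;
	}

	/* Wake one queued waiter; it becomes the next owner candidate. */
	pthread_cond_signal(&lock->waiters_cv);
	pthread_mutex_unlock(&lock->wait_lock);

	/* Outside wait_lock: drop any priority inherited from waiters. */
	toy_undo_boost();
}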