/*
 * rt_read_unlock - drop one level of a (possibly recursive) reader hold
 * on an RT rwlock.
 *
 * NOTE(review): the file contained two conflicting definitions of
 * rt_read_unlock -- an older variant that took lock->wait_lock and
 * checked rt_mutex_real_owner(), and the current read_depth-based one.
 * Two definitions of the same symbol in one translation unit is a build
 * error, so only the read_depth variant is kept here; it matches the
 * read_depth accounting used on the lock side.
 *
 * read_depth is presumably only ever touched by the task that holds the
 * lock, so it needs no extra serialization here (the removed variant
 * verified ownership under wait_lock) -- TODO confirm against
 * rt_read_lock.  The underlying rtmutex is released only when the
 * outermost read-side critical section ends.
 */
void __lockfunc rt_read_unlock(rwlock_t *rwlock)
{
	/* Tell lockdep we are releasing one nesting level. */
	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);

	/* Release the lock only when read_depth is down to 0 */
	if (--rwlock->read_depth == 0)
		__rt_spin_unlock(&rwlock->lock);
}
/*
 * rt_write_unlock - release the writer side of an RT rwlock.
 *
 * Writers are not recursive, so this unconditionally hands the
 * underlying rtmutex back.
 */
void __lockfunc rt_write_unlock(rwlock_t *rwlock)
{
	struct rt_mutex *lock = &rwlock->lock;

	/* Lockdep nesting level is always reported as '1', for simplicity. */
	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
	__rt_spin_unlock(lock);
}