int fastcall rt_down_read_trylock(struct rw_semaphore *rwsem)
{
        unsigned long flags;
        int ret;

        /*
         * Read locks within the self-held write lock succeed.
         */
        spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
        if (rt_mutex_real_owner(&rwsem->lock) == current) {
                spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
                rwsem->read_depth++;
                /*
                 * NOTE: we handle it as a write-lock:
                 */
                rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);

        ret = rt_mutex_trylock(&rwsem->lock);
        if (ret)
                rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
        return ret;
}
int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
        struct rt_mutex *lock = &rwlock->lock;
        unsigned long flags;
        int ret;

        /*
         * Read locks within the self-held write lock succeed.
         */
        spin_lock_irqsave(&lock->wait_lock, flags);
        if (rt_mutex_real_owner(lock) == current) {
                spin_unlock_irqrestore(&lock->wait_lock, flags);
                rwlock->read_depth++;
                /*
                 * NOTE: we handle it as a write-lock:
                 */
                rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        spin_unlock_irqrestore(&lock->wait_lock, flags);

        ret = rt_mutex_trylock(lock);
        if (ret)
                rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
        return ret;
}
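To see what the self-held-write check buys, here is a minimal, hypothetical caller. It assumes the generic read_trylock()/write_lock() entry points are routed to the rt_* functions above, as they are under PREEMPT_RT; the lock and the helper function are illustrative only:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(state_lock);       /* assumed example lock */

static void update_and_peek(void)
{
        write_lock(&state_lock);

        /*
         * We are the rt_mutex owner, so this variant of
         * rt_read_trylock() succeeds immediately and only bumps
         * read_depth instead of contending on the lock.
         */
        if (read_trylock(&state_lock)) {
                /* ... code shared with pure readers ... */
                read_unlock(&state_lock);       /* read_depth back to 0 */
        }

        write_unlock(&state_lock);
}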
static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
        struct rt_mutex *lock = &rwsem->lock;

        rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);

        if (rt_mutex_real_owner(lock) != current)
                rt_mutex_lock(&rwsem->lock);
        rwsem->read_depth++;
}
void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
        struct rt_mutex *lock = &rwlock->lock;

        rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
        /*
         * recursive read locks succeed when current owns the lock
         */
        if (rt_mutex_real_owner(lock) != current)
                __rt_spin_lock(lock);
        rwlock->read_depth++;
}
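A small sketch of the recursion this allows, again assuming read_lock() maps onto the rt_read_lock() above and that the matching unlock path releases the rt_mutex once read_depth returns to zero (the lock and both functions here are hypothetical). Only the outermost reader takes the rt_mutex; a nested acquisition by the same task just increments read_depth:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(cfg_lock);         /* assumed example lock */

static void leaf_reader(void)
{
        read_lock(&cfg_lock);           /* owner == current: depth++ only */
        /* ... shared read-side code ... */
        read_unlock(&cfg_lock);
}

static void outer_reader(void)
{
        read_lock(&cfg_lock);           /* takes the underlying rt_mutex */
        leaf_reader();                  /* safe: reads may nest */
        read_unlock(&cfg_lock);
}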
void fastcall rt_up_read_non_owner(struct rw_semaphore *rwsem)
{
        unsigned long flags;

        /*
         * Read locks within the self-held write lock succeed.
         */
        spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
        if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) {
                spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
                rwsem->read_depth--;
                return;
        }
        spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
        rt_mutex_unlock(&rwsem->lock);
}
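For context, the non-owner variant exists because the read side may be released by a different task than the one that acquired it, the down_read_non_owner()/up_read_non_owner() pattern of the stock rwsem API, which is why the function cannot simply trust the owner check. A minimal, hypothetical hand-off sketch; the semaphore and both helpers are illustrative:

#include <linux/rwsem.h>

static DECLARE_RWSEM(buf_sem);          /* assumed example semaphore */

/* Thread A: pin the buffer for someone else, without claiming ownership. */
static void producer_pin(void)
{
        down_read_non_owner(&buf_sem);
        /* ... queue the protected buffer to a worker ... */
}

/* Thread B: release the read side that thread A acquired. */
static void consumer_unpin(void)
{
        /* ... done with the buffer ... */
        up_read_non_owner(&buf_sem);
}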
void __lockfunc rt_read_lock(rwlock_t *rwlock)
{
        unsigned long flags;
        struct rt_mutex *lock = &rwlock->lock;

        rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
        /*
         * Read locks within the write lock succeed.
         */
        spin_lock_irqsave(&lock->wait_lock, flags);
        if (rt_mutex_real_owner(lock) == current) {
                spin_unlock_irqrestore(&lock->wait_lock, flags);
                rwlock->read_depth++;
                return;
        }
        spin_unlock_irqrestore(&lock->wait_lock, flags);
        __rt_spin_lock(lock);
}
static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
        unsigned long flags;

        rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);

        /*
         * Read locks within the write lock succeed.
         */
        spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
        if (rt_mutex_real_owner(&rwsem->lock) == current) {
                spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
                rwsem->read_depth++;
                return;
        }
        spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
        rt_mutex_lock(&rwsem->lock);
}
void __lockfunc rt_read_unlock(rwlock_t *rwlock)
{
        struct rt_mutex *lock = &rwlock->lock;
        unsigned long flags;

        rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
        // TRACE_WARN_ON(lock->save_state != 1);

        /*
         * Read locks within the self-held write lock succeed.
         */
        spin_lock_irqsave(&lock->wait_lock, flags);
        if (rt_mutex_real_owner(lock) == current && rwlock->read_depth) {
                spin_unlock_irqrestore(&lock->wait_lock, flags);
                rwlock->read_depth--;
                return;
        }
        spin_unlock_irqrestore(&lock->wait_lock, flags);
        __rt_spin_unlock(&rwlock->lock);
}
int rt_down_read_trylock(struct rw_semaphore *rwsem)
{
        struct rt_mutex *lock = &rwsem->lock;
        int ret = 1;

        /*
         * recursive read locks succeed when current owns the rwsem,
         * but not when read_depth == 0 which means that the rwsem is
         * write locked.
         */
        if (rt_mutex_real_owner(lock) != current)
                ret = rt_mutex_trylock(&rwsem->lock);
        else if (!rwsem->read_depth)
                ret = 0;

        if (ret) {
                rwsem->read_depth++;
                rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
        }
        return ret;
}
int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
        struct rt_mutex *lock = &rwlock->lock;
        int ret = 1;

        /*
         * recursive read locks succeed when current owns the lock,
         * but not when read_depth == 0 which means that the lock is
         * write locked.
         */
        if (rt_mutex_real_owner(lock) != current)
                ret = rt_mutex_trylock(lock);
        else if (!rwlock->read_depth)
                ret = 0;

        if (ret) {
                rwlock->read_depth++;
                rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
        }
        return ret;
}
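To make the read_depth guard concrete, a minimal, hypothetical reader, assuming read_lock()/read_trylock() are routed to the rt_* functions above and that the matching unlock releases the rt_mutex when read_depth reaches zero. Note the contrast with the earlier variant: a recursive trylock on a lock we already hold for reading succeeds without touching the rt_mutex, while the same trylock on a lock we hold for writing (read_depth == 0) now fails:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(tbl_lock);         /* assumed example lock */

static void reader_path(void)
{
        read_lock(&tbl_lock);           /* read_depth: 0 -> 1 */

        /*
         * owner == current and read_depth > 0, so the trylock
         * succeeds purely by bumping read_depth.
         */
        if (read_trylock(&tbl_lock))    /* read_depth: 1 -> 2 */
                read_unlock(&tbl_lock); /* read_depth: 2 -> 1 */

        read_unlock(&tbl_lock);         /* read_depth: 1 -> 0, lock freed */
}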
void fastcall rt_down_read(struct rw_semaphore *rwsem)
{
        unsigned long flags;

        /*
         * NOTE: we handle it as a write-lock:
         */
        rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);

        /*
         * Read locks within the write lock succeed.
         */
        spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
        if (rt_mutex_real_owner(&rwsem->lock) == current) {
                spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
                /* TODO: lockdep: acquire-read here? */
                rwsem->read_depth++;
                return;
        }
        spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
        rt_mutex_lock(&rwsem->lock);
}
/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void rt_downgrade_write(struct rw_semaphore *rwsem)
{
        BUG_ON(rt_mutex_real_owner(&rwsem->lock) != current);
        rwsem->read_depth = 1;
}
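A short usage sketch, assuming down_write()/downgrade_write()/up_read() are wired to the rt_* implementations above and that the up_read() path releases the rt_mutex when read_depth reaches zero, as the trylock variant above suggests; the semaphore and the surrounding function are illustrative. Setting read_depth to 1 makes the writer look like a single reader, so the final up_read() drops the depth to zero and frees the lock:

#include <linux/rwsem.h>

static DECLARE_RWSEM(tree_sem);         /* assumed example semaphore */

static void insert_then_scan(void)
{
        down_write(&tree_sem);
        /* ... modify the protected structure ... */

        downgrade_write(&tree_sem);     /* still owner, read_depth == 1 */

        /* ... read-only work under the read side ... */
        up_read(&tree_sem);             /* read_depth 1 -> 0: mutex released */
}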