int fastcall rt_down_read_trylock(struct rw_semaphore *rwsem)
{
	unsigned long flags;
	int ret;

	/*
	 * Read locks within the self-held write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
	if (rt_mutex_real_owner(&rwsem->lock) == current) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		rwsem->read_depth++;
		/*
		 * NOTE: we handle it as a write-lock:
		 */
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);

	ret = rt_mutex_trylock(&rwsem->lock);
	if (ret)
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
	struct rt_mutex *lock = &rwlock->lock;
	unsigned long flags;
	int ret;

	/*
	 * Read locks within the self-held write lock succeed.
	 */
	spin_lock_irqsave(&lock->wait_lock, flags);
	if (rt_mutex_real_owner(lock) == current) {
		spin_unlock_irqrestore(&lock->wait_lock, flags);
		rwlock->read_depth++;
		/*
		 * NOTE: we handle it as a write-lock:
		 */
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	spin_unlock_irqrestore(&lock->wait_lock, flags);

	ret = rt_mutex_trylock(lock);
	if (ret)
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
int rt_down_write_trylock(struct rw_semaphore *rwsem)
{
	int ret = rt_mutex_trylock(&rwsem->lock);

	if (ret)
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
int __lockfunc rt_spin_trylock(spinlock_t *lock)
{
	int ret = rt_mutex_trylock(&lock->lock);

	if (ret)
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
/*
 * rwlock_t functions
 */
int __lockfunc rt_write_trylock(rwlock_t *rwlock)
{
	int ret = rt_mutex_trylock(&rwlock->lock);

	if (ret)
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
int __lockfunc _mutex_trylock(struct mutex *lock)
{
	int ret = rt_mutex_trylock(&lock->lock);

	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
{
	int ret;

	/*
	 * Interrupts are not actually disabled here on -rt; *flags is
	 * zeroed only so callers' irqsave/irqrestore pairing stays valid.
	 */
	*flags = 0;
	ret = rt_mutex_trylock(&lock->lock);
	if (ret)
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	return ret;
}
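/*
 * Illustrative caller sketch (not from the original source): since the
 * variant above never really disables interrupts, a caller just keeps
 * the usual trylock-irqsave/unlock-irqrestore pairing. 'demo_lock' and
 * 'demo_try_update' are made-up names.
 */
static DEFINE_SPINLOCK(demo_lock);

static int demo_try_update(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&demo_lock, flags))
		return -EBUSY;		/* contended, let the caller retry */

	/* ... critical section ... */

	spin_unlock_irqrestore(&demo_lock, flags);
	return 0;
}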
static int i2c_parent_trylock_bus(struct i2c_adapter *adapter,
				  unsigned int flags)
{
	struct i2c_mux_priv *priv = adapter->algo_data;
	struct i2c_adapter *parent = priv->muxc->parent;

	if (!rt_mutex_trylock(&parent->mux_lock))
		return 0;	/* mux_lock not locked, failure */
	if (i2c_trylock_bus(parent, flags))
		return 1;	/* parent locked too, success */
	rt_mutex_unlock(&parent->mux_lock);
	return 0;		/* parent not locked, failure */
}
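/*
 * Sketch (not from the original source) of how the trylock above gets
 * wired into a mux child adapter, assuming the i2c_lock_operations
 * callback table and the companion i2c_parent_lock_bus()/
 * i2c_parent_unlock_bus() helpers that live alongside it in
 * drivers/i2c/i2c-mux.c.
 */
static const struct i2c_lock_operations i2c_parent_lock_ops = {
	.lock_bus    = i2c_parent_lock_bus,
	.trylock_bus = i2c_parent_trylock_bus,
	.unlock_bus  = i2c_parent_unlock_bus,
};

/* ... and when the child adapter is created:
 *	priv->adap.lock_ops = &i2c_parent_lock_ops;
 */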
/*
 * Try to down the semaphore; returns 0 on success and 1 on failure
 * (inverted convention, matching down_trylock()).
 */
int rt_down_trylock(struct semaphore *sem)
{
	/*
	 * Here we are a tiny bit different from ordinary Linux semaphores,
	 * because we can get 'transient' locking failures when, say, a
	 * process decreases the count from 9 to 8 and locks/releases the
	 * embedded mutex internally. It would be quite complex to remove
	 * these transient failures, so let's try it the simple way first:
	 */
	if (rt_mutex_trylock(&sem->lock)) {
		__down_complete(sem);
		return 0;
	}
	return 1;
}
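/*
 * Illustrative caller sketch (not from the original source), showing the
 * inverted return convention: zero means the semaphore is now held.
 * rt_up() is assumed to be the matching release in the same -rt API;
 * 'demo_take' is a made-up name.
 */
static int demo_take(struct semaphore *sem)
{
	if (rt_down_trylock(sem))
		return -EBUSY;	/* held elsewhere, possibly only transiently */

	/* ... protected section ... */

	rt_up(sem);
	return 0;
}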
int rt_down_read_trylock(struct rw_semaphore *rwsem)
{
	struct rt_mutex *lock = &rwsem->lock;
	int ret = 1;

	/*
	 * recursive read locks succeed when current owns the rwsem,
	 * but not when read_depth == 0 which means that the rwsem is
	 * write locked.
	 */
	if (rt_mutex_real_owner(lock) != current)
		ret = rt_mutex_trylock(&rwsem->lock);
	else if (!rwsem->read_depth)
		ret = 0;

	if (ret) {
		rwsem->read_depth++;
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	}
	return ret;
}
/***************** I2C functions *****************/
static int ds2482_get_i2c_bus(struct i2c_client *client)
{
	struct i2c_adapter *adap = client->adapter;
	int ret;

	if (adap->algo->master_xfer) {
		if (in_atomic() || irqs_disabled()) {
			ret = rt_mutex_trylock(&adap->bus_lock);
			if (!ret)
				/* I2C activity is ongoing. */
				return -EAGAIN;
		} else {
			rt_mutex_lock(&adap->bus_lock);
		}
		return 0;
	} else {
		dev_err(&client->dev, "I2C level transfers not supported\n");
		return -EOPNOTSUPP;
	}
}
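/*
 * Sketch (not from the original source) of the matching release path;
 * the helper name ds2482_rel_i2c_bus is illustrative. Call it only after
 * ds2482_get_i2c_bus() returned 0, i.e. when bus_lock is actually held.
 */
static void ds2482_rel_i2c_bus(struct i2c_client *client)
{
	struct i2c_adapter *adap = client->adapter;

	rt_mutex_unlock(&adap->bus_lock);
}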
int __lockfunc rt_read_trylock(rwlock_t *rwlock)
{
	struct rt_mutex *lock = &rwlock->lock;
	int ret = 1;

	/*
	 * recursive read locks succeed when current owns the lock,
	 * but not when read_depth == 0 which means that the lock is
	 * write locked.
	 */
	if (rt_mutex_real_owner(lock) != current)
		ret = rt_mutex_trylock(lock);
	else if (!rwlock->read_depth)
		ret = 0;

	if (ret) {
		rwlock->read_depth++;
		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
	}
	return ret;
}
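/*
 * Illustrative sketch (not from the original source) of the read_depth
 * semantics above, assuming the usual -rt mapping of read_lock()/
 * read_trylock()/write_lock() onto the rt_* implementations.
 * 'demo_rwlock' and 'demo_rwlock_recursion' are made-up names.
 */
static DEFINE_RWLOCK(demo_rwlock);

static void demo_rwlock_recursion(void)
{
	read_lock(&demo_rwlock);		/* read_depth: 0 -> 1 */
	if (read_trylock(&demo_rwlock))		/* same owner: succeeds, 1 -> 2 */
		read_unlock(&demo_rwlock);	/* 2 -> 1 */
	read_unlock(&demo_rwlock);		/* 1 -> 0 */

	write_lock(&demo_rwlock);
	/* This trylock fails: read_depth == 0, we hold it as a writer. */
	if (read_trylock(&demo_rwlock))
		read_unlock(&demo_rwlock);	/* not reached */
	write_unlock(&demo_rwlock);
}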