Example #1
0
int fastcall rt_down_read_trylock(struct rw_semaphore *rwsem)
{
	unsigned long flags;
	int ret;

	/*
	 * Read locks within the self-held write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
	if (rt_mutex_real_owner(&rwsem->lock) == current) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		rwsem->read_depth++;
		/*
		 * NOTE: we handle it as a write-lock:
		 */
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);

	ret = rt_mutex_trylock(&rwsem->lock);
	if (ret)
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
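The trylock variant never sleeps, so a caller can simply back off when the semaphore is write-locked by someone else. A minimal caller sketch, assuming the matching rt_up_read() from the same -rt rwsem implementation (struct stats and shared_stats are hypothetical):

/* Hypothetical reader: take the lock opportunistically, back off on contention. */
static int stats_snapshot(struct rw_semaphore *sem, struct stats *out)
{
	if (!rt_down_read_trylock(sem))
		return -EBUSY;		/* a different task holds the write lock */

	*out = shared_stats;		/* read the data protected by sem */
	rt_up_read(sem);		/* drops read_depth / the underlying rt_mutex */
	return 0;
}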
Example #2
0
/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
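down_write() always pairs with up_write(), which performs the matching rwsem_release() for lockdep. A minimal sketch of the usual pattern (struct my_data is hypothetical):

/* Hypothetical writer: exclusive access while updating shared state. */
static void set_value(struct my_data *d, int new_value)
{
	down_write(&d->sem);	/* may sleep; lockdep records the write acquire */
	d->value = new_value;
	up_write(&d->sem);	/* rwsem_release() and wake any waiters */
}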
Example #3
0
void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
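The subclass tells lockdep that taking two rwsems of the same lock class in a fixed order is intentional, so it does not flag a false self-deadlock. A sketch of the classic parent/child pattern, using the stock SINGLE_DEPTH_NESTING constant from lockdep.h (struct node is hypothetical):

/* Hypothetical: write-lock a parent and one of its children, same lock class. */
static void move_child(struct node *parent, struct node *child)
{
	down_write(&parent->rwsem);			/* subclass 0 */
	down_write_nested(&child->rwsem, SINGLE_DEPTH_NESTING);
	/* ... re-link child under parent ... */
	up_write(&child->rwsem);
	up_write(&parent->rwsem);
}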
Example #4
0
/**
 *	kernfs_deactivate - deactivate kernfs_node
 *	@kn: kernfs_node to deactivate
 *
 *	Deny new active references and drain existing ones.
 */
static void kernfs_deactivate(struct kernfs_node *kn)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int v;

	BUG_ON(!(kn->flags & KERNFS_REMOVED));

	if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
		return;

	kn->u.completion = (void *)&wait;

	if (kn->flags & KERNFS_LOCKDEP)
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
	/* atomic_add_return() is a mb(), put_active() will always see
	 * the updated kn->u.completion.
	 */
	v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active);

	if (v != KN_DEACTIVATED_BIAS) {
		if (kn->flags & KERNFS_LOCKDEP)
			lock_contended(&kn->dep_map, _RET_IP_);
		wait_for_completion(&wait);
	}

	if (kn->flags & KERNFS_LOCKDEP) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	}
}
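The rwsem_acquire()/lock_contended()/lock_acquired()/rwsem_release() sequence makes the drain look like a write-lock acquisition to lockdep, so a deadlock between removal and a path that still holds an active reference shows up as an ordinary lock-order report. The wait only finishes when the last active reference is dropped; a sketch of what that put side has to do, reconstructed from the comment above (sketch_put_active is a hypothetical name and the real kernfs helper may differ in detail):

/* Sketch: drop an active reference; the last dropper wakes the remover. */
static void sketch_put_active(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		rwsem_release(&kn->dep_map, 1, _RET_IP_);

	/* atomic_dec_return() is a mb(), so kn->u.completion is visible here */
	if (atomic_dec_return(&kn->active) == KN_DEACTIVATED_BIAS)
		complete(kn->u.completion);	/* lets kernfs_deactivate() return */
}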
Example #5
0
/**
 *	sysfs_deactivate - deactivate sysfs_dirent
 *	@sd: sysfs_dirent to deactivate
 *
 *	Deny new active references and drain existing ones.
 */
static void sysfs_deactivate(struct sysfs_dirent *sd)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int v;

	BUG_ON(!(sd->s_flags & SYSFS_FLAG_REMOVED));

	if (!(sysfs_type(sd) & SYSFS_ACTIVE_REF))
		return;

	sd->u.completion = (void *)&wait;

	rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_);
	/* atomic_add_return() is a mb(), put_active() will always see
	 * the updated sd->u.completion.
	 */
	v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);

	if (v != SD_DEACTIVATED_BIAS) {
		lock_contended(&sd->dep_map, _RET_IP_);
		wait_for_completion(&wait);
	}

	lock_acquired(&sd->dep_map, _RET_IP_);
	rwsem_release(&sd->dep_map, 1, _RET_IP_);
}
Example #6
0
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
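Because the trylock variant never blocks, it suits paths that prefer not to sleep; the caller only has to handle failure. A minimal sketch (struct my_data is hypothetical):

/* Hypothetical: update only if the write lock is immediately available. */
static bool try_update(struct my_data *d, int new_value)
{
	if (!down_write_trylock(&d->sem))
		return false;		/* contended; caller retries later */

	d->value = new_value;
	up_write(&d->sem);
	return true;
}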
Example #7
0
int  rt_down_write_trylock(struct rw_semaphore *rwsem)
{
	int ret = rt_mutex_trylock(&rwsem->lock);

	if (ret)
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
Example #8
0
int down_write_trylock_nested(struct rw_semaphore *sem, int subclass)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, subclass, 1, _RET_IP_);
	return ret;
}
Example #9
0
/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	rwsem_set_owner(sem);
	return 0;
}
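Unlike down_write(), the killable variant gives up when the sleeping task receives a fatal signal, so callers must check the return value. A minimal sketch (struct my_table and resize_table are hypothetical):

/* Hypothetical: write-lock a user-triggered path without blocking a kill. */
static int resize_table(struct my_table *t, size_t new_size)
{
	if (down_write_killable(&t->sem))
		return -EINTR;		/* fatal signal while sleeping on the rwsem */

	t->size = new_size;
	up_write(&t->sem);
	return 0;
}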
Example #10
0
int
rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
	if (__rwsem_tryupgrade(rwsem)) {
		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
		rwsem->owner = current;
#endif
		return (1);
	}
	return (0);
}
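rwsem_tryupgrade() comes from code that adds its own out-of-tree __rwsem_tryupgrade() helper (the parenthesized returns suggest the ZFS/SPL port); on success it swaps the lockdep state from the held read lock to a freshly "try-acquired" write lock. A caller must be prepared for failure and fall back to dropping the read lock, which means revalidating afterwards. A sketch (everything except the lock calls is hypothetical):

/* Hypothetical: promote read access to write access, revalidating on fallback. */
static void promote_and_modify(struct rw_semaphore *sem, struct item *it)
{
	/* entered with down_read(sem) held */
	if (!rwsem_tryupgrade(sem)) {
		up_read(sem);
		down_write(sem);
		/* the lock was dropped, so the item may have changed */
		if (!item_still_valid(it)) {
			up_write(sem);
			return;
		}
	}
	it->flags |= ITEM_DIRTY;	/* now safely under the write lock */
	up_write(sem);
}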
Example #11
0
/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already be removed or in the
 * process of being removed.  Once kernfs_break_active_protection() is
 * invoked, that protection is irreversibly gone for the kernfs operation
 * instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything.  If @kn
	 * is alive, nothing changes.  If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state.  If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}
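Active protection is usually broken because the kernfs operation needs a lock that the removal path also takes while draining active references; holding on to the active reference there would deadlock. A sketch of the pairing, assuming a hypothetical mutex shared with the removal path:

/* Hypothetical kernfs op body: give up active protection around a shared lock. */
static void op_body(struct kernfs_node *kn)
{
	kernfs_break_active_protection(kn);	/* drop our active ref for the drain */
	mutex_lock(&removal_mutex);		/* also held around kernfs_remove(kn) */
	/* ... work that may now race with removal of kn ... */
	mutex_unlock(&removal_mutex);
	kernfs_unbreak_active_protection(kn);	/* re-take the ref before returning */
}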
Example #12
0
int  rt_down_read_trylock(struct rw_semaphore *rwsem)
{
	struct rt_mutex *lock = &rwsem->lock;
	int ret = 1;

	/*
	 * recursive read locks succeed when current owns the rwsem,
	 * but not when read_depth == 0 which means that the rwsem is
	 * write locked.
	 */
	if (rt_mutex_real_owner(lock) != current)
		ret = rt_mutex_trylock(&rwsem->lock);
	else if (!rwsem->read_depth)
		ret = 0;

	if (ret) {
		rwsem->read_depth++;
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	}
	return ret;
}
Example #13
0
void fastcall rt_down_read(struct rw_semaphore *rwsem)
{
	unsigned long flags;

	/*
	 * NOTE: we handle it as a write-lock:
	 */
	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);

	/*
	 * Read locks within the write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);

	if (rt_mutex_real_owner(&rwsem->lock) == current) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		/* TODO: lockdep: acquire-read here? */
		rwsem->read_depth++;
		return;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
	rt_mutex_lock(&rwsem->lock);
}
Example #14
0
void  rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
{
	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
	rt_mutex_lock(&rwsem->lock);
}
Example #15
0
void  rt_down_write(struct rw_semaphore *rwsem)
{
	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
	rt_mutex_lock(&rwsem->lock);
}