Example #1
0
/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);

	rwsem_clear_owner(sem);
	__up_write(sem);
}
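For orientation, here is a minimal caller-side sketch of how such a write lock is normally used. The my_state structure and my_state_update() helper are hypothetical illustrations, not taken from the examples; only struct rw_semaphore, init_rwsem(), down_write() and up_write() are real kernel APIs (from <linux/rwsem.h>).

struct my_state {
	struct rw_semaphore lock;	/* initialised with init_rwsem() */
	int value;
};

static void my_state_update(struct my_state *st, int v)
{
	down_write(&st->lock);	/* records rwsem_acquire() for lockdep */
	st->value = v;
	up_write(&st->lock);	/* records rwsem_release(), as in Example #1 */
}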
Example #2
0
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}
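The other half of that freeze-protection hand-off lives in the completion path. As a hedged sketch (not the actual XFS completion code), the thread that finishes the I/O re-asserts the freeze "lock" to lockdep before committing the transaction and dropping the protection for real. my_ioend_reclaim_freeze() is a hypothetical helper; rwsem_acquire_read() and the s_writers.lock_map indexing match the annotation used above.

static void my_ioend_reclaim_freeze(struct super_block *sb)
{
	/* tell lockdep this context now holds SB_FREEZE_FS protection again */
	rwsem_acquire_read(&sb->s_writers.lock_map[SB_FREEZE_FS - 1],
			   0, 1, _THIS_IP_);
}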
Example #3
0
/*! 2016.10.15 studying */
void up_write(struct rw_semaphore *sem)
{
	/*! rwsem_release() is a no-op unless lockdep is enabled */
	rwsem_release(&sem->dep_map, 1, _RET_IP_);

	__up_write(sem);
}
Example #4
0
/**
 *	kernfs_deactivate - deactivate kernfs_node
 *	@kn: kernfs_node to deactivate
 *
 *	Deny new active references and drain existing ones.
 */
static void kernfs_deactivate(struct kernfs_node *kn)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int v;

	BUG_ON(!(kn->flags & KERNFS_REMOVED));

	if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
		return;

	kn->u.completion = (void *)&wait;

	if (kn->flags & KERNFS_LOCKDEP)
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
	/* atomic_add_return() is a mb(), put_active() will always see
	 * the updated kn->u.completion.
	 */
	v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active);

	if (v != KN_DEACTIVATED_BIAS) {
		if (kn->flags & KERNFS_LOCKDEP)
			lock_contended(&kn->dep_map, _RET_IP_);
		wait_for_completion(&wait);
	}

	if (kn->flags & KERNFS_LOCKDEP) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	}
}
Example #5
0
/**
 *	sysfs_deactivate - deactivate sysfs_dirent
 *	@sd: sysfs_dirent to deactivate
 *
 *	Deny new active references and drain existing ones.
 */
static void sysfs_deactivate(struct sysfs_dirent *sd)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int v;

	BUG_ON(!(sd->s_flags & SYSFS_FLAG_REMOVED));

	if (!(sysfs_type(sd) & SYSFS_ACTIVE_REF))
		return;

	sd->u.completion = (void *)&wait;

	rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_);
	/* atomic_add_return() is a mb(), put_active() will always see
	 * the updated sd->u.completion.
	 */
	v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);

	if (v != SD_DEACTIVATED_BIAS) {
		lock_contended(&sd->dep_map, _RET_IP_);
		wait_for_completion(&wait);
	}

	lock_acquired(&sd->dep_map, _RET_IP_);
	rwsem_release(&sd->dep_map, 1, _RET_IP_);
}
Example #6
0
/*
 * lock for writing
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
		rwsem_release(&sem->dep_map, 1, _RET_IP_);
		return -EINTR;
	}

	rwsem_set_owner(sem);
	return 0;
}
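A hedged usage sketch for the killable variant follows; update_under_lock() is hypothetical. The point is that the return value must be checked: a fatal signal aborts the wait and the lock is not held on failure.

static int update_under_lock(struct rw_semaphore *sem)
{
	if (down_write_killable(sem))
		return -EINTR;		/* fatal signal: the lock was NOT taken */

	/* ... write-side critical section ... */

	up_write(sem);
	return 0;
}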
Example #7
0
int
rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
	if (__rwsem_tryupgrade(rwsem)) {
		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
		rwsem->owner = current;
#endif
		return (1);
	}
	return (0);
}
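A hedged usage sketch for the try-upgrade primitive (not from the SPL sources): a caller that loses the upgrade race falls back to dropping the read lock and re-taking the lock for write. upgrade_or_relock() is hypothetical; rwsem_tryupgrade(), up_read(), down_write() and up_write() all operate on the same struct rw_semaphore.

static void upgrade_or_relock(struct rw_semaphore *rwsem)
{
	if (!rwsem_tryupgrade(rwsem)) {
		up_read(rwsem);		/* another reader is present, back off */
		down_write(rwsem);	/* re-take the lock for write the slow way */
	}

	/* ... write-side work; the lock is now held for write ... */

	up_write(rwsem);
}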
Example #8
0
/**
 *	kernfs_put_active - put an active reference to kernfs_node
 *	@kn: kernfs_node to put an active reference to
 *
 *	Put an active reference to @kn.  This function is a no-op if @kn
 *	is NULL.
 */
void kernfs_put_active(struct kernfs_node *kn)
{
	struct kernfs_root *root;
	int v;

	if (unlikely(!kn))
		return;

	root = kernfs_root(kn);	/* safe: kn is known to be non-NULL here */

	if (kernfs_lockdep(kn))
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	v = atomic_dec_return(&kn->active);
	if (likely(v != KN_DEACTIVATED_BIAS))
		return;

	wake_up_all(&root->deactivate_waitq);
}
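The wake_up_all() above pairs with a waiter on deactivate_waitq. A hedged sketch of that side, modeled loosely on kernfs_drain() rather than copied from it; my_wait_for_drain() is a hypothetical name:

static void my_wait_for_drain(struct kernfs_root *root, struct kernfs_node *kn)
{
	/* sleep until every active reference has been put */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
}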
Example #9
0
void fastcall rt_up_read(struct rw_semaphore *rwsem)
{
	unsigned long flags;

	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
	/*
	 * Read locks within the self-held write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
	if (rt_mutex_real_owner(&rwsem->lock) == current && rwsem->read_depth) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		rwsem->read_depth--;
		return;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
	rt_mutex_unlock(&rwsem->lock);
}
Example #10
0
/**
 *	sysfs_put_active - put an active reference to sysfs_dirent
 *	@sd: sysfs_dirent to put an active reference to
 *
 *	Put an active reference to @sd.  This function is a no-op if @sd
 *	is NULL.
 */
void sysfs_put_active(struct sysfs_dirent *sd)
{
	int v;

	if (unlikely(!sd))
		return;

	if (likely(!sysfs_ignore_lockdep(sd)))
		rwsem_release(&sd->dep_map, 1, _RET_IP_);
	v = atomic_dec_return(&sd->s_active);
	if (likely(v != SD_DEACTIVATED_BIAS))
		return;

	/* atomic_dec_return() is a mb(), we'll always see the updated
	 * sd->u.completion.
	 */
	complete(sd->u.completion);
}
Example #11
0
/**
 *	kernfs_put_active - put an active reference to kernfs_node
 *	@kn: kernfs_node to put an active reference to
 *
 *	Put an active reference to @kn.  This function is a no-op if @kn
 *	is NULL.
 */
void kernfs_put_active(struct kernfs_node *kn)
{
	int v;

	if (unlikely(!kn))
		return;

	if (kn->flags & KERNFS_LOCKDEP)
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	v = atomic_dec_return(&kn->active);
	if (likely(v != KN_DEACTIVATED_BIAS))
		return;

	/*
	 * atomic_dec_return() is a mb(), we'll always see the updated
	 * kn->u.completion.
	 */
	complete(kn->u.completion);
}
Example #12
0
/**
 *	sysfs_put_active - put an active reference to sysfs_dirent
 *	@sd: sysfs_dirent to put an active reference to
 *
 *	Put an active reference to @sd.  This function is a no-op if @sd
 *	is NULL.
 */
void sysfs_put_active(struct sysfs_dirent *sd)
{
	struct completion *cmpl;
	int v;

	if (unlikely(!sd))
		return;

	rwsem_release(&sd->dep_map, 1, _RET_IP_);
	v = atomic_dec_return(&sd->s_active);
	if (likely(v != SD_DEACTIVATED_BIAS))
		return;

	/* atomic_dec_return() is a mb(), we'll always see the updated
	 * sd->s_sibling.
	 */
	cmpl = (void *)sd->s_sibling;
	complete(cmpl);
}
Example #13
0
/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, 1, _RET_IP_);

	__up_read(sem);
}
Example #14
0
void rt_up_read(struct rw_semaphore *rwsem)
{
	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
	if (--rwsem->read_depth == 0)
		rt_mutex_unlock(&rwsem->lock);
}
Example #15
0
void rt_up_write(struct rw_semaphore *rwsem)
{
	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
	rt_mutex_unlock(&rwsem->lock);
}