Example no. 1
STATIC void
vn_wakeup(
	struct vnode	*vp)
{
	VN_LOCK(vp);
	if (vp->v_flag & VWAIT)
		sv_broadcast(vptosync(vp));
	vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED);
	VN_UNLOCK(vp, 0);
}
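The broadcast in vn_wakeup() fires only when a sleeper has set VWAIT; the waiter side of that handshake is not shown in this example. Below is a minimal hypothetical sketch of it, assuming the IRIX-style four-argument sv_wait() used elsewhere in these examples, which atomically drops the lock before sleeping (the vn_wait name, the PINOD priority, and the v_lock field are assumptions, not taken from the example):

STATIC void
vn_wait(
	struct vnode	*vp)
{
	VN_LOCK(vp);
	if (vp->v_flag & (VINACT | VRECLM)) {
		/* Advertise that we are sleeping, then let sv_wait()
		   drop the vnode lock and block until vn_wakeup() or
		   vn_rele() broadcasts vptosync(vp). */
		vp->v_flag |= VWAIT;
		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
		return;
	}
	VN_UNLOCK(vp, 0);
}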
Example no. 2
static int sv_test_2_s_1(void *arg) 
{
	int i;
	sv_t *sv = (sv_t *)arg;

	down(&monitor);
	for(i = 0; i < 3; i++) {
		printk("sv_test_2_s_1: waking one thread.\n");
		sv_signal(sv);
		down(&sem);
	}

	printk("sv_test_2_s_1: signaling and broadcasting again.  Nothing should happen.\n");
	sv_signal(sv);
	sv_broadcast(sv);
	sv_signal(sv);
	sv_broadcast(sv);

	printk("sv_test_2_s_1: talkbacking.\n");
	up(&talkback);
	up(&monitor);
	return 0;
}
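The signaler above pairs with waiter threads that block on the condition variable and acknowledge each wakeup through the `sem` semaphore. A hypothetical sketch of such a waiter follows; the sv_lock()/sv_wait() calling convention (caller takes the sv's lock, sv_wait() releases it and sleeps, with flags and timeout arguments of 0) is an assumption about this test harness's sv API, as is the function name:

static int sv_test_2_w(void *arg)
{
	sv_t *sv = (sv_t *)arg;

	/* Block until sv_test_2_s_1 calls sv_signal(), then release
	   `sem` so the signaler can move on to the next iteration. */
	sv_lock(sv);
	sv_wait(sv, 0, 0);
	printk("sv_test_2_w: thread %d woken.\n", current->pid);
	up(&sem);
	return 0;
}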
Example no. 3
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin(
	xfs_dq_logitem_t *logitem,
	int		  stale)
{
	xfs_dquot_t *dqp;

	dqp = logitem->qli_dquot;
	ASSERT(dqp->q_pincount > 0);
	spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
	dqp->q_pincount--;
	if (dqp->q_pincount == 0) {
		sv_broadcast(&dqp->q_pinwait);
	}
	spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
}
Example no. 4
/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_dqwait_unpin() if the count goes to 0.  The
 * dquot must have been previously pinned with a call to xfs_dqpin().
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
	xfs_dq_logitem_t *logitem)
{
	int 	s;
	xfs_dquot_t *dqp;

	dqp = logitem->qli_dquot;
	ASSERT(dqp->q_pincount > 0);
	s = XFS_DQ_PINLOCK(dqp);
	dqp->q_pincount--;
	if (dqp->q_pincount == 0) {
		sv_broadcast(&dqp->q_pinwait);
	}
	XFS_DQ_PINUNLOCK(dqp, s);
}
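The comment above refers to xfs_dqwait_unpin(), the sleeping side of the q_pincount/q_pinwait protocol, which is not shown in these examples. A minimal sketch of what it plausibly looks like: recheck the pin count under the pin lock so that sv_wait() can atomically release it and sleep until the unpin path broadcasts (the PINOD priority is an assumption):

STATIC void
xfs_dqwait_unpin(
	xfs_dquot_t	*dqp)
{
	int	s;

	s = XFS_DQ_PINLOCK(dqp);
	if (dqp->q_pincount == 0) {
		/* Already unpinned; nothing to wait for. */
		XFS_DQ_PINUNLOCK(dqp, s);
		return;
	}
	/* sv_wait() drops the pin lock and sleeps until
	   xfs_qm_dquot_logitem_unpin() broadcasts q_pinwait. */
	sv_wait(&dqp->q_pinwait, PINOD, &XFS_DQ_TO_QINF(dqp)->qi_pinlock, s);
}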
Example no. 5
/*
 *  Call VOP_INACTIVE on last reference.
 */
void
vn_rele(
	struct vnode	*vp)
{
	int		vcnt;
	int		cache;

	XFS_STATS_INC(vn_rele);

	VN_LOCK(vp);

	vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
	vcnt = vn_count(vp);

	/*
	 * Since we always get called from put_inode we know
	 * that i_count won't be decremented after we
	 * return.
	 */
	if (!vcnt) {
		/*
		 * As soon as we turn this on, no one can find us in vn_get
		 * until we turn off VINACT or VRECLM.
		 */
		vp->v_flag |= VINACT;
		VN_UNLOCK(vp, 0);

		/*
		 * Do not make the VOP_INACTIVE call if there
		 * are no behaviors attached to the vnode to call.
		 */
		if (vp->v_fbhv)
			VOP_INACTIVE(vp, NULL, cache);

		VN_LOCK(vp);
		if (vp->v_flag & VWAIT)
			sv_broadcast(vptosync(vp));

		vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
	}

	VN_UNLOCK(vp, 0);

	vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
}
Example no. 6
static int sv_test_2_s(void *arg) 
{
	int i;
	sv_t *sv = (sv_t *)arg;

	down(&monitor);
	for(i = 0; i < 3; i++) {
		printk("sv_test_2_s: waking one thread (should be %d.)\n", i);
		sv_signal(sv);
		down(&sem);
	}

	printk("sv_test_3_s: waking remaining threads with broadcast.\n");
	sv_broadcast(sv);
	for(; i < 10; i++)
		down(&sem);

	printk("sv_test_3_s: sending talkback.\n");
	up(&talkback);

	printk("sv_test_3_s: exiting.\n");
	up(&monitor);
	return 0;
}
Example no. 7
vnode_t	*
dm_handle_to_vp(
	xfs_handle_t	*handlep,
	short		*typep)
{
	dm_fsreg_t	*fsrp;
	vnode_t		*vp;
	short		type;
	int		lc;			/* lock cookie */
	int		error;
	fid_t		*fidp;

	if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handlep->ha_fsid, &lc)) == NULL)
		return(NULL);

	if (fsrp->fr_state == DM_STATE_MOUNTING) {
		mutex_spinunlock(&fsrp->fr_lock, lc);
		return(NULL);
	}

	for (;;) {
		if (fsrp->fr_state == DM_STATE_MOUNTED)
			break;
		if (fsrp->fr_state == DM_STATE_UNMOUNTED) {
			if (fsrp->fr_unmount && fsrp->fr_hdlcnt == 0)
				sv_broadcast(&fsrp->fr_queue);
			mutex_spinunlock(&fsrp->fr_lock, lc);
			return(NULL);
		}

		/* Must be DM_STATE_UNMOUNTING. */

		fsrp->fr_hdlcnt++;
		sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
		lc = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_hdlcnt--;
	}

	fsrp->fr_vfscnt++;
	mutex_spinunlock(&fsrp->fr_lock, lc);

	/* Now that the mutex is released, wait until we have access to the
	   vnode.
	*/

	fidp = (fid_t*)&handlep->ha_fid;
	if (fidp->fid_len == 0) {	/* filesystem handle */
		VFS_ROOT(fsrp->fr_vfsp, &vp, error);
	} else {				/* file object handle */
		VFS_VGET(fsrp->fr_vfsp, &vp, fidp, error);
	}

	lc = mutex_spinlock(&fsrp->fr_lock);

	fsrp->fr_vfscnt--;
	if (fsrp->fr_unmount && fsrp->fr_vfscnt == 0)
		sv_broadcast(&fsrp->fr_queue);

	mutex_spinunlock(&fsrp->fr_lock, lc);
	if (error || vp == NULL)
		return(NULL);

	if (fidp->fid_len == 0) {
		type = DM_TDT_VFS;
	} else if (vp->v_type == VREG) {
		type = DM_TDT_REG;
	} else if (vp->v_type == VDIR) {
		type = DM_TDT_DIR;
	} else if (vp->v_type == VLNK) {
		type = DM_TDT_LNK;
	} else {
		type = DM_TDT_OTH;
	}
	*typep = type;
	return(vp);
}
Example no. 8
void
dm_change_fsys_entry(
	vfs_t		*vfsp,
	dm_fsstate_t	newstate)
{
	dm_fsreg_t	*fsrp;
	int		seq_error;
	int		lc;			/* lock cookie */

	/* Find the filesystem referenced by the vfsp's fsid_t.  This should
	   always succeed.
	*/

	if ((fsrp = dm_find_fsreg_and_lock(vfsp->vfs_altfsid, &lc)) == NULL) {
		panic("dm_change_fsys_entry: can't find DMAPI fsrp for "
			"vfsp %p\n", vfsp);
	}

	/* Make sure that the new state is acceptable given the current state
	   of the filesystem.  Any error here is a major DMAPI/filesystem
	   screwup.
	*/

	seq_error = 0;
	switch (newstate) {
	case DM_STATE_MOUNTED:
		if (fsrp->fr_state != DM_STATE_MOUNTING &&
		    fsrp->fr_state != DM_STATE_UNMOUNTING) {
			seq_error++;
		}
		break;
	case DM_STATE_UNMOUNTING:
		if (fsrp->fr_state != DM_STATE_MOUNTED)
			seq_error++;
		break;
	case DM_STATE_UNMOUNTED:
		if (fsrp->fr_state != DM_STATE_UNMOUNTING)
			seq_error++;
		break;
	default:
		seq_error++;
		break;
	}
	if (seq_error) {
		panic("dm_change_fsys_entry: DMAPI sequence error: old state "
			"%d, new state %d, fsrp %p\n", fsrp->fr_state,
			newstate, fsrp);
	}

	/* If the old state was DM_STATE_UNMOUNTING, then processes could be
	   sleeping in dm_handle_to_vp() waiting for their DM_NO_TOKEN handles
	   to be translated to vnodes.  Wake them up so that they either
	   continue (new state is DM_STATE_MOUNTED) or fail (new state is
	   DM_STATE_UNMOUNTED).
	*/

	if (fsrp->fr_state == DM_STATE_UNMOUNTING) {
		if (fsrp->fr_hdlcnt)
			sv_broadcast(&fsrp->fr_queue);
	}

	/* Change the filesystem's mount state to its new value. */

	fsrp->fr_state = newstate;
	fsrp->fr_tevp = NULL;		/* not valid after DM_STATE_MOUNTING */

	/* If the new state is DM_STATE_UNMOUNTING, wait until any application
	   threads currently in the process of making VFS_VGET and VFS_ROOT
	   calls are done before we let this unmount thread continue the
	   unmount.  (We want to make sure that the unmount will see these
	   vnode references during its scan.)
	*/

	if (newstate == DM_STATE_UNMOUNTING) {
		while (fsrp->fr_vfscnt) {
			fsrp->fr_unmount++;
			sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
			lc = mutex_spinlock(&fsrp->fr_lock);
			fsrp->fr_unmount--;
		}
	}

	mutex_spinunlock(&fsrp->fr_lock, lc);
}
Example no. 9
int
dm_set_return_on_destroy(
	dm_sessid_t	sid,
	void		*hanp,
	size_t		hlen,
	dm_token_t	token,
	dm_attrname_t	*attrnamep,
	dm_boolean_t	enable)
{
	dm_attrname_t	attrname;
	dm_tokdata_t	*tdp;
	dm_fsreg_t      *fsrp;
	dm_session_t	*s;
	int		error;
	int		lc1;		/* first lock cookie */
	int		lc2;		/* second lock cookie */

	/* If a dm_attrname_t is provided, copy it in and validate it. */

	if (enable && (error = copy_from_user(&attrname, attrnamep, sizeof(attrname))) != 0)
		return(error);

	/* Validate the filesystem handle and use it to get the filesystem's
	   disposition structure.
	*/

	error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
		DM_RIGHT_EXCL, &tdp);
	if (error != 0)
		return(error);

	fsrp = dm_find_fsreg_and_lock((fsid_t*)&tdp->td_handle.ha_fsid, &lc1);
	if (fsrp == NULL) {
		dm_app_put_tdp(tdp);
		return(EINVAL);
	}

	/* Now that we own 'fsrp->fr_lock', get the lock on the session so that
	   it can't disappear while we add it to the filesystem's event mask.
	*/

	if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
		mutex_spinunlock(&fsrp->fr_lock, lc1);
		dm_app_put_tdp(tdp);
		return(error);
	}

	/* A caller cannot disable return-on-destroy if he is not registered
	   for DM_EVENT_DESTROY.  Enabling return-on-destroy is an implicit
	   dm_set_disp() for DM_EVENT_DESTROY; we wake up all processes
	   waiting for a disposition in case any was waiting for a
	   DM_EVENT_DESTROY event.
	*/

	error = 0;
	if (enable) {
		fsrp->fr_sessp[DM_EVENT_DESTROY] = s;
		fsrp->fr_rattr = attrname;
		if (fsrp->fr_dispcnt)
			sv_broadcast(&fsrp->fr_dispq);
	} else if (fsrp->fr_sessp[DM_EVENT_DESTROY] != s) {
		error = EINVAL;
	} else {
		bzero(&fsrp->fr_rattr, sizeof(fsrp->fr_rattr));
	}
	mutex_spinunlock(&s->sn_qlock, lc2);	/* reverse cookie order */
	mutex_spinunlock(&fsrp->fr_lock, lc1);
	dm_app_put_tdp(tdp);
	return(error);
}
Example no. 10
int
dm_set_disp(
	dm_sessid_t	sid,
	void		*hanp,
	size_t		hlen,
	dm_token_t	token,
	dm_eventset_t	*eventsetp,
	u_int		maxevent)
{
	dm_session_t	*s;
	dm_fsreg_t	*fsrp;
	dm_tokdata_t	*tdp;
	dm_eventset_t	eventset;
	int		error;
	int		lc1;		/* first lock cookie */
	int		lc2;		/* second lock cookie */
	u_int		i;

	/* Copy in and validate the event mask.  Only the lower maxevent bits
	   are meaningful, so clear any bits set above maxevent.
	*/

	if (maxevent == 0 || maxevent > DM_EVENT_MAX)
		return(EINVAL);
	if (copy_from_user(&eventset, eventsetp, sizeof(eventset)))
		return(EFAULT);
	eventset &= (1 << maxevent) - 1;

	/* If the caller specified the global handle, then the only valid token
	   is DM_NO_TOKEN, and the only valid event in the event mask is
	   DM_EVENT_MOUNT.  If it is set, add the session to the list of
	   sessions that want to receive mount events.  If it is clear, remove
	   the session from the list.  Since DM_EVENT_MOUNT events never block
	   waiting for a session to register, there is no one to wake up if we
	   do add the session to the list.
	*/

	if (DM_GLOBALHAN(hanp, hlen)) {
		if (token != DM_NO_TOKEN)
			return(EINVAL);
		if ((error = dm_find_session_and_lock(sid, &s, &lc1)) != 0)
			return(error);
		if (eventset == 0) {
			s->sn_flags &= ~DM_SN_WANTMOUNT;
			error = 0;
		} else if (eventset == 1 << DM_EVENT_MOUNT) {
			s->sn_flags |= DM_SN_WANTMOUNT;
			error = 0;
		} else {
			error = EINVAL;
		}
		mutex_spinunlock(&s->sn_qlock, lc1);
		return(error);
	}

	/* Since it's not the global handle, it had better be a filesystem
	   handle.  Verify that the first 'maxevent' events in the event list
	   are all valid for a filesystem handle.
	*/

	if (eventset & ~DM_VALID_DISP_EVENTS)
		return(EINVAL);

	/* Verify that the session is valid, that the handle is a filesystem
	   handle, and that the filesystem is capable of sending events.  (If
	   a dm_fsreg_t structure exists, then the filesystem can issue events.)
	*/

	error = dm_app_get_tdp(sid, hanp, hlen, token, DM_TDT_VFS,
		DM_RIGHT_EXCL, &tdp);
	if (error != 0)
		return(error);

	fsrp = dm_find_fsreg_and_lock((fsid_t*)&tdp->td_handle.ha_fsid, &lc1);
	if (fsrp == NULL) {
		dm_app_put_tdp(tdp);
		return(EINVAL);
	}

	/* Now that we own 'fsrp->fr_lock', get the lock on the session so that
	   it can't disappear while we add it to the filesystem's event mask.
	*/

	if ((error = dm_find_session_and_lock(sid, &s, &lc2)) != 0) {
		mutex_spinunlock(&fsrp->fr_lock, lc1);
		dm_app_put_tdp(tdp);
		return(error);
	}

	/* Update the event disposition array for this filesystem, adding
	   and/or removing the session as appropriate.  If this session is
	   dropping registration for DM_EVENT_DESTROY, or is overriding some
	   other session's registration for DM_EVENT_DESTROY, then clear the
	   attr-on-destroy attribute name as well.
	*/

	for (i = 0; i < DM_EVENT_MAX; i++) {
		if (DMEV_ISSET(i, eventset)) {
			if (i == DM_EVENT_DESTROY && fsrp->fr_sessp[i] != s)
				bzero(&fsrp->fr_rattr, sizeof(fsrp->fr_rattr));
			fsrp->fr_sessp[i] = s;
		} else if (fsrp->fr_sessp[i] == s) {
			if (i == DM_EVENT_DESTROY)
				bzero(&fsrp->fr_rattr, sizeof(fsrp->fr_rattr));
			fsrp->fr_sessp[i] = NULL;
		}
	}
	mutex_spinunlock(&s->sn_qlock, lc2);	/* reverse cookie order */

	/* Wake up all processes waiting for a disposition on this filesystem
	   in case any of them happen to be waiting for an event which we just
	   added.
	*/

	if (fsrp->fr_dispcnt)
		sv_broadcast(&fsrp->fr_dispq);

	mutex_spinunlock(&fsrp->fr_lock, lc1);

	dm_app_put_tdp(tdp);
	return(0);
}
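The fr_dispcnt/fr_dispq pair used here follows the same protocol as fr_hdlcnt/fr_queue in Example no. 7: a sleeper bumps the counter before calling sv_wait() so broadcasters know whether anyone is listening, then decrements it after reacquiring fr_lock. A hypothetical sketch of that waiting side (the helper name and loop condition are illustrative, not taken from these examples):

static dm_session_t *
dm_waitfor_destroy_session(
	dm_fsreg_t	*fsrp,
	int		*lcp)		/* lock cookie; fr_lock held on entry */
{
	while (fsrp->fr_sessp[DM_EVENT_DESTROY] == NULL) {
		fsrp->fr_dispcnt++;	/* tell broadcasters we are sleeping */
		sv_wait(&fsrp->fr_dispq, 1, &fsrp->fr_lock, *lcp);
		*lcp = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_dispcnt--;
	}
	return(fsrp->fr_sessp[DM_EVENT_DESTROY]);	/* fr_lock still held */
}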