Example #1
/*
 * Build directory vnodes based on the profile and the global
 * dev instance.
 */
void
prof_filldir(sdev_node_t *ddv)
{
	sdev_node_t *gdir;

	ASSERT(RW_READ_HELD(&ddv->sdev_contents));

	if (!prof_dev_needupdate(ddv)) {
		ASSERT(RW_READ_HELD(&ddv->sdev_contents));
		return;
	}
	/*
	 * Upgrade to writer lock
	 */
	if (rw_tryupgrade(&ddv->sdev_contents) == 0) {
		/*
		 * We need to drop the read lock and re-acquire it as a
		 * write lock. While the lock is dropped the condition may
		 * change, so we must re-check it after re-acquiring.
		 */
		rw_exit(&ddv->sdev_contents);
		rw_enter(&ddv->sdev_contents, RW_WRITER);
		if (!prof_dev_needupdate(ddv)) {
			/* Downgrade back to the read lock before returning */
			rw_downgrade(&ddv->sdev_contents);
			return;
		}
	}
	/* At this point we should have a write lock */
	ASSERT(RW_WRITE_HELD(&ddv->sdev_contents));

	sdcmn_err10(("devtree_gen (%s): %ld -> %ld\n",
	    ddv->sdev_path, ddv->sdev_devtree_gen, devtree_gen));

	gdir = ddv->sdev_origin;

	if (gdir != NULL)
		sdcmn_err10(("sdev_dir_gen (%s): %ld -> %ld\n",
		    ddv->sdev_path, ddv->sdev_ldir_gen,
		    gdir->sdev_gdir_gen));

	/* update flags and generation number so next filldir is quick */
	if ((ddv->sdev_flags & SDEV_BUILD) == SDEV_BUILD) {
		ddv->sdev_flags &= ~SDEV_BUILD;
	}
	ddv->sdev_devtree_gen = devtree_gen;
	if (gdir != NULL)
		ddv->sdev_ldir_gen = gdir->sdev_gdir_gen;

	prof_make_symlinks(ddv);
	prof_make_maps(ddv);
	prof_make_names(ddv);
	rw_downgrade(&ddv->sdev_contents);
}
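
The upgrade dance above is a reusable idiom: try the cheap in-place
rw_tryupgrade() first, and only when it fails pay for a full drop and
re-acquire, re-checking the guard condition because another thread may have
done the work while the lock was dropped. A minimal sketch of just that
idiom, assuming the same illumos krwlock_t primitives; needs_rebuild() and
rebuild() are hypothetical placeholders:

extern boolean_t needs_rebuild(void);	/* hypothetical staleness check */
extern void rebuild(void);		/* hypothetical update step */

static void
refresh_if_stale(krwlock_t *lk)
{
	ASSERT(RW_READ_HELD(lk));

	if (!needs_rebuild())
		return;			/* still holding the read lock */

	if (rw_tryupgrade(lk) == 0) {
		/*
		 * Other readers blocked the in-place upgrade. Drop the
		 * lock and re-acquire it as a writer, then re-check:
		 * someone may have rebuilt in the unlocked window.
		 */
		rw_exit(lk);
		rw_enter(lk, RW_WRITER);
		if (!needs_rebuild()) {
			rw_downgrade(lk);
			return;
		}
	}
	ASSERT(RW_WRITE_HELD(lk));
	rebuild();
	rw_downgrade(lk);		/* exit as we entered: a reader */
}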
Example #2
static int
splat_rwlock_test6(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc = -EINVAL;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	rw_enter(&rwp->rw_rwlock, RW_READER);
	if (!RW_READ_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
		             "rwlock should be read lock: %d\n",
			     RW_READ_HELD(&rwp->rw_rwlock));
		goto out;
	}
#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
	/* With one reader, the upgrade should never fail */
	rc = rw_tryupgrade(&rwp->rw_rwlock);
	if (!rc) {
		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME,
			     "rwlock contended preventing upgrade: %d\n",
			     RW_READ_HELD(&rwp->rw_rwlock));
		goto out;
	}

	if (RW_READ_HELD(&rwp->rw_rwlock) || !RW_WRITE_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should "
			   "have 0 (not %d) reader and 1 (not %d) writer\n",
			   RW_READ_HELD(&rwp->rw_rwlock),
			   RW_WRITE_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
		     "rwlock properly upgraded\n");
#else
	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s",
		     "rw_tryupgrade() is disabled for this arch\n");
#endif

out:
	rw_exit(&rwp->rw_rwlock);
	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return rc;
}
Example #3
static g_cache_ptr
get_g_cache(void)
{
	g_cache_ptr gc;

	/* always enter with a READ LOCK and exit with one too */
	ASSERT(RW_READ_HELD(&g_cache_lock));
	if ((gc = groups_cache) != 0) {
		return (gc);
	}
	(void) rw_unlock(&g_cache_lock);

	/* write lock the cache and try again */
	(void) rw_wrlock(&g_cache_lock);
	if ((gc = groups_cache) != 0) {
		(void) rw_unlock(&g_cache_lock);
		(void) rw_rdlock(&g_cache_lock);
		return (gc);
	}

	gc = groups_cache = calloc(1, sizeof (*groups_cache));
	if (groups_cache == 0) {
		(void) rw_unlock(&g_cache_lock);
		(void) rw_rdlock(&g_cache_lock);
		return (0);
	}
	(void) rw_unlock(&g_cache_lock);
	(void) rw_rdlock(&g_cache_lock);
	return (gc);
}
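
get_g_cache() is double-checked initialization restated for rwlocks: a fast
path under the read lock, an "upgrade" done as unlock-then-wrlock (this API
has no atomic upgrade), a re-check under the write lock, and the caller
always getting back the read lock it arrived with. A compact pthreads
rendering of the same shape; calloc(1, 64) stands in for a hypothetical
real constructor:

#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *cache;			/* guarded by cache_lock */

/* Caller must hold the read lock; returns with it held again. */
static void *
get_cache(void)
{
	void *c;

	if ((c = cache) != NULL)	/* fast path under the read lock */
		return (c);

	/* "Upgrade" by dropping and re-acquiring; re-check as writer. */
	(void) pthread_rwlock_unlock(&cache_lock);
	(void) pthread_rwlock_wrlock(&cache_lock);
	if (cache == NULL)
		cache = calloc(1, 64);	/* hypothetical constructor */
	c = cache;

	/* Restore the caller's read lock before returning. */
	(void) pthread_rwlock_unlock(&cache_lock);
	(void) pthread_rwlock_rdlock(&cache_lock);
	return (c);			/* NULL if allocation failed */
}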
Example #4
/* ARGSUSED2 */
static int
xmem_read(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cred,
	struct caller_context *ct)
{
	struct xmemnode *xp = (struct xmemnode *)VTOXN(vp);
	struct xmount *xm = (struct xmount *)VTOXM(vp);
	int error;

	/*
	 * We don't currently support reading non-regular files
	 */
	if (vp->v_type != VREG)
		return (EINVAL);
	/*
	 * xmem_rwlock should have already been called from layers above
	 */
	ASSERT(RW_READ_HELD(&xp->xn_rwlock));

	rw_enter(&xp->xn_contents, RW_READER);

	error = rdxmem(xm, xp, uiop, ct);

	rw_exit(&xp->xn_contents);

	return (error);
}
Example #5
/*ARGSUSED*/
static int
devfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
	struct caller_context *ct)
{
	dcmn_err2(("devfs_read %s\n", VTODV(vp)->dv_name));
	ASSERT(vp->v_type == VDIR);
	ASSERT(RW_READ_HELD(&VTODV(vp)->dv_contents));
	return (EISDIR);
}
Example #6
/*
 * Previous sequence for active path change
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_cmn_prechg(queue_t *q, mblk_t *mp, int rw_flag, mblk_t **term_mp,
    int *term_ioctl, int *term_stat)
{

	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	if (oplmsu_uinst->tcsets_p != NULL) {
		struct iocblk	*iocp;

		if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tcsets_p,
		    rw_flag) == -1) {
			return (FAILURE);
		}

		iocp = (struct iocblk *)(*term_mp)->b_rptr;
		*term_ioctl = iocp->ioc_cmd;
		*term_stat = MSU_WTCS_ACK;
	} else if (oplmsu_uinst->tiocmset_p != NULL) {
		if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tiocmset_p,
		    rw_flag) == -1) {
			return (FAILURE);
		}

		*term_ioctl = TIOCMSET;
		*term_stat = MSU_WTMS_ACK;
	} else if (oplmsu_uinst->tiocspps_p != NULL) {
		if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tiocspps_p,
		    rw_flag) == -1) {
			return (FAILURE);
		}

		*term_ioctl = TIOCSPPS;
		*term_stat = MSU_WPPS_ACK;
	} else if (oplmsu_uinst->tiocswinsz_p != NULL) {
		if (oplmsu_cmn_copymb(q, mp, term_mp,
		    oplmsu_uinst->tiocswinsz_p, rw_flag) == -1) {
			return (FAILURE);
		}

		*term_ioctl = TIOCSWINSZ;
		*term_stat = MSU_WWSZ_ACK;
	} else if (oplmsu_uinst->tiocssoftcar_p != NULL) {
		if (oplmsu_cmn_copymb(q, mp, term_mp,
		    oplmsu_uinst->tiocssoftcar_p, rw_flag) == -1) {
			return (FAILURE);
		}

		*term_ioctl = TIOCSSOFTCAR;
		*term_stat = MSU_WCAR_ACK;
	} else {
		*term_stat = MSU_WPTH_CHG;
		*term_mp = NULL;
	}
	return (SUCCESS);
}
Example #7
/*
 * Through message handle for read side stream
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_rcmn_through_hndl(queue_t *q, mblk_t *mp, int pri_flag)
{
	lpath_t	*lpath;
	ctrl_t	*ctrl;
	queue_t	*dst_queue = NULL;
	int	act_flag;

	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	mutex_enter(&oplmsu_uinst->l_lock);
	lpath = (lpath_t *)q->q_ptr;
	if (lpath->uinst != NULL) {
		act_flag = ACTIVE_RES;
	} else {
		act_flag = NOT_ACTIVE_RES;
	}
	mutex_exit(&oplmsu_uinst->l_lock);

	mutex_enter(&oplmsu_uinst->c_lock);
	if (((ctrl = oplmsu_uinst->user_ctrl) != NULL) &&
	    (((mp->b_datap->db_type == M_IOCACK) ||
	    (mp->b_datap->db_type == M_IOCNAK)) || (act_flag == ACTIVE_RES))) {
		dst_queue = RD(ctrl->queue);
	} else {
		mutex_exit(&oplmsu_uinst->c_lock);
		freemsg(mp);
		return (SUCCESS);
	}

	if (pri_flag == MSU_HIGH) {
		putq(dst_queue, mp);
	} else {
		if (canput(dst_queue)) {
			putq(dst_queue, mp);
		} else {
			/*
			 * Place a normal-priority message back at the
			 * head of the read queue
			 */

			ctrl = (ctrl_t *)dst_queue->q_ptr;
			ctrl->lrq_flag = 1;
			ctrl->lrq_queue = q;
			mutex_exit(&oplmsu_uinst->c_lock);
			putbq(q, mp);
			return (FAILURE);
		}
	}
	mutex_exit(&oplmsu_uinst->c_lock);
	return (SUCCESS);
}
Example #8
static int
splat_rwlock_test5(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	int rc = -EINVAL;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	rw_enter(&rwp->rw_rwlock, RW_WRITER);
	if (!RW_WRITE_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
			     "rwlock should be write lock: %d\n",
			     RW_WRITE_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rw_downgrade(&rwp->rw_rwlock);
	if (!RW_READ_HELD(&rwp->rw_rwlock)) {
		splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME,
			     "rwlock should be read lock: %d\n",
			     RW_READ_HELD(&rwp->rw_rwlock));
		goto out;
	}

	rc = 0;
	splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "%s",
		     "rwlock properly downgraded\n");
out:
	rw_exit(&rwp->rw_rwlock);
	rw_destroy(&rwp->rw_rwlock);
	kfree(rwp);

	return rc;
}
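
The point of rw_downgrade() is that it is atomic with respect to other
writers: the holder goes from writer to reader with no window in which the
lock is unowned. A minimal sketch of the publish-then-read pattern this
enables, assuming the same krwlock_t primitives:

static void
publish_and_consume(krwlock_t *lk, int *shared_state)
{
	rw_enter(lk, RW_WRITER);
	*shared_state = 42;	/* build state while excluding everyone */
	rw_downgrade(lk);	/* become a reader with no unlocked gap */

	/*
	 * *shared_state cannot change here: writers are still locked
	 * out, but other readers may now run concurrently.
	 */
	(void) *shared_state;
	rw_exit(lk);
}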
Example #9
/*
 * Copy a message
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_cmn_copymb(queue_t *q, mblk_t *mp, mblk_t **nmp, mblk_t *cmp,
    int rw_flag)
{
	int	rval = SUCCESS;

	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	if ((*nmp = copymsg(cmp)) == NULL) {
		oplmsu_cmn_bufcall(q, mp, msgsize(cmp), rw_flag);
		rval = FAILURE;
	}
	return (rval);
}
Example #10
/*
 * Flush handle for read side stream
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : P
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_rcmn_flush_hndl(queue_t *q, mblk_t *mp)
{
	queue_t	*dst_queue = NULL;
	ctrl_t	*ctrl;

	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	if (*mp->b_rptr & FLUSHR) {
		/* Remove only data messages from read queue */
		flushq(q, FLUSHDATA);
	}

	mutex_enter(&oplmsu_uinst->c_lock);
	if ((ctrl = oplmsu_uinst->user_ctrl) != NULL) {
		dst_queue = RD(ctrl->queue);
		mutex_exit(&oplmsu_uinst->c_lock);

		if (dst_queue != NULL) {
			putq(dst_queue, mp);
		} else {
			if (*mp->b_rptr & FLUSHW) {
				flushq(WR(q), FLUSHDATA);
				*mp->b_rptr &= ~FLUSHR;

				rw_exit(&oplmsu_uinst->lock);
				OPLMSU_TRACE(q, mp, MSU_TRC_LO);
				qreply(q, mp);
				rw_enter(&oplmsu_uinst->lock, RW_READER);
			} else {
				freemsg(mp);
			}
		}
	} else {
		mutex_exit(&oplmsu_uinst->c_lock);
		if (*mp->b_rptr & FLUSHW) {
			flushq(WR(q), FLUSHDATA);
			*mp->b_rptr &= ~FLUSHR;

			rw_exit(&oplmsu_uinst->lock);
			OPLMSU_TRACE(q, mp, MSU_TRC_LO);
			qreply(q, mp);
			rw_enter(&oplmsu_uinst->lock, RW_READER);
		} else {
			freemsg(mp);
		}
	}
}
Example #11
/*
 * sppp_dlprsendup()
 *
 * Description:
 *    For any valid promiscuous stream (marked with SPS_PROMISC and with
 *    sps_dlstate set to DL_IDLE), send data upstream. The caller is expected
 *    to hold ppa_sib_lock when calling this procedure.
 */
void
sppp_dlprsendup(spppstr_t *sps, mblk_t *mp, t_scalar_t proto, boolean_t header)
{
	sppa_t	*ppa;
	mblk_t	*dmp;

	ASSERT(sps != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ppa = sps->sps_ppa;
	ASSERT(ppa != NULL);

	/* NOTE: caller must hold ppa_sib_lock in RW_READER mode */
	ASSERT(RW_READ_HELD(&ppa->ppa_sib_lock));

	for (; sps != NULL; sps = sps->sps_nextsib) {
		/*
		 * We specifically test to ensure that the DLPI state for the
		 * promiscuous stream is IDLE (DL_IDLE), since that state tells
		 * us that the promiscuous stream has been bound to PPP_ALLSAP.
		 */
		if (IS_SPS_PROMISC(sps) && (sps->sps_dlstate == DL_IDLE) &&
		    canputnext(sps->sps_rq)) {
			if ((dmp = dupmsg(mp)) == NULL) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				continue;
			}
			if (header) {
				dmp->b_rptr += PPP_HDRLEN;
			}
			if (IS_SPS_RAWDATA(sps)) {
				/* function frees original message if fails */
				dmp = sppp_dladdether(sps, dmp, proto);
			} else {
				/* function frees original message if fails */
				dmp = sppp_dladdud(sps, dmp, proto, B_TRUE);
			}
			if (dmp != NULL) {
				putnext(sps->sps_rq, dmp);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
			}
		}
	}
}
Example #12
/*
 * Allocate a message block
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_cmn_allocmb(queue_t *q, mblk_t *mp, mblk_t **nmp, size_t size,
    int rw_flag)
{
	int	rval = SUCCESS;

	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	if ((*nmp = (mblk_t *)allocb(size, BPRI_LO)) == NULL) {
		oplmsu_cmn_bufcall(q, mp, size, rw_flag);
		rval = FAILURE;
	} else {
		(*nmp)->b_wptr = (*nmp)->b_rptr + size;
	}
	return (rval);
}
Example #13
/*
 * Previous sequence for active path change termio
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_cmn_prechg_termio(queue_t *q, mblk_t *mp, int rw_flag, int prev_flag,
    mblk_t **term_mp, int *term_stat)
{

	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	if ((prev_flag == MSU_TIOS_TCSETS) &&
	    (oplmsu_uinst->tiocmset_p != NULL)) {
		if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tiocmset_p,
		    rw_flag) == FAILURE) {
			return (FAILURE);
		}

		*term_stat = MSU_WTMS_ACK;
	} else if ((prev_flag <= MSU_TIOS_MSET) &&
	    (oplmsu_uinst->tiocspps_p != NULL)) {
		if (oplmsu_cmn_copymb(q, mp, term_mp, oplmsu_uinst->tiocspps_p,
		    rw_flag) == FAILURE) {
			return (FAILURE);
		}

		*term_stat = MSU_WPPS_ACK;
	} else if ((prev_flag <= MSU_TIOS_PPS) &&
	    (oplmsu_uinst->tiocswinsz_p != NULL)) {
		if (oplmsu_cmn_copymb(q, mp, term_mp,
		    oplmsu_uinst->tiocswinsz_p, rw_flag) == FAILURE) {
			return (FAILURE);
		}

		*term_stat = MSU_WWSZ_ACK;
	} else if ((prev_flag <= MSU_TIOS_WINSZP) &&
	    (oplmsu_uinst->tiocssoftcar_p != NULL)) {
		if (oplmsu_cmn_copymb(q, mp, term_mp,
		    oplmsu_uinst->tiocssoftcar_p, rw_flag) == FAILURE) {
			return (FAILURE);
		}

		*term_stat = MSU_WCAR_ACK;
	} else if (prev_flag <= MSU_TIOS_SOFTCAR) {
		*term_mp = NULL;
		*term_stat = MSU_WPTH_CHG;
	}
	return (SUCCESS);
}
Example #14
/*
 * Link msgb structure of high priority
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : A [It depends on caller]
 *  -. uinst_t->c_lock : A [It depends on caller]
 */
void
oplmsu_link_high_primsg(mblk_t **first, mblk_t **last, mblk_t *add_msg)
{

	ASSERT(add_msg != NULL);
	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	if (*first == NULL) {
		*first = add_msg;
		add_msg->b_prev = NULL;
	} else {
		(*last)->b_next = add_msg;
		add_msg->b_prev = *last;
	}

	*last = add_msg;
	add_msg->b_next = NULL;
}
Example #15
static void
mdeg_notify_client(void *arg)
{
	mdeg_clnt_t		*clnt = (mdeg_clnt_t *)arg;
	md_diff_cookie_t	mdd = MD_INVAL_DIFF_COOKIE;
	mdeg_result_t		mdeg_res;
	mde_cookie_t		md_prev_start;
	mde_cookie_t		md_curr_start;

	/*
	 * mdeg.rwlock must be held as a reader while this function
	 * executes. However, we do not need to acquire the lock as a
	 * reader here because it is held as a reader by the thread
	 * executing mdeg_notify_clients which triggers the execution
	 * of this function from a taskq. Since mdeg_notify_clients
	 * holds the lock as a reader until the taskq callbacks have
	 * completed, it will be held for the life of this function call.
	 * Furthermore, we must not attempt to acquire the lock as a
	 * reader with rw_enter because if there is a pending writer,
	 * we will block, creating a circular deadlock with this function,
	 * the writer, and mdeg_notify_clients. Since we do not need
	 * to acquire the lock, just assert that it is held.
	 */
	ASSERT(RW_READ_HELD(&mdeg.rwlock));

	if (!mdeg.enabled) {
		/* trying to shutdown */
		MDEG_DBG("mdeg_notify_client: mdeg disabled, aborting\n");
		goto cleanup;
	}

	/*
	 * Handle the special case where the node specification
	 * is NULL. In this case, call the client callback without
	 * any results. All processing is left to the client.
	 */
	if (clnt->pspec == NULL) {
		/* call the client callback */
		(*clnt->cb)(clnt->cb_arg, NULL);

		MDEG_DBG("MDEG client callback done\n");
		goto cleanup;
	}

	/* find our start nodes */
	md_prev_start = mdeg_find_start_node(mdeg.md_prev, clnt->pspec);
	if (md_prev_start == MDE_INVAL_ELEM_COOKIE) {
		goto cleanup;
	}

	md_curr_start = mdeg_find_start_node(mdeg.md_curr, clnt->pspec);
	if (md_curr_start == MDE_INVAL_ELEM_COOKIE) {
		goto cleanup;
	}

	/* diff the MDs */
	mdd = md_diff_init(mdeg.md_prev, md_prev_start, mdeg.md_curr,
	    md_curr_start, clnt->nmatch->namep, clnt->nmatch->matchp);

	if (mdd == MD_INVAL_DIFF_COOKIE) {
		MDEG_DBG("unable to diff MDs\n");
		goto cleanup;
	}

	/*
	 * Cache the results of the diff
	 */
	mdeg_get_diff_results(mdd, &mdeg_res);

	/* call the client callback */
	(*clnt->cb)(clnt->cb_arg, &mdeg_res);

	MDEG_DBG("MDEG client callback done\n");

cleanup:
	if (mdd != MD_INVAL_DIFF_COOKIE)
		(void) md_diff_fini(mdd);
}
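
The comment in mdeg_notify_client() describes a lock-passing convention:
the dispatcher takes mdeg.rwlock as reader, fires the taskq callbacks, and
holds the lock until they have all completed, so each callback may only
ASSERT the lock and must never rw_enter() it (a queued writer would wedge
dispatcher and callback against each other). A sketch of that dispatch
side, assuming the illumos taskq and krwlock primitives; cb and arg are
hypothetical:

static void
notify_clients(taskq_t *tq, krwlock_t *lk, task_func_t *cb, void *arg)
{
	rw_enter(lk, RW_READER);
	(void) taskq_dispatch(tq, cb, arg, TQ_SLEEP);
	taskq_wait(tq);		/* callbacks finish before we drop the lock */
	rw_exit(lk);
}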
Example #16
/*
 * If DV_BUILD is set, we call into the nexus driver to do a BUS_CONFIG_ALL.
 * Otherwise, simply return the cached dv_nodes. Hotplug code always calls
 * devfs_clean() to invalidate the dv_node cache.
 */
static int
devfs_readdir(struct vnode *dvp, struct uio *uiop, struct cred *cred, int *eofp)
{
	struct dv_node *ddv, *dv;
	struct dirent64 *de, *bufp;
	offset_t diroff;
	offset_t	soff;
	size_t reclen, movesz;
	int error;
	struct vattr va;
	size_t bufsz;

	ddv = VTODV(dvp);
	dcmn_err2(("devfs_readdir %s: offset %lld len %ld\n",
	    ddv->dv_name, uiop->uio_loffset, uiop->uio_iov->iov_len));
	ASSERT(ddv->dv_attr || ddv->dv_attrvp);
	ASSERT(RW_READ_HELD(&ddv->dv_contents));

	if (uiop->uio_loffset >= MAXOFF_T) {
		if (eofp)
			*eofp = 1;
		return (0);
	}

	if (uiop->uio_iovcnt != 1)
		return (EINVAL);

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/* Load the initial contents */
	if (ddv->dv_flags & DV_BUILD) {
		if (!rw_tryupgrade(&ddv->dv_contents)) {
			rw_exit(&ddv->dv_contents);
			rw_enter(&ddv->dv_contents, RW_WRITER);
		}

		/* recheck and fill */
		if (ddv->dv_flags & DV_BUILD)
			dv_filldir(ddv);

		rw_downgrade(&ddv->dv_contents);
	}

	soff = uiop->uio_offset;
	bufsz = uiop->uio_iov->iov_len;
	de = bufp = kmem_alloc(bufsz, KM_SLEEP);
	movesz = 0;
	dv = (struct dv_node *)-1;

	/*
	 * Move as many entries into the uio structure as it will take.
	 * Special case "." and "..".
	 */
	diroff = 0;
	if (soff == 0) {				/* . */
		reclen = DIRENT64_RECLEN(strlen("."));
		if ((movesz + reclen) > bufsz)
			goto full;
		de->d_ino = (ino64_t)ddv->dv_ino;
		de->d_off = (off64_t)diroff + 1;
		de->d_reclen = (ushort_t)reclen;

		/* use strncpy(9f) to zero out uninitialized bytes */

		(void) strncpy(de->d_name, ".", DIRENT64_NAMELEN(reclen));
		movesz += reclen;
		de = (dirent64_t *)((char *)de + reclen);
		dcmn_err3(("devfs_readdir: A: diroff %lld, soff %lld: '%s' "
		    "reclen %lu\n", diroff, soff, ".", reclen));
	}

	diroff++;
	if (soff <= 1) {				/* .. */
		reclen = DIRENT64_RECLEN(strlen(".."));
		if ((movesz + reclen) > bufsz)
			goto full;
		de->d_ino = (ino64_t)ddv->dv_dotdot->dv_ino;
		de->d_off = (off64_t)diroff + 1;
		de->d_reclen = (ushort_t)reclen;

		/* use strncpy(9f) to zero out uninitialized bytes */

		(void) strncpy(de->d_name, "..", DIRENT64_NAMELEN(reclen));
		movesz += reclen;
		de = (dirent64_t *)((char *)de + reclen);
		dcmn_err3(("devfs_readdir: B: diroff %lld, soff %lld: '%s' "
		    "reclen %lu\n", diroff, soff, "..", reclen));
	}

	diroff++;
	for (dv = ddv->dv_dot; dv; dv = dv->dv_next, diroff++) {
		/*
		 * although DDM_INTERNAL_PATH minor nodes are skipped for
		 * readdirs outside the kernel, they still occupy directory
		 * offsets
		 */
		if (diroff < soff ||
		    ((dv->dv_flags & DV_INTERNAL) && (cred != kcred)))
			continue;

		reclen = DIRENT64_RECLEN(strlen(dv->dv_name));
		if ((movesz + reclen) > bufsz) {
			dcmn_err3(("devfs_readdir: C: diroff "
			    "%lld, soff %lld: '%s' reclen %lu\n",
			    diroff, soff, dv->dv_name, reclen));
			goto full;
		}
		de->d_ino = (ino64_t)dv->dv_ino;
		de->d_off = (off64_t)diroff + 1;
		de->d_reclen = (ushort_t)reclen;

		/* use strncpy(9f) to zero out uninitialized bytes */

		ASSERT(strlen(dv->dv_name) + 1 <=
		    DIRENT64_NAMELEN(reclen));
		(void) strncpy(de->d_name, dv->dv_name,
		    DIRENT64_NAMELEN(reclen));

		movesz += reclen;
		de = (dirent64_t *)((char *)de + reclen);
		dcmn_err4(("devfs_readdir: D: diroff "
		    "%lld, soff %lld: '%s' reclen %lu\n", diroff, soff,
		    dv->dv_name, reclen));
	}

	/* the buffer is full, or we exhausted everything */
full:	dcmn_err3(("devfs_readdir: moving %lu bytes: "
	    "diroff %lld, soff %lld, dv %p\n",
	    movesz, diroff, soff, (void *)dv));

	if ((movesz == 0) && dv)
		error = EINVAL;		/* cannot be represented */
	else {
		error = uiomove(bufp, movesz, UIO_READ, uiop);
		if (error == 0) {
			if (eofp)
				*eofp = dv ? 0 : 1;
			uiop->uio_offset = diroff;
		}

		va.va_mask = AT_ATIME;
		gethrestime(&va.va_atime);
		rw_exit(&ddv->dv_contents);
		(void) devfs_setattr(dvp, &va, 0, cred, NULL);
		rw_enter(&ddv->dv_contents, RW_READER);
	}

	kmem_free(bufp, bufsz);
	return (error);
}
Example #17
static int
xmem_readdir(struct vnode *vp, struct uio *uiop, struct cred *cred, int *eofp)
{
	struct xmemnode *xp = (struct xmemnode *)VTOXN(vp);
	struct xdirent *xdp;
	int error;
	register struct dirent64 *dp;
	register ulong_t offset;
	register ulong_t total_bytes_wanted;
	register long outcount = 0;
	register long bufsize;
	int reclen;
	caddr_t outbuf;

	if (uiop->uio_loffset >= MAXOFF_T) {
		if (eofp)
			*eofp = 1;
		return (0);
	}
	/*
	 * assuming the system call has already called xmem_rwlock
	 */
	ASSERT(RW_READ_HELD(&xp->xn_rwlock));

	if (uiop->uio_iovcnt != 1)
		return (EINVAL);

	if (vp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * There's a window here where someone could have removed
	 * all the entries in the directory after we put a hold on the
	 * vnode but before we grabbed the rwlock.  Just return, unless
	 * there are still references to the current file, in which case panic.
	 */
	if (xp->xn_dir == NULL) {
		if (xp->xn_nlink)
			cmn_err(CE_PANIC, "empty directory 0x%p", (void *)xp);
		return (0);
	}

	/*
	 * Get space for multiple directory entries
	 */
	total_bytes_wanted = uiop->uio_iov->iov_len;
	bufsize = total_bytes_wanted + sizeof (struct dirent64);
	outbuf = kmem_alloc(bufsize, KM_SLEEP);

	dp = (struct dirent64 *)outbuf;


	offset = 0;
	xdp = xp->xn_dir;
	while (xdp) {
		offset = xdp->xd_offset;
		if (offset >= uiop->uio_offset) {
			reclen = (int)DIRENT64_RECLEN(strlen(xdp->xd_name));
			if (outcount + reclen > total_bytes_wanted)
				break;
			ASSERT(xdp->xd_xmemnode != NULL);

			/* use strncpy(9f) to zero out uninitialized bytes */

			ASSERT(strlen(xdp->xd_name) + 1 <=
			    DIRENT64_NAMELEN(reclen));
			(void) strncpy(dp->d_name, xdp->xd_name,
			    DIRENT64_NAMELEN(reclen));
			dp->d_reclen = (ushort_t)reclen;
			dp->d_ino = (ino64_t)xdp->xd_xmemnode->xn_nodeid;
			dp->d_off = (offset_t)xdp->xd_offset + 1;
			dp = (struct dirent64 *)
			    ((uintptr_t)dp + dp->d_reclen);
			outcount += reclen;
			ASSERT(outcount <= bufsize);
		}
		xdp = xdp->xd_next;
	}
	error = uiomove(outbuf, outcount, UIO_READ, uiop);
	if (!error) {
		/*
		 * If we reached the end of the list, our offset
		 * should now be just past the end.
		 */
		if (!xdp) {
			offset += 1;
			if (eofp)
				*eofp = 1;
		} else if (eofp)
			*eofp = 0;
		uiop->uio_offset = offset;
	}
	gethrestime(&xp->xn_atime);
	kmem_free(outbuf, bufsize);
	return (error);
}
Example #18
static vnode_t *
make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
    struct vnodeops *vops,
    int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *),
    int *newnode, cred_t *cr)
{
	rnode4_t *rp;
	rnode4_t *trp;
	vnode_t *vp;
	mntinfo4_t *mi;

	ASSERT(RW_READ_HELD(&rhtp->r_lock));

	mi = VFTOMI4(vfsp);

start:
	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(rp);
		*newnode = 0;
		return (vp);
	}
	rw_exit(&rhtp->r_lock);

	mutex_enter(&rp4freelist_lock);

	if (rp4freelist != NULL && rnode4_new >= nrnode) {
		rp = rp4freelist;
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);

		vp = RTOV4(rp);

		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				rw_enter(&rhtp->r_lock, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		r4inactive(rp, cr);

		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&rhtp->r_lock, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);

		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		uninit_rnode4(rp);

		/*
		 * Make sure that if rnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		vnode_t *new_vp;

		mutex_exit(&rp4freelist_lock);

		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_add_long((ulong_t *)&rnode4_new, 1);
#ifdef DEBUG
		clstat4_debug.nrnode.value.ui64++;
#endif
		vp = new_vp;
	}

	bzero(rp, sizeof (*rp));
	rp->r_vnode = vp;
	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
	rp->created_v4 = 0;
	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
	    offsetof(nfs4_open_stream_t, os_node));
	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
	rp->r_flags = R4READDIRWATTR;
	rp->r_fh = fh;
	rp->r_hashq = rhtp;
	sfh4_hold(rp->r_fh);
	rp->r_server = mi->mi_curr_serv;
	rp->r_deleg_type = OPEN_DELEGATE_NONE;
	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);

	rddir4_cache_create(rp);
	rp->r_putapage = putapage;
	vn_setops(vp, vops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = vfsp;
	VFS_HOLD(vfsp);
	vp->v_type = VNON;
	if (isrootfh(fh, rp))
		vp->v_flag = VROOT;
	vn_exists(vp);

	/*
	 * There is a race condition if someone else
	 * allocates the rnode while no locks are held, so we
	 * check again and recover if found.
	 */
	rw_enter(&rhtp->r_lock, RW_WRITER);
	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(trp);
		*newnode = 0;
		rw_exit(&rhtp->r_lock);
		rp4_addfree(rp, cr);
		rw_enter(&rhtp->r_lock, RW_READER);
		return (vp);
	}
	rp4_addhash(rp);
	*newnode = 1;
	return (vp);
}
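
make_rnode4() is the drop-allocate-recheck shape in full dress: look up
under the read lock, drop all locks before the sleeping allocation, then
re-take the lock as writer and look up again, because a racing thread may
have inserted the same file handle in the meantime. Reduced to its locking
skeleton, with obj_t, htable_t, and the ht_*/obj_* helpers as hypothetical
placeholders (and simplified so the caller always gets its read lock back):

static obj_t *
find_or_create(htable_t *ht, uint64_t key)
{
	obj_t *obj, *dup;

	ASSERT(RW_READ_HELD(&ht->ht_lock));
	if ((obj = ht_find(ht, key)) != NULL)
		return (obj);		/* fast path: already cached */

	rw_exit(&ht->ht_lock);
	obj = obj_alloc(key);		/* may sleep; no locks held */

	rw_enter(&ht->ht_lock, RW_WRITER);
	if ((dup = ht_find(ht, key)) != NULL) {
		obj_free(obj);		/* lost the race; use the winner's */
		obj = dup;
	} else {
		ht_insert(ht, obj);
	}
	rw_downgrade(&ht->ht_lock);	/* restore the caller's read lock */
	return (obj);
}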
Example #19
/* ARGSUSED */
static int
auto_readdir(
	vnode_t *vp,
	uio_t *uiop,
	cred_t *cred,
	int *eofp,
	caller_context_t *ct,
	int flags)
{
	struct autofs_rddirargs	rda;
	autofs_rddirres rd;
	fnnode_t *fnp = vntofn(vp);
	fnnode_t *cfnp, *nfnp;
	dirent64_t *dp;
	ulong_t offset;
	ulong_t outcount = 0, count = 0;
	size_t namelen;
	ulong_t alloc_count;
	void *outbuf = NULL;
	fninfo_t *fnip = vfstofni(vp->v_vfsp);
	struct iovec *iovp;
	int error = 0;
	int reached_max = 0;
	int myeof = 0;
	int this_reclen;
	struct autofs_globals *fngp = vntofn(fnip->fi_rootvp)->fn_globals;

	AUTOFS_DPRINT((4, "auto_readdir vp=%p offset=%lld\n",
	    (void *)vp, uiop->uio_loffset));

	if (eofp != NULL)
		*eofp = 0;

	if (uiop->uio_iovcnt != 1)
		return (EINVAL);

	iovp = uiop->uio_iov;
	alloc_count = iovp->iov_len;

	gethrestime(&fnp->fn_atime);
	fnp->fn_ref_time = fnp->fn_atime.tv_sec;

	dp = outbuf = kmem_zalloc(alloc_count, KM_SLEEP);

	/*
	 * Held when getdents calls VOP_RWLOCK....
	 */
	ASSERT(RW_READ_HELD(&fnp->fn_rwlock));
	if (uiop->uio_offset >= AUTOFS_DAEMONCOOKIE) {
again:
		/*
		 * Do readdir of daemon contents only
		 * Drop readers lock and reacquire after reply.
		 */
		rw_exit(&fnp->fn_rwlock);
		bzero(&rd, sizeof (struct autofs_rddirres));
		count = 0;
		rda.rda_map = fnip->fi_map;
		rda.rda_offset = (uint_t)uiop->uio_offset;
		rd.rd_rddir.rddir_entries = dp;
		rda.rda_count = rd.rd_rddir.rddir_size = (uint_t)alloc_count;
		rda.uid = crgetuid(cred);

		error = auto_calldaemon(fngp->fng_zoneid,
		    AUTOFS_READDIR,
		    xdr_autofs_rddirargs,
		    &rda,
		    xdr_autofs_rddirres,
		    (void *)&rd,
		    sizeof (autofs_rddirres),
		    TRUE);

		/*
		 * reacquire previously dropped lock
		 */
		rw_enter(&fnp->fn_rwlock, RW_READER);

		if (!error) {
			error = rd.rd_status;
			dp = rd.rd_rddir.rddir_entries;
		}

		if (error) {
			if (error == AUTOFS_SHUTDOWN) {
				/*
				 * treat as empty directory
				 */
				error = 0;
				myeof = 1;
				if (eofp)
					*eofp = 1;
			}
			goto done;
		}
		if (rd.rd_rddir.rddir_size) {
			dirent64_t *odp = dp;   /* next in output buffer */
			dirent64_t *cdp = dp;   /* current examined entry */

			/*
			 * Check for duplicates here
			 */
			do {
				this_reclen = cdp->d_reclen;
				if (auto_search(fnp, cdp->d_name,
				    NULL, cred)) {
					/*
					 * entry not found in kernel list,
					 * include it in readdir output.
					 *
				 * If we are skipping entries, then
					 * we need to copy this entry to the
					 * correct position in the buffer
					 * to be copied out.
					 */
					if (cdp != odp)
						bcopy(cdp, odp,
						    (size_t)this_reclen);
					odp = nextdp(odp);
					outcount += this_reclen;
				} else {
					/*
					 * Entry was found in the kernel
					 * list. If it is the first entry
					 * in this buffer, then just skip it
					 */
					if (odp == dp) {
						dp = nextdp(dp);
						odp = dp;
					}
				}
				count += this_reclen;
				cdp = (struct dirent64 *)
				    ((char *)cdp + this_reclen);
			} while (count < rd.rd_rddir.rddir_size);

			if (outcount)
				error = uiomove(dp, outcount, UIO_READ, uiop);
			uiop->uio_offset = rd.rd_rddir.rddir_offset;
		} else {
			if (rd.rd_rddir.rddir_eof == 0) {
				/*
				 * alloc_count not large enough for one
				 * directory entry
				 */
				error = EINVAL;
			}
		}
		if (rd.rd_rddir.rddir_eof && !error) {
			myeof = 1;
			if (eofp)
				*eofp = 1;
		}
		if (!error && !myeof && outcount == 0) {
			/*
			 * call daemon with new cookie, all previous
			 * elements happened to be duplicates
			 */
			dp = outbuf;
			goto again;
		}
		goto done;
	}

	if (uiop->uio_offset == 0) {
		/*
		 * first time: so fudge the . and ..
		 */
		this_reclen = DIRENT64_RECLEN(1);
		if (alloc_count < this_reclen) {
			error = EINVAL;
			goto done;
		}
		dp->d_ino = (ino64_t)fnp->fn_nodeid;
		dp->d_off = (off64_t)1;
		dp->d_reclen = (ushort_t)this_reclen;

		/* use strncpy(9f) to zero out uninitialized bytes */

		(void) strncpy(dp->d_name, ".",
		    DIRENT64_NAMELEN(this_reclen));
		outcount += dp->d_reclen;
		dp = nextdp(dp);

		this_reclen = DIRENT64_RECLEN(2);
		if (alloc_count < outcount + this_reclen) {
			error = EINVAL;
			goto done;
		}
		dp->d_reclen = (ushort_t)this_reclen;
		dp->d_ino = (ino64_t)fnp->fn_parent->fn_nodeid;
		dp->d_off = (off64_t)2;

		/* use strncpy(9f) to zero out uninitialized bytes */

		(void) strncpy(dp->d_name, "..",
		    DIRENT64_NAMELEN(this_reclen));
		outcount += dp->d_reclen;
		dp = nextdp(dp);
	}

	offset = 2;
	cfnp = fnp->fn_dirents;
	while (cfnp != NULL) {
		nfnp = cfnp->fn_next;
		offset = cfnp->fn_offset;
		if ((offset >= uiop->uio_offset) &&
		    (!(cfnp->fn_flags & MF_LOOKUP))) {
			int reclen;

			/*
			 * include node only if its offset is greater or
			 * equal to the one required and it is not in
			 * transient state (not being looked-up)
			 */
			namelen = strlen(cfnp->fn_name);
			reclen = (int)DIRENT64_RECLEN(namelen);
			if (outcount + reclen > alloc_count) {
				reached_max = 1;
				break;
			}
			dp->d_reclen = (ushort_t)reclen;
			dp->d_ino = (ino64_t)cfnp->fn_nodeid;
			if (nfnp != NULL) {
				/*
				 * get the offset of the next element
				 */
				dp->d_off = (off64_t)nfnp->fn_offset;
			} else {
				/*
				 * This is the last element, make
				 * offset one plus the current
				 */
				dp->d_off = (off64_t)cfnp->fn_offset + 1;
			}

			/* use strncpy(9f) to zero out uninitialized bytes */

			(void) strncpy(dp->d_name, cfnp->fn_name,
			    DIRENT64_NAMELEN(reclen));
			outcount += dp->d_reclen;
			dp = nextdp(dp);
		}
		cfnp = nfnp;
	}

	if (outcount)
		error = uiomove(outbuf, outcount, UIO_READ, uiop);

	if (!error) {
		if (reached_max) {
			/*
			 * This entry did not get added to the buffer on this
			 * call. We need to add it on the next call, therefore
			 * set uio_offset to this entry's offset.  If there
			 * wasn't enough space for one dirent, return EINVAL.
			 */
			uiop->uio_offset = offset;
			if (outcount == 0)
				error = EINVAL;
		} else if (autofs_nobrowse ||
		    auto_nobrowse_option(fnip->fi_opts) ||
		    (fnip->fi_flags & MF_DIRECT) ||
		    (fnp->fn_trigger != NULL) ||
		    (((vp->v_flag & VROOT) == 0) &&
		    ((fntovn(fnp->fn_parent))->v_flag & VROOT) &&
		    (fnp->fn_dirents == NULL))) {
			/*
			 * done reading directory entries
			 */
			uiop->uio_offset = offset + 1;
			if (eofp)
				*eofp = 1;
		} else {
			/*
			 * Need to get the rest of the entries from the daemon.
			 */
			uiop->uio_offset = AUTOFS_DAEMONCOOKIE;
		}
	}

done:
	kmem_free(outbuf, alloc_count);
	AUTOFS_DPRINT((5, "auto_readdir vp=%p offset=%lld eof=%d\n",
	    (void *)vp, uiop->uio_loffset, myeof));
	return (error);
}
Example #20
/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
	enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	/*
	 * If it is one of segkp pages, call segkp_fault.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page. Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp, (u_offset_t)
						    (uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}
Example #21
/*
 * bufcall request
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_cmn_bufcall(queue_t *q, mblk_t *mp, size_t size, int rw_flag)
{

	ASSERT(RW_READ_HELD(&oplmsu_uinst->lock));

	if (rw_flag == MSU_WRITE_SIDE) {
		ctrl_t	*ctrl;

		putbq(q, mp);

		mutex_enter(&oplmsu_uinst->c_lock);
		ctrl = (ctrl_t *)q->q_ptr;
		if (ctrl->wbuf_id != 0) {
			mutex_exit(&oplmsu_uinst->c_lock);
			return;
		}

		ctrl->wbuftbl->q = q;
		ctrl->wbuftbl->rw_flag = rw_flag;
		ctrl->wbuf_id = bufcall(size, BPRI_LO, oplmsu_cmn_bufcb,
		    (void *)ctrl->wbuftbl);

		if (ctrl->wbuf_id == 0) {
			if (ctrl->wtout_id != 0) {
				mutex_exit(&oplmsu_uinst->c_lock);
				return;
			}

			ctrl->wtout_id = timeout(oplmsu_cmn_bufcb,
			    (void *)ctrl->wbuftbl, drv_usectohz(MSU_TM_500MS));
		}
		mutex_exit(&oplmsu_uinst->c_lock);
	} else if (rw_flag == MSU_READ_SIDE) {
		lpath_t	*lpath;
		mblk_t	*wrk_msg;

		mutex_enter(&oplmsu_uinst->l_lock);
		lpath = (lpath_t *)q->q_ptr;
		if (mp->b_datap->db_type >= QPCTL) {
			if (lpath->first_lpri_hi == NULL) {
				lpath->last_lpri_hi = mp;
				mp->b_next = NULL;
			} else {
				wrk_msg = lpath->first_lpri_hi;
				wrk_msg->b_prev = mp;
				mp->b_next = wrk_msg;
			}
			mp->b_prev = NULL;
			lpath->first_lpri_hi = mp;
		} else {
			putbq(q, mp);
		}

		if (lpath->rbuf_id != 0) {
			mutex_exit(&oplmsu_uinst->l_lock);
			return;
		}

		lpath->rbuftbl->q = q;
		lpath->rbuftbl->rw_flag = rw_flag;
		lpath->rbuf_id = bufcall(size, BPRI_LO, oplmsu_cmn_bufcb,
		    (void *)lpath->rbuftbl);

		if (lpath->rbuf_id == 0) {
			if (lpath->rtout_id != 0) {
				mutex_exit(&oplmsu_uinst->l_lock);
				return;
			}

			lpath->rtout_id = timeout(oplmsu_cmn_bufcb,
			    (void *)lpath->rbuftbl, drv_usectohz(MSU_TM_500MS));
		}
		mutex_exit(&oplmsu_uinst->l_lock);
	}
}
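
Both branches above end in the stock STREAMS allocation-failure idiom:
requeue the message with putbq(9F), register a bufcall(9F) callback for
when size bytes may become allocatable, and if bufcall itself fails
(returns 0) fall back to a timeout(9F) retry. The idiom in isolation, with
struct retry_state and retry_func as hypothetical placeholders:

struct retry_state {
	bufcall_id_t	bufcall_id;
	timeout_id_t	timeout_id;
};

extern void retry_func(void *);		/* hypothetical retry callback */

static void
defer_alloc(queue_t *q, mblk_t *mp, size_t size, struct retry_state *rs)
{
	putbq(q, mp);		/* park the message until we can retry */

	if (rs->bufcall_id != 0)
		return;		/* a bufcall is already outstanding */

	rs->bufcall_id = bufcall(size, BPRI_LO, retry_func, (void *)rs);
	if (rs->bufcall_id == 0 && rs->timeout_id == 0) {
		/* bufcall registration failed; poll again after 500ms */
		rs->timeout_id = timeout(retry_func, (void *)rs,
		    drv_usectohz(500000));
	}
}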
Example #22
/*
 * Caller must hold kcpc_cpuctx_lock.
 */
int
kcpc_enable(kthread_t *t, int cmd, int enable)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx;
	kcpc_set_t	*set = t->t_cpc_set;
	kcpc_set_t	*newset;
	int		i;
	int		flag;
	int		err;

	ASSERT(RW_READ_HELD(&kcpc_cpuctx_lock));

	if (ctx == NULL) {
		/*
		 * This thread has a set but no context; it must be a
		 * CPU-bound set.
		 */
		ASSERT(t->t_cpc_set != NULL);
		ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1);
		return (EINVAL);
	} else if (ctx->kc_flags & KCPC_CTX_INVALID)
		return (EAGAIN);

	if (cmd == CPC_ENABLE) {
		if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
			return (EINVAL);
		kpreempt_disable();
		atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
		kcpc_restore(ctx);
		kpreempt_enable();
	} else if (cmd == CPC_DISABLE) {
		if (ctx->kc_flags & KCPC_CTX_FREEZE)
			return (EINVAL);
		kpreempt_disable();
		kcpc_save(ctx);
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_FREEZE);
		kpreempt_enable();
	} else if (cmd == CPC_USR_EVENTS || cmd == CPC_SYS_EVENTS) {
		/*
		 * Strategy for usr/sys: stop counters and update set's presets
		 * with current counter values, unbind, update requests with
		 * new config, then re-bind.
		 */
		flag = (cmd == CPC_USR_EVENTS) ?
		    CPC_COUNT_USER: CPC_COUNT_SYSTEM;

		kpreempt_disable();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
		pcbe_ops->pcbe_allstop();
		kpreempt_enable();
		for (i = 0; i < set->ks_nreqs; i++) {
			set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
			if (enable)
				set->ks_req[i].kr_flags |= flag;
			else
				set->ks_req[i].kr_flags &= ~flag;
		}
		newset = kcpc_dup_set(set);
		if (kcpc_unbind(set) != 0)
			return (EINVAL);
		t->t_cpc_set = newset;
		if (kcpc_bind_thread(newset, t, &err) != 0) {
			t->t_cpc_set = NULL;
			kcpc_free_set(newset);
			return (EINVAL);
		}
	} else
		return (EINVAL);

	return (0);
}