Example #1
/*
 * Get high priority message from buffer for upper write stream
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : A
 *  -. uinst_t->c_lock : P
 */
mblk_t *
oplmsu_wcmn_high_getq(queue_t *uwq)
{
	mblk_t	*mp;
	ctrl_t	*ctrl;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));

	mutex_enter(&oplmsu_uinst->c_lock);
	ctrl = (ctrl_t *)uwq->q_ptr;
	mp = ctrl->first_upri_hi;
	if (mp != NULL) {
		if (mp->b_next == NULL) {
			ctrl->first_upri_hi = NULL;
			ctrl->last_upri_hi = NULL;
		} else {
			ctrl->first_upri_hi = mp->b_next;
			mp->b_next->b_prev = NULL;
			mp->b_next = NULL;
		}
		mp->b_prev = NULL;
	}
	mutex_exit(&oplmsu_uinst->c_lock);
	return (mp);
}
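A minimal caller sketch (assumed, not taken from the driver source) illustrates the lock annotations above: uinst_t->lock is held as a reader and c_lock is left untouched, since oplmsu_wcmn_high_getq() acquires c_lock itself.

	mblk_t	*mp;

	rw_enter(&oplmsu_uinst->lock, RW_READER);
	mp = oplmsu_wcmn_high_getq(uwq);	/* uwq: upper write queue, assumed */
	rw_exit(&oplmsu_uinst->lock);

	if (mp != NULL) {
		/* handle the dequeued high-priority message here */
	}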
Example #2
/*
 * Set queue and ioctl to lpath_t
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : M
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_set_ioctl_path(lpath_t *lpath, queue_t *hndl_queue, mblk_t *mp)
{
	int	rval = SUCCESS;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
	ASSERT(MUTEX_HELD(&oplmsu_uinst->l_lock));

	if ((lpath->hndl_uqueue == NULL) && (lpath->hndl_mp == NULL) &&
	    (lpath->sw_flag == 0)) {
		if ((lpath->status == MSU_EXT_NOTUSED) ||
		    (lpath->status == MSU_EXT_ACTIVE_CANDIDATE) ||
		    (lpath->status == MSU_SETID_NU)) {
			if (hndl_queue == NULL) {
				lpath->hndl_uqueue = hndl_queue;
			} else {
				lpath->hndl_uqueue = WR(hndl_queue);
			}
			lpath->hndl_mp = mp;
		} else {
			rval = BUSY;
		}
	} else {
		rval = BUSY;
	}
	return (rval);
}
Example #3
/*
 * Check control node and driver privilege
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : A
 *  -. uinst_t->c_lock : P
 */
int
oplmsu_wcmn_chknode(queue_t *q, int node, mblk_t *mp)
{
	struct iocblk	*iocp;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));

	mutex_enter(&oplmsu_uinst->c_lock);
	if (((ctrl_t *)q->q_ptr)->node_type != node) {
		mutex_exit(&oplmsu_uinst->c_lock);
		cmn_err(CE_WARN, "oplmsu: chk-node: ctrl node type = %d", node);
		return (EINVAL);
	}
	mutex_exit(&oplmsu_uinst->c_lock);

	/* Check super-user by oplmsu.conf */
	if (oplmsu_check_su != 0) {
		iocp = (struct iocblk *)mp->b_rptr;
		if (drv_priv(iocp->ioc_cr) != 0) {
			cmn_err(CE_WARN, "oplmsu: chk-node: Permission denied");
			return (EPERM);
		}
	}
	return (SUCCESS);
}
Example #4
/*
 * Flush handle for write side stream
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : P
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_wcmn_flush_hndl(queue_t *q, mblk_t *mp, krw_t rw)
{
	queue_t	*dst_queue = NULL;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));

	if (*mp->b_rptr & FLUSHW) {	/* Write side */
		flushq(q, FLUSHDATA);
	}

	dst_queue = oplmsu_uinst->lower_queue;
	if (dst_queue == NULL) {
		if (*mp->b_rptr & FLUSHR) {
			flushq(RD(q), FLUSHDATA);
			*mp->b_rptr &= ~FLUSHW;

			rw_exit(&oplmsu_uinst->lock);
			OPLMSU_TRACE(q, mp, MSU_TRC_UO);
			qreply(q, mp);
			rw_enter(&oplmsu_uinst->lock, rw);
		} else {
			freemsg(mp);
		}
	} else {
		putq(WR(dst_queue), mp);
	}
}
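Because the function above drops and re-acquires uinst_t->lock around qreply(), the rw argument must match the mode the caller actually holds. A hedged caller sketch, assuming the lock was taken as a reader:

	rw_enter(&oplmsu_uinst->lock, RW_READER);
	oplmsu_wcmn_flush_hndl(q, mp, RW_READER);	/* rw matches the enter above */
	rw_exit(&oplmsu_uinst->lock);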
Example #5
static int
zpl_xattr_get_sa(struct inode *ip, const char *name, void *value, size_t size)
{
	znode_t *zp = ITOZ(ip);
	uchar_t *nv_value;
	uint_t nv_size;
	int error = 0;

	ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));

	mutex_enter(&zp->z_lock);
	if (zp->z_xattr_cached == NULL)
		error = -zfs_sa_get_xattr(zp);
	mutex_exit(&zp->z_lock);

	if (error)
		return (error);

	ASSERT(zp->z_xattr_cached);
	error = -nvlist_lookup_byte_array(zp->z_xattr_cached, name,
	    &nv_value, &nv_size);
	if (error)
		return (error);

	if (!size)
		return (nv_size);

	if (size < nv_size)
		return (-ERANGE);

	memcpy(value, nv_value, nv_size);

	return (nv_size);
}
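The size == 0 branch above gives callers a way to probe the attribute's length before supplying a buffer. A hypothetical in-file caller (names and the KM_SLEEP allocation are illustrative; z_xattr_lock must already be held, per the ASSERT):

	int len = zpl_xattr_get_sa(ip, name, NULL, 0);	/* probe: nv_size or -errno */

	if (len > 0) {
		char *buf = kmem_alloc(len, KM_SLEEP);
		int error = zpl_xattr_get_sa(ip, name, buf, len);

		/* consume buf when the second call succeeds (error == len) */
		kmem_free(buf, len);
	}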
Example #6
int
dsl_prop_get_ds(dsl_dataset_t *ds, const char *propname,
    int intsz, int numints, void *buf, char *setpoint)
{
	zfs_prop_t prop = zfs_name_to_prop(propname);
	boolean_t inheritable;
	boolean_t snapshot;
	uint64_t zapobj;

	ASSERT(RW_LOCK_HELD(&ds->ds_dir->dd_pool->dp_config_rwlock));
	inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
	snapshot = (ds->ds_phys != NULL && dsl_dataset_is_snapshot(ds));
	zapobj = (ds->ds_phys == NULL ? 0 : ds->ds_phys->ds_props_obj);

	if (zapobj != 0) {
		objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
		int err;

		ASSERT(snapshot);

		/* Check for a local value. */
		err = zap_lookup(mos, zapobj, propname, intsz, numints, buf);
		if (err != ENOENT) {
			if (setpoint != NULL && err == 0)
				dsl_dataset_name(ds, setpoint);
			return (err);
		}

		/*
		 * Skip the check for a received value if there is an explicit
		 * inheritance entry.
		 */
		if (inheritable) {
			char *inheritstr = kmem_asprintf("%s%s", propname,
			    ZPROP_INHERIT_SUFFIX);
			err = zap_contains(mos, zapobj, inheritstr);
			strfree(inheritstr);
			if (err != 0 && err != ENOENT)
				return (err);
		}

		if (err == ENOENT) {
			/* Check for a received value. */
			char *recvdstr = kmem_asprintf("%s%s", propname,
			    ZPROP_RECVD_SUFFIX);
			err = zap_lookup(mos, zapobj, recvdstr,
			    intsz, numints, buf);
			strfree(recvdstr);
			if (err != ENOENT) {
				if (setpoint != NULL && err == 0)
					(void) strlcpy(setpoint,
					    ZPROP_SOURCE_VAL_RECVD, MAXNAMELEN);
				return (err);
			}
		}
	}

	return (dsl_prop_get_dd(ds->ds_dir, propname,
	    intsz, numints, buf, setpoint, snapshot));
}
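As a hedged usage sketch (the dataset pointer and property name are assumed), a caller fetching a single 64-bit value holds dp_config_rwlock around the lookup, matching the ASSERT at the top of the function:

	uint64_t value;
	char setpoint[MAXNAMELEN];
	int err;

	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_prop_get_ds(ds, "compression", sizeof (value), 1,
	    &value, setpoint);
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);

	if (err == 0) {
		/* value holds the property; setpoint names where it was set */
	}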
Example #7
static void
xdirfixdotdot(
	struct xmemnode	*fromxp,	/* child directory */
	struct xmemnode	*fromparent,	/* old parent directory */
	struct xmemnode	*toparent)	/* new parent directory */
{
	struct xdirent	*dotdot;

	ASSERT(RW_LOCK_HELD(&toparent->xn_rwlock));

	/*
	 * Increment the link count in the new parent xmemnode
	 */
	INCR_COUNT(&toparent->xn_nlink, &toparent->xn_tlock);
	gethrestime(&toparent->xn_ctime);

	dotdot = xmemfs_hash_lookup("..", fromxp, 0, NULL);

	ASSERT(dotdot->xd_xmemnode == fromparent);
	dotdot->xd_xmemnode = toparent;

	/*
	 * Decrement the link count of the old parent xmemnode.
	 * If fromparent is NULL, then this is a new directory link;
	 * it has no parent, so we need not do anything.
	 */
	if (fromparent != NULL) {
		mutex_enter(&fromparent->xn_tlock);
		if (fromparent->xn_nlink != 0) {
			fromparent->xn_nlink--;
			gethrestime(&fromparent->xn_ctime);
		}
		mutex_exit(&fromparent->xn_tlock);
	}
}
Example #8
/* check where the xattr resides */
static int
__zpl_xattr_where(struct inode *ip, const char *name, int *where, cred_t *cr)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ZTOZSB(zp);
	int error;

	ASSERT(where);
	ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));

	*where = XATTR_NOENT;
	if (zsb->z_use_sa && zp->z_is_sa) {
		error = zpl_xattr_get_sa(ip, name, NULL, 0);
		if (error >= 0)
			*where |= XATTR_IN_SA;
		else if (error != -ENOENT)
			return (error);
	}

	error = zpl_xattr_get_dir(ip, name, NULL, 0, cr);
	if (error >= 0)
		*where |= XATTR_IN_DIR;
	else if (error != -ENOENT)
		return (error);

	if (*where == (XATTR_IN_SA|XATTR_IN_DIR))
		cmn_err(CE_WARN, "ZFS: inode %p has xattr \"%s\""
		    " in both SA and dir", ip, name);
	if (*where == XATTR_NOENT)
		error = -ENODATA;
	else
		error = 0;
	return (error);
}
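A hypothetical caller dispatching on the result could look like the sketch below (value, size, and cr are assumed to come from the surrounding xattr handler):

	int where, error;

	error = __zpl_xattr_where(ip, name, &where, cr);
	if (error == 0) {
		if (where & XATTR_IN_SA)
			error = zpl_xattr_get_sa(ip, name, value, size);
		else if (where & XATTR_IN_DIR)
			error = zpl_xattr_get_dir(ip, name, value, size, cr);
	}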
Example #9
static mzap_ent_t *
mze_find(zap_t *zap, const char *name, uint64_t hash)
{
	mzap_ent_t mze_tofind;
	mzap_ent_t *mze;
	avl_index_t idx;
	avl_tree_t *avl = &zap->zap_m.zap_avl;

	ASSERT(zap->zap_ismicro);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
	ASSERT3U(zap_hash(zap, name), ==, hash);

	if (strlen(name) >= sizeof (mze_tofind.mze_phys.mze_name))
		return (NULL);

	mze_tofind.mze_hash = hash;
	mze_tofind.mze_phys.mze_cd = 0;

	mze = avl_find(avl, &mze_tofind, &idx);
	if (mze == NULL)
		mze = avl_nearest(avl, idx, AVL_AFTER);
	for (; mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
		if (strcmp(name, mze->mze_phys.mze_name) == 0)
			return (mze);
	}
	return (NULL);
}
Example #10
static uint32_t
mze_find_unused_cd(zap_t *zap, uint64_t hash)
{
	mzap_ent_t mze_tofind;
	mzap_ent_t *mze;
	avl_index_t idx;
	avl_tree_t *avl = &zap->zap_m.zap_avl;
	uint32_t cd;

	ASSERT(zap->zap_ismicro);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	mze_tofind.mze_hash = hash;
	mze_tofind.mze_phys.mze_cd = 0;

	cd = 0;
	for (mze = avl_find(avl, &mze_tofind, &idx);
	    mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
		if (mze->mze_phys.mze_cd != cd)
			break;
		cd++;
	}

	return (cd);
}
Example #11
File: zfs_sa.c Project: Alyseo/zfs
int
zfs_sa_get_xattr(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	char *obj;
	int size;
	int error;

	ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
	ASSERT(!zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), &size);
	if (error) {
		if (error == ENOENT)
			return (nvlist_alloc(&zp->z_xattr_cached,
			    NV_UNIQUE_NAME, KM_SLEEP));
		else
			return (error);
	}

	obj = zio_buf_alloc(size);

	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), obj, size);
	if (error == 0)
		error = nvlist_unpack(obj, size, &zp->z_xattr_cached, KM_SLEEP);

	zio_buf_free(obj, size);

	return (error);
}
Example #12
/*
 * Register to receive an event notification when the system
 * machine description is updated.
 *
 * Passing NULL for the node specification parameter is valid
 * as long as the match specification is also NULL. In this
 * case, the client will receive a notification when the MD
 * has been updated, but the callback will not include any
 * information. The client is then responsible for obtaining
 * its own copy of the system MD and performing any processing
 * manually.
 */
int
mdeg_register(mdeg_node_spec_t *pspecp, mdeg_node_match_t *nmatchp,
    mdeg_cb_t cb, void *cb_arg, mdeg_handle_t *hdlp)
{
	mdeg_clnt_t	*clnt;

	/*
	 * If the RW lock is held, a client is calling
	 * register from its own callback.
	 */
	if (RW_LOCK_HELD(&mdeg.rwlock)) {
		MDEG_DBG("mdeg_register: rwlock already held\n");
		return (MDEG_FAILURE);
	}

	/* node spec and node match must both be valid, or both NULL */
	if (((pspecp != NULL) && (nmatchp == NULL)) ||
	    ((pspecp == NULL) && (nmatchp != NULL))) {
		MDEG_DBG("mdeg_register: invalid parameters\n");
		return (MDEG_FAILURE);
	}

	rw_enter(&mdeg.rwlock, RW_WRITER);

	clnt = mdeg_alloc_clnt();

	ASSERT(clnt);

	/*
	 * Fill in the rest of the data
	 */
	clnt->nmatch = nmatchp;
	clnt->pspec = pspecp;
	clnt->cb = cb;
	clnt->cb_arg = cb_arg;
	clnt->magic = MDEG_MAGIC;

	/* do this last */
	clnt->valid = B_TRUE;

	MDEG_DBG("client registered (0x%lx):\n", clnt->hdl);
	MDEG_DUMP_CLNT(clnt);

	mdeg.nclnts++;

	if (mdeg_notify_client_reg(clnt) != MDEG_SUCCESS) {
		bzero(clnt, sizeof (mdeg_clnt_t));
		rw_exit(&mdeg.rwlock);
		return (MDEG_FAILURE);
	}

	rw_exit(&mdeg.rwlock);

	*hdlp = clnt->hdl;

	return (MDEG_SUCCESS);
}
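A hedged registration sketch with a NULL node spec and match, as the header comment allows; the callback name is illustrative and the mdeg_cb_t signature (int (*)(void *, mdeg_result_t *)) is assumed from sys/mdeg.h:

static int
my_md_update_cb(void *cb_arg, mdeg_result_t *resp)
{
	/* no node data is delivered for a NULL spec; re-read the MD manually */
	return (MDEG_SUCCESS);
}

static mdeg_handle_t my_mdeg_hdl;

static int
my_md_register(void)
{
	if (mdeg_register(NULL, NULL, my_md_update_cb, NULL,
	    &my_mdeg_hdl) != MDEG_SUCCESS)
		return (MDEG_FAILURE);
	return (MDEG_SUCCESS);
}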
Example #13
/*
 * Clear queue and ioctl to lpath_t
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : M
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_clear_ioctl_path(lpath_t *lpath)
{

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
	ASSERT(MUTEX_HELD(&oplmsu_uinst->l_lock));

	lpath->hndl_uqueue = NULL;
	lpath->hndl_mp = NULL;
}
Example #14
/*
 * Cancel an automatic unmount of a snapname.  This callback is responsible
 * for dropping the reference on the zfs_snapentry_t which was taken
 * during dispatch.
 */
static void
zfsctl_snapshot_unmount_cancel(zfs_snapentry_t *se)
{
	ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));

	if (taskq_cancel_id(zfs_expire_taskq, se->se_taskqid) == 0) {
		se->se_taskqid = -1;
		zfsctl_snapshot_rele(se);
	}
}
Example #15
int				/* ERRNO if error, 0 if successful. */
sam_access_ino(
	sam_node_t *ip,		/* pointer to inode. */
	int mode,		/* mode of access to be verified */
	boolean_t locked,	/* is ip->inode_rwl held by caller? */
	cred_t *credp)		/* credentials pointer. */
{
	int shift = 0;

	ASSERT(!locked || RW_LOCK_HELD(&ip->inode_rwl));

	/*
	 * If write access is requested on a read-only filesystem or a
	 * WORM file, return an error.
	 */
	if (mode & S_IWRITE) {
		if (ip->mp->mt.fi_mflag & MS_RDONLY) {
			return (EROFS);
		}
		if (ip->di.status.b.worm_rdonly && !S_ISDIR(ip->di.mode)) {
			return (EROFS);
		}
	}

	if (!locked) {
		RW_LOCK_OS(&ip->inode_rwl, RW_READER);
	}

	/* Use ACL, if present, to check access. */
	if (ip->di.status.b.acl) {
		int error;

		error = sam_acl_access(ip, mode, credp);
		if (!locked) {
			RW_UNLOCK_OS(&ip->inode_rwl, RW_READER);
		}
		return (error);
	}

	if (!locked) {
		RW_UNLOCK_OS(&ip->inode_rwl, RW_READER);
	}

	if (crgetuid(credp) != ip->di.uid) {
		shift += 3;
		if (!groupmember((uid_t)ip->di.gid, credp)) {
			shift += 3;
		}
	}
	mode &= ~(ip->di.mode << shift);
	if (mode == 0) {
		return (0);
	}
	return (secpolicy_vnode_access(credp, SAM_ITOV(ip), ip->di.uid, mode));
}
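The shift arithmetic near the end is easier to follow with concrete numbers: the requested mode arrives in the owner-position bits, and shifting the on-disk mode left by 3 (group) or 6 (other) lines the relevant permission bits up with it. Illustrative values only, not from the source:

	int mode = S_IWRITE;	/* 0200: write requested, owner bit position */
	int shift = 3;		/* caller is a group member, not the owner */

	/* di.mode 0640 (rw-r-----): group bits 040 << 3 == 0400, write not set */
	mode &= ~(0640 << shift);
	/* mode is still 0200, so the decision falls to secpolicy_vnode_access() */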
Example #16
vnode_t *
makenfs4node_by_fh(nfs4_sharedfh_t *sfh, nfs4_sharedfh_t *psfh,
    nfs4_fname_t **npp, nfs4_ga_res_t *garp,
    mntinfo4_t *mi, cred_t *cr, hrtime_t t)
{
	vfs_t *vfsp = mi->mi_vfsp;
	int newnode = 0;
	vnode_t *vp;
	rnode4_t *rp;
	svnode_t *svp;
	nfs4_fname_t *name, *svpname;
	int index;

	ASSERT(npp && *npp);
	name = *npp;
	*npp = NULL;

	index = rtable4hash(sfh);
	rw_enter(&rtable4[index].r_lock, RW_READER);

	vp = make_rnode4(sfh, &rtable4[index], vfsp,
	    nfs4_vnodeops, nfs4_putapage, &newnode, cr);

	svp = VTOSV(vp);
	rp = VTOR4(vp);
	if (newnode) {
		svp->sv_forw = svp->sv_back = svp;
		svp->sv_name = name;
		if (psfh != NULL)
			sfh4_hold(psfh);
		svp->sv_dfh = psfh;
	} else {
		/*
		 * It is possible that fnames have changed due to a
		 * server-side rename.  Update the fname here.
		 */
		mutex_enter(&rp->r_svlock);
		svpname = svp->sv_name;
		if (svp->sv_name != name) {
			svp->sv_name = name;
			mutex_exit(&rp->r_svlock);
			fn_rele(&svpname);
		} else {
			mutex_exit(&rp->r_svlock);
			fn_rele(&name);
		}
	}

	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
	r4_do_attrcache(vp, garp, newnode, t, cr, index);
	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);

	return (vp);
}
Example #17
/*
 * Lookup a rnode by fhandle.  Ignores rnodes that had failed recovery.
 * Returns NULL if no match.  If an rnode is returned, the reference count
 * on the master vnode is incremented.
 *
 * The caller must be holding the hash queue lock, either shared or exclusive.
 */
rnode4_t *
r4find(r4hashq_t *rhtp, nfs4_sharedfh_t *fh, struct vfs *vfsp)
{
	rnode4_t *rp;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&rhtp->r_lock));

	for (rp = rhtp->r_hashf; rp != (rnode4_t *)rhtp; rp = rp->r_hashf) {
		vp = RTOV4(rp);
		if (vp->v_vfsp == vfsp && SFH4_SAME(rp->r_fh, fh)) {

			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & R4RECOVERR) {
				mutex_exit(&rp->r_statelock);
				continue;
			}
			mutex_exit(&rp->r_statelock);
#ifdef DEBUG
			r4_dup_check(rp, vfsp);
#endif
			if (rp->r_freef != NULL) {
				mutex_enter(&rp4freelist_lock);
				/*
				 * If the rnode is on the freelist,
				 * then remove it and use that reference
				 * as the new reference.  Otherwise,
				 * need to increment the reference count.
				 */
				if (rp->r_freef != NULL) {
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
				} else {
					mutex_exit(&rp4freelist_lock);
					VN_HOLD(vp);
				}
			} else
				VN_HOLD(vp);

			/*
			 * if root vnode, set v_flag to indicate that
			 */
			if (isrootfh(fh, rp)) {
				if (!(vp->v_flag & VROOT)) {
					mutex_enter(&vp->v_lock);
					vp->v_flag |= VROOT;
					mutex_exit(&vp->v_lock);
				}
			}
			return (rp);
		}
	}
	return (NULL);
}
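A hedged caller sketch, holding the hash queue lock as the comment requires; sfh and vfsp are assumed from context, and the rtable4hash() usage mirrors example #16:

	int index = rtable4hash(sfh);
	rnode4_t *rp;

	rw_enter(&rtable4[index].r_lock, RW_READER);
	rp = r4find(&rtable4[index], sfh, vfsp);
	rw_exit(&rtable4[index].r_lock);

	if (rp != NULL) {
		/* use RTOV4(rp); release the hold with VN_RELE() when done */
	}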
Example #18
/*
 * Set security attributes (acl's)
 *
 * Note that the dv_contents lock has already been acquired
 * by the caller's VOP_RWLOCK.
 */
static int
devfs_setsecattr(struct vnode *vp, struct vsecattr *vsap, int flags,
    struct cred *cr)
{
	dvnode_t *dv = VTODV(vp);
	struct vnode *avp;
	int	error;

	dcmn_err2(("devfs_setsecattr %s\n", dv->dv_name));
	ASSERT(vp->v_type == VDIR || vp->v_type == VCHR || vp->v_type == VBLK);
	ASSERT(RW_LOCK_HELD(&dv->dv_contents));

	/*
	 * Not a supported operation on drivers not providing
	 * file system based permissions.
	 */
	if (dv->dv_flags & DV_NO_FSPERM)
		return (ENOTSUP);

	/*
	 * To complete, the setsecattr requires an underlying attribute node.
	 */
	if (dv->dv_attrvp == NULL) {
		ASSERT(vp->v_type == VCHR || vp->v_type == VBLK);
		dv_shadow_node(DVTOV(dv->dv_dotdot), dv->dv_name, vp,
		    NULL, NULLVP, cr, DV_SHADOW_CREATE | DV_SHADOW_WRITE_HELD);
	}

	if ((avp = dv->dv_attrvp) == NULL) {
		dcmn_err2(("devfs_setsecattr %s: "
		    "cannot construct attribute node\n", dv->dv_name));
		return (fs_nosys());
	}

	/*
	 * The acl(2) system call issues a VOP_RWLOCK before setting an ACL.
	 * Since backing file systems expect the lock to be held before seeing
	 * a VOP_SETSECATTR ACL, we need to issue the VOP_RWLOCK to the backing
	 * store before forwarding the ACL.
	 */
	(void) VOP_RWLOCK(avp, V_WRITELOCK_TRUE, NULL);
	error = VOP_SETSECATTR(avp, vsap, flags, cr);
	dsysdebug(error, ("vop_setsecattr %s %d\n", VTODV(vp)->dv_name, error));
	VOP_RWUNLOCK(avp, V_WRITELOCK_TRUE, NULL);

	/*
	 * Set DV_ACL if we have a non-trivial set of ACLs.  It is not
	 * necessary to hold VOP_RWLOCK since fs_acl_nontrivial only does
	 * VOP_GETSECATTR calls.
	 */
	if (fs_acl_nontrivial(avp, cr))
		dv->dv_flags |= DV_ACL;
	return (error);
}
Example #19
/*
 * Set status information of upath_t
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : M
 *  -. uinst_t->l_lock : A
 *  -. uinst_t->c_lock : A
 */
void
oplmsu_cmn_set_upath_sts(upath_t *upath, int sts, int prev_sts,
    ulong_t trad_sts)
{

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
	ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));

	upath->status = sts;
	upath->prev_status = prev_sts;
	upath->traditional_status = trad_sts;
}
Example #20
int
dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
    int intsz, int numint, void *buf, char *setpoint)
{
	int err = ENOENT;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	zfs_prop_t prop;

	ASSERT(RW_LOCK_HELD(&dd->dd_pool->dp_config_rwlock));

	if (setpoint)
		setpoint[0] = '\0';

	prop = zfs_name_to_prop(propname);

	/*
	 * Note: dd may be NULL, therefore we shouldn't dereference it
	 * outside this loop.
	 */
	for (; dd != NULL; dd = dd->dd_parent) {
		ASSERT(RW_LOCK_HELD(&dd->dd_pool->dp_config_rwlock));
		err = zap_lookup(mos, dd->dd_phys->dd_props_zapobj,
		    propname, intsz, numint, buf);
		if (err != ENOENT) {
			if (setpoint)
				dsl_dir_name(dd, setpoint);
			break;
		}

		/*
		 * Break out of this loop for non-inheritable properties.
		 */
		if (prop != ZPROP_INVAL && !zfs_prop_inheritable(prop))
			break;
	}
	if (err == ENOENT)
		err = dodefault(propname, intsz, numint, buf);

	return (err);
}
Example #21
/*
 * Pick up termios to re-set
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : A
 *  -. uinst_t->c_lock : A
 */
int
oplmsu_stop_prechg(mblk_t **term_mp, int *term_ioctl, int *term_stat)
{

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));

	if (oplmsu_uinst->tcsets_p != NULL) {
		struct iocblk	*iocp;

		if ((*term_mp = copymsg(oplmsu_uinst->tcsets_p)) == NULL) {
			return (FAILURE);
		}

		iocp = (struct iocblk *)(*term_mp)->b_rptr;
		*term_ioctl = iocp->ioc_cmd;
		*term_stat = MSU_WTCS_ACK;
	} else if (oplmsu_uinst->tiocmset_p != NULL) {
		if ((*term_mp = copymsg(oplmsu_uinst->tiocmset_p)) == NULL) {
			return (FAILURE);
		}

		*term_ioctl = TIOCMSET;
		*term_stat = MSU_WTMS_ACK;
	} else if (oplmsu_uinst->tiocspps_p != NULL) {
		if ((*term_mp = copymsg(oplmsu_uinst->tiocspps_p)) == NULL) {
			return (FAILURE);
		}

		*term_ioctl = TIOCSPPS;
		*term_stat = MSU_WPPS_ACK;
	} else if (oplmsu_uinst->tiocswinsz_p != NULL) {
		if ((*term_mp = copymsg(oplmsu_uinst->tiocswinsz_p)) == NULL) {
			return (FAILURE);
		}

		*term_ioctl = TIOCSWINSZ;
		*term_stat = MSU_WWSZ_ACK;
	} else if (oplmsu_uinst->tiocssoftcar_p != NULL) {
		if ((*term_mp = copymsg(oplmsu_uinst->tiocssoftcar_p))
		    == NULL) {
			return (FAILURE);
		}

		*term_ioctl = TIOCSSOFTCAR;
		*term_stat = MSU_WCAR_ACK;
	} else {
		*term_stat = MSU_WPTH_CHG;
		*term_mp = NULL;
	}
	return (SUCCESS);
}
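A minimal caller sketch under assumed names: the function hands back a copied M_IOCTL message (or NULL) together with the ack state the path-change code should wait for, all while uinst_t->lock is held:

	mblk_t	*term_mp;
	int	term_ioctl;
	int	term_stat;

	rw_enter(&oplmsu_uinst->lock, RW_READER);
	if (oplmsu_stop_prechg(&term_mp, &term_ioctl, &term_stat) == FAILURE) {
		/* copymsg() failed; the caller would retry or give up here */
	} else if (term_mp != NULL) {
		/* send term_mp down the new path, then wait for the term_stat ack */
	}
	rw_exit(&oplmsu_uinst->lock);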
Example #22
/*
 * Find a zfs_snapentry_t in zfs_snapshots_by_name.  If the snapname
 * is found a pointer to the zfs_snapentry_t is returned and a reference
 * taken on the structure.  The caller is responsible for dropping the
 * reference with zfsctl_snapshot_rele().  If the snapname is not found
 * NULL will be returned.
 */
static zfs_snapentry_t *
zfsctl_snapshot_find_by_name(char *snapname)
{
	zfs_snapentry_t *se, search;

	ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));

	search.se_name = snapname;
	se = avl_find(&zfs_snapshots_by_name, &search, NULL);
	if (se)
		refcount_add(&se->se_refcount, NULL);

	return (se);
}
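A hedged lookup sketch around the function above; the read lock and the matching zfsctl_snapshot_rele() follow the contract in the comment (snapname is assumed):

	zfs_snapentry_t *se;

	rw_enter(&zfs_snapshot_lock, RW_READER);
	se = zfsctl_snapshot_find_by_name(snapname);
	rw_exit(&zfs_snapshot_lock);

	if (se != NULL) {
		/* inspect se->se_name, se->se_objsetid, etc. */
		zfsctl_snapshot_rele(se);
	}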
Example #23
static sdev_nc_node_t *
sdev_nc_findpath(sdev_nc_list_t *ncl, char *path)
{
	sdev_nc_node_t *lp;

	ASSERT(RW_LOCK_HELD(&ncl->ncl_lock));

	for (lp = list_head(&ncl->ncl_list); lp;
	    lp = list_next(&ncl->ncl_list, lp)) {
		if (strcmp(path, lp->ncn_name) == 0)
			return (lp);
	}

	return (NULL);
}
Example #24
/*
 * Wake up flow control
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : P
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_cmn_wakeup(queue_t *q)
{
	ctrl_t	*ctrl;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));

	mutex_enter(&oplmsu_uinst->c_lock);
	ctrl = (ctrl_t *)q->q_ptr;
	if (ctrl->sleep_flag == CV_SLEEP) {
		ctrl->sleep_flag = CV_WAKEUP;
		cv_signal(&ctrl->cvp);
	}
	mutex_exit(&oplmsu_uinst->c_lock);
}
Example #25
/*
 * putbq() function for normal priority message of write stream
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : A
 *  -. uinst_t->l_lock : P
 *  -. uinst_t->c_lock : P
 */
void
oplmsu_wcmn_norm_putbq(queue_t *uwq, mblk_t *mp, queue_t *dq)
{
	lpath_t	*lpath;

	ASSERT(mp != NULL);
	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));

	mutex_enter(&oplmsu_uinst->l_lock);
	lpath = (lpath_t *)dq->q_ptr;
	lpath->uwq_flag = 1;
	lpath->uwq_queue = uwq;
	mutex_exit(&oplmsu_uinst->l_lock);
	putbq(uwq, mp);
}
Example #26
/*
 * Find a zfs_snapentry_t in zfs_snapshots_by_objsetid given the objset id
 * rather than the snapname.  In all other respects it behaves the same
 * as zfsctl_snapshot_find_by_name().
 */
static zfs_snapentry_t *
zfsctl_snapshot_find_by_objsetid(spa_t *spa, uint64_t objsetid)
{
	zfs_snapentry_t *se, search;

	ASSERT(RW_LOCK_HELD(&zfs_snapshot_lock));

	search.se_spa = spa;
	search.se_objsetid = objsetid;
	se = avl_find(&zfs_snapshots_by_objsetid, &search, NULL);
	if (se)
		refcount_add(&se->se_refcount, NULL);

	return (se);
}
Example #27
/*
 * Clean up state associated with a zfetch structure (e.g. destroy the
 * streams).  This doesn't free the zfetch_t itself; that's left to the caller.
 */
void
dmu_zfetch_fini(zfetch_t *zf)
{
	zstream_t *zs;

	ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));

	rw_enter(&zf->zf_rwlock, RW_WRITER);
	while ((zs = list_head(&zf->zf_stream)) != NULL)
		dmu_zfetch_stream_remove(zf, zs);
	rw_exit(&zf->zf_rwlock);
	list_destroy(&zf->zf_stream);
	rw_destroy(&zf->zf_rwlock);

	zf->zf_dnode = NULL;
}
Example #28
/*
 * Validate that a node is up-to-date and correct.
 * A validator may not update the node state or
 * contents as a read lock permits entry by
 * multiple threads.
 */
int
devvt_validate(struct sdev_node *dv)
{
	minor_t min;
	char *nm = dv->sdev_name;
	int rval;

	ASSERT(dv->sdev_state == SDEV_READY);
	ASSERT(RW_LOCK_HELD(&(dv->sdev_dotdot)->sdev_contents));

	/* validate only READY nodes */
	if (dv->sdev_state != SDEV_READY) {
		sdcmn_err(("dev fs: skipping: node not ready %s(%p)",
		    nm, (void *)dv));
		return (SDEV_VTOR_SKIP);
	}

	if (vt_wc_attached() == (major_t)-1)
		return (SDEV_VTOR_INVALID);

	if (strcmp(nm, DEVVT_ACTIVE_NAME) == 0) {
		char *link = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
		(void) vt_getactive(link, MAXPATHLEN);
		rval = (strcmp(link, dv->sdev_symlink) == 0) ?
		    SDEV_VTOR_VALID : SDEV_VTOR_STALE;
		kmem_free(link, MAXPATHLEN);
		return (rval);
	}

	if (strcmp(nm, DEVVT_CONSUSER_NAME) == 0) {
		char *link = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
		(void) vt_getconsuser(link, MAXPATHLEN);
		rval = (strcmp(link, dv->sdev_symlink) == 0) ?
		    SDEV_VTOR_VALID : SDEV_VTOR_STALE;
		kmem_free(link, MAXPATHLEN);
		return (rval);
	}

	if (devvt_str2minor(nm, &min) != 0) {
		return (SDEV_VTOR_INVALID);
	}

	if (vt_minor_valid(min) == B_FALSE)
		return (SDEV_VTOR_INVALID);

	return (SDEV_VTOR_VALID);
}
Example #29
/*
 * Search upath_t by path number
 *
 * Requires Lock (( M: Mandatory, P: Prohibited, A: Allowed ))
 *  -. uinst_t->lock   : M [RW_READER or RW_WRITER]
 *  -. uinst_t->u_lock : M
 *  -. uinst_t->l_lock : A
 *  -. uinst_t->c_lock : P
 */
upath_t	*
oplmsu_search_upath_info(int path_no)
{
	upath_t	*upath;

	ASSERT(RW_LOCK_HELD(&oplmsu_uinst->lock));
	ASSERT(MUTEX_HELD(&oplmsu_uinst->u_lock));

	upath = oplmsu_uinst->first_upath;
	while (upath) {
		if (upath->path_no == path_no) {
			break;
		}
		upath = upath->u_next;
	}
	return (upath);
}
Example #30
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_GETMEMID(segkp, addr, memidp));

	segkmem_badop();
	return (0);
}