Example #1
static int
splat_list_test7(struct file *file, void *arg)
{
	list_t list;
	list_item_t *li;
	int rc = 0;

	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Creating list\n%s", "");
	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));

	li = kmem_alloc(sizeof(list_item_t), KM_SLEEP);
	if (li == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/* Validate newly initialized node is inactive */
	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Init list node\n%s", "");
	list_link_init(&li->li_node);
	if (list_link_active(&li->li_node)) {
		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Newly initialized "
			    "list node should inactive %p/%p\n",
			    li->li_node.prev, li->li_node.next);
		rc = -EINVAL;
		goto out_li;
	}

	/* Validate node is active when linked in to a list */
	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Insert list node\n%s", "");
	list_insert_head(&list, li);
	if (!list_link_active(&li->li_node)) {
		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "List node "
			    "inserted in list should be active %p/%p\n",
			    li->li_node.prev, li->li_node.next);
		rc = -EINVAL;
		goto out;
	}

	/* Validate node is inactive when removed from list */
	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Remove list node\n%s", "");
	list_remove(&list, li);
	if (list_link_active(&li->li_node)) {
		splat_vprint(file, SPLAT_LIST_TEST7_NAME, "List node "
			    "removed from list should be inactive %p/%p\n",
			    li->li_node.prev, li->li_node.next);
		rc = -EINVAL;
	}
out_li:
	kmem_free(li, sizeof(list_item_t));
out:
	/* Remove all items */
	while ((li = list_remove_head(&list)))
		kmem_free(li, sizeof(list_item_t));

	splat_vprint(file, SPLAT_LIST_TEST7_NAME, "Destroying list\n%s", "");
	list_destroy(&list);

	return rc;
}
Example #2
void
list_link_replace(list_node_t *lold, list_node_t *lnew)
{
	ASSERT(list_link_active(lold));
	ASSERT(!list_link_active(lnew));

	lnew->list_next = lold->list_next;
	lnew->list_prev = lold->list_prev;
	lold->list_prev->list_next = lnew;
	lold->list_next->list_prev = lnew;
	lold->list_next = lold->list_prev = NULL;
}
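
All of these examples rely on the same convention: a list_node_t whose link pointers have been cleared (by list_link_init() or by removal from a list) is "inactive", and inserting the node into a list makes it "active".  A minimal sketch of that predicate, written against the list_next/list_prev fields used by list_link_replace() above (the canonical list_link_active() may differ between ports), is:

static int
example_link_active(list_node_t *ln)
{
	/* Sketch only: an active link has both pointers set by an insert. */
	return (ln->list_next != NULL && ln->list_prev != NULL);
}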
Example #3
/*
 * Called in multiple places when an inode should be destroyed.
 */
void
zfs_inode_destroy(struct inode *ip)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ZTOZSB(zp);

	if (zfsctl_is_node(ip))
		zfsctl_inode_destroy(ip);

	mutex_enter(&zsb->z_znodes_lock);
	if (list_link_active(&zp->z_link_node)) {
		list_remove(&zsb->z_all_znodes, zp);
		zsb->z_nr_znodes--;
	}
	mutex_exit(&zsb->z_znodes_lock);

	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}

	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}

	if (zp->z_xattr_parent) {
		zfs_iput_async(ZTOI(zp->z_xattr_parent));
		zp->z_xattr_parent = NULL;
	}

	kmem_cache_free(znode_cache, zp);
}
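
zfs_inode_destroy() only unlinks the znode from z_all_znodes when its link is active, which tolerates znodes that were never inserted or were already removed.  A hedged sketch of the matching insertion side follows; the helper name is invented for illustration, but the fields match those used above.

/* Illustrative insertion counterpart; example_track_znode() is not from the source. */
static void
example_track_znode(zfs_sb_t *zsb, znode_t *zp)
{
	mutex_enter(&zsb->z_znodes_lock);
	list_insert_tail(&zsb->z_all_znodes, zp);	/* z_link_node becomes active */
	zsb->z_nr_znodes++;
	mutex_exit(&zsb->z_znodes_lock);
}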
Example #4
/*
 * Deactivate cap
 *   - Block its wait queue. This prevents any new threads from being
 *	enqueued there and moves all enqueued threads to the run queue.
 *   - Remove cap from list l.
 *   - Disable CPU caps globally if there are no capped projects or zones
 *
 * Should be called with caps_lock held.
 */
static void
cap_disable(list_t *l, cpucap_t *cap)
{
	ASSERT(MUTEX_HELD(&caps_lock));
	/*
	 * Cap should be currently active
	 */
	ASSERT(CPUCAPS_ON());
	ASSERT(list_link_active(&cap->cap_link));
	ASSERT(CAP_ENABLED(cap));

	waitq_block(&cap->cap_waitq);

	/* do this first to avoid race with cap_kstat_update */
	if (cap->cap_kstat != NULL) {
		kstat_delete(cap->cap_kstat);
		cap->cap_kstat = NULL;
	}

	list_remove(l, cap);
	if (list_is_empty(&capped_projects) && list_is_empty(&capped_zones)) {
		cpucaps_enabled = B_FALSE;
		cpucaps_clock_callout = NULL;
	}
	cap->cap_value = cap->cap_chk_value = 0;
	cap->cap_project = NULL;
	cap->cap_zone = NULL;
}
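
Per the comment above, cap_disable() must run with caps_lock held and with the cap still linked on its list.  A sketch of a caller that honors those preconditions, using list_link_active() as the membership test (the caller name is invented for illustration):

/* Illustrative caller; example_cap_remove() is not from the source. */
static void
example_cap_remove(cpucap_t *cap)
{
	mutex_enter(&caps_lock);
	if (list_link_active(&cap->cap_link))
		cap_disable(&capped_projects, cap);
	mutex_exit(&caps_lock);
}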
Example #5
static int
splat_list_test1(struct file *file, void *arg)
{
	list_t list;

	splat_vprint(file, SPLAT_LIST_TEST1_NAME, "Creating list\n%s", "");
	list_create(&list, sizeof(list_item_t), offsetof(list_item_t, li_node));

	if (!list_is_empty(&list)) {
		splat_vprint(file, SPLAT_LIST_TEST1_NAME,
			     "New list NOT empty%s\n", "");
		/* list_destroy() intentionally skipped to avoid assert */
		return -EEXIST;
	}

	splat_vprint(file, SPLAT_LIST_TEST1_NAME, "Destroying list\n%s", "");
	list_destroy(&list);

	/* Validate the list has been destroyed */
	if (list_link_active(&list.list_head)) {
		splat_vprint(file, SPLAT_LIST_TEST1_NAME,
			     "Destroyed list still active%s", "");
		return -EIO;
	}

	return 0;
}
Example #6
/*
 * Transfer top-level vdev state from svd to tvd.
 */
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
	spa_t *spa = svd->vdev_spa;
	metaslab_t *msp;
	vdev_t *vd;
	int t;

	ASSERT(tvd == tvd->vdev_top);

	tvd->vdev_ms_array = svd->vdev_ms_array;
	tvd->vdev_ms_shift = svd->vdev_ms_shift;
	tvd->vdev_ms_count = svd->vdev_ms_count;

	svd->vdev_ms_array = 0;
	svd->vdev_ms_shift = 0;
	svd->vdev_ms_count = 0;

	tvd->vdev_mg = svd->vdev_mg;
	tvd->vdev_ms = svd->vdev_ms;

	svd->vdev_mg = NULL;
	svd->vdev_ms = NULL;

	if (tvd->vdev_mg != NULL)
		tvd->vdev_mg->mg_vd = tvd;

	tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
	tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
	tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;

	svd->vdev_stat.vs_alloc = 0;
	svd->vdev_stat.vs_space = 0;
	svd->vdev_stat.vs_dspace = 0;

	for (t = 0; t < TXG_SIZE; t++) {
		while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
		while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
			(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
		if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
			(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
	}

	if (list_link_active(&svd->vdev_dirty_node)) {
		vdev_config_clean(svd);
		vdev_config_dirty(tvd);
	}

	tvd->vdev_reopen_wanted = svd->vdev_reopen_wanted;
	svd->vdev_reopen_wanted = 0;

	tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
	svd->vdev_deflate_ratio = 0;
}
Example #7
void
rdsv3_queue_work(rdsv3_workqueue_struct_t *wq, rdsv3_work_t *wp)
{
	RDSV3_DPRINTF4("rdsv3_queue_work", "Enter(wq: %p, wp: %p)", wq, wp);

	mutex_enter(&wq->wq_lock);

	if (list_link_active(&wp->work_item)) {
		/* This is already in the queue, ignore this call */
		mutex_exit(&wq->wq_lock);
		RDSV3_DPRINTF3("rdsv3_queue_work", "already queued: %p", wp);
		return;
	}

	switch (wq->wq_state) {
	case RDSV3_WQ_THREAD_RUNNING:
		list_insert_tail(&wq->wq_queue, wp);
		mutex_exit(&wq->wq_lock);
		break;

	case RDSV3_WQ_THREAD_FLUSHING:
		do {
			mutex_exit(&wq->wq_lock);
			delay(drv_usectohz(1000000));
			mutex_enter(&wq->wq_lock);
		} while (wq->wq_state == RDSV3_WQ_THREAD_FLUSHING);

		if (wq->wq_state == RDSV3_WQ_THREAD_RUNNING) {
			list_insert_tail(&wq->wq_queue, wp);
			mutex_exit(&wq->wq_lock);
			break;
		}
		/* FALLTHRU */

	case RDSV3_WQ_THREAD_IDLE:
		list_insert_tail(&wq->wq_queue, wp);
		wq->wq_state = RDSV3_WQ_THREAD_RUNNING;
		mutex_exit(&wq->wq_lock);

		(void) ddi_taskq_dispatch(rdsv3_taskq, rdsv3_worker_thread, wq,
		    DDI_SLEEP);
		break;

	case RDSV3_WQ_THREAD_EXITING:
		mutex_exit(&wq->wq_lock);
		break;
	}

	RDSV3_DPRINTF4("rdsv3_queue_work", "Return(wq: %p, wp: %p)", wq, wp);
}
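
rdsv3_queue_work() refuses to queue a work item whose link is already active, so whatever dequeues the item must leave the link inactive before the item can be queued again.  A simplified worker-side sketch (the real rdsv3_worker_thread() does considerably more):

/* Simplified dequeue sketch; list_remove_head() leaves work_item inactive. */
static rdsv3_work_t *
example_dequeue_work(rdsv3_workqueue_struct_t *wq)
{
	rdsv3_work_t *wp;

	mutex_enter(&wq->wq_lock);
	wp = list_remove_head(&wq->wq_queue);
	mutex_exit(&wq->wq_lock);

	return (wp);
}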
Example #8
/*
 * Linux kernels older than 3.1 do not support a per-filesystem shrinker.
 * To accommodate this we must improvise and manually walk the list of znodes
 * attempting to prune dentries in order to be able to drop the inodes.
 *
 * To avoid scanning the same znodes multiple times they are always rotated
 * to the end of the z_all_znodes list.  New znodes are inserted at the
 * end of the list so we're always scanning the oldest znodes first.
 */
static int
zfs_sb_prune_aliases(zfs_sb_t *zsb, unsigned long nr_to_scan)
{
	znode_t **zp_array, *zp;
	int max_array = MIN(nr_to_scan, PAGE_SIZE * 8 / sizeof (znode_t *));
	int objects = 0;
	int i = 0, j = 0;

	zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);

	mutex_enter(&zsb->z_znodes_lock);
	while ((zp = list_head(&zsb->z_all_znodes)) != NULL) {

		if ((i++ > nr_to_scan) || (j >= max_array))
			break;

		ASSERT(list_link_active(&zp->z_link_node));
		list_remove(&zsb->z_all_znodes, zp);
		list_insert_tail(&zsb->z_all_znodes, zp);

		/* Skip active znodes and .zfs entries */
		if (MUTEX_HELD(&zp->z_lock) || zp->z_is_ctldir)
			continue;

		if (igrab(ZTOI(zp)) == NULL)
			continue;

		zp_array[j] = zp;
		j++;
	}
	mutex_exit(&zsb->z_znodes_lock);

	for (i = 0; i < j; i++) {
		zp = zp_array[i];

		ASSERT3P(zp, !=, NULL);
		d_prune_aliases(ZTOI(zp));

		if (atomic_read(&ZTOI(zp)->i_count) == 1)
			objects++;

		iput(ZTOI(zp));
	}

	kmem_free(zp_array, max_array * sizeof (znode_t *));

	return (objects);
}
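
The scan above relies on a rotate-to-tail idiom: each visited znode is removed and reinserted at the tail, so the loop always takes the oldest entry from the head and never revisits a znode within one pass.  The same idiom in isolation:

/* Rotate an object to the tail of an illumos-style list. */
static void
example_rotate_to_tail(list_t *l, void *obj)
{
	list_remove(l, obj);		/* link goes inactive */
	list_insert_tail(l, obj);	/* link active again, now last */
}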
Example #9
/*
 * Free cpucap structure
 */
static void
cap_free(cpucap_t *cap)
{
	if (cap == NULL)
		return;

	/*
	 * This cap should not be active
	 */
	ASSERT(!list_link_active(&cap->cap_link));
	ASSERT(cap->cap_value == 0);
	ASSERT(!DISP_LOCK_HELD(&cap->cap_usagelock));

	waitq_fini(&cap->cap_waitq);
	DISP_LOCK_DESTROY(&cap->cap_usagelock);

	kmem_free(cap, sizeof (cpucap_t));
}
Example #10
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	znode_t *zp = buf;

	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	rw_destroy(&zp->z_xattr_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_dirlocks == NULL);
	ASSERT(zp->z_acl_cached == NULL);
	ASSERT(zp->z_xattr_cached == NULL);
}
Example #11
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	znode_t *zp = buf;

	// ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	ASSERT(ZTOV(zp)->v_data == zp);
	vn_free(ZTOV(zp));
	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_dirlocks == NULL);
	ASSERT(zp->z_acl_cached == NULL);
}
Example #12
void
cbufq_enq(cbufq_t *cbufq, cbuf_t *cbuf)
{
	VERIFY(!list_link_active(&cbuf->cbuf_link));

	/*
	 * Ensure that either "cbuf_flip()", "cbuf_rewind()" or "cbuf_compact()"
	 * has been called on this buffer before insertion in the queue.
	 */
	VERIFY(cbuf_position(cbuf) == 0);

	if (list_is_empty(&cbufq->cbufq_bufs)) {
		VERIFY(cbufq->cbufq_count == 0);
	} else {
		VERIFY(cbufq->cbufq_count >= 1);
	}

	cbufq->cbufq_count++;
	list_insert_tail(&cbufq->cbufq_bufs, cbuf);
}
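
cbufq_enq() verifies that the buffer is not already on a queue and keeps cbufq_count consistent with the list.  A hedged sketch of the dequeue counterpart; the function name is assumed, not taken from the source:

/* Hypothetical dequeue counterpart; the name is assumed. */
static cbuf_t *
example_cbufq_deq(cbufq_t *cbufq)
{
	cbuf_t *cbuf = list_remove_head(&cbufq->cbufq_bufs);

	if (cbuf != NULL) {
		VERIFY(cbufq->cbufq_count >= 1);
		cbufq->cbufq_count--;
		VERIFY(!list_link_active(&cbuf->cbuf_link));
	}

	return (cbuf);
}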
Example #13
void
vdev_free(vdev_t *vd)
{
	int c;

	/*
	 * vdev_free() implies closing the vdev first.  This is simpler than
	 * trying to ensure complicated semantics for all callers.
	 */
	vdev_close(vd);

	ASSERT(!list_link_active(&vd->vdev_dirty_node));

	/*
	 * Free all children.
	 */
	for (c = 0; c < vd->vdev_children; c++)
		vdev_free(vd->vdev_child[c]);

	ASSERT(vd->vdev_child == NULL);
	ASSERT(vd->vdev_guid_sum == vd->vdev_guid);

	/*
	 * Discard allocation state.
	 */
	if (vd == vd->vdev_top)
		vdev_metaslab_fini(vd);

	ASSERT3U(vd->vdev_stat.vs_space, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_dspace, ==, 0);
	ASSERT3U(vd->vdev_stat.vs_alloc, ==, 0);

	/*
	 * Remove this vdev from its parent's child list.
	 */
	vdev_remove_child(vd->vdev_parent, vd);

	ASSERT(vd->vdev_parent == NULL);

	vdev_free_common(vd);
}
Example #14
/*
 * Activate cap - insert into active list and unblock its
 * wait queue. Should be called with caps_lock held.
 * The cap_value field is set to the value supplied.
 */
static void
cap_enable(list_t *l, cpucap_t *cap, hrtime_t value)
{
	ASSERT(MUTEX_HELD(&caps_lock));

	/*
	 * Cap cannot already be enabled
	 */
	ASSERT(!CAP_ENABLED(cap));
	ASSERT(!list_link_active(&cap->cap_link));

	list_insert_tail(l, cap);
	cap->cap_below = cap->cap_above = 0;
	cap->cap_maxusage = 0;
	cap->cap_usage = 0;
	cap->cap_value = cap->cap_chk_value = value;
	waitq_unblock(&cap->cap_waitq);
	if (CPUCAPS_OFF()) {
		cpucaps_enabled = B_TRUE;
		cpucaps_clock_callout = caps_update;
	}
}
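
cap_enable() is the mirror image of cap_disable() in Example #4: it links the cap onto the active list and turns CPU caps on globally if they were off.  A sketch of a caller honoring the caps_lock and "not already enabled" preconditions (the caller name is invented for illustration):

/* Illustrative caller; example_cap_install() is not from the source. */
static void
example_cap_install(cpucap_t *cap, hrtime_t value)
{
	mutex_enter(&caps_lock);
	if (!list_link_active(&cap->cap_link))
		cap_enable(&capped_projects, cap, value);
	mutex_exit(&caps_lock);
}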
Example #15
/*
 * State routine for the server when a delegation is returned.
 */
void
rfs4_return_deleg(rfs4_deleg_state_t *dsp, bool_t revoked)
{
	rfs4_file_t *fp = dsp->rds_finfo;
	open_delegation_type4 dtypewas;

	rfs4_dbe_lock(fp->rf_dbe);

	/* nothing to do if no longer on list */
	if (!list_link_active(&dsp->rds_node)) {
		rfs4_dbe_unlock(fp->rf_dbe);
		return;
	}

	/* Remove state from recall list */
	list_remove(&fp->rf_delegstatelist, dsp);

	if (list_is_empty(&fp->rf_delegstatelist)) {
		dtypewas = fp->rf_dinfo.rd_dtype;
		fp->rf_dinfo.rd_dtype = OPEN_DELEGATE_NONE;
		rfs4_dbe_cv_broadcast(fp->rf_dbe);

		/* if file system was unshared, the vp will be NULL */
		if (fp->rf_vp != NULL) {
			/*
			 * Once a delegation is no longer held by any client,
			 * the monitor is uninstalled.  At this point, the
			 * client must send OPEN otw, so we don't need the
			 * reference on the vnode anymore.  The open
			 * downgrade removes the reference put on earlier.
			 */
			if (dtypewas == OPEN_DELEGATE_READ) {
				(void) fem_uninstall(fp->rf_vp, deleg_rdops,
				    (void *)fp);
				vn_open_downgrade(fp->rf_vp, FREAD);
			} else if (dtypewas == OPEN_DELEGATE_WRITE) {
				(void) fem_uninstall(fp->rf_vp, deleg_wrops,
				    (void *)fp);
				vn_open_downgrade(fp->rf_vp, FREAD|FWRITE);
			}
		}
	}

	switch (dsp->rds_dtype) {
	case OPEN_DELEGATE_READ:
		fp->rf_dinfo.rd_rdgrants--;
		break;
	case OPEN_DELEGATE_WRITE:
		fp->rf_dinfo.rd_wrgrants--;
		break;
	default:
		break;
	}

	/* used in the policy decision */
	fp->rf_dinfo.rd_time_returned = gethrestime_sec();

	/*
	 * reset the time_recalled field so future delegations are not
	 * accidentally revoked
	 */
	if ((fp->rf_dinfo.rd_rdgrants + fp->rf_dinfo.rd_wrgrants) == 0)
		fp->rf_dinfo.rd_time_recalled = 0;

	rfs4_dbe_unlock(fp->rf_dbe);

	rfs4_dbe_lock(dsp->rds_dbe);

	dsp->rds_dtype = OPEN_DELEGATE_NONE;

	if (revoked == TRUE)
		dsp->rds_time_revoked = gethrestime_sec();

	rfs4_dbe_invalidate(dsp->rds_dbe);

	rfs4_dbe_unlock(dsp->rds_dbe);

	if (revoked == TRUE) {
		rfs4_dbe_lock(dsp->rds_client->rc_dbe);
		dsp->rds_client->rc_deleg_revoked++;	/* observability */
		rfs4_dbe_unlock(dsp->rds_client->rc_dbe);
	}
}
Example #16
/*
 * State support for delegation.
 * Set the delegation type for this state.  This routine is called from
 * open via rfs4_grant_delegation(), and the entry locks on sp and
 * sp->rs_finfo are assumed to be held.
 */
static rfs4_deleg_state_t *
rfs4_deleg_state(rfs4_state_t *sp, open_delegation_type4 dtype, int *recall)
{
	rfs4_file_t *fp = sp->rs_finfo;
	bool_t create = TRUE;
	rfs4_deleg_state_t *dsp;
	vnode_t *vp;
	int open_prev = *recall;
	int ret;
	int fflags = 0;

	ASSERT(rfs4_dbe_islocked(sp->rs_dbe));
	ASSERT(rfs4_dbe_islocked(fp->rf_dbe));

	/* Shouldn't happen */
	if (fp->rf_dinfo.rd_recall_count != 0 ||
	    (fp->rf_dinfo.rd_dtype == OPEN_DELEGATE_READ &&
	    dtype != OPEN_DELEGATE_READ)) {
		return (NULL);
	}

	/* Unlock to avoid deadlock */
	rfs4_dbe_unlock(fp->rf_dbe);
	rfs4_dbe_unlock(sp->rs_dbe);

	dsp = rfs4_finddeleg(sp, &create);

	rfs4_dbe_lock(sp->rs_dbe);
	rfs4_dbe_lock(fp->rf_dbe);

	if (dsp == NULL)
		return (NULL);

	/*
	 * It is possible that, since we dropped the lock in order to call
	 * finddeleg, the rfs4_file_t was marked such that we should not
	 * grant a delegation; if so, bail out.
	 */
	if (fp->rf_dinfo.rd_hold_grant > 0) {
		rfs4_deleg_state_rele(dsp);
		return (NULL);
	}

	if (create == FALSE) {
		if (sp->rs_owner->ro_client == dsp->rds_client &&
		    dsp->rds_dtype == dtype) {
			return (dsp);
		} else {
			rfs4_deleg_state_rele(dsp);
			return (NULL);
		}
	}

	/*
	 * Check that this file has not been delegated to another
	 * client
	 */
	if (fp->rf_dinfo.rd_recall_count != 0 ||
	    fp->rf_dinfo.rd_dtype == OPEN_DELEGATE_WRITE ||
	    (fp->rf_dinfo.rd_dtype == OPEN_DELEGATE_READ &&
	    dtype != OPEN_DELEGATE_READ)) {
		rfs4_deleg_state_rele(dsp);
		return (NULL);
	}

	vp = fp->rf_vp;
	/* vnevent_support returns 0 if file system supports vnevents */
	if (vnevent_support(vp, NULL)) {
		rfs4_deleg_state_rele(dsp);
		return (NULL);
	}

	/* Calculate the fflags for this OPEN. */
	if (sp->rs_share_access & OPEN4_SHARE_ACCESS_READ)
		fflags |= FREAD;
	if (sp->rs_share_access & OPEN4_SHARE_ACCESS_WRITE)
		fflags |= FWRITE;

	*recall = 0;
	/*
	 * Before granting a delegation we need to know if anyone else has
	 * opened the file in a conflicting mode.  However, first we need to
	 * know how we opened the file to check the counts properly.
	 */
	if (dtype == OPEN_DELEGATE_READ) {
		if (((fflags & FWRITE) && vn_has_other_opens(vp, V_WRITE)) ||
		    (((fflags & FWRITE) == 0) && vn_is_opened(vp, V_WRITE)) ||
		    vn_is_mapped(vp, V_WRITE)) {
			if (open_prev) {
				*recall = 1;
			} else {
				rfs4_deleg_state_rele(dsp);
				return (NULL);
			}
		}
		ret = fem_install(vp, deleg_rdops, (void *)fp, OPUNIQ,
		    rfs4_mon_hold, rfs4_mon_rele);
		if (((fflags & FWRITE) && vn_has_other_opens(vp, V_WRITE)) ||
		    (((fflags & FWRITE) == 0) && vn_is_opened(vp, V_WRITE)) ||
		    vn_is_mapped(vp, V_WRITE)) {
			if (open_prev) {
				*recall = 1;
			} else {
				(void) fem_uninstall(vp, deleg_rdops,
				    (void *)fp);
				rfs4_deleg_state_rele(dsp);
				return (NULL);
			}
		}
		/*
		 * Because a client can hold onto a delegation after the
		 * file has been closed, we need to keep track of the
		 * access to this file.  Otherwise the CIFS server would
		 * not know about the client accessing the file and could
		 * inappropriately grant an OPLOCK.
		 * fem_install() returns EBUSY when asked to install a
		 * OPUNIQ monitor more than once.  Therefore, check the
		 * return code because we only want this done once.
		 */
		if (ret == 0)
			vn_open_upgrade(vp, FREAD);
	} else { /* WRITE */
		if (((fflags & FWRITE) && vn_has_other_opens(vp, V_WRITE)) ||
		    (((fflags & FWRITE) == 0) && vn_is_opened(vp, V_WRITE)) ||
		    ((fflags & FREAD) && vn_has_other_opens(vp, V_READ)) ||
		    (((fflags & FREAD) == 0) && vn_is_opened(vp, V_READ)) ||
		    vn_is_mapped(vp, V_RDORWR)) {
			if (open_prev) {
				*recall = 1;
			} else {
				rfs4_deleg_state_rele(dsp);
				return (NULL);
			}
		}
		ret = fem_install(vp, deleg_wrops, (void *)fp, OPUNIQ,
		    rfs4_mon_hold, rfs4_mon_rele);
		if (((fflags & FWRITE) && vn_has_other_opens(vp, V_WRITE)) ||
		    (((fflags & FWRITE) == 0) && vn_is_opened(vp, V_WRITE)) ||
		    ((fflags & FREAD) && vn_has_other_opens(vp, V_READ)) ||
		    (((fflags & FREAD) == 0) && vn_is_opened(vp, V_READ)) ||
		    vn_is_mapped(vp, V_RDORWR)) {
			if (open_prev) {
				*recall = 1;
			} else {
				(void) fem_uninstall(vp, deleg_wrops,
				    (void *)fp);
				rfs4_deleg_state_rele(dsp);
				return (NULL);
			}
		}
		/*
		 * Because a client can hold onto a delegation after the
		 * file has been closed, we need to keep track of the
		 * access to this file.  Otherwise the CIFS server would
		 * not know about the client accessing the file and could
		 * inappropriately grant an OPLOCK.
		 * fem_install() returns EBUSY when asked to install a
		 * OPUNIQ monitor more than once.  Therefore, check the
		 * return code because we only want this done once.
		 */
		if (ret == 0)
			vn_open_upgrade(vp, FREAD|FWRITE);
	}
	/* Place on delegation list for file */
	ASSERT(!list_link_active(&dsp->rds_node));
	list_insert_tail(&fp->rf_delegstatelist, dsp);

	dsp->rds_dtype = fp->rf_dinfo.rd_dtype = dtype;

	/* Update delegation stats for this file */
	fp->rf_dinfo.rd_time_lastgrant = gethrestime_sec();

	/* reset since this is a new delegation */
	fp->rf_dinfo.rd_conflicted_client = 0;
	fp->rf_dinfo.rd_ever_recalled = FALSE;

	if (dtype == OPEN_DELEGATE_READ)
		fp->rf_dinfo.rd_rdgrants++;
	else
		fp->rf_dinfo.rd_wrgrants++;

	return (dsp);
}