Code example #1
File: lx_prsubr.c Project: maosi66/illumos-joyent
/* ARGSUSED */
static void
lxpr_node_destructor(void *buf, void *un)
{
	lxpr_node_t	*lxpnp = buf;

	vn_free(LXPTOV(lxpnp));
}
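
A destructor like this is meant to be registered with a kmem object cache, so that vn_free() runs when a cached lxpr_node buffer is finally destroyed. A minimal sketch of that registration (the constructor name and init function here are assumptions for illustration, not taken from lx_prsubr.c):

static kmem_cache_t *lxpr_node_cache;

void
lxpr_initnodecache(void)
{
	/* lxpr_node_constructor would do the matching vn_alloc() */
	lxpr_node_cache = kmem_cache_create("lxpr_node_cache",
	    sizeof (lxpr_node_t), 0, lxpr_node_constructor,
	    lxpr_node_destructor, NULL, NULL, NULL, 0);
}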
Code example #2
File: vfsops.c Project: AlissonGiron/open-vm-tools
int
VMBlockVnodePut(struct vnode *vp)
{
   VMBlockVnodeInfo *vip;
   struct vnode *realVnode;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMBlockVnodePut: entry (%p)\n", vp);

   mutex_enter(&vp->v_lock);
   if (vp->v_count > 1) {
      vp->v_count--;
      mutex_exit(&vp->v_lock);
      return 0;
   }
   mutex_exit(&vp->v_lock);

   vip = (VMBlockVnodeInfo *)vp->v_data;
   realVnode = vip->realVnode;

   kmem_free(vip, sizeof *vip);
   vn_free(vp);
   /*
    * VMBlockVnodeGet() doesn't VN_HOLD() the real vnode, but all callers of it
    * will have the vnode held, so we need to VN_RELE() here.
    */
   VN_RELE(realVnode);

   return 0;
}
Code example #3
File: lofs_subr.c Project: mikess/illumos-gate
/*
 * Return a looped back vnode for the given vnode.
 * If no lnode exists for this vnode create one and put it
 * in a table hashed by vnode.  If the lnode for
 * this vnode is already in the table return it (ref count is
 * incremented by lfind).  The lnode will be flushed from the
 * table when lo_inactive calls freelonode.  The creation of
 * a new lnode can be forced via the LOF_FORCE flag even if
 * the vnode exists in the table.  This is used in the creation
 * of a terminating lnode when looping is detected.  A unique
 * lnode is required for the correct evaluation of the current
 * working directory.
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li, int flag)
{
    lnode_t *lp, *tlp;
    struct vfs *vfsp;
    vnode_t *nvp;

    lp = NULL;
    TABLE_LOCK_ENTER(vp, li);
    if (flag != LOF_FORCE)
        lp = lfind(vp, li);
    if ((flag == LOF_FORCE) || (lp == NULL)) {
        /*
         * Optimistically assume that we won't need to sleep.
         */
        lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
        nvp = vn_alloc(KM_NOSLEEP);
        if (lp == NULL || nvp == NULL) {
            TABLE_LOCK_EXIT(vp, li);
            /* The lnode allocation may have succeeded, save it */
            tlp = lp;
            if (tlp == NULL) {
                tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
            }
            if (nvp == NULL) {
                nvp = vn_alloc(KM_SLEEP);
            }
            lp = NULL;
            TABLE_LOCK_ENTER(vp, li);
            if (flag != LOF_FORCE)
                lp = lfind(vp, li);
            if (lp != NULL) {
                kmem_cache_free(lnode_cache, tlp);
                vn_free(nvp);
                VN_RELE(vp);
                goto found_lnode;
            }
            lp = tlp;
        }
        atomic_inc_32(&li->li_refct);
        vfsp = makelfsnode(vp->v_vfsp, li);
        lp->lo_vnode = nvp;
        VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
        nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
        vn_setops(nvp, lo_vnodeops);
        nvp->v_data = (caddr_t)lp;
        lp->lo_vp = vp;
        lp->lo_looping = 0;
        lsave(lp, li);
        vn_exists(vp);
    } else {
        VN_RELE(vp);
    }

found_lnode:
    TABLE_LOCK_EXIT(vp, li);
    return (ltov(lp));
}
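
For context, makelonode() is typically reached from the lofs lookup path. A hedged sketch of the calling convention (modeled loosely on lo_lookup(), not quoted from it), showing that the wrapped vnode must already be held:

/* 'realvp' was returned held by the underlying VOP_LOOKUP() */
vnode_t *lovp = makelonode(realvp, li, 0);
/*
 * On success the hold on 'realvp' now belongs to the lnode; if an
 * existing lnode was found, makelonode() already did the VN_RELE().
 */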
Code example #4
/*
 * Unmount a file descriptor from a node in the file system.
 * If the user is not the owner of the file and is not privileged,
 * the request is denied.
 * Otherwise, remove the namenode from the hash list.
 * If the mounted file descriptor was that of a stream and this
 * was the last mount of the stream, turn off the STRMOUNT flag.
 * If the rootvp is referenced other than through the mount,
 * nm_inactive will clean up.
 */
static int
nm_unmount(vfs_t *vfsp, int flag, cred_t *crp)
{
	struct namenode *nodep = (struct namenode *)vfsp->vfs_data;
	vnode_t *vp, *thisvp;
	struct file *fp = NULL;

	ASSERT((nodep->nm_flag & NMNMNT) == 0);

	/*
	 * Forced unmount is not supported by this file system,
	 * so ENOTSUP is returned.
	 */
	if (flag & MS_FORCE) {
		return (ENOTSUP);
	}

	vp = nodep->nm_filevp;
	mutex_enter(&nodep->nm_lock);
	if (secpolicy_vnode_owner(crp, nodep->nm_vattr.va_uid) != 0) {
		mutex_exit(&nodep->nm_lock);
		return (EPERM);
	}

	mutex_exit(&nodep->nm_lock);

	mutex_enter(&ntable_lock);
	nameremove(nodep);
	thisvp = NMTOV(nodep);
	mutex_enter(&thisvp->v_lock);
	if (thisvp->v_count-- == 1) {
		fp = nodep->nm_filep;
		mutex_exit(&thisvp->v_lock);
		vn_invalid(thisvp);
		vn_free(thisvp);
		VFS_RELE(vfsp);
		namenodeno_free(nodep->nm_vattr.va_nodeid);
		kmem_free(nodep, sizeof (struct namenode));
	} else {
		thisvp->v_flag &= ~VROOT;
		mutex_exit(&thisvp->v_lock);
	}
	if (namefind(vp, NULLVP) == NULL && vp->v_stream) {
		struct stdata *stp = vp->v_stream;
		mutex_enter(&stp->sd_lock);
		stp->sd_flag &= ~STRMOUNT;
		mutex_exit(&stp->sd_lock);
	}
	mutex_exit(&ntable_lock);
	if (fp != NULL)
		(void) closef(fp);
	return (0);
}
Code example #5
/* ARGSUSED */
static void
fdinactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	mutex_enter(&vp->v_lock);
	ASSERT(vp->v_count >= 1);
	if (--vp->v_count != 0) {
		mutex_exit(&vp->v_lock);
		return;
	}
	mutex_exit(&vp->v_lock);
	vn_invalid(vp);
	vn_free(vp);
}
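
An inactive routine like fdinactive() is wired into the filesystem through an illumos vnodeops template. A hedged sketch of that registration (other entries elided; the variable names follow common fdfs conventions but are assumptions here):

static vnodeops_t *fd_vnodeops;

static const fs_operation_def_t fd_vnodeops_template[] = {
	{ VOPNAME_INACTIVE,	{ .vop_inactive = fdinactive } },
	{ NULL,			NULL }
};

/* during filesystem initialization: */
int err = vn_make_ops("fd", fd_vnodeops_template, &fd_vnodeops);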
Code example #6
File: vncache.c Project: bahamas10/openzfs
vnode_t *
vncache_enter(struct stat *st, vnode_t *dvp, char *name, int fd)
{
	vnode_t *old_vp;
	vnode_t *new_vp;
	vfs_t *vfs;
	char *vpath;
	avl_index_t	where;
	int len;

	/*
	 * Fill in v_path
	 * Note: fsop_root() calls with dvp=NULL
	 */
	len = strlen(name) + 1;
	if (dvp == NULL) {
		vpath = kmem_alloc(len, KM_SLEEP);
		(void) strlcpy(vpath, name, len);
		vfs = rootvfs;
	} else {
		/* add to length for parent path + "/" */
		len += (strlen(dvp->v_path) + 1);
		vpath = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(vpath, len, "%s/%s", dvp->v_path, name);
		vfs = dvp->v_vfsp;
	}

	new_vp = vn_alloc(KM_SLEEP);
	new_vp->v_path = vpath;
	new_vp->v_fd = fd;
	new_vp->v_st_dev = st->st_dev;
	new_vp->v_st_ino = st->st_ino;
	new_vp->v_vfsp = vfs;
	new_vp->v_type = IFTOVT(st->st_mode);

	mutex_enter(&vncache_lock);
	old_vp = avl_find(&vncache_avl, new_vp, &where);
	if (old_vp != NULL)
		vn_hold(old_vp);
	else
		avl_insert(&vncache_avl, new_vp, where);
	mutex_exit(&vncache_lock);

	/* If we lost the race, free new_vp */
	if (old_vp != NULL) {
		vn_free(new_vp);
		return (old_vp);
	}

	return (new_vp);
}
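
A hedged usage sketch for vncache_enter() (this cache lives in user space, so the openat()/fstat() calls below stand in for however the caller actually obtained fd and st; error handling elided):

struct stat st;
vnode_t *vp;
int fd;

fd = openat(dvp->v_fd, name, O_RDONLY);
if (fd >= 0 && fstat(fd, &st) == 0)
	vp = vncache_enter(&st, dvp, name, fd);	/* returns a held vnode */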
Code example #7
void
sv_inactive(vnode_t *vp)
{
	svnode_t *svp;
	rnode4_t *rp;
	vnode_t *mvp;

	sv_stats.sv_inactive++;

	svp = VTOSV(vp);
	rp = VTOR4(vp);
	mvp = rp->r_vnode;

	ASSERT(mvp != vp);

	/*
	 * Remove the shadow vnode from the list.  The serialization
	 * is provided by the svnode list lock.  This could be done
	 * with the r_statelock, but that would require more locking
	 * in the activation path.
	 */

	mutex_enter(&rp->r_svlock);
	mutex_enter(&vp->v_lock);
	/* check if someone slipped in while locks were dropped */
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		mutex_exit(&rp->r_svlock);
		return;
	}
	remque(svp);
	mutex_exit(&vp->v_lock);
	mutex_exit(&rp->r_svlock);

	sv_uninit(svp);
	svp->sv_forw = svp->sv_back = NULL;
	kmem_cache_free(svnode_cache, svp);
	vn_invalid(vp);
	vn_free(vp);

	/* release the reference held by this shadow on the master */

	VN_RELE(mvp);
}
Code example #8
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *cdarg)
{
	znode_t *zp = buf;

	ASSERT(zp->z_dirlocks == 0);
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_map_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_dbuf == NULL);
	ASSERT(ZTOV(zp)->v_count == 0);
	vn_free(ZTOV(zp));
}
Code example #9
File: zfs_znode.c Project: ElCoyote27/zfs-fuse
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	znode_t *zp = buf;

	// ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	ASSERT(ZTOV(zp)->v_data == zp);
	vn_free(ZTOV(zp));
	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_name_lock);
	mutex_destroy(&zp->z_acl_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_dirlocks == NULL);
	ASSERT(zp->z_acl_cached == NULL);
}
Code example #10
File: vncache.c Project: bahamas10/openzfs
/*
 * Last reference to this vnode is (possibly) going away.
 * This is normally called by vn_rele() when v_count==1.
 * Note that due to lock order concerns, we have to take
 * the vncache_lock (for the avl tree) and then recheck
 * v_count, which might have gained a ref during the time
 * we did not hold vp->v_lock.
 */
void
vncache_inactive(vnode_t *vp)
{
	uint_t count;

	mutex_enter(&vncache_lock);
	mutex_enter(&vp->v_lock);

	if ((count = vp->v_count) <= 1) {
		/* This is (still) the last ref. */
		avl_remove(&vncache_avl, vp);
	}

	mutex_exit(&vp->v_lock);
	mutex_exit(&vncache_lock);

	if (count <= 1) {
		vn_free(vp);
	}
}
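
The comment above refers to the vn_rele() caller; a plausible sketch of that side (illustrative, not the actual vncache.c companion code) makes the recheck visible:

void
vn_rele(vnode_t *vp)
{
	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		return;
	}
	/*
	 * Possibly the last reference: drop v_lock, take vncache_lock,
	 * and let vncache_inactive() recheck v_count in the right order.
	 */
	mutex_exit(&vp->v_lock);
	vncache_inactive(vp);
}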
Code example #11
File: fifosubr.c Project: apprisi/illumos-gate
static void
fnode_destructor(void *buf, void *cdrarg)
{
	fifodata_t *fdp = buf;
	fifolock_t *flp = &fdp->fifo_lock;
	fifonode_t *fnp = &fdp->fifo_fnode[0];
	size_t size = (uintptr_t)cdrarg;

	mutex_destroy(&flp->flk_lock);
	cv_destroy(&flp->flk_wait_cv);
	ASSERT(flp->flk_ocsync == 0);

	while ((char *)fnp < (char *)buf + size) {

		vnode_t *vp = FTOV(fnp);

		if (vp == NULL) {
			return; /* constructor failed here */
		}

		ASSERT(fnp->fn_mp == NULL);
		ASSERT(fnp->fn_count == 0);
		ASSERT(fnp->fn_lock == flp);
		ASSERT(fnp->fn_open == 0);
		ASSERT(fnp->fn_insync == 0);
		ASSERT(fnp->fn_rsynccnt == 0 && fnp->fn_wsynccnt == 0);
		ASSERT(fnp->fn_wwaitcnt == 0);
		ASSERT(fnp->fn_pcredp == NULL);
		ASSERT(vn_matchops(vp, fifo_vnodeops));
		ASSERT(vp->v_stream == NULL);
		ASSERT(vp->v_type == VFIFO);
		ASSERT(vp->v_data == (caddr_t)fnp);
		ASSERT((vp->v_flag & (VNOMAP|VNOSWAP)) == (VNOMAP|VNOSWAP));

		cv_destroy(&fnp->fn_wait_cv);
		vn_invalid(vp);
		vn_free(vp);

		fnp++;
	}
}
Code example #12
/*
 * This routine destroys all the resources of an rnode
 * and finally the rnode itself.
 */
static void
destroy_rnode4(rnode4_t *rp)
{
	vnode_t *vp;
	vfs_t *vfsp;

	ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);

	vp = RTOV4(rp);
	vfsp = vp->v_vfsp;

	uninit_rnode4(rp);
	atomic_add_long((ulong_t *)&rnode4_new, -1);
#ifdef DEBUG
	clstat4_debug.nrnode.value.ui64--;
#endif
	kmem_cache_free(rnode4_cache, rp);
	vn_invalid(vp);
	vn_free(vp);
	VFS_RELE(vfsp);
}
Code example #13
File: namevno.c Project: apprisi/illumos-gate
/* ARGSUSED */
static void
nm_inactive(vnode_t *vp, cred_t *crp, caller_context_t *ct)
{
	struct namenode *nodep = VTONM(vp);
	vfs_t *vfsp = vp->v_vfsp;

	mutex_enter(&vp->v_lock);
	ASSERT(vp->v_count >= 1);
	if (--vp->v_count != 0) {
		mutex_exit(&vp->v_lock);
		return;
	}
	mutex_exit(&vp->v_lock);
	if (!(nodep->nm_flag & NMNMNT)) {
		ASSERT(nodep->nm_filep->f_vnode == nodep->nm_filevp);
		(void) closef(nodep->nm_filep);
	}
	vn_invalid(vp);
	vn_free(vp);
	if (vfsp != &namevfs)
		VFS_RELE(vfsp);
	namenodeno_free(nodep->nm_vattr.va_nodeid);
	kmem_free(nodep, sizeof (struct namenode));
}
Code example #14
void
pc_rele(struct pcnode *pcp)
{
	struct pcfs *fsp;
	struct vnode *vp;
	int err;

	vp = PCTOV(pcp);
	PC_DPRINTF1(8, "pc_rele vp=0x%p\n", (void *)vp);

	fsp = VFSTOPCFS(vp->v_vfsp);
	ASSERT(fsp->pcfs_flags & PCFS_LOCKED);

	rw_enter(&pcnodes_lock, RW_WRITER);
	pcp->pc_flags |= PC_RELEHOLD;

retry:
	if (vp->v_type != VDIR && (pcp->pc_flags & PC_INVAL) == 0) {
		/*
		 * If the file was removed while active it may be safely
		 * truncated now.
		 */

		if (pcp->pc_entry.pcd_filename[0] == PCD_ERASED) {
			(void) pc_truncate(pcp, 0);
		} else if (pcp->pc_flags & PC_CHG) {
			(void) pc_nodeupdate(pcp);
		}
		err = syncpcp(pcp, B_INVAL);
		if (err) {
			(void) syncpcp(pcp, B_INVAL | B_FORCE);
		}
	}
	if (vn_has_cached_data(vp)) {
		/*
		 * pvn_vplist_dirty will abort all old pages
		 */
		(void) pvn_vplist_dirty(vp, (u_offset_t)0,
		    pcfs_putapage, B_INVAL, (struct cred *)NULL);
	}

	(void) pc_syncfat(fsp);
	mutex_enter(&vp->v_lock);
	if (vn_has_cached_data(vp)) {
		mutex_exit(&vp->v_lock);
		goto retry;
	}
	ASSERT(!vn_has_cached_data(vp));

	vp->v_count--;  /* release our hold from vn_rele */
	if (vp->v_count > 0) { /* Is this check still needed? */
		PC_DPRINTF1(3, "pc_rele: pcp=0x%p HELD AGAIN!\n", (void *)pcp);
		mutex_exit(&vp->v_lock);
		pcp->pc_flags &= ~PC_RELEHOLD;
		rw_exit(&pcnodes_lock);
		return;
	}

	remque(pcp);
	rw_exit(&pcnodes_lock);
	/*
	 * XXX - old code had a check for !(pcp->pc_flags & PC_INVAL)
	 * here. Seems superfluous/incorrect, but then earlier on PC_INVAL
	 * was never set anywhere in PCFS. Now it is, and we _have_ to drop
	 * the file reference here. Else, we'd screw up umount/modunload.
	 */
	if (vp->v_type == VREG) {
		fsp->pcfs_frefs--;
	}
	fsp->pcfs_nrefs--;
	VFS_RELE(vp->v_vfsp);

	if (fsp->pcfs_nrefs < 0) {
		panic("pc_rele: nrefs count");
	}
	if (fsp->pcfs_frefs < 0) {
		panic("pc_rele: frefs count");
	}

	mutex_exit(&vp->v_lock);
	vn_invalid(vp);
	vn_free(vp);
	kmem_free(pcp, sizeof (struct pcnode));
}
Code example #15
File: xattr.c Project: pcd1193182/openzfs
/*
 * Get the XATTR dir for some file or directory.
 * See vnode.c: fop_lookup()
 *
 * Note this only gets the GFS XATTR directory.  We'll get the
 * real XATTR directory later, in xattr_dir_realdir.
 */
int
xattr_dir_lookup(vnode_t *dvp, vnode_t **vpp, int flags, cred_t *cr)
{
	int error = 0;

	*vpp = NULL;

	if (dvp->v_type != VDIR && dvp->v_type != VREG)
		return (EINVAL);

	mutex_enter(&dvp->v_lock);

	/*
	 * If we're already in sysattr space, don't allow creation
	 * of another level of sysattrs.
	 */
	if (dvp->v_flag & V_SYSATTR) {
		mutex_exit(&dvp->v_lock);
		return (EINVAL);
	}

	if (dvp->v_xattrdir != NULL) {
		*vpp = dvp->v_xattrdir;
		VN_HOLD(*vpp);
	} else {
		ulong_t val;
		int xattrs_allowed = dvp->v_vfsp->vfs_flag & VFS_XATTR;
		int sysattrs_allowed = 1;

		/*
		 * We have to drop the lock on dvp.  gfs_dir_create will
		 * grab it for a VN_HOLD.
		 */
		mutex_exit(&dvp->v_lock);

		/*
		 * If dvp allows xattr creation, but not sysattr
		 * creation, return the real xattr dir vp. We can't
		 * use the vfs feature mask here because _PC_SATTR_ENABLED
		 * has vnode-level granularity (e.g. .zfs).
		 */
		error = VOP_PATHCONF(dvp, _PC_SATTR_ENABLED, &val, cr, NULL);
		if (error != 0 || val == 0)
			sysattrs_allowed = 0;

		if (!xattrs_allowed && !sysattrs_allowed)
			return (EINVAL);

		if (!sysattrs_allowed) {
			struct pathname pn;
			char *nm = "";

			error = pn_get(nm, UIO_SYSSPACE, &pn);
			if (error)
				return (error);
			error = VOP_LOOKUP(dvp, nm, vpp, &pn,
			    flags|LOOKUP_HAVE_SYSATTR_DIR, rootvp, cr, NULL,
			    NULL, NULL);
			pn_free(&pn);
			return (error);
		}

		/*
		 * Note that we act as if we were given CREATE_XATTR_DIR,
		 * but only for creation of the GFS directory.
		 */
		*vpp = gfs_dir_create(
		    sizeof (xattr_dir_t), dvp, xattr_dir_ops, xattr_dirents,
		    xattrdir_do_ino, MAXNAMELEN, NULL, xattr_lookup_cb);
		mutex_enter(&dvp->v_lock);
		if (dvp->v_xattrdir != NULL) {
			/*
			 * We lost the race to create the xattr dir.
			 * Destroy this one, use the winner.  We can't
			 * just call VN_RELE(*vpp), because the vnode
			 * is only partially initialized.
			 */
			gfs_dir_t *dp = (*vpp)->v_data;

			ASSERT((*vpp)->v_count == 1);
			vn_free(*vpp);

			mutex_destroy(&dp->gfsd_lock);
			kmem_free(dp->gfsd_static,
			    dp->gfsd_nstatic * sizeof (gfs_dirent_t));
			kmem_free(dp, dp->gfsd_file.gfs_size);

			/*
			 * There is an implied VN_HOLD(dvp) here.  We should
			 * be doing a VN_RELE(dvp) to clean up the reference
			 * from *vpp, and then a VN_HOLD(dvp) for the new
			 * reference.  Instead, we just leave the count alone.
			 */

			*vpp = dvp->v_xattrdir;
			VN_HOLD(*vpp);
		} else {
			(*vpp)->v_flag |= (V_XATTRDIR|V_SYSATTR);
			dvp->v_xattrdir = *vpp;
		}
	}
	mutex_exit(&dvp->v_lock);

	return (error);
}
Code example #16
File: lofs_subr.c Project: mikess/illumos-gate
/*
 * Remove a lnode from the table
 */
void
freelonode(lnode_t *lp)
{
    lnode_t *lt;
    lnode_t *ltprev = NULL;
    struct lfsnode *lfs, *nextlfs;
    struct vfs *vfsp;
    struct vnode *vp = ltov(lp);
    struct vnode *realvp = realvp(vp);
    struct loinfo *li = vtoli(vp->v_vfsp);

#ifdef LODEBUG
    lo_dprint(4, "freelonode lp %p hash %d\n",
              lp, ltablehash(lp->lo_vp, li));
#endif
    TABLE_LOCK_ENTER(lp->lo_vp, li);

    mutex_enter(&vp->v_lock);
    if (vp->v_count > 1) {
        vp->v_count--;	/* release our hold from vn_rele */
        mutex_exit(&vp->v_lock);
        TABLE_LOCK_EXIT(lp->lo_vp, li);
        return;
    }
    mutex_exit(&vp->v_lock);

    for (lt = TABLE_BUCKET(lp->lo_vp, li); lt != NULL;
            ltprev = lt, lt = lt->lo_next) {
        if (lt == lp) {
#ifdef LODEBUG
            lo_dprint(4, "freeing %p, vfsp %p\n",
                      vp, vp->v_vfsp);
#endif
            atomic_dec_32(&li->li_refct);
            vfsp = vp->v_vfsp;
            vn_invalid(vp);
            if (vfsp != li->li_mountvfs) {
                mutex_enter(&li->li_lfslock);
                /*
                 * Check for unused lfs
                 */
                lfs = li->li_lfs;
                while (lfs != NULL) {
                    nextlfs = lfs->lfs_next;
                    if (vfsp == &lfs->lfs_vfs) {
                        lfs_rele(lfs, li);
                        break;
                    }
                    if (lfs->lfs_vfs.vfs_count == 1) {
                        /*
                         * Lfs is idle
                         */
                        freelfsnode(lfs, li);
                    }
                    lfs = nextlfs;
                }
                mutex_exit(&li->li_lfslock);
            }
            if (ltprev == NULL) {
                TABLE_BUCKET(lt->lo_vp, li) = lt->lo_next;
            } else {
                ltprev->lo_next = lt->lo_next;
            }
            TABLE_COUNT(lt->lo_vp, li)--;
            TABLE_LOCK_EXIT(lt->lo_vp, li);
            kmem_cache_free(lnode_cache, lt);
            vn_free(vp);
            VN_RELE(realvp);
            return;
        }
    }
    panic("freelonode");
    /*NOTREACHED*/
}
Code example #17
File: gfs.c Project: pcd1193182/openzfs
/*
 * gfs_file_inactive()
 *
 * Called from the VOP_INACTIVE() routine.  If necessary, this routine will
 * remove the given vnode from the parent directory and clean up any references
 * in the VFS layer.
 *
 * If the vnode was not removed (due to a race with vget), then NULL is
 * returned.  Otherwise, a pointer to the private data is returned.
 */
void *
gfs_file_inactive(vnode_t *vp)
{
	int i;
	gfs_dirent_t *ge = NULL;
	gfs_file_t *fp = vp->v_data;
	gfs_dir_t *dp = NULL;
	void *data;

	if (fp->gfs_parent == NULL || (vp->v_flag & V_XATTRDIR))
		goto found;

	dp = fp->gfs_parent->v_data;

	/*
	 * First, see if this vnode is cached in the parent.
	 */
	gfs_dir_lock(dp);

	/*
	 * Find it in the set of static entries.
	 */
	for (i = 0; i < dp->gfsd_nstatic; i++)  {
		ge = &dp->gfsd_static[i];

		if (ge->gfse_vnode == vp)
			goto found;
	}

	/*
	 * If 'ge' is NULL, then it is a dynamic entry.
	 */
	ge = NULL;

found:
	if (vp->v_flag & V_XATTRDIR) {
		mutex_enter(&fp->gfs_parent->v_lock);
	}
	mutex_enter(&vp->v_lock);
	if (vp->v_count == 1) {
		/*
		 * Really remove this vnode
		 */
		data = vp->v_data;
		if (ge != NULL) {
			/*
			 * If this was a statically cached entry, simply set the
			 * cached vnode to NULL.
			 */
			ge->gfse_vnode = NULL;
		}
		if (vp->v_flag & V_XATTRDIR) {
			fp->gfs_parent->v_xattrdir = NULL;
			mutex_exit(&fp->gfs_parent->v_lock);
		}
		mutex_exit(&vp->v_lock);

		/*
		 * Free vnode and release parent
		 */
		if (fp->gfs_parent) {
			if (dp) {
				gfs_dir_unlock(dp);
			}
			VN_RELE(fp->gfs_parent);
		} else {
			ASSERT(vp->v_vfsp != NULL);
			VFS_RELE(vp->v_vfsp);
		}
		vn_free(vp);
	} else {
		VN_RELE_LOCKED(vp);
		data = NULL;
		mutex_exit(&vp->v_lock);
		if (vp->v_flag & V_XATTRDIR) {
			mutex_exit(&fp->gfs_parent->v_lock);
		}
		if (dp)
			gfs_dir_unlock(dp);
	}

	return (data);
}
Code example #18
File: xmem_vnops.c Project: andreiw/polaris
/* ARGSUSED */
static void
xmem_inactive(struct vnode *vp, struct cred *cred)
{
	struct xmemnode *xp = (struct xmemnode *)VTOXN(vp);
	struct xmount *xm = (struct xmount *)VFSTOXM(vp->v_vfsp);

	rw_enter(&xp->xn_rwlock, RW_WRITER);
top:
	mutex_enter(&xp->xn_tlock);
	mutex_enter(&vp->v_lock);
	ASSERT(vp->v_count >= 1);

	/*
	 * If we don't have the last hold or the link count is non-zero,
	 * there's little to do -- just drop our hold.
	 */
	if (vp->v_count > 1 || xp->xn_nlink != 0) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		mutex_exit(&xp->xn_tlock);
		rw_exit(&xp->xn_rwlock);
		return;
	}

	/*
	 * We have the last hold *and* the link count is zero, so this
	 * xmemnode is dead from the filesystem's viewpoint.  However,
	 * if the xmemnode has any pages associated with it (i.e. if it's
	 * a normal file with non-zero size), the xmemnode can still be
	 * discovered by pageout or fsflush via the page vnode pointers.
	 * In this case we must drop all our locks, truncate the xmemnode,
	 * and try the whole dance again.
	 */
	if (xp->xn_size != 0) {
		if (xp->xn_type == VREG) {
			mutex_exit(&vp->v_lock);
			mutex_exit(&xp->xn_tlock);
			rw_enter(&xp->xn_contents, RW_WRITER);
			(void) xmemnode_trunc(xm, xp, 0);
			rw_exit(&xp->xn_contents);
			ASSERT(xp->xn_size == 0);
			ASSERT(xp->xn_nblocks == 0);
			goto top;
		}
		if (xp->xn_type == VLNK)
			xmem_memfree(xp->xn_symlink, xp->xn_size + 1);
	}

	mutex_exit(&vp->v_lock);
	mutex_exit(&xp->xn_tlock);
	mutex_enter(&xm->xm_contents);
	if (xp->xn_forw == NULL)
		xm->xm_rootnode->xn_back = xp->xn_back;
	else
		xp->xn_forw->xn_back = xp->xn_back;
	xp->xn_back->xn_forw = xp->xn_forw;
	mutex_exit(&xm->xm_contents);
	rw_exit(&xp->xn_rwlock);
	rw_destroy(&xp->xn_rwlock);
	mutex_destroy(&xp->xn_tlock);
	vn_free(xp->xn_vnode);
	xmem_memfree(xp, sizeof (struct xmemnode));
}
Code example #19
File: vfsops.c Project: AlissonGiron/open-vm-tools
int
VMBlockVnodeGet(struct vnode **vpp,        // OUT: Filled with address of new vnode
                struct vnode *realVp,      // IN:  Real vnode (assumed held)
                const char *name,          // IN:  Relative name of the file
                size_t nameLen,            // IN:  Size of name
                struct vnode *dvp,         // IN:  Parent directory's vnode
                struct vfs *vfsp,          // IN:  Filesystem structure
                Bool isRoot)               // IN:  If is root directory of fs
{
   VMBlockVnodeInfo *vip;
   struct vnode *vp;
   char *curr;
   int ret;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMBlockVnodeGet: entry\n");

   ASSERT(vpp);
   ASSERT(realVp);
   ASSERT(vfsp);
   ASSERT(name);
   ASSERT(dvp || isRoot);

   vp = vn_alloc(KM_SLEEP);
   if (!vp) {
      return ENOMEM;
   }

   vip = kmem_zalloc(sizeof *vip, KM_SLEEP);
   vp->v_data = (void *)vip;

   /*
    * Store the path that this file redirects to.  For the root vnode we just
    * store the provided path, but for all others we first copy in the parent
    * directory's path.
    */
   curr = vip->name;

   if (!isRoot) {
      VMBlockVnodeInfo *dvip = VPTOVIP(dvp);
      if (dvip->nameLen + 1 + nameLen + 1 >= sizeof vip->name) {
         ret = ENAMETOOLONG;
         goto error;
      }

      memcpy(vip->name, dvip->name, dvip->nameLen);
      vip->name[dvip->nameLen] = '/';
      curr = vip->name + dvip->nameLen + 1;
   }

   if (nameLen + 1 > (sizeof vip->name - (curr - vip->name))) {
      ret = ENAMETOOLONG;
      goto error;
   }

   memcpy(curr, name, nameLen);
   curr[nameLen] = '\0';
   vip->nameLen = nameLen + (curr - vip->name);

   /*
    * We require the caller to have held realVp, so we don't need to VN_HOLD()
    * it here even though we VN_RELE() this vnode in VMBlockVnodePut().
    * Despite seeming awkward, this is more natural since the function that our
    * caller obtained realVp from provided a held vnode.
    */
   vip->realVnode = realVp;

   /*
    * Now we'll initialize the vnode.  We need to set the file type, vnode
    * operations, flags, filesystem pointer, reference count, and device.
    */
   /* The root directory is our only directory; the rest are symlinks. */
   vp->v_type = isRoot ? VDIR : VLNK;

   vn_setops(vp, vmblockVnodeOps);

   vp->v_flag  = VNOMAP | VNOMOUNT | VNOSWAP | (isRoot ? VROOT : 0);
   vp->v_vfsp  = vfsp;
   vp->v_rdev  = NODEV;

   /* Fill in the provided address with the new vnode. */
   *vpp = vp;

   return 0;

error:
   kmem_free(vip, sizeof *vip);
   vn_free(vp);
   return ret;
}
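
A hedged sketch of the Get/Put pairing (caller names and error handling are illustrative only; realVp must already be held, as the comment inside VMBlockVnodeGet() notes):

struct vnode *vp;
int err;

err = VMBlockVnodeGet(&vp, realVp, name, strlen(name), dvp, vfsp, FALSE);
if (err == 0) {
   /* ... use vp ... */
   (void) VMBlockVnodePut(vp);   /* VN_RELE()s realVp on the last release */
}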
Code example #20
File: iumfs_node.c Project: kaizawa/iumfs
/************************************************************************
 * iumfs_free_node()
 *
 *   Free the given vnode and its iumnode.
 *
 *     1. Release the resources tied to the iumnode
 *     2. Free the iumnode structure
 *     3. Free the vnode structure
 *
 *   This is called only from iumfs_inactive(), or from
 *   iumfs_free_all_node() via iumfs_unmount(); in other words, only
 *   when v_count is known to be 1 (unreferenced).
 *
 * Arguments:
 *
 *     vp : pointer to the vnode structure to free
 *     cr : credentials of the user that issued the system call
 *
 * Return value:
 *     none
 *
 ************************************************************************/
void
iumfs_free_node(vnode_t *vp, struct cred *cr)
{
    iumnode_t *inp; // filesystem-dependent node data (iumnode structure)
    vnode_t *rootvp; // vnode of the filesystem's root directory
    iumfs_t *iumfsp; // filesystem-dependent private data structure
    vfs_t *vfsp; // filesystem structure
    int err;

    DEBUG_PRINT((CE_CONT, "iumfs_free_node is called\n"));

    iumfsp = VNODE2IUMFS(vp);
    vfsp = VNODE2VFS(vp);
    inp = VNODE2IUMNODE(vp);

    DEBUG_PRINT((CE_CONT, "iumfs_free_node: vnode=%p, vp->v_count=%d\n", vp, vp->v_count));

    /*
     * First remove the iumnode from the node linked list. If someone is still
     * using it (EBUSY), just return. Even if it was somehow not on the list
     * (which should be impossible), free the vnode anyway.
     */
    if((err = iumfs_remove_node_from_list(vfsp, vp)) != 0){
        if (err == ENOENT)
            cmn_err(CE_CONT, "iumfs_free_node: can't find vnode in the list. Free it anyway.\n");
        else
            return;
    }

    // for debugging
    rootvp = VNODE2ROOT(vp);
    if (rootvp != NULL && VN_CMP(rootvp, vp) != 0) {
        DEBUG_PRINT((CE_CONT, "iumfs_free_node: rootvnode is being freed\n"));
        mutex_enter(&(iumfsp->iumfs_lock));
        iumfsp->rootvnode = NULL;
        mutex_exit(&(iumfsp->iumfs_lock));
    }

    /*
     * If the iumnode holds any data (directory entries etc.),
     * free that as well.
     */
    if (inp->data != NULL) {
        kmem_free(inp->data, inp->dlen);
    }

    /*
     * Invalidate the pages associated with this vnode.
     */
    err = pvn_vplist_dirty(vp, 0, iumfs_putapage, B_INVAL, cr);
    DEBUG_PRINT((CE_CONT, "iumfs_free_node: pvn_vplist_dirty returned with (%d)\n", err));
    if (vn_has_cached_data(vp)) {
        cmn_err(CE_WARN, "iumfs_free_node: vnode still has cached pages\n");
    }

    // Free the iumnode
    mutex_destroy(&(inp)->i_dlock);
    rw_destroy(&(inp)->i_listlock);
    kmem_free(inp, sizeof (iumnode_t));

    // Free the vnode
#ifdef SOL10
    vn_free(vp);
#else
    mutex_destroy(&(vp)->v_lock);
    kmem_free(vp, sizeof (vnode_t));
#endif                
    DEBUG_PRINT((CE_CONT, "iumfs_free_node: return\n"));
    return;
}
Code example #21
File: iumfs_node.c Project: kaizawa/iumfs
/************************************************************************
 * iumfs_alloc_node()
 *
 *   Allocate a new vnode and iumnode.
 *
 * Arguments:
 *     vfsp   : vfs structure
 *     nvpp   : address of the vnode pointer passed in by the caller
 *     flag   : flags for the new vnode (VROOT, VISSWAP, etc.)
 *     type   : type of the new vnode (VDIR, VREG, etc.)
 *     nodeid : node number of the new vnode (0 means auto-assign)
 *
 * Return value
 *    on success : SUCCESS (=0)
 *    on error   : non-zero
 *
 ************************************************************************/
int
iumfs_alloc_node(vfs_t *vfsp, vnode_t **nvpp, uint_t flag, enum vtype type, ino_t nodeid)
{
    vnode_t *vp;
    iumnode_t *inp;
    iumfs_t *iumfsp; // filesystem-dependent private data structure

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node is called\n"));
    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: type=%d\n",type));
    
    iumfsp = VFS2IUMFS(vfsp);

    // Allocate the vnode structure
#ifdef SOL10
    // On Solaris 10 the vnode structure must not be allocated directly.
    vp = vn_alloc(KM_NOSLEEP);
#else
    // On Solaris 9 the filesystem allocates the vnode structure itself.
    vp = (vnode_t *) kmem_zalloc(sizeof (vnode_t), KM_NOSLEEP);
#endif

    // Allocate the filesystem-dependent node data (iumnode structure)
    inp = (iumnode_t *) kmem_zalloc(sizeof (iumnode_t), KM_NOSLEEP);

    /*
     * If either allocation failed, return ENOMEM.
     */
    if (vp == NULL || inp == NULL) {
        cmn_err(CE_WARN, "iumfs_alloc_node: kmem_zalloc failed\n");
        if (vp != NULL)
#ifdef SOL10
            vn_free(vp);
#else        
            kmem_free(vp, sizeof (vnode_t));
#endif            
        if (inp != NULL)
            kmem_free(inp, sizeof (iumnode_t));
        DEBUG_PRINT((CE_CONT, "iumfs_alloc_node return(ENOMEM)\n"));
        return (ENOMEM);
    }

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: allocated vnode = 0x%p\n", vp));

    /*
     * Initialize the allocated vnode.
     * The VN_INIT macro sets the initial v_count to 1, which keeps
     * iumfs_inactive() from being called at a point the filesystem
     * does not expect.
     */
    VN_INIT(vp, vfsp, type, 0);

    // Set the address of the filesystem-dependent vnode operations
#ifdef SOL10
    vn_setops(vp, iumfs_vnodeops);
#else        
    vp->v_op = &iumfs_vnodeops;
#endif

    // Set the flags in v_flag
    vp->v_flag |= flag;

    /*
     * Initialize the allocated iumnode (the IN_INIT macro is not used).
     */
    mutex_init(&(inp)->i_dlock, NULL, MUTEX_DEFAULT, NULL);
    inp->vattr.va_mask = AT_ALL;
    inp->vattr.va_uid = 0;
    inp->vattr.va_gid = 0;
    inp->vattr.va_blksize = BLOCKSIZE;
    inp->vattr.va_nlink = 1;
    inp->vattr.va_rdev = 0;
    rw_init(&(inp)->i_listlock, NULL, RW_DRIVER, NULL);
#ifdef SOL10
#else    
    inp->vattr.va_vcode = 1;
#endif
    /*
     * va_fsid in vattr is a dev_t (= ulong_t), whereas vfs_fsid in the vfs
     * is a structure containing an array of ints (int[2]), so store the
     * device number obtained in iumfs_mount() here.
     */
    inp->vattr.va_fsid = vfsp->vfs_dev;
    inp->vattr.va_type = type;
    inp->vattr.va_atime =      \
    inp->vattr.va_ctime =      \
    inp->vattr.va_mtime = iumfs_get_current_time();

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: va_fsid = 0x%x\n", inp->vattr.va_fsid));

    /*
     * Point the vnode at the iumnode structure and, conversely,
     * point the iumnode back at the vnode.
     */
    vp->v_data = (caddr_t) inp;
    inp->vnode = vp;

    /*
     * Set the node number (inode number).
     * If one was specified use it; otherwise simply assign numbers
     * incrementally.
     */
    if ((inp->vattr.va_nodeid = nodeid) == 0) {
        mutex_enter(&(iumfsp->iumfs_lock));
        inp->vattr.va_nodeid = ++(iumfsp->iumfs_last_nodeid);
        mutex_exit(&(iumfsp->iumfs_lock));
    }

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: new nodeid = %d \n", inp->vattr.va_nodeid));

    // Add the new iumnode to the node linked list
    iumfs_add_node_to_list(vfsp, vp);

    // Store the address of the allocated vnode in the caller's pointer
    *nvpp = vp;
    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: return(%d)\n", SUCCESS));
    return (SUCCESS);
}
Code example #22
/*
 * The disk has been changed!
 */
void
pc_diskchanged(struct pcfs *fsp)
{
	struct pcnode	*pcp, *npcp = NULL;
	struct pchead	*hp;
	struct vnode	*vp;
	extern vfs_t	EIO_vfs;
	struct vfs	*vfsp;

	/*
	 * Eliminate all pcnodes (dir & file) associated with this fs.
	 * If the node is internal, ie, no references outside of
	 * pcfs itself, then release the associated vnode structure.
	 * Invalidate the in core FAT.
	 * Invalidate cached data blocks and blocks waiting for I/O.
	 */
	PC_DPRINTF1(1, "pc_diskchanged fsp=0x%p\n", (void *)fsp);

	vfsp = PCFSTOVFS(fsp);

	for (hp = pcdhead; hp < &pcdhead[NPCHASH]; hp++) {
		for (pcp = hp->pch_forw;
		    pcp != (struct pcnode *)hp; pcp = npcp) {
			npcp = pcp->pc_forw;
			vp = PCTOV(pcp);
			if ((vp->v_vfsp == vfsp) &&
			    !(pcp->pc_flags & PC_RELEHOLD)) {
				mutex_enter(&(vp)->v_lock);
				if (vp->v_count > 0) {
					mutex_exit(&(vp)->v_lock);
					continue;
				}
				mutex_exit(&(vp)->v_lock);
				VN_HOLD(vp);
				remque(pcp);
				vp->v_data = NULL;
				vp->v_vfsp = &EIO_vfs;
				vp->v_type = VBAD;
				VN_RELE(vp);
				if (!(pcp->pc_flags & PC_EXTERNAL)) {
					(void) pvn_vplist_dirty(vp,
					    (u_offset_t)0, pcfs_putapage,
					    B_INVAL | B_TRUNC,
					    (struct cred *)NULL);
					vn_free(vp);
				}
				kmem_free(pcp, sizeof (struct pcnode));
				fsp->pcfs_nrefs--;
				VFS_RELE(vfsp);
			}
		}
	}
	for (hp = pcfhead; fsp->pcfs_frefs && hp < &pcfhead[NPCHASH]; hp++) {
		for (pcp = hp->pch_forw; fsp->pcfs_frefs &&
		    pcp != (struct pcnode *)hp; pcp = npcp) {
			npcp = pcp->pc_forw;
			vp = PCTOV(pcp);
			if ((vp->v_vfsp == vfsp) &&
			    !(pcp->pc_flags & PC_RELEHOLD)) {
				mutex_enter(&(vp)->v_lock);
				if (vp->v_count > 0) {
					mutex_exit(&(vp)->v_lock);
					continue;
				}
				mutex_exit(&(vp)->v_lock);
				VN_HOLD(vp);
				remque(pcp);
				vp->v_data = NULL;
				vp->v_vfsp = &EIO_vfs;
				vp->v_type = VBAD;
				VN_RELE(vp);
				if (!(pcp->pc_flags & PC_EXTERNAL)) {
					(void) pvn_vplist_dirty(vp,
					    (u_offset_t)0, pcfs_putapage,
					    B_INVAL | B_TRUNC,
					    (struct cred *)NULL);
					vn_free(vp);
				}
				kmem_free(pcp, sizeof (struct pcnode));
				fsp->pcfs_frefs--;
				fsp->pcfs_nrefs--;
				VFS_RELE(vfsp);
			}
		}
	}
#ifdef undef
	if (fsp->pcfs_frefs) {
		rw_exit(&pcnodes_lock);
		panic("pc_diskchanged: frefs");
	}
	if (fsp->pcfs_nrefs) {
		rw_exit(&pcnodes_lock);
		panic("pc_diskchanged: nrefs");
	}
#endif
	if (!(vfsp->vfs_flag & VFS_UNMOUNTED) &&
	    fsp->pcfs_fatp != (uchar_t *)0) {
		pc_invalfat(fsp);
	} else {
		binval(fsp->pcfs_xdev);
	}
}
Code example #23
File: lzfs_snap.c Project: glycerine/lzfs
void
lzfs_zfsctl_create(vfs_t *vfsp)
{
	vnode_t *vp_zfsctl_dir = NULL, *vp_snap_dir = NULL;
	struct dentry *zfsctl_dir_dentry = NULL, *snap_dir_dentry = NULL;
	struct inode *inode_ctldir = NULL, *inode_snapdir = NULL;
	timestruc_t now;

	inode_ctldir = iget_locked(vfsp->vfs_super, LZFS_ZFSCTL_INO_ROOT);
	ASSERT(inode_ctldir != NULL);
	vp_zfsctl_dir = LZFS_ITOV(inode_ctldir);
	gethrestime(&now);
	ASSERT(inode_ctldir->i_state & I_NEW);
	mutex_enter(&vp_zfsctl_dir->v_lock);
	vp_zfsctl_dir->v_count = 1;
	VN_SET_VFS_TYPE_DEV(vp_zfsctl_dir, vfsp, VDIR, 0);
	bcopy(&now, &(vp_zfsctl_dir->v_inode.i_ctime), sizeof (timestruc_t));
	bcopy(&now, &(vp_zfsctl_dir->v_inode.i_atime), sizeof (timestruc_t));
	bcopy(&now, &(vp_zfsctl_dir->v_inode.i_mtime), sizeof (timestruc_t));
#ifdef HAVE_CRED_STRUCT
	inode_ctldir->i_uid = current->cred->uid;
	inode_ctldir->i_gid = current->cred->gid;
#else
	inode_ctldir->i_uid = current->uid;
	inode_ctldir->i_gid = current->gid;
#endif
	inode_ctldir->i_version = 1;
	inode_ctldir->i_mode |= (S_IFDIR | S_IRWXU);
	inode_ctldir->i_op = &zfsctl_dir_inode_operations;
	inode_ctldir->i_fop = &zfsctl_dir_file_operations;
	ASSERT(vfsp);
	inode_ctldir->i_sb = vfsp->vfs_super;
	ASSERT(vfsp->vfs_super);
	ASSERT(vfsp->vfs_super->s_root);
	unlock_new_inode(inode_ctldir);
	zfsctl_dir_dentry = d_alloc_name(vfsp->vfs_super->s_root, 
					 ZFS_CTLDIR_NAME);
	if (zfsctl_dir_dentry) {
		d_add(zfsctl_dir_dentry, LZFS_VTOI(vp_zfsctl_dir));
		vfsp->zfsctl_dir_dentry = zfsctl_dir_dentry;
	} else {
		goto dentry_out;
	}
	set_zfsvfs_ctldir(vfsp->vfs_data, vp_zfsctl_dir);
	mutex_exit(&vp_zfsctl_dir->v_lock);
	inode_snapdir = iget_locked(vfsp->vfs_super, LZFS_ZFSCTL_INO_SNAPDIR);
	ASSERT(inode_snapdir != NULL);
	ASSERT(inode_snapdir->i_state & I_NEW);
	vp_snap_dir = LZFS_ITOV(inode_snapdir);
	gethrestime(&now);
	vfsp->vfs_snap_dir = vp_snap_dir;
	mutex_enter(&vp_snap_dir->v_lock);
	vp_snap_dir->v_count = 1;
	VN_SET_VFS_TYPE_DEV(vp_snap_dir, vfsp, VDIR, 0);
	bcopy(&now, &(vp_snap_dir->v_inode.i_ctime), sizeof (timestruc_t));
	bcopy(&now, &(vp_snap_dir->v_inode.i_atime), sizeof (timestruc_t));
	bcopy(&now, &(vp_snap_dir->v_inode.i_mtime), sizeof (timestruc_t));
#ifdef HAVE_CRED_STRUCT
	inode_snapdir->i_uid = current->cred->uid;
	inode_snapdir->i_gid = current->cred->gid;
#else
	inode_snapdir->i_uid = current->uid;
	inode_snapdir->i_gid = current->gid;
#endif
	inode_snapdir->i_version = 1;
	inode_snapdir->i_mode |= (S_IFDIR | S_IRWXU);
	inode_snapdir->i_op = &snap_dir_inode_operations;
	inode_snapdir->i_fop = &snap_dir_file_operations;
	inode_snapdir->i_sb = vfsp->vfs_super;
	unlock_new_inode(inode_snapdir);
	ASSERT(zfsctl_dir_dentry);
	snap_dir_dentry = d_alloc_name(zfsctl_dir_dentry, ZFS_SNAPDIR_NAME);
	if (snap_dir_dentry) {
		d_add(snap_dir_dentry, LZFS_VTOI(vp_snap_dir));
		vfsp->snap_dir_dentry = snap_dir_dentry;
		mutex_exit(&vp_snap_dir->v_lock);
	} else {
		goto dentry_out;
	}
	return;
dentry_out:
	// free vnode
	vn_free(vp_zfsctl_dir);
	ASSERT(0 && "TODO");
}