Example #1
0
/*
 * Return a looped back vnode for the given vnode.
 * If no lnode exists for this vnode create one and put it
 * in a table hashed by vnode.  If the lnode for
 * this vnode is already in the table return it (ref count is
 * incremented by lfind).  The lnode will be flushed from the
 * table when lo_inactive calls freelonode.  The creation of
 * a new lnode can be forced via the LOF_FORCE flag even if
 * the vnode exists in the table.  This is used in the creation
 * of a terminating lnode when looping is detected.  A unique
 * lnode is required for the correct evaluation of the current
 * working directory.
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li, int flag)
{
    lnode_t *lp, *tlp;
    struct vfs *vfsp;
    vnode_t *nvp;

    lp = NULL;
    TABLE_LOCK_ENTER(vp, li);
    if (flag != LOF_FORCE)
        lp = lfind(vp, li);
    if ((flag == LOF_FORCE) || (lp == NULL)) {
        /*
         * Optimistically assume that we won't need to sleep.
         */
        lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
        nvp = vn_alloc(KM_NOSLEEP);
        if (lp == NULL || nvp == NULL) {
            TABLE_LOCK_EXIT(vp, li);
            /* The lnode allocation may have succeeded, save it */
            tlp = lp;
            if (tlp == NULL) {
                tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
            }
            if (nvp == NULL) {
                nvp = vn_alloc(KM_SLEEP);
            }
            lp = NULL;
            TABLE_LOCK_ENTER(vp, li);
            if (flag != LOF_FORCE)
                lp = lfind(vp, li);
            if (lp != NULL) {
                kmem_cache_free(lnode_cache, tlp);
                vn_free(nvp);
                VN_RELE(vp);
                goto found_lnode;
            }
            lp = tlp;
        }
        atomic_inc_32(&li->li_refct);
        vfsp = makelfsnode(vp->v_vfsp, li);
        lp->lo_vnode = nvp;
        VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
        nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
        vn_setops(nvp, lo_vnodeops);
        nvp->v_data = (caddr_t)lp;
        lp->lo_vp = vp;
        lp->lo_looping = 0;
        lsave(lp, li);
        vn_exists(vp);
    } else {
        VN_RELE(vp);
    }

found_lnode:
    TABLE_LOCK_EXIT(vp, li);
    return (ltov(lp));
}
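A minimal caller sketch for the routine above (hypothetical, not from the
original source): the helper name and lookup context are placeholders; the
point is the hold contract stated in the NOTE, i.e. the caller passes a held
vnode and must not VN_RELE() it itself, because makelonode() either keeps
that hold (new lnode) or releases it (existing lnode found).

/*
 * Hypothetical caller: vp is assumed to be held, e.g. returned held by
 * VOP_LOOKUP().  Passing flag 0 reuses an existing hashed lnode.
 */
static int
lookup_loopback(vnode_t *vp, struct loinfo *li, vnode_t **vpp)
{
	*vpp = makelonode(vp, li, 0);
	return (0);
}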
Example #2
0
/*
 * gfs_file_create(): create a new GFS file
 *
 *   size	- size of private data structure (v_data)
 *   pvp	- parent vnode (GFS directory)
 *   ops	- vnode operations vector
 *
 * In order to use this interface, the parent vnode must have been created by
 * gfs_dir_create(), and the private data stored in v_data must have a
 * 'gfs_file_t' as its first field.
 *
 * Given these constraints, this routine will automatically:
 *
 * 	- Allocate v_data for the vnode
 * 	- Initialize necessary fields in the vnode
 * 	- Hold the parent
 */
vnode_t *
gfs_file_create(size_t size, vnode_t *pvp, vnodeops_t *ops)
{
	gfs_file_t *fp;
	vnode_t *vp;

	/*
	 * Allocate vnode and internal data structure
	 */
	fp = kmem_zalloc(size, KM_SLEEP);
	vp = vn_alloc(KM_SLEEP);

	/*
	 * Set up various pointers
	 */
	fp->gfs_vnode = vp;
	fp->gfs_parent = pvp;
	vp->v_data = fp;
	fp->gfs_size = size;
	fp->gfs_type = GFS_FILE;

	/*
	 * Initialize vnode and hold parent.
	 */
	vn_setops(vp, ops);
	if (pvp) {
		VN_SET_VFS_TYPE_DEV(vp, pvp->v_vfsp, VREG, 0);
		VN_HOLD(pvp);
	}

	return (vp);
}
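A sketch of the layout constraint described in the comment above (the
structure and vnodeops names are placeholders, not part of the original
source): the caller's private data must begin with a gfs_file_t so that
gfs_file_create() can fill it in through v_data.

/* Hypothetical per-file data; the gfs_file_t must be the first field. */
typedef struct myfs_file {
	gfs_file_t	myfs_gfs;	/* filled in by gfs_file_create() */
	int		myfs_private;	/* filesystem-specific state */
} myfs_file_t;

extern vnodeops_t *myfs_vnodeops;	/* placeholder ops vector */

static vnode_t *
myfs_file_create(vnode_t *pvp)
{
	vnode_t *vp;
	myfs_file_t *fp;

	/* Allocates v_data, links it to vp, and holds pvp. */
	vp = gfs_file_create(sizeof (myfs_file_t), pvp, myfs_vnodeops);
	fp = (myfs_file_t *)vp->v_data;
	fp->myfs_private = 0;
	return (vp);
}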
Example #3
0
/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_t *zp = buf;

	/* ZFSFUSE */
	/* ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs)); */

	zp->z_vnode = vn_alloc(kmflags);
	if (zp->z_vnode == NULL) {
		return (-1);
	}
	ZTOV(zp)->v_data = zp;

	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_map_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_name_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zp->z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));

	zp->z_dbuf = NULL;
	zp->z_dirlocks = NULL;
	return (0);
}
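The matching destructor is not part of this snippet; a mirror-image sketch
(an assumption, not the original zfs-fuse code) would simply tear down what
the constructor set up, roughly in reverse order:

/*ARGSUSED*/
static void
zfs_znode_cache_destructor_sketch(void *buf, void *arg)
{
	znode_t *zp = buf;

	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);
	mutex_destroy(&zp->z_acl_lock);
	rw_destroy(&zp->z_name_lock);
	rw_destroy(&zp->z_parent_lock);
	rw_destroy(&zp->z_map_lock);
	mutex_destroy(&zp->z_lock);
	vn_free(ZTOV(zp));
}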
Example #4
0
/*
 * Constructor/destructor routines for fifos and pipes.
 *
 * In the interest of code sharing, we define a common fifodata structure
 * which consists of a fifolock and one or two fnodes.  A fifo contains
 * one fnode; a pipe contains two.  The fifolock is shared by the fnodes,
 * each of which points to it:
 *
 *	--> -->	---------  --- ---
 *	|   |	| lock	|   |	|
 *	|   |	---------   |	|
 *	|   |	|	|  fifo	|
 *	|   --- | fnode	|   |	|
 *	|	|	|   |  pipe
 *	|	---------  ---	|
 *	|	|	|	|
 *	------- | fnode	|	|
 *		|	|	|
 *		---------      ---
 *
 * Since the fifolock is at the beginning of the fifodata structure,
 * the fifolock address is the same as the fifodata address.  Thus,
 * we can determine the fifodata address from any of its member fnodes.
 * This is essential for fifo_inactive.
 *
 * The fnode constructor is designed to handle any fifodata structure,
 * deducing the number of fnodes from the total size.  Thus, the fnode
 * constructor does most of the work for the pipe constructor.
 */
static int
fnode_constructor(void *buf, void *cdrarg, int kmflags)
{
	fifodata_t *fdp = buf;
	fifolock_t *flp = &fdp->fifo_lock;
	fifonode_t *fnp = &fdp->fifo_fnode[0];
	size_t size = (uintptr_t)cdrarg;

	mutex_init(&flp->flk_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&flp->flk_wait_cv, NULL, CV_DEFAULT, NULL);
	flp->flk_ocsync = 0;

	while ((char *)fnp < (char *)buf + size) {

		vnode_t *vp;

		vp = vn_alloc(kmflags);
		if (vp == NULL) {
			fnp->fn_vnode = NULL; /* mark for destructor */
			fnode_destructor(buf, cdrarg);
			return (-1);
		}
		fnp->fn_vnode = vp;

		fnp->fn_lock = flp;
		fnp->fn_open = 0;
		fnp->fn_dest = fnp;
		fnp->fn_mp = NULL;
		fnp->fn_count = 0;
		fnp->fn_rsynccnt = 0;
		fnp->fn_wsynccnt = 0;
		fnp->fn_wwaitcnt = 0;
		fnp->fn_insync = 0;
		fnp->fn_pcredp = NULL;
		fnp->fn_cpid = -1;
		/*
		 * 32-bit stat(2) may fail if fn_ino isn't initialized
		 */
		fnp->fn_ino = 0;

		cv_init(&fnp->fn_wait_cv, NULL, CV_DEFAULT, NULL);

		vn_setops(vp, fifo_vnodeops);
		vp->v_stream = NULL;
		vp->v_type = VFIFO;
		vp->v_data = (caddr_t)fnp;
		vp->v_flag = VNOMAP | VNOSWAP;
		vn_exists(vp);
		fnp++;
	}
	return (0);
}
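The "deduce the number of fnodes from the total size" trick works because the
same size is passed back in as the constructor's cdrarg when the kmem caches
are created.  A sketch of that wiring (cache names and exact sizes are
assumptions, not taken from the original source):

static kmem_cache_t *fifo_cache;	/* hypothetical cache names */
static kmem_cache_t *pipe_cache;

static void
fifo_cache_init_sketch(void)
{
	/*
	 * cdrarg carries the buffer size; fnode_constructor() uses it as
	 * the loop bound, building one fnode for a fifo and two for a pipe.
	 */
	fifo_cache = kmem_cache_create("fifo_cache",
	    sizeof (fifodata_t) - sizeof (fifonode_t), 0,
	    fnode_constructor, fnode_destructor, NULL,
	    (void *)(sizeof (fifodata_t) - sizeof (fifonode_t)), NULL, 0);

	pipe_cache = kmem_cache_create("pipe_cache",
	    sizeof (fifodata_t), 0,
	    fnode_constructor, fnode_destructor, NULL,
	    (void *)sizeof (fifodata_t), NULL, 0);
}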
Example #5
0
vnode_t *
vncache_enter(struct stat *st, vnode_t *dvp, char *name, int fd)
{
	vnode_t *old_vp;
	vnode_t *new_vp;
	vfs_t *vfs;
	char *vpath;
	avl_index_t	where;
	int len;

	/*
	 * Fill in v_path
	 * Note: fsop_root() calls with dvp=NULL
	 */
	len = strlen(name) + 1;
	if (dvp == NULL) {
		vpath = kmem_alloc(len, KM_SLEEP);
		(void) strlcpy(vpath, name, len);
		vfs = rootvfs;
	} else {
		/* add to length for parent path + "/" */
		len += (strlen(dvp->v_path) + 1);
		vpath = kmem_alloc(len, KM_SLEEP);
		(void) snprintf(vpath, len, "%s/%s", dvp->v_path, name);
		vfs = dvp->v_vfsp;
	}

	new_vp = vn_alloc(KM_SLEEP);
	new_vp->v_path = vpath;
	new_vp->v_fd = fd;
	new_vp->v_st_dev = st->st_dev;
	new_vp->v_st_ino = st->st_ino;
	new_vp->v_vfsp = vfs;
	new_vp->v_type = IFTOVT(st->st_mode);

	mutex_enter(&vncache_lock);
	old_vp = avl_find(&vncache_avl, new_vp, &where);
	if (old_vp != NULL)
		vn_hold(old_vp);
	else
		avl_insert(&vncache_avl, new_vp, where);
	mutex_exit(&vncache_lock);

	/* If we lost the race, free new_vp */
	if (old_vp != NULL) {
		vn_free(new_vp);
		return (old_vp);
	}

	return (new_vp);
}
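avl_find() above relies on a comparator supplied when vncache_avl was
created; that comparator is not part of this snippet.  A plausible sketch
(hypothetical), keyed on the (v_st_dev, v_st_ino) pair that vncache_enter()
fills in:

static int
vncache_cmp(const void *v1, const void *v2)
{
	const vnode_t *vp1 = v1;
	const vnode_t *vp2 = v2;

	if (vp1->v_st_dev < vp2->v_st_dev)
		return (-1);
	if (vp1->v_st_dev > vp2->v_st_dev)
		return (+1);
	if (vp1->v_st_ino < vp2->v_st_ino)
		return (-1);
	if (vp1->v_st_ino > vp2->v_st_ino)
		return (+1);
	return (0);
}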
Example #6
0
/* ARGSUSED */
static int
lxpr_node_constructor(void *buf, void *un, int kmflags)
{
	lxpr_node_t	*lxpnp = buf;
	vnode_t		*vp;

	vp = lxpnp->lxpr_vnode = vn_alloc(kmflags);
	if (vp == NULL)
		return (-1);

	(void) vn_setops(vp, lxpr_vnodeops);
	vp->v_data = lxpnp;

	return (0);
}
Example #7
0
/*
 * Initialize the page retire mechanism:
 *
 *   - Establish the correctable error retire limit.
 *   - Initialize locks.
 *   - Build the retired_pages vnode.
 *   - Set up the kstats.
 *   - Fire off the background thread.
 *   - Tell page_tryretire() it's OK to start retiring pages.
 */
void
page_retire_init(void)
{
	const fs_operation_def_t retired_vnodeops_template[] = {NULL, NULL};
	struct vnodeops *vops;

	const uint_t page_retire_ndata =
	    sizeof (page_retire_kstat) / sizeof (kstat_named_t);

	ASSERT(page_retire_ksp == NULL);

	if (max_pages_retired_bps <= 0) {
		max_pages_retired_bps = MCE_BPT;
	}

	mutex_init(&pr_q_mutex, NULL, MUTEX_DEFAULT, NULL);

	retired_pages = vn_alloc(KM_SLEEP);
	if (vn_make_ops("retired_pages", retired_vnodeops_template, &vops)) {
		cmn_err(CE_PANIC,
		    "page_retired_init: can't make retired vnodeops");
	}
	vn_setops(retired_pages, vops);

	if ((page_retire_ksp = kstat_create("unix", 0, "page_retire",
	    "misc", KSTAT_TYPE_NAMED, page_retire_ndata,
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		cmn_err(CE_WARN, "kstat_create for page_retire failed");
	} else {
		page_retire_ksp->ks_data = (void *)&page_retire_kstat;
		page_retire_ksp->ks_update = page_retire_kstat_update;
		kstat_install(page_retire_ksp);
	}

	pr_thread_shortwait = 23 * hz;
	pr_thread_longwait = 1201 * hz;
	mutex_init(&pr_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pr_cv, NULL, CV_DEFAULT, NULL);
	pr_thread_id = thread_create(NULL, 0, page_retire_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	pr_enable = 1;
}
Example #8
0
/*ARGSUSED*/
static int
fdmount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	struct vnode *vp;

	if (secpolicy_fs_mount(cr, mvp, vfsp) != 0)
		return (EPERM);
	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	mutex_enter(&mvp->v_lock);
	if ((uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Having the resource be anything but "fd" doesn't make sense
	 */
	vfs_setresource(vfsp, "fd");

	vp = vn_alloc(KM_SLEEP);
	vp->v_vfsp = vfsp;
	vn_setops(vp, fd_vnodeops);
	vp->v_type = VDIR;
	vp->v_data = NULL;
	vp->v_flag |= VROOT;
	vfsp->vfs_fstype = fdfstype;
	vfsp->vfs_data = (char *)vp;
	mutex_enter(&fd_minor_lock);
	do {
		fdfsmin = (fdfsmin + 1) & L_MAXMIN32;
		vfsp->vfs_dev = makedevice(fdfsmaj, fdfsmin);
	} while (vfs_devismounted(vfsp->vfs_dev));
	mutex_exit(&fd_minor_lock);
	vfs_make_fsid(&vfsp->vfs_fsid, vfsp->vfs_dev, fdfstype);
	vfsp->vfs_bsize = 1024;
	return (0);
}
Example #9
0
/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	znode_t *zp = buf;

	zp->z_vnode = vn_alloc(KM_SLEEP);
	zp->z_vnode->v_data = (caddr_t)zp;
	mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&zp->z_map_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zp->z_name_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zp->z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));

	zp->z_dbuf = NULL;
	zp->z_dirlocks = 0;
	return (0);
}
Example #10
0
static int
fdget(struct vnode *dvp, char *comp, struct vnode **vpp)
{
	int n = 0;
	struct vnode *vp;

	while (*comp) {
		if (*comp < '0' || *comp > '9')
			return (ENOENT);
		n = 10 * n + *comp++ - '0';
	}
	vp = vn_alloc(KM_SLEEP);
	vp->v_type = VCHR;
	vp->v_vfsp = dvp->v_vfsp;
	vn_setops(vp, fd_vnodeops);
	vp->v_data = NULL;
	vp->v_flag = VNOMAP;
	vp->v_rdev = makedevice(fdrmaj, n);
	vn_exists(vp);
	*vpp = vp;
	return (0);
}
Example #11
0
int
VMBlockVnodeGet(struct vnode **vpp,        // OUT: Filled with address of new vnode
                struct vnode *realVp,      // IN:  Real vnode (assumed held)
                const char *name,          // IN:  Relative name of the file
                size_t nameLen,            // IN:  Size of name
                struct vnode *dvp,         // IN:  Parent directory's vnode
                struct vfs *vfsp,          // IN:  Filesystem structure
                Bool isRoot)               // IN:  If is root directory of fs
{
   VMBlockVnodeInfo *vip;
   struct vnode *vp;
   char *curr;
   int ret;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMBlockVnodeGet: entry\n");

   ASSERT(vpp);
   ASSERT(realVp);
   ASSERT(vfsp);
   ASSERT(name);
   ASSERT(dvp || isRoot);

   vp = vn_alloc(KM_SLEEP);
   if (!vp) {
      return ENOMEM;
   }

   vip = kmem_zalloc(sizeof *vip, KM_SLEEP);
   vp->v_data = (void *)vip;

   /*
    * Store the path that this file redirects to.  For the root vnode we just
    * store the provided path, but for all others we first copy in the parent
    * directory's path.
    */
   curr = vip->name;

   if (!isRoot) {
      VMBlockVnodeInfo *dvip = VPTOVIP(dvp);
      if (dvip->nameLen + 1 + nameLen + 1 >= sizeof vip->name) {
         ret = ENAMETOOLONG;
         goto error;
      }

      memcpy(vip->name, dvip->name, dvip->nameLen);
      vip->name[dvip->nameLen] = '/';
      curr = vip->name + dvip->nameLen + 1;
   }

   if (nameLen + 1 > (sizeof vip->name - (curr - vip->name))) {
      ret = ENAMETOOLONG;
      goto error;
   }

   memcpy(curr, name, nameLen);
   curr[nameLen] = '\0';
   vip->nameLen = nameLen + (curr - vip->name);

   /*
    * We require the caller to have held realVp, so we don't need to
    * VN_HOLD() it here even though we VN_RELE() this vnode in
    * VMBlockVnodePut().
    * Despite seeming awkward, this is more natural since the function that our
    * caller obtained realVp from provided a held vnode.
    */
   vip->realVnode = realVp;

   /*
    * Now we'll initialize the vnode.  We need to set the file type, vnode
    * operations, flags, filesystem pointer, reference count, and device.
    */
   /* The root directory is our only directory; the rest are symlinks. */
   vp->v_type = isRoot ? VDIR : VLNK;

   vn_setops(vp, vmblockVnodeOps);

   vp->v_flag  = VNOMAP | VNOMOUNT | VNOSWAP | (isRoot ? VROOT : 0);
   vp->v_vfsp  = vfsp;
   vp->v_rdev  = NODEV;

   /* Fill in the provided address with the new vnode. */
   *vpp = vp;

   return 0;

error:
   kmem_free(vip, sizeof *vip);
   vn_free(vp);
   return ret;
}
Example #12
0
static int
xdirmakexnode(
	struct xmemnode *dir,
	struct xmount	*xm,
	struct vattr	*va,
	enum	de_op	op,
	struct xmemnode **newnode,
	struct cred	*cred)
{
	struct xmemnode *xp;
	enum vtype	type;

	ASSERT(va != NULL);
	ASSERT(op == DE_CREATE || op == DE_MKDIR);
	if (((va->va_mask & AT_ATIME) && TIMESPEC_OVERFLOW(&va->va_atime)) ||
	    ((va->va_mask & AT_MTIME) && TIMESPEC_OVERFLOW(&va->va_mtime)))
		return (EOVERFLOW);
	type = va->va_type;
	xp = xmem_memalloc(sizeof (struct xmemnode), 1);
	xp->xn_vnode = vn_alloc(KM_SLEEP);
	xmemnode_init(xm, xp, va, cred);
	if (type == VBLK || type == VCHR) {
		xp->xn_vnode->v_rdev = xp->xn_rdev = va->va_rdev;
	} else {
		xp->xn_vnode->v_rdev = xp->xn_rdev = NODEV;
	}
	xp->xn_vnode->v_type = type;
	xp->xn_uid = crgetuid(cred);

	/*
	 * To determine the group-id of the created file:
	 *   1) If the gid is set in the attribute list (non-Sun & pre-4.0
	 *	clients are not likely to set the gid), then use it if
	 *	the process is privileged, belongs to the target group,
	 *	or the group is the same as the parent directory.
	 *   2) If the filesystem was not mounted with the Old-BSD-compatible
	 *	GRPID option, and the directory's set-gid bit is clear,
	 *	then use the process's gid.
	 *   3) Otherwise, set the group-id to the gid of the parent directory.
	 */
	if ((va->va_mask & AT_GID) &&
	    ((va->va_gid == dir->xn_gid) || groupmember(va->va_gid, cred) ||
	    secpolicy_vnode_create_gid(cred) == 0)) {
		xp->xn_gid = va->va_gid;
	} else {
		if (dir->xn_mode & VSGID)
			xp->xn_gid = dir->xn_gid;
		else
			xp->xn_gid = crgetgid(cred);
	}
	/*
	 * If we're creating a directory, and the parent directory has the
	 * set-GID bit set, set it on the new directory.
	 * Otherwise, if the user is neither privileged nor a member of the
	 * file's new group, clear the file's set-GID bit.
	 */
	if (dir->xn_mode & VSGID && type == VDIR)
		xp->xn_mode |= VSGID;
	else if ((xp->xn_mode & VSGID) &&
		secpolicy_vnode_setids_setgids(cred, xp->xn_gid) != 0)
			xp->xn_mode &= ~VSGID;

	if (va->va_mask & AT_ATIME)
		xp->xn_atime = va->va_atime;
	if (va->va_mask & AT_MTIME)
		xp->xn_mtime = va->va_mtime;

	if (op == DE_MKDIR)
		xdirinit(dir, xp);

	*newnode = xp;
	return (0);
}
Example #13
0
extern dev_info_t	*clone_dip;
extern major_t		clone_major;
extern struct dev_ops	*ddi_hold_driver(major_t);

/* dv_node node constructor for kmem cache */
static int
i_dv_node_ctor(void *buf, void *cfarg, int flag)
{
	_NOTE(ARGUNUSED(cfarg, flag))
	struct dv_node	*dv = (struct dv_node *)buf;
	struct vnode	*vp;

	bzero(buf, sizeof (struct dv_node));
	vp = dv->dv_vnode = vn_alloc(flag);
	if (vp == NULL) {
		return (-1);
	}
	vp->v_data = dv;
	rw_init(&dv->dv_contents, NULL, RW_DEFAULT, NULL);
	return (0);
}

/* dv_node node destructor for kmem cache */
static void
i_dv_node_dtor(void *buf, void *arg)
{
	_NOTE(ARGUNUSED(arg))
	struct dv_node	*dv = (struct dv_node *)buf;
	struct vnode	*vp = DVTOV(dv);

	rw_destroy(&dv->dv_contents);
	vn_invalid(vp);
	vn_free(vp);
}
Example #14
0
vnode_t *
sv_find(vnode_t *mvp, vnode_t *dvp, nfs4_fname_t **namepp)
{
	vnode_t *vp;
	rnode4_t *rp = VTOR4(mvp);
	svnode_t *svp;
	svnode_t *master_svp = VTOSV(mvp);
	rnode4_t *drp = VTOR4(dvp);
	nfs4_fname_t *nm;

	ASSERT(dvp != NULL);

	sv_stats.sv_find++;

	ASSERT(namepp != NULL);
	ASSERT(*namepp != NULL);
	nm = *namepp;
	*namepp = NULL;

	/*
	 * At this point, all we know is that we have an rnode whose
	 * file handle matches the file handle of the object we want.
	 * We have to verify that component name and the directory
	 * match.  If so, then we are done.
	 *
	 * Note: mvp is always the master vnode.
	 */

	ASSERT(!IS_SHADOW(mvp, rp));

	if (sv_match(nm, drp->r_fh, master_svp)) {
		VN_HOLD(mvp);
		fn_rele(&nm);
		return (mvp);
	}

	/*
	 * No match, search through the shadow vnode list.
	 * Hold the r_svlock to prevent changes.
	 */

	mutex_enter(&rp->r_svlock);

	for (svp = master_svp->sv_forw; svp != master_svp; svp = svp->sv_forw)
		if (sv_match(nm, drp->r_fh, svp)) {

			/*
			 * A matching shadow vnode is found, bump the
			 * reference count on it and return it.
			 */

			vp = SVTOV(svp);
			VN_HOLD(vp);
			fn_rele(&nm);
			mutex_exit(&rp->r_svlock);
			return (vp);
		}

	/*
	 * No match searching the list, go allocate a new shadow
	 */
	svp = kmem_cache_alloc(svnode_cache, KM_SLEEP);
	svp->sv_r_vnode = vn_alloc(KM_SLEEP);
	vp = SVTOV(svp);

	/* Initialize the vnode */

	vn_setops(vp, nfs4_vnodeops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = mvp->v_vfsp;
	ASSERT(nfs4_consistent_type(mvp));
	vp->v_type = mvp->v_type;
	vp->v_pages = (page_t *)-1;	/* No pages, please */
	vn_exists(vp);

	/* Initialize the shadow vnode */

	svp->sv_dfh = VTOR4(dvp)->r_fh;
	sfh4_hold(svp->sv_dfh);

	svp->sv_name = nm;
	VN_HOLD(mvp);
	insque(svp, master_svp);
	mutex_exit(&rp->r_svlock);

	return (vp);
}
Example #15
0
static vnode_t *
make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
    struct vnodeops *vops,
    int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *),
    int *newnode, cred_t *cr)
{
	rnode4_t *rp;
	rnode4_t *trp;
	vnode_t *vp;
	mntinfo4_t *mi;

	ASSERT(RW_READ_HELD(&rhtp->r_lock));

	mi = VFTOMI4(vfsp);

start:
	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(rp);
		*newnode = 0;
		return (vp);
	}
	rw_exit(&rhtp->r_lock);

	mutex_enter(&rp4freelist_lock);

	if (rp4freelist != NULL && rnode4_new >= nrnode) {
		rp = rp4freelist;
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);

		vp = RTOV4(rp);

		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				rw_enter(&rhtp->r_lock, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		r4inactive(rp, cr);

		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&rhtp->r_lock, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);

		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		uninit_rnode4(rp);

		/*
		 * Make sure that if rnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		vnode_t *new_vp;

		mutex_exit(&rp4freelist_lock);

		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_add_long((ulong_t *)&rnode4_new, 1);
#ifdef DEBUG
		clstat4_debug.nrnode.value.ui64++;
#endif
		vp = new_vp;
	}

	bzero(rp, sizeof (*rp));
	rp->r_vnode = vp;
	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
	rp->created_v4 = 0;
	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
	    offsetof(nfs4_open_stream_t, os_node));
	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
	rp->r_flags = R4READDIRWATTR;
	rp->r_fh = fh;
	rp->r_hashq = rhtp;
	sfh4_hold(rp->r_fh);
	rp->r_server = mi->mi_curr_serv;
	rp->r_deleg_type = OPEN_DELEGATE_NONE;
	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);

	rddir4_cache_create(rp);
	rp->r_putapage = putapage;
	vn_setops(vp, vops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = vfsp;
	VFS_HOLD(vfsp);
	vp->v_type = VNON;
	if (isrootfh(fh, rp))
		vp->v_flag = VROOT;
	vn_exists(vp);

	/*
	 * There is a race condition if someone else
	 * alloc's the rnode while no locks are held, so we
	 * check again and recover if found.
	 */
	rw_enter(&rhtp->r_lock, RW_WRITER);
	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(trp);
		*newnode = 0;
		rw_exit(&rhtp->r_lock);
		rp4_addfree(rp, cr);
		rw_enter(&rhtp->r_lock, RW_READER);
		return (vp);
	}
	rp4_addhash(rp);
	*newnode = 1;
	return (vp);
}
Example #16
0
/*
 * Create a reference to the vnode representing the file descriptor.
 * Then, apply the VOP_OPEN operation to that vnode.
 *
 * The vnode for the file descriptor may be switched under you.
 * If it is, search the hash list for a nodep - nodep->nm_filevp
 * pair. If it exists, return that nodep to the user.
 * If it does not exist, create a new namenode to attach
 * to the nodep->nm_filevp, then place the pair on the hash list.
 *
 * Newly created objects are like children/nodes in the mounted
 * file system, with the parent being the initial mount.
 */
int
nm_open(vnode_t **vpp, int flag, cred_t *crp, caller_context_t *ct)
{
	struct namenode *nodep = VTONM(*vpp);
	int error = 0;
	struct namenode *newnamep;
	struct vnode *newvp;
	struct vnode *infilevp;
	struct vnode *outfilevp;

	/*
	 * If the vnode is switched under us, the corresponding
	 * VN_RELE for this VN_HOLD will be done by the file system
	 * performing the switch. Otherwise, the corresponding
	 * VN_RELE will be done by nm_close().
	 */
	infilevp = outfilevp = nodep->nm_filevp;
	VN_HOLD(outfilevp);

	if ((error = VOP_OPEN(&outfilevp, flag, crp, ct)) != 0) {
		VN_RELE(outfilevp);
		return (error);
	}
	if (infilevp != outfilevp) {
		/*
		 * See if the new filevp (outfilevp) is already associated
		 * with the mount point. If it is, then it already has a
		 * namenode associated with it.
		 */
		mutex_enter(&ntable_lock);
		if ((newnamep =
		    namefind(outfilevp, nodep->nm_mountpt)) != NULL) {
			struct vnode *vp = NMTOV(newnamep);

			VN_HOLD(vp);
			goto gotit;
		}

		newnamep = kmem_zalloc(sizeof (struct namenode), KM_SLEEP);
		newvp = vn_alloc(KM_SLEEP);
		newnamep->nm_vnode = newvp;

		mutex_init(&newnamep->nm_lock, NULL, MUTEX_DEFAULT, NULL);

		mutex_enter(&nodep->nm_lock);
		newvp->v_flag = ((*vpp)->v_flag | VNOMAP | VNOSWAP) & ~VROOT;
		vn_setops(newvp, vn_getops(*vpp));
		newvp->v_vfsp = &namevfs;
		newvp->v_stream = outfilevp->v_stream;
		newvp->v_type = outfilevp->v_type;
		newvp->v_rdev = outfilevp->v_rdev;
		newvp->v_data = (caddr_t)newnamep;
		vn_exists(newvp);
		bcopy(&nodep->nm_vattr, &newnamep->nm_vattr, sizeof (vattr_t));
		newnamep->nm_vattr.va_type = outfilevp->v_type;
		newnamep->nm_vattr.va_nodeid = namenodeno_alloc();
		newnamep->nm_vattr.va_size = (u_offset_t)0;
		newnamep->nm_vattr.va_rdev = outfilevp->v_rdev;
		newnamep->nm_flag = NMNMNT;
		newnamep->nm_filevp = outfilevp;
		newnamep->nm_filep = nodep->nm_filep;
		newnamep->nm_mountpt = nodep->nm_mountpt;
		mutex_exit(&nodep->nm_lock);

		/*
		 * Insert the new namenode into the hash list.
		 */
		nameinsert(newnamep);
gotit:
		mutex_exit(&ntable_lock);
		/*
		 * Release the above reference to the infilevp, the reference
		 * to the NAMEFS vnode, create a reference to the new vnode
		 * and return the new vnode to the user.
		 */
		VN_RELE(*vpp);
		*vpp = NMTOV(newnamep);
	}
	return (0);
}
Example #17
0
/*
 * Mount a file descriptor onto the node in the file system.
 * Create a new vnode, update the attributes with info from the
 * file descriptor and the mount point.  The mask, mode, uid, gid,
 * atime, mtime and ctime are taken from the mountpt.  Link count is
 * set to one, the file system id is namedev and nodeid is unique
 * for each mounted object.  Other attributes are taken from mount point.
 * Make sure user is owner (or root) with write permissions on mount point.
 * Hash the new vnode and return 0.
 * Upon entry to this routine, the file descriptor is in the
 * fd field of a struct namefd.  Copy that structure from user
 * space and retrieve the file descriptor.
 */
static int
nm_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *crp)
{
	struct namefd namefdp;
	struct vnode *filevp;		/* file descriptor vnode */
	struct file *fp;
	struct vnode *newvp;		/* vnode representing this mount */
	struct vnode *rvp;		/* realvp (if any) for the mountpt */
	struct namenode *nodep;		/* namenode for this mount */
	struct vattr filevattr;		/* attributes of file descriptor */
	struct vattr *vattrp;		/* attributes of this mount */
	char *resource_name;
	char *resource_nodetype;
	statvfs64_t *svfsp;
	int error = 0;

	/*
	 * Get the file descriptor from user space.
	 * Make sure the file descriptor is valid and has an
	 * associated file pointer.
	 * If so, extract the vnode from the file pointer.
	 */
	if (uap->datalen != sizeof (struct namefd))
		return (EINVAL);

	if (copyin(uap->dataptr, &namefdp, uap->datalen))
		return (EFAULT);

	if ((fp = getf(namefdp.fd)) == NULL)
		return (EBADF);

	/*
	 * If the mount point already has something mounted
	 * on it, disallow this mount.  (This restriction may
	 * be removed in a later release).
	 * Likewise, if an unmount has completed but the namefs ROOT
	 * vnode's reference count has not yet dropped to zero,
	 * disallow this mount.
	 */

	mutex_enter(&mvp->v_lock);
	if ((mvp->v_flag & VROOT) ||
	    vfs_matchops(mvp->v_vfsp, namefs_vfsops)) {
		mutex_exit(&mvp->v_lock);
		releasef(namefdp.fd);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Cannot allow users to fattach() in /dev/pts.
	 * First, there is no need for doing so and secondly
	 * we cannot allow arbitrary users to park on a node in
	 * /dev/pts or /dev/vt.
	 */
	rvp = NULLVP;
	if (vn_matchops(mvp, spec_getvnodeops()) &&
	    VOP_REALVP(mvp, &rvp, NULL) == 0 && rvp &&
	    (vn_matchops(rvp, devpts_getvnodeops()) ||
	    vn_matchops(rvp, devvt_getvnodeops()))) {
		releasef(namefdp.fd);
		return (ENOTSUP);
	}

	filevp = fp->f_vnode;
	if (filevp->v_type == VDIR || filevp->v_type == VPORT) {
		releasef(namefdp.fd);
		return (EINVAL);
	}

	/*
	 * If the fd being mounted refers to neither a door nor a stream,
	 * make sure the caller is privileged.
	 */
	if (filevp->v_type != VDOOR && filevp->v_stream == NULL) {
		if (secpolicy_fs_mount(crp, filevp, vfsp) != 0) {
			/* fd is neither a stream nor a door */
			releasef(namefdp.fd);
			return (EINVAL);
		}
	}

	/*
	 * Make sure the file descriptor is not the root of some
	 * file system.
	 * If it's not, create a reference and allocate a namenode
	 * to represent this mount request.
	 */
	if (filevp->v_flag & VROOT) {
		releasef(namefdp.fd);
		return (EBUSY);
	}

	nodep = kmem_zalloc(sizeof (struct namenode), KM_SLEEP);

	mutex_init(&nodep->nm_lock, NULL, MUTEX_DEFAULT, NULL);
	vattrp = &nodep->nm_vattr;
	vattrp->va_mask = AT_ALL;
	if (error = VOP_GETATTR(mvp, vattrp, 0, crp, NULL))
		goto out;

	filevattr.va_mask = AT_ALL;
	if (error = VOP_GETATTR(filevp, &filevattr, 0, crp, NULL))
		goto out;
	/*
	 * Make sure the user is the owner of the mount point
	 * or has sufficient privileges.
	 */
	if (error = secpolicy_vnode_owner(crp, vattrp->va_uid))
		goto out;

	/*
	 * Make sure the user has write permissions on the
	 * mount point (or has sufficient privileges).
	 */
	if (!(vattrp->va_mode & VWRITE) &&
	    secpolicy_vnode_access(crp, mvp, vattrp->va_uid, VWRITE) != 0) {
		error = EACCES;
		goto out;
	}

	/*
	 * If the file descriptor has file/record locking, don't
	 * allow the mount to succeed.
	 */
	if (vn_has_flocks(filevp)) {
		error = EACCES;
		goto out;
	}

	/*
	 * Initialize the namenode.
	 */
	if (filevp->v_stream) {
		struct stdata *stp = filevp->v_stream;
		mutex_enter(&stp->sd_lock);
		stp->sd_flag |= STRMOUNT;
		mutex_exit(&stp->sd_lock);
	}
	nodep->nm_filevp = filevp;
	mutex_enter(&fp->f_tlock);
	fp->f_count++;
	mutex_exit(&fp->f_tlock);

	releasef(namefdp.fd);
	nodep->nm_filep = fp;
	nodep->nm_mountpt = mvp;

	/*
	 * The attributes for the mounted file descriptor were initialized
	 * above by applying VOP_GETATTR to the mount point.  Some of
	 * the fields of the attributes structure will be overwritten
	 * by the attributes from the file descriptor.
	 */
	vattrp->va_type    = filevattr.va_type;
	vattrp->va_fsid    = namedev;
	vattrp->va_nodeid  = namenodeno_alloc();
	vattrp->va_nlink   = 1;
	vattrp->va_size    = filevattr.va_size;
	vattrp->va_rdev    = filevattr.va_rdev;
	vattrp->va_blksize = filevattr.va_blksize;
	vattrp->va_nblocks = filevattr.va_nblocks;
	vattrp->va_seq	   = 0;

	/*
	 * Initialize new vnode structure for the mounted file descriptor.
	 */
	nodep->nm_vnode = vn_alloc(KM_SLEEP);
	newvp = NMTOV(nodep);

	newvp->v_flag = filevp->v_flag | VROOT | VNOMAP | VNOSWAP;
	vn_setops(newvp, nm_vnodeops);
	newvp->v_vfsp = vfsp;
	newvp->v_stream = filevp->v_stream;
	newvp->v_type = filevp->v_type;
	newvp->v_rdev = filevp->v_rdev;
	newvp->v_data = (caddr_t)nodep;
	VFS_HOLD(vfsp);
	vn_exists(newvp);

	/*
	 * Initialize the vfs structure.
	 */
	vfsp->vfs_vnodecovered = NULL;
	vfsp->vfs_flag |= VFS_UNLINKABLE;
	vfsp->vfs_bsize = 1024;
	vfsp->vfs_fstype = namefstype;
	vfs_make_fsid(&vfsp->vfs_fsid, namedev, namefstype);
	vfsp->vfs_data = (caddr_t)nodep;
	vfsp->vfs_dev = namedev;
	vfsp->vfs_bcount = 0;

	/*
	 * Set the name we mounted from.
	 */
	switch (filevp->v_type) {
	case VPROC:	/* VOP_GETATTR() translates this to VREG */
	case VREG:	resource_nodetype = "file"; break;
	case VDIR:	resource_nodetype = "directory"; break;
	case VBLK:	resource_nodetype = "device"; break;
	case VCHR:	resource_nodetype = "device"; break;
	case VLNK:	resource_nodetype = "link"; break;
	case VFIFO:	resource_nodetype = "fifo"; break;
	case VDOOR:	resource_nodetype = "door"; break;
	case VSOCK:	resource_nodetype = "socket"; break;
	default:	resource_nodetype = "resource"; break;
	}

#define	RESOURCE_NAME_SZ 128 /* Maximum length of the resource name */
	resource_name = kmem_alloc(RESOURCE_NAME_SZ, KM_SLEEP);
	svfsp = kmem_alloc(sizeof (statvfs64_t), KM_SLEEP);

	error = VFS_STATVFS(filevp->v_vfsp, svfsp);
	if (error == 0) {
		(void) snprintf(resource_name, RESOURCE_NAME_SZ,
		    "unspecified_%s_%s", svfsp->f_basetype, resource_nodetype);
	} else {
		(void) snprintf(resource_name, RESOURCE_NAME_SZ,
		    "unspecified_%s", resource_nodetype);
	}

	vfs_setresource(vfsp, resource_name);

	kmem_free(svfsp, sizeof (statvfs64_t));
	kmem_free(resource_name, RESOURCE_NAME_SZ);
#undef RESOURCE_NAME_SZ

	/*
	 * Insert the namenode.
	 */
	mutex_enter(&ntable_lock);
	nameinsert(nodep);
	mutex_exit(&ntable_lock);
	return (0);
out:
	releasef(namefdp.fd);
	kmem_free(nodep, sizeof (struct namenode));
	return (error);
}
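For context, nm_mount() is normally reached through fattach(3C), which mounts
namefs over an existing path so the descriptor can later be opened by name.
A minimal user-level sketch (the descriptor and path are only examples):

#include <stropts.h>
#include <stdio.h>

static int
attach_example(int fd)
{
	if (fattach(fd, "/tmp/attachpoint") != 0) {
		perror("fattach");
		return (-1);
	}
	return (0);
}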
Example #18
0
struct pcnode *
pc_getnode(
	struct pcfs *fsp,	/* filesystem for node */
	daddr_t blkno,		/* phys block no of dir entry */
	int offset,		/* offset of dir entry in block */
	struct pcdir *ep)	/* node dir entry */
{
	struct pcnode *pcp;
	struct pchead *hp;
	struct vnode *vp;
	pc_cluster32_t scluster;

	ASSERT(fsp->pcfs_flags & PCFS_LOCKED);
	if (ep == (struct pcdir *)0) {
		ep = &pcfs_rootdirentry;
		scluster = 0;
	} else {
		scluster = pc_getstartcluster(fsp, ep);
	}
	/*
	 * First look for active nodes.
	 * File nodes are identified by the location (blkno, offset) of
	 * their directory entries.
	 * Directory nodes are identified by the starting cluster number
	 * for the entries.
	 */
	if (ep->pcd_attr & PCA_DIR) {
		hp = &pcdhead[PCDHASH(fsp, scluster)];
		rw_enter(&pcnodes_lock, RW_READER);
		for (pcp = hp->pch_forw;
		    pcp != (struct pcnode *)hp; pcp = pcp->pc_forw) {
			if ((fsp == VFSTOPCFS(PCTOV(pcp)->v_vfsp)) &&
			    (scluster == pcp->pc_scluster)) {
				VN_HOLD(PCTOV(pcp));
				rw_exit(&pcnodes_lock);
				return (pcp);
			}
		}
		rw_exit(&pcnodes_lock);
	} else {
		hp = &pcfhead[PCFHASH(fsp, blkno, offset)];
		rw_enter(&pcnodes_lock, RW_READER);
		for (pcp = hp->pch_forw;
		    pcp != (struct pcnode *)hp; pcp = pcp->pc_forw) {
			if ((fsp == VFSTOPCFS(PCTOV(pcp)->v_vfsp)) &&
			    ((pcp->pc_flags & PC_INVAL) == 0) &&
			    (blkno == pcp->pc_eblkno) &&
			    (offset == pcp->pc_eoffset)) {
				VN_HOLD(PCTOV(pcp));
				rw_exit(&pcnodes_lock);
				return (pcp);
			}
		}
		rw_exit(&pcnodes_lock);
	}
	/*
	 * Cannot find the node in the active list.  Allocate memory for a
	 * new node, initialize it, and put it on the active list.
	 */
	pcp = kmem_alloc(sizeof (struct pcnode), KM_SLEEP);
	bzero(pcp, sizeof (struct pcnode));
	vp = vn_alloc(KM_SLEEP);
	pcp->pc_vn = vp;
	pcp->pc_entry = *ep;
	pcp->pc_eblkno = blkno;
	pcp->pc_eoffset = offset;
	pcp->pc_scluster = scluster;
	pcp->pc_lcluster = scluster;
	pcp->pc_lindex = 0;
	pcp->pc_flags = 0;
	if (ep->pcd_attr & PCA_DIR) {
		vn_setops(vp, pcfs_dvnodeops);
		vp->v_type = VDIR;
		if (scluster == 0) {
			vp->v_flag = VROOT;
			blkno = offset = 0;
			if (IS_FAT32(fsp)) {
				pc_cluster32_t ncl = 0;

				scluster = fsp->pcfs_rdirstart;
				if (pc_fileclsize(fsp, scluster, &ncl)) {
					PC_DPRINTF1(2, "cluster chain "
					    "corruption, scluster=%d\n",
					    scluster);
					pcp->pc_flags |= PC_INVAL;
				}
				pcp->pc_size = fsp->pcfs_clsize * ncl;
			} else {
				pcp->pc_size =
				    fsp->pcfs_rdirsec * fsp->pcfs_secsize;
			}
		} else {
			pc_cluster32_t ncl = 0;

			if (pc_fileclsize(fsp, scluster, &ncl)) {
				PC_DPRINTF1(2, "cluster chain corruption, "
				    "scluster=%d\n", scluster);
				pcp->pc_flags |= PC_INVAL;
			}
			pcp->pc_size = fsp->pcfs_clsize * ncl;
		}
	} else {
		vn_setops(vp, pcfs_fvnodeops);
		vp->v_type = VREG;
		vp->v_flag = VNOSWAP;
		fsp->pcfs_frefs++;
		pcp->pc_size = ltohi(ep->pcd_size);
	}
	fsp->pcfs_nrefs++;
	VFS_HOLD(PCFSTOVFS(fsp));
	vp->v_data = (caddr_t)pcp;
	vp->v_vfsp = PCFSTOVFS(fsp);
	vn_exists(vp);
	rw_enter(&pcnodes_lock, RW_WRITER);
	insque(pcp, hp);
	rw_exit(&pcnodes_lock);
	return (pcp);
}
Example #19
0
/************************************************************************
 * iumfs_alloc_node()
 *
 *   Allocate a new vnode and iumnode.
 *
 * Arguments:
 *     vfsp   : vfs structure
 *     nvpp   : address of the vnode pointer supplied by the caller
 *     flag   : flags for the vnode being created (VROOT, VISSWAP, etc.)
 *     type   : type of the vnode being created (VDIR, VREG, etc.)
 *     nodeid : node number of the vnode being created (0 = auto-assign)
 *
 * Return value:
 *    On success : SUCCESS (= 0)
 *    On error   : non-zero
 *
 ************************************************************************/
int
iumfs_alloc_node(vfs_t *vfsp, vnode_t **nvpp, uint_t flag, enum vtype type, ino_t nodeid)
{
    vnode_t *vp;
    iumnode_t *inp;
    iumfs_t *iumfsp; // filesystem-type-specific private data structure

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node is called\n"));
    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: type=%d\n",type));
    
    iumfsp = VFS2IUMFS(vfsp);

    // Allocate the vnode structure
#ifdef SOL10
    // On Solaris 10 the vnode structure must not be allocated directly.
    vp = vn_alloc(KM_NOSLEEP);
#else
    // On Solaris 9 the filesystem allocates the vnode structure itself.
    vp = (vnode_t *) kmem_zalloc(sizeof (vnode_t), KM_NOSLEEP);
#endif

    // Allocate the filesystem-type-specific node data (iumnode structure)
    inp = (iumnode_t *) kmem_zalloc(sizeof (iumnode_t), KM_NOSLEEP);

    /*
     * If either allocation failed, return ENOMEM.
     */
    if (vp == NULL || inp == NULL) {
        cmn_err(CE_WARN, "iumfs_alloc_node: kmem_zalloc failed\n");
        if (vp != NULL)
#ifdef SOL10
            vn_free(vp);
#else        
            kmem_free(vp, sizeof (vnode_t));
#endif            
        if (inp != NULL)
            kmem_free(inp, sizeof (iumnode_t));
        DEBUG_PRINT((CE_CONT, "iumfs_alloc_node return(ENOMEM)\n"));
        return (ENOMEM);
    }

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: allocated vnode = 0x%p\n", vp));

    /*
     * Initialize the allocated vnode.
     * The VN_INIT macro sets the initial v_count to 1, which prevents
     * iumfs_inactive() from being called at a time the filesystem does
     * not expect.
     */
    VN_INIT(vp, vfsp, type, 0);

    // Set the address of the filesystem-type-specific vnode operations structure
#ifdef SOL10
    vn_setops(vp, iumfs_vnodeops);
#else        
    vp->v_op = &iumfs_vnodeops;
#endif

    // OR the requested flags (e.g. VROOT) into v_flag
    vp->v_flag |= flag;

    /*
     * Initialize the allocated iumnode (the IN_INIT macro is not used).
     */
    mutex_init(&(inp)->i_dlock, NULL, MUTEX_DEFAULT, NULL);
    inp->vattr.va_mask = AT_ALL;
    inp->vattr.va_uid = 0;
    inp->vattr.va_gid = 0;
    inp->vattr.va_blksize = BLOCKSIZE;
    inp->vattr.va_nlink = 1;
    inp->vattr.va_rdev = 0;
    rw_init(&(inp)->i_listlock,NULL,RW_DRIVER,NULL);
#ifdef SOL10
#else    
    inp->vattr.va_vcode = 1;
#endif
    /*
     * vattr's va_fsid is a dev_t (= ulong_t), whereas the vfs's vfs_fsid
     * is a structure containing an array of two ints (int[2]).
     * So store the device number obtained in iumfs_mount() here.
     */
    inp->vattr.va_fsid = vfsp->vfs_dev;
    inp->vattr.va_type = type;
    inp->vattr.va_atime =      \
    inp->vattr.va_ctime =      \
    inp->vattr.va_mtime = iumfs_get_current_time();

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: va_fsid = 0x%x\n", inp->vattr.va_fsid));

    /*
     * Point the vnode at the iumnode structure and, conversely,
     * point the iumnode back at the vnode.
     */
    vp->v_data = (caddr_t) inp;
    inp->vnode = vp;

    /*
     * Set the node number (inode number).
     * If one was specified, use it; otherwise simply assign
     * monotonically increasing numbers.
     */
    if( (inp->vattr.va_nodeid = nodeid) == 0) {
        mutex_enter(&(iumfsp->iumfs_lock));
        inp->vattr.va_nodeid = ++(iumfsp->iumfs_last_nodeid);
        mutex_exit(&(iumfsp->iumfs_lock));
    }

    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: new nodeid = %d \n", inp->vattr.va_nodeid));

    // Add the new iumnode to the node linked list
    iumfs_add_node_to_list(vfsp, vp);

    // Store the address of the allocated vnode in the caller-supplied pointer
    *nvpp = vp;
    DEBUG_PRINT((CE_CONT, "iumfs_alloc_node: return(%d)\n", SUCCESS));
    return (SUCCESS);
}
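A hypothetical caller sketch (an assumption, not part of the original
module): allocating the root directory vnode at mount time, which is also
where the flag argument matters, since VROOT is OR-ed into v_flag above.

static int
iumfs_create_root_sketch(vfs_t *vfsp, vnode_t **rootvpp)
{
    int err;

    // nodeid 0 lets iumfs_alloc_node() assign the node number itself.
    err = iumfs_alloc_node(vfsp, rootvpp, VROOT, VDIR, 0);
    if (err != SUCCESS)
        return (err);
    return (SUCCESS);
}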