/*
 * Bind an in-core znode to its backing DMU buffer.
 *
 * Caller must hold the per-object ZFS_OBJ_MUTEX for zp->z_id; this
 * serializes against concurrent zfs_zget() on the same object.  On
 * return the znode's phys pointer is kept up to date by the DMU via
 * the user-data eviction callback, and the vnode has been announced
 * to the VFS layer with vn_exists().
 */
static void
zfs_znode_dmu_init(zfsvfs_t *zfsvfs, znode_t *zp, dmu_buf_t *db)
{
	znode_t *nzp;

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));

	mutex_enter(&zp->z_lock);

	/* The znode must not already be bound to a dbuf. */
	ASSERT(zp->z_dbuf == NULL);
	zp->z_dbuf = db;
	/*
	 * Register this znode as the dbuf's user data; the DMU will call
	 * znode_evict_error if the buffer is evicted out from under us.
	 * dmu_buf_set_user_ie() returns any previously-registered user.
	 */
	nzp = dmu_buf_set_user_ie(db, zp, &zp->z_phys, znode_evict_error);

	/*
	 * A non-NULL return means some other znode already claimed this
	 * dbuf.  The object mutex is held, so there should be no
	 * concurrent zgets on this object — this is fatal.
	 */
	if (nzp != NULL)
		panic("existing znode %p for dbuf %p", (void *)nzp, (void *)db);

	/*
	 * Slap on VROOT if we are the root znode
	 */
	if (zp->z_id == zfsvfs->z_root)
		ZTOV(zp)->v_flag |= VROOT;

	mutex_exit(&zp->z_lock);
	vn_exists(ZTOV(zp));
}
/*
 * Return a looped back vnode for the given vnode.
 * If no lnode exists for this vnode create one and put it
 * in a table hashed by vnode.  If the lnode for
 * this vnode is already in the table return it (ref count is
 * incremented by lfind).  The lnode will be flushed from the
 * table when lo_inactive calls freelonode.  The creation of
 * a new lnode can be forced via the LOF_FORCE flag even if
 * the vnode exists in the table.  This is used in the creation
 * of a terminating lnode when looping is detected.  A unique
 * lnode is required for the correct evaluation of the current
 * working directory.
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li, int flag)
{
	lnode_t *lp, *tlp;
	struct vfs *vfsp;
	vnode_t *nvp;

	lp = NULL;
	TABLE_LOCK_ENTER(vp, li);
	if (flag != LOF_FORCE)
		lp = lfind(vp, li);
	if ((flag == LOF_FORCE) || (lp == NULL)) {
		/*
		 * Optimistically assume that we won't need to sleep.
		 */
		lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
		nvp = vn_alloc(KM_NOSLEEP);
		if (lp == NULL || nvp == NULL) {
			/*
			 * One of the no-sleep allocations failed.  Drop the
			 * table lock so we can block in KM_SLEEP allocations,
			 * then re-take the lock and re-check the table, since
			 * another thread may have inserted an lnode for this
			 * vnode while the lock was dropped.
			 */
			TABLE_LOCK_EXIT(vp, li);
			/* The lnode allocation may have succeeded, save it */
			tlp = lp;
			if (tlp == NULL) {
				tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
			}
			if (nvp == NULL) {
				nvp = vn_alloc(KM_SLEEP);
			}
			lp = NULL;
			TABLE_LOCK_ENTER(vp, li);
			if (flag != LOF_FORCE)
				lp = lfind(vp, li);
			if (lp != NULL) {
				/*
				 * Someone beat us to it: free our spare
				 * allocations and drop the hold the caller
				 * gave us on vp (lfind took its own).
				 */
				kmem_cache_free(lnode_cache, tlp);
				vn_free(nvp);
				VN_RELE(vp);
				goto found_lnode;
			}
			lp = tlp;
		}
		/* Initialize the new lnode/vnode pair and hash it in. */
		atomic_inc_32(&li->li_refct);
		vfsp = makelfsnode(vp->v_vfsp, li);
		lp->lo_vnode = nvp;
		VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
		nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
		vn_setops(nvp, lo_vnodeops);
		nvp->v_data = (caddr_t)lp;
		lp->lo_vp = vp;
		lp->lo_looping = 0;
		lsave(lp, li);
		vn_exists(vp);
	} else {
		/* Found in table; lfind added a hold, release the caller's. */
		VN_RELE(vp);
	}

found_lnode:
	TABLE_LOCK_EXIT(vp, li);
	return (ltov(lp));
}
/*
 * Constructor/destructor routines for fifos and pipes.
 *
 * In the interest of code sharing, we define a common fifodata structure
 * which consists of a fifolock and one or two fnodes.  A fifo contains
 * one fnode; a pipe contains two.  The fifolock is shared by the fnodes,
 * each of which points to it:
 *
 *	--> -->	---------  ---  ---
 *	|   |	| lock	|   |	 |
 *	|   |	---------   |	 |
 *	|   |	|	|  fifo	 |
 *	|   ---	| fnode	|   |	 |
 *	|	|	|   |	pipe
 *	|	---------  ---	 |
 *	|	|	|	 |
 *	-------	| fnode	|	 |
 *		|	|	 |
 *		---------	---
 *
 * Since the fifolock is at the beginning of the fifodata structure,
 * the fifolock address is the same as the fifodata address.  Thus,
 * we can determine the fifodata address from any of its member fnodes.
 * This is essential for fifo_inactive.
 *
 * The fnode constructor is designed to handle any fifodata structure,
 * deducing the number of fnodes from the total size.  Thus, the fnode
 * constructor does most of the work for the pipe constructor.
 */
static int
fnode_constructor(void *buf, void *cdrarg, int kmflags)
{
	fifodata_t *fdp = buf;
	fifolock_t *flp = &fdp->fifo_lock;
	fifonode_t *fnp = &fdp->fifo_fnode[0];
	/* cdrarg encodes the total fifodata size; see block comment above */
	size_t size = (uintptr_t)cdrarg;

	mutex_init(&flp->flk_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&flp->flk_wait_cv, NULL, CV_DEFAULT, NULL);
	flp->flk_ocsync = 0;

	/* One iteration per fnode contained in the fifodata buffer. */
	while ((char *)fnp < (char *)buf + size) {

		vnode_t *vp;

		vp = vn_alloc(kmflags);
		if (vp == NULL) {
			/*
			 * Partial failure: mark this fnode so the destructor
			 * knows where initialization stopped, then unwind
			 * everything built so far.
			 */
			fnp->fn_vnode = NULL;	/* mark for destructor */
			fnode_destructor(buf, cdrarg);
			return (-1);
		}
		fnp->fn_vnode = vp;

		fnp->fn_lock = flp;
		fnp->fn_open = 0;
		fnp->fn_dest = fnp;	/* self-loopback until paired */
		fnp->fn_mp = NULL;
		fnp->fn_count = 0;
		fnp->fn_rsynccnt = 0;
		fnp->fn_wsynccnt = 0;
		fnp->fn_wwaitcnt = 0;
		fnp->fn_insync = 0;
		fnp->fn_pcredp = NULL;
		fnp->fn_cpid = -1;
		/*
		 * 32-bit stat(2) may fail if fn_ino isn't initialized
		 */
		fnp->fn_ino = 0;

		cv_init(&fnp->fn_wait_cv, NULL, CV_DEFAULT, NULL);

		vn_setops(vp, fifo_vnodeops);
		vp->v_stream = NULL;
		vp->v_type = VFIFO;
		vp->v_data = (caddr_t)fnp;
		vp->v_flag = VNOMAP | VNOSWAP;
		vn_exists(vp);
		fnp++;
	}
	return (0);
}
static int fdget(struct vnode *dvp, char *comp, struct vnode **vpp) { int n = 0; struct vnode *vp; while (*comp) { if (*comp < '0' || *comp > '9') return (ENOENT); n = 10 * n + *comp++ - '0'; } vp = vn_alloc(KM_SLEEP); vp->v_type = VCHR; vp->v_vfsp = dvp->v_vfsp; vn_setops(vp, fd_vnodeops); vp->v_data = NULL; vp->v_flag = VNOMAP; vp->v_rdev = makedevice(fdrmaj, n); vn_exists(vp); *vpp = vp; return (0); }
/*
 * Find or create the pcnode for a directory entry.
 *
 * The filesystem must be locked (PCFS_LOCKED).  A NULL "ep" means the
 * root directory.  Active directory nodes are identified by starting
 * cluster; active file nodes by the (blkno, offset) of their directory
 * entry.  A hit in the active list returns a held node; otherwise a new
 * node is allocated, initialized, and inserted into the hash list.
 */
struct pcnode *
pc_getnode(
	struct pcfs *fsp,	/* filesystem for node */
	daddr_t blkno,		/* phys block no of dir entry */
	int offset,		/* offset of dir entry in block */
	struct pcdir *ep)	/* node dir entry */
{
	struct pcnode *pcp;
	struct pchead *hp;
	struct vnode *vp;
	pc_cluster32_t scluster;

	ASSERT(fsp->pcfs_flags & PCFS_LOCKED);
	if (ep == (struct pcdir *)0) {
		/* No entry supplied: this is the root directory. */
		ep = &pcfs_rootdirentry;
		scluster = 0;
	} else {
		scluster = pc_getstartcluster(fsp, ep);
	}
	/*
	 * First look for active nodes.
	 * File nodes are identified by the location (blkno, offset) of
	 * its directory entry.
	 * Directory nodes are identified by the starting cluster number
	 * for the entries.
	 */
	if (ep->pcd_attr & PCA_DIR) {
		hp = &pcdhead[PCDHASH(fsp, scluster)];
		rw_enter(&pcnodes_lock, RW_READER);
		for (pcp = hp->pch_forw;
		    pcp != (struct pcnode *)hp; pcp = pcp->pc_forw) {
			if ((fsp == VFSTOPCFS(PCTOV(pcp)->v_vfsp)) &&
			    (scluster == pcp->pc_scluster)) {
				VN_HOLD(PCTOV(pcp));
				rw_exit(&pcnodes_lock);
				return (pcp);
			}
		}
		rw_exit(&pcnodes_lock);
	} else {
		hp = &pcfhead[PCFHASH(fsp, blkno, offset)];
		rw_enter(&pcnodes_lock, RW_READER);
		for (pcp = hp->pch_forw;
		    pcp != (struct pcnode *)hp; pcp = pcp->pc_forw) {
			/* Skip invalidated nodes (PC_INVAL). */
			if ((fsp == VFSTOPCFS(PCTOV(pcp)->v_vfsp)) &&
			    ((pcp->pc_flags & PC_INVAL) == 0) &&
			    (blkno == pcp->pc_eblkno) &&
			    (offset == pcp->pc_eoffset)) {
				VN_HOLD(PCTOV(pcp));
				rw_exit(&pcnodes_lock);
				return (pcp);
			}
		}
		rw_exit(&pcnodes_lock);
	}
	/*
	 * Cannot find node in active list. Allocate memory for a new node
	 * initialize it, and put it on the active list.
	 */
	pcp = kmem_alloc(sizeof (struct pcnode), KM_SLEEP);
	bzero(pcp, sizeof (struct pcnode));
	vp = vn_alloc(KM_SLEEP);
	pcp->pc_vn = vp;
	pcp->pc_entry = *ep;
	pcp->pc_eblkno = blkno;
	pcp->pc_eoffset = offset;
	pcp->pc_scluster = scluster;
	pcp->pc_lcluster = scluster;
	pcp->pc_lindex = 0;
	pcp->pc_flags = 0;
	if (ep->pcd_attr & PCA_DIR) {
		vn_setops(vp, pcfs_dvnodeops);
		vp->v_type = VDIR;
		if (scluster == 0) {
			/* Root directory node. */
			vp->v_flag = VROOT;
			blkno = offset = 0;
			if (IS_FAT32(fsp)) {
				pc_cluster32_t ncl = 0;

				/*
				 * On FAT32 the root directory is an
				 * ordinary cluster chain; walk it to
				 * determine its size.
				 */
				scluster = fsp->pcfs_rdirstart;
				if (pc_fileclsize(fsp, scluster, &ncl)) {
					PC_DPRINTF1(2, "cluster chain "
					    "corruption, scluster=%d\n",
					    scluster);
					pcp->pc_flags |= PC_INVAL;
				}
				pcp->pc_size = fsp->pcfs_clsize * ncl;
			} else {
				/* FAT12/16 root directory has a fixed size. */
				pcp->pc_size =
				    fsp->pcfs_rdirsec * fsp->pcfs_secsize;
			}
		} else {
			pc_cluster32_t ncl = 0;

			if (pc_fileclsize(fsp, scluster, &ncl)) {
				PC_DPRINTF1(2, "cluster chain corruption, "
				    "scluster=%d\n", scluster);
				pcp->pc_flags |= PC_INVAL;
			}
			pcp->pc_size = fsp->pcfs_clsize * ncl;
		}
	} else {
		vn_setops(vp, pcfs_fvnodeops);
		vp->v_type = VREG;
		vp->v_flag = VNOSWAP;
		fsp->pcfs_frefs++;
		pcp->pc_size = ltohi(ep->pcd_size);
	}
	fsp->pcfs_nrefs++;
	VFS_HOLD(PCFSTOVFS(fsp));
	vp->v_data = (caddr_t)pcp;
	vp->v_vfsp = PCFSTOVFS(fsp);
	vn_exists(vp);
	/* Publish the new node on the hash list. */
	rw_enter(&pcnodes_lock, RW_WRITER);
	insque(pcp, hp);
	rw_exit(&pcnodes_lock);
	return (pcp);
}
/*
 * Find (or create) the shadow vnode of master vnode "mvp" that matches
 * the (directory, name) pair identified by dvp and *namepp.
 *
 * On entry *namepp holds a referenced nfs4_fname_t; this routine always
 * consumes that reference (either releasing it on a match or storing it
 * in a newly created shadow vnode) and NULLs *namepp.  The returned
 * vnode is held.
 */
vnode_t *
sv_find(vnode_t *mvp, vnode_t *dvp, nfs4_fname_t **namepp)
{
	vnode_t *vp;
	rnode4_t *rp = VTOR4(mvp);
	svnode_t *svp;
	svnode_t *master_svp = VTOSV(mvp);
	rnode4_t *drp = VTOR4(dvp);
	nfs4_fname_t *nm;

	ASSERT(dvp != NULL);

	sv_stats.sv_find++;

	ASSERT(namepp != NULL);
	ASSERT(*namepp != NULL);
	/* Take ownership of the name; we consume the caller's reference. */
	nm = *namepp;
	*namepp = NULL;

	/*
	 * At this point, all we know is that we have an rnode whose
	 * file handle matches the file handle of the object we want.
	 * We have to verify that component name and the directory
	 * match.  If so, then we are done.
	 *
	 * Note: mvp is always the master vnode.
	 */
	ASSERT(!IS_SHADOW(mvp, rp));

	if (sv_match(nm, drp->r_fh, master_svp)) {
		VN_HOLD(mvp);
		fn_rele(&nm);
		return (mvp);
	}

	/*
	 * No match, search through the shadow vnode list.
	 * Hold the r_svlock to prevent changes.
	 */
	mutex_enter(&rp->r_svlock);
	for (svp = master_svp->sv_forw; svp != master_svp; svp = svp->sv_forw)
		if (sv_match(nm, drp->r_fh, svp)) {
			/*
			 * A matching shadow vnode is found, bump the
			 * reference count on it and return it.
			 */
			vp = SVTOV(svp);
			VN_HOLD(vp);
			fn_rele(&nm);
			mutex_exit(&rp->r_svlock);
			return (vp);
		}

	/*
	 * No match searching the list, go allocate a new shadow
	 */
	svp = kmem_cache_alloc(svnode_cache, KM_SLEEP);
	svp->sv_r_vnode = vn_alloc(KM_SLEEP);
	vp = SVTOV(svp);

	/* Initialize the vnode */
	vn_setops(vp, nfs4_vnodeops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = mvp->v_vfsp;
	ASSERT(nfs4_consistent_type(mvp));
	vp->v_type = mvp->v_type;
	vp->v_pages = (page_t *)-1; /* No pages, please */
	vn_exists(vp);

	/* Initialize the shadow vnode */
	svp->sv_dfh = VTOR4(dvp)->r_fh;
	sfh4_hold(svp->sv_dfh);
	svp->sv_name = nm;	/* shadow now owns the name reference */
	VN_HOLD(mvp);		/* shadow holds its master */
	insque(svp, master_svp);
	mutex_exit(&rp->r_svlock);

	return (vp);
}
/*
 * Mount a file descriptor onto the node in the file system.
 * Create a new vnode, update the attributes with info from the
 * file descriptor and the mount point.  The mask, mode, uid, gid,
 * atime, mtime and ctime are taken from the mountpt.  Link count is
 * set to one, the file system id is namedev and nodeid is unique
 * for each mounted object.  Other attributes are taken from mount point.
 * Make sure user is owner (or root) with write permissions on mount point.
 * Hash the new vnode and return 0.
 * Upon entry to this routine, the file descriptor is in the
 * fd field of a struct namefd.  Copy that structure from user
 * space and retrieve the file descriptor.
 */
static int
nm_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *crp)
{
	struct namefd namefdp;
	struct vnode *filevp;		/* file descriptor vnode */
	struct file *fp;
	struct vnode *newvp;		/* vnode representing this mount */
	struct vnode *rvp;		/* realvp (if any) for the mountpt */
	struct namenode *nodep;		/* namenode for this mount */
	struct vattr filevattr;		/* attributes of file dec. */
	struct vattr *vattrp;		/* attributes of this mount */
	char *resource_name;
	char *resource_nodetype;
	statvfs64_t *svfsp;
	int error = 0;

	/*
	 * Get the file descriptor from user space.
	 * Make sure the file descriptor is valid and has an
	 * associated file pointer.
	 * If so, extract the vnode from the file pointer.
	 */
	if (uap->datalen != sizeof (struct namefd))
		return (EINVAL);

	if (copyin(uap->dataptr, &namefdp, uap->datalen))
		return (EFAULT);

	if ((fp = getf(namefdp.fd)) == NULL)
		return (EBADF);

	/*
	 * If the mount point already has something mounted
	 * on it, disallow this mount.  (This restriction may
	 * be removed in a later release).
	 * Or unmount has completed but the namefs ROOT vnode
	 * count has not decremented to zero, disallow this mount.
	 */
	mutex_enter(&mvp->v_lock);
	if ((mvp->v_flag & VROOT) ||
	    vfs_matchops(mvp->v_vfsp, namefs_vfsops)) {
		mutex_exit(&mvp->v_lock);
		releasef(namefdp.fd);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Cannot allow users to fattach() in /dev/pts.
	 * First, there is no need for doing so and secondly
	 * we cannot allow arbitrary users to park on a node in
	 * /dev/pts or /dev/vt.
	 */
	rvp = NULLVP;
	if (vn_matchops(mvp, spec_getvnodeops()) &&
	    VOP_REALVP(mvp, &rvp, NULL) == 0 && rvp &&
	    (vn_matchops(rvp, devpts_getvnodeops()) ||
	    vn_matchops(rvp, devvt_getvnodeops()))) {
		releasef(namefdp.fd);
		return (ENOTSUP);
	}

	filevp = fp->f_vnode;
	if (filevp->v_type == VDIR || filevp->v_type == VPORT) {
		releasef(namefdp.fd);
		return (EINVAL);
	}

	/*
	 * If the fd being mounted refers to neither a door nor a stream,
	 * make sure the caller is privileged.
	 */
	if (filevp->v_type != VDOOR && filevp->v_stream == NULL) {
		if (secpolicy_fs_mount(crp, filevp, vfsp) != 0) {
			/* fd is neither a stream nor a door */
			releasef(namefdp.fd);
			return (EINVAL);
		}
	}

	/*
	 * Make sure the file descriptor is not the root of some
	 * file system.
	 * If it's not, create a reference and allocate a namenode
	 * to represent this mount request.
	 */
	if (filevp->v_flag & VROOT) {
		releasef(namefdp.fd);
		return (EBUSY);
	}

	nodep = kmem_zalloc(sizeof (struct namenode), KM_SLEEP);

	mutex_init(&nodep->nm_lock, NULL, MUTEX_DEFAULT, NULL);
	vattrp = &nodep->nm_vattr;
	vattrp->va_mask = AT_ALL;
	if (error = VOP_GETATTR(mvp, vattrp, 0, crp, NULL))
		goto out;

	filevattr.va_mask = AT_ALL;
	if (error = VOP_GETATTR(filevp, &filevattr, 0, crp, NULL))
		goto out;
	/*
	 * Make sure the user is the owner of the mount point
	 * or has sufficient privileges.
	 */
	if (error = secpolicy_vnode_owner(crp, vattrp->va_uid))
		goto out;

	/*
	 * Make sure the user has write permissions on the
	 * mount point (or has sufficient privileges).
	 */
	if (!(vattrp->va_mode & VWRITE) &&
	    secpolicy_vnode_access(crp, mvp, vattrp->va_uid, VWRITE) != 0) {
		error = EACCES;
		goto out;
	}

	/*
	 * If the file descriptor has file/record locking, don't
	 * allow the mount to succeed.
	 */
	if (vn_has_flocks(filevp)) {
		error = EACCES;
		goto out;
	}

	/*
	 * Initialize the namenode.
	 */
	if (filevp->v_stream) {
		struct stdata *stp = filevp->v_stream;
		mutex_enter(&stp->sd_lock);
		stp->sd_flag |= STRMOUNT;
		mutex_exit(&stp->sd_lock);
	}
	nodep->nm_filevp = filevp;
	/* Take our own reference on the file; releasef drops the getf hold. */
	mutex_enter(&fp->f_tlock);
	fp->f_count++;
	mutex_exit(&fp->f_tlock);
	releasef(namefdp.fd);
	nodep->nm_filep = fp;
	nodep->nm_mountpt = mvp;

	/*
	 * The attributes for the mounted file descriptor were initialized
	 * above by applying VOP_GETATTR to the mount point.  Some of
	 * the fields of the attributes structure will be overwritten
	 * by the attributes from the file descriptor.
	 */
	vattrp->va_type = filevattr.va_type;
	vattrp->va_fsid = namedev;
	vattrp->va_nodeid = namenodeno_alloc();
	vattrp->va_nlink = 1;
	vattrp->va_size = filevattr.va_size;
	vattrp->va_rdev = filevattr.va_rdev;
	vattrp->va_blksize = filevattr.va_blksize;
	vattrp->va_nblocks = filevattr.va_nblocks;
	vattrp->va_seq = 0;

	/*
	 * Initialize new vnode structure for the mounted file descriptor.
	 */
	nodep->nm_vnode = vn_alloc(KM_SLEEP);
	newvp = NMTOV(nodep);

	newvp->v_flag = filevp->v_flag | VROOT | VNOMAP | VNOSWAP;
	vn_setops(newvp, nm_vnodeops);
	newvp->v_vfsp = vfsp;
	newvp->v_stream = filevp->v_stream;
	newvp->v_type = filevp->v_type;
	newvp->v_rdev = filevp->v_rdev;
	newvp->v_data = (caddr_t)nodep;
	VFS_HOLD(vfsp);
	vn_exists(newvp);

	/*
	 * Initialize the vfs structure.
	 */
	vfsp->vfs_vnodecovered = NULL;
	vfsp->vfs_flag |= VFS_UNLINKABLE;
	vfsp->vfs_bsize = 1024;
	vfsp->vfs_fstype = namefstype;
	vfs_make_fsid(&vfsp->vfs_fsid, namedev, namefstype);
	vfsp->vfs_data = (caddr_t)nodep;
	vfsp->vfs_dev = namedev;
	vfsp->vfs_bcount = 0;

	/*
	 * Set the name we mounted from.
	 */
	switch (filevp->v_type) {
	case VPROC:	/* VOP_GETATTR() translates this to VREG */
	case VREG:	resource_nodetype = "file"; break;
	case VDIR:	resource_nodetype = "directory"; break;
	case VBLK:	resource_nodetype = "device"; break;
	case VCHR:	resource_nodetype = "device"; break;
	case VLNK:	resource_nodetype = "link"; break;
	case VFIFO:	resource_nodetype = "fifo"; break;
	case VDOOR:	resource_nodetype = "door"; break;
	case VSOCK:	resource_nodetype = "socket"; break;
	default:	resource_nodetype = "resource"; break;
	}

#define	RESOURCE_NAME_SZ 128 /* Maximum length of the resource name */
	resource_name = kmem_alloc(RESOURCE_NAME_SZ, KM_SLEEP);
	svfsp = kmem_alloc(sizeof (statvfs64_t), KM_SLEEP);

	error = VFS_STATVFS(filevp->v_vfsp, svfsp);
	if (error == 0) {
		(void) snprintf(resource_name, RESOURCE_NAME_SZ,
		    "unspecified_%s_%s", svfsp->f_basetype,
		    resource_nodetype);
	} else {
		(void) snprintf(resource_name, RESOURCE_NAME_SZ,
		    "unspecified_%s", resource_nodetype);
	}

	vfs_setresource(vfsp, resource_name);

	kmem_free(svfsp, sizeof (statvfs64_t));
	kmem_free(resource_name, RESOURCE_NAME_SZ);
#undef RESOURCE_NAME_SZ

	/*
	 * Insert the namenode.
	 */
	mutex_enter(&ntable_lock);
	nameinsert(nodep);
	mutex_exit(&ntable_lock);
	return (0);
out:
	/*
	 * NOTE(review): nm_lock was mutex_init'ed above but is not
	 * mutex_destroy'ed before freeing nodep here — confirm whether
	 * that is intentional (adaptive mutexes need no teardown) or an
	 * oversight.
	 */
	releasef(namefdp.fd);
	kmem_free(nodep, sizeof (struct namenode));
	return (error);
}
/*
 * Create a reference to the vnode representing the file descriptor.
 * Then, apply the VOP_OPEN operation to that vnode.
 *
 * The vnode for the file descriptor may be switched under you.
 * If it is, search the hash list for an nodep - nodep->nm_filevp
 * pair.  If it exists, return that nodep to the user.
 * If it does not exist, create a new namenode to attach
 * to the nodep->nm_filevp then place the pair on the hash list.
 *
 * Newly created objects are like children/nodes in the mounted
 * file system, with the parent being the initial mount.
 */
int
nm_open(vnode_t **vpp, int flag, cred_t *crp, caller_context_t *ct)
{
	struct namenode *nodep = VTONM(*vpp);
	int error = 0;
	struct namenode *newnamep;
	struct vnode *newvp;
	struct vnode *infilevp;
	struct vnode *outfilevp;

	/*
	 * If the vnode is switched under us, the corresponding
	 * VN_RELE for this VN_HOLD will be done by the file system
	 * performing the switch. Otherwise, the corresponding
	 * VN_RELE will be done by nm_close().
	 */
	infilevp = outfilevp = nodep->nm_filevp;
	VN_HOLD(outfilevp);

	if ((error = VOP_OPEN(&outfilevp, flag, crp, ct)) != 0) {
		VN_RELE(outfilevp);
		return (error);
	}
	if (infilevp != outfilevp) {
		/*
		 * See if the new filevp (outfilevp) is already associated
		 * with the mount point.  If it is, then it already has a
		 * namenode associated with it.
		 */
		mutex_enter(&ntable_lock);
		if ((newnamep =
		    namefind(outfilevp, nodep->nm_mountpt)) != NULL) {
			struct vnode *vp = NMTOV(newnamep);

			VN_HOLD(vp);
			goto gotit;
		}

		/* Build a new namenode/vnode pair for the switched filevp. */
		newnamep = kmem_zalloc(sizeof (struct namenode), KM_SLEEP);
		newvp = vn_alloc(KM_SLEEP);
		newnamep->nm_vnode = newvp;
		mutex_init(&newnamep->nm_lock, NULL, MUTEX_DEFAULT, NULL);

		mutex_enter(&nodep->nm_lock);
		/* Children of the mount are never themselves the root. */
		newvp->v_flag = ((*vpp)->v_flag | VNOMAP | VNOSWAP) & ~VROOT;
		vn_setops(newvp, vn_getops(*vpp));
		newvp->v_vfsp = &namevfs;
		newvp->v_stream = outfilevp->v_stream;
		newvp->v_type = outfilevp->v_type;
		newvp->v_rdev = outfilevp->v_rdev;
		newvp->v_data = (caddr_t)newnamep;
		vn_exists(newvp);
		/* Inherit attributes from the parent, then override. */
		bcopy(&nodep->nm_vattr, &newnamep->nm_vattr, sizeof (vattr_t));
		newnamep->nm_vattr.va_type = outfilevp->v_type;
		newnamep->nm_vattr.va_nodeid = namenodeno_alloc();
		newnamep->nm_vattr.va_size = (u_offset_t)0;
		newnamep->nm_vattr.va_rdev = outfilevp->v_rdev;
		newnamep->nm_flag = NMNMNT;
		newnamep->nm_filevp = outfilevp;
		newnamep->nm_filep = nodep->nm_filep;
		newnamep->nm_mountpt = nodep->nm_mountpt;
		mutex_exit(&nodep->nm_lock);

		/*
		 * Insert the new namenode into the hash list.
		 */
		nameinsert(newnamep);
gotit:
		mutex_exit(&ntable_lock);
		/*
		 * Release the above reference to the infilevp, the reference
		 * to the NAMEFS vnode, create a reference to the new vnode
		 * and return the new vnode to the user.
		 */
		VN_RELE(*vpp);
		*vpp = NMTOV(newnamep);
	}
	return (0);
}
/*
 * Find or create the rnode for file handle "fh" on filesystem "vfsp".
 *
 * Caller holds rhtp->r_lock as reader; this routine may drop and
 * re-take that lock (it is re-acquired, as WRITER, before the final
 * hash insertion, and returned held).  When the rnode cache is at its
 * limit an entry is recycled from the freelist instead of allocating;
 * either way *newnode reports whether the returned rnode is new (1)
 * or was already hashed (0).
 */
static vnode_t *
make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
    struct vnodeops *vops,
    int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int,
	cred_t *),
    int *newnode, cred_t *cr)
{
	rnode4_t *rp;
	rnode4_t *trp;
	vnode_t *vp;
	mntinfo4_t *mi;

	ASSERT(RW_READ_HELD(&rhtp->r_lock));

	mi = VFTOMI4(vfsp);

start:
	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(rp);
		*newnode = 0;
		return (vp);
	}
	rw_exit(&rhtp->r_lock);

	mutex_enter(&rp4freelist_lock);
	if (rp4freelist != NULL && rnode4_new >= nrnode) {
		/*
		 * At the rnode limit: recycle an rnode from the freelist
		 * rather than allocating a new one.
		 */
		rp = rp4freelist;
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);

		vp = RTOV4(rp);

		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Someone else grabbed a reference while
				 * the rnode sat on the freelist; give it
				 * back and retry the lookup from scratch.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				rw_enter(&rhtp->r_lock, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		r4inactive(rp, cr);

		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			/* Raced again during r4inactive; retry. */
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&rhtp->r_lock, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);
		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		uninit_rnode4(rp);
		/*
		 * Make sure that if rnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		/* Below the limit (or empty freelist): allocate fresh. */
		vnode_t *new_vp;

		mutex_exit(&rp4freelist_lock);
		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);
		atomic_add_long((ulong_t *)&rnode4_new, 1);
#ifdef DEBUG
		clstat4_debug.nrnode.value.ui64++;
#endif
		vp = new_vp;
	}

	/* (Re)initialize the rnode from scratch. */
	bzero(rp, sizeof (*rp));
	rp->r_vnode = vp;
	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
	rp->created_v4 = 0;
	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
	    offsetof(nfs4_open_stream_t, os_node));
	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
	rp->r_flags = R4READDIRWATTR;
	rp->r_fh = fh;
	rp->r_hashq = rhtp;
	sfh4_hold(rp->r_fh);
	rp->r_server = mi->mi_curr_serv;
	rp->r_deleg_type = OPEN_DELEGATE_NONE;
	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);

	rddir4_cache_create(rp);
	rp->r_putapage = putapage;
	vn_setops(vp, vops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = vfsp;
	VFS_HOLD(vfsp);
	vp->v_type = VNON;
	if (isrootfh(fh, rp))
		vp->v_flag = VROOT;
	vn_exists(vp);

	/*
	 * There is a race condition if someone else
	 * alloc's the rnode while no locks are held, so we
	 * check again and recover if found.
	 */
	rw_enter(&rhtp->r_lock, RW_WRITER);
	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
		/* Lost the race: discard ours and return the winner's. */
		vp = RTOV4(trp);
		*newnode = 0;
		rw_exit(&rhtp->r_lock);
		rp4_addfree(rp, cr);
		rw_enter(&rhtp->r_lock, RW_READER);
		return (vp);
	}
	rp4_addhash(rp);
	*newnode = 1;
	return (vp);
}