int nfs_filestat(struct vnode *vp, struct filestat *fsp) { struct nfsnode nfsnode; mode_t mode; if (!kread(VTONFS(vp), &nfsnode, sizeof (nfsnode))) { dprintf(stderr, "can't read nfsnode at %p for pid %d\n", (void *)VTONFS(vp), Pid); return 0; } fsp->fsid = nfsnode.n_vattr.va_fsid; fsp->fileid = nfsnode.n_vattr.va_fileid; fsp->size = nfsnode.n_size; fsp->rdev = makeudev(nfsnode.n_vattr.va_rmajor, nfsnode.n_vattr.va_rminor); mode = (mode_t)nfsnode.n_vattr.va_mode; switch (vp->v_type) { case VREG: mode |= S_IFREG; break; case VDIR: mode |= S_IFDIR; break; case VBLK: mode |= S_IFBLK; break; case VCHR: mode |= S_IFCHR; break; case VLNK: mode |= S_IFLNK; break; case VSOCK: mode |= S_IFSOCK; break; case VFIFO: mode |= S_IFIFO; break; case VDATABASE: break; case VINT: case VNON: case VBAD: return 0; } fsp->mode = mode; return 1; }
/*
 * Dispose of a pending sillyrename on this vnode, if any: invalidate
 * cached buffers, issue the delayed Remove RPC for the renamed file
 * and queue the sillyrename record for freeing.
 *
 * Called with the vnode exclusively locked and np->n_mtx held; n_mtx
 * is dropped around the RPCs and reacquired before returning.
 */
static void
ncl_releasesillyrename(struct vnode *vp, struct thread *td)
{
	struct nfsnode *np;
	struct sillyrename *sp;

	ASSERT_VOP_ELOCKED(vp, "releasesillyrename");
	np = VTONFS(vp);
	mtx_assert(&np->n_mtx, MA_OWNED);
	/* Directories never carry a sillyrename. */
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp != NULL) {
		/* Drop the node mutex across the buffer flush and RPC. */
		mtx_unlock(&np->n_mtx);
		(void) ncl_vinvalbuf(vp, 0, td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		/* Free sp from task context -- presumably because freeing
		 * here is unsafe; confirm against nfs_freesillyrename. */
		sysmon_task_queue_sched(0, nfs_freesillyrename, sp);
		mtx_lock(&np->n_mtx);
	}
}
/*
 * nfs_inactive: the last reference to this nfs vnode has been released.
 * Detach and process any pending sillyrename, tell the caller whether
 * the vnode should be recycled, and prune the node flags down to the
 * set that stays meaningful while the vnode is idle.
 */
int
nfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	/* Directories never carry a sillyrename. */
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = NULL;
	if (sp != NULL)
		/* Toss cached buffers before the silly file is removed. */
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
	/* Recycle the vnode if the file has been removed on the server side. */
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	/* Keep only the flags that remain valid for an unused node. */
	np->n_flag &= (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID |
	    NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp);

	if (sp != NULL) {
		/* The delayed Remove RPC runs from the sillyrename workqueue. */
		workqueue_enqueue(nfs_sillyworkq, &sp->s_work, NULL);
	}

	return (0);
}
/* * Reclaim an nfsnode so that it can be used for other purposes. */ int ncl_reclaim(struct vop_reclaim_args *ap) { struct vnode *vp = ap->a_vp; struct nfsnode *np = VTONFS(vp); struct nfsdmap *dp, *dp2; if (NFS_ISV4(vp) && vp->v_type == VREG) /* * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4 * Close operations are delayed until ncl_inactive(). * However, since VOP_INACTIVE() is not guaranteed to be * called, we need to do it again here. */ (void) nfsrpc_close(vp, 1, ap->a_td); /* * If the NLM is running, give it a chance to abort pending * locks. */ if (nfs_reclaim_p != NULL) nfs_reclaim_p(ap); /* * Destroy the vm object and flush associated pages. */ vnode_destroy_vobject(vp); vfs_hash_remove(vp); /* * Call nfscl_reclaimnode() to save attributes in the delegation, * as required. */ if (vp->v_type == VREG) nfscl_reclaimnode(vp); /* * Free up any directory cookie structures and * large file handle structures that might be associated with * this nfs node. */ if (vp->v_type == VDIR) { dp = LIST_FIRST(&np->n_cookies); while (dp) { dp2 = dp; dp = LIST_NEXT(dp, ndm_list); FREE((caddr_t)dp2, M_NFSDIROFF); } } if (np->n_writecred != NULL) crfree(np->n_writecred); FREE((caddr_t)np->n_fhp, M_NFSFH); if (np->n_v4 != NULL) FREE((caddr_t)np->n_v4, M_NFSV4NODE); mtx_destroy(&np->n_mtx); uma_zfree(newnfsnode_zone, vp->v_data); vp->v_data = NULL; return (0); }
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 *
 * Final teardown of an NFS vnode: abort NLM locks, destroy the vm
 * object (flushing pages), perform the last NFSv4 Close, detach from
 * the vfs_hash table, and release all per-node allocations.
 */
int
ncl_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p != NULL)
		nfs_reclaim_p(ap);

	/*
	 * Destroy the vm object and flush associated pages.  Done before
	 * the Close below so dirty pages are written while the Open's
	 * stateid still exists.
	 */
	vnode_destroy_vobject(vp);

	if (NFS_ISV4(vp) && vp->v_type == VREG)
		/*
		 * We can now safely close any remaining NFSv4 Opens for
		 * this file. Most opens will have already been closed by
		 * ncl_inactive(), but there are cases where it is not
		 * called, so we need to do it again here.
		 */
		(void) nfsrpc_close(vp, 1, ap->a_td);

	vfs_hash_remove(vp);

	/*
	 * Call nfscl_reclaimnode() to save attributes in the delegation,
	 * as required.
	 */
	if (vp->v_type == VREG)
		nfscl_reclaimnode(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			FREE((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	/* Drop cached write credentials and per-node handle state. */
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	FREE((caddr_t)np->n_fhp, M_NFSFH);
	if (np->n_v4 != NULL)
		FREE((caddr_t)np->n_v4, M_NFSV4NODE);
	mtx_destroy(&np->n_mtx);
	uma_zfree(newnfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
/*
 * nfs_inactive: the last reference to the vnode has been dropped.
 * Carry out any pending sillyrename removal and clear all node flags
 * except NMODIFIED.
 */
int
nfs_inactive(struct vop_inactive_args *ap)
{
	struct nfsnode *np;
	struct sillyrename *sp;
	struct thread *td = curthread;	/* XXX */

	np = VTONFS(ap->a_vp);
	mtx_lock(&np->n_mtx);
	/* Directories are never the target of a sillyrename. */
	if (ap->a_vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp) {
		/* Drop the mutex across the buffer flush and Remove RPC. */
		mtx_unlock(&np->n_mtx);
		(void)nfs_vinvalbuf(ap->a_vp, 0, td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		(sp->s_removeit)(sp);
		crfree(sp->s_cred);
		/* Free sp asynchronously via the thread taskqueue. */
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		mtx_lock(&np->n_mtx);
	}
	/* Only NMODIFIED stays meaningful for an unused vnode. */
	np->n_flag &= NMODIFIED;
	mtx_unlock(&np->n_mtx);
	return (0);
}
int nfs_filestat(struct vnode *vp, struct filestat *fsp) { struct nfsnode nfsnode; mode_t mode; if (!KVM_READ(VTONFS(vp), &nfsnode, sizeof (nfsnode))) { dprintf("can't read nfsnode at %p for pid %ld", VTONFS(vp), (long)Pid); return 0; } fsp->fsid = nfsnode.n_vattr.va_fsid; fsp->fileid = nfsnode.n_vattr.va_fileid; fsp->size = nfsnode.n_size; fsp->rdev = nfsnode.n_vattr.va_rdev; mode = (mode_t)nfsnode.n_vattr.va_mode; switch (vp->v_type) { case VREG: mode |= S_IFREG; break; case VDIR: mode |= S_IFDIR; break; case VBLK: mode |= S_IFBLK; break; case VCHR: mode |= S_IFCHR; break; case VLNK: mode |= S_IFLNK; break; case VSOCK: mode |= S_IFSOCK; break; case VFIFO: mode |= S_IFIFO; break; default: break; } fsp->mode = mode; return 1; }
/*
 * vfs_hash comparison callback: match a vnode against a file handle.
 * Returns 0 when the handles compare equal, non-zero otherwise.
 */
static int
nfs_vncmpf(struct vnode *vp, void *arg)
{
	struct nfs_vncmp *a;
	struct nfsnode *np;

	a = arg;
	np = VTONFS(vp);
	/*
	 * NOTE(review): only the first a->fhsize bytes are compared and the
	 * stored handle's own length is never checked; this assumes n_fhp
	 * always holds at least a->fhsize valid bytes -- confirm against
	 * the hash-insert path.
	 */
	return (bcmp(a->fh, np->n_fhp, a->fhsize));
}
/*
 * Comparison function for vfs_hash functions.
 * Returns 0 when the vnode's file handle matches the one in arg,
 * 1 otherwise.
 */
int
newnfs_vncmpf(struct vnode *vp, void *arg)
{
	struct nfsfh *fhp;
	struct nfsnode *np;

	fhp = arg;
	np = VTONFS(vp);
	/* Equal only when both length and bytes match. */
	if (np->n_fhp->nfh_len == fhp->nfh_len &&
	    !NFSBCMP(np->n_fhp->nfh_fh, fhp->nfh_fh, fhp->nfh_len))
		return (0);
	return (1);
}
/*
 * ncl_inactive: the last reference to the vnode has been released.
 * For NFSv4 regular files, flush dirty pages/buffers and issue the
 * delayed Close RPC; then finish any pending sillyrename and prune
 * the node flags down to NMODIFIED.
 */
int
ncl_inactive(struct vop_inactive_args *ap)
{
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;
	boolean_t retv;

	np = VTONFS(vp);
	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until now. Any dirty
		 * buffers/pages must be flushed before the close, so that the
		 * stateid is available for the writes.
		 */
		if (vp->v_object != NULL) {
			VM_OBJECT_WLOCK(vp->v_object);
			retv = vm_object_page_clean(vp->v_object, 0, 0,
			    OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(vp->v_object);
		} else
			retv = TRUE;
		/* Only Close after the page clean fully succeeded. */
		if (retv == TRUE) {
			(void)ncl_flush(vp, MNT_WAIT, NULL, ap->a_td, 1, 0);
			(void)nfsrpc_close(vp, 1, ap->a_td);
		}
	}
	mtx_lock(&np->n_mtx);
	/* Directories never carry a sillyrename. */
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp) {
		/* Drop the mutex across the buffer flush and Remove RPC. */
		mtx_unlock(&np->n_mtx);
		(void) ncl_vinvalbuf(vp, 0, ap->a_td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		/* Free sp asynchronously via the thread taskqueue. */
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		mtx_lock(&np->n_mtx);
	}
	/* Only NMODIFIED stays meaningful for an unused vnode. */
	np->n_flag &= NMODIFIED;
	mtx_unlock(&np->n_mtx);
	return (0);
}
/*
 * Print a one-line summary of an NFS vnode for fstat/pstat-style
 * output: fileid, node flags, and either the device numbers (for
 * special files) or the file size.
 */
int
nfs_print(struct vnode *vp)
{
	struct nfsnode nfsnode, *np = &nfsnode;
	char flagbuf[16], *flags = flagbuf;
	int flag;
	char *name;
	mode_t type;

	/* Fetch the nfsnode image (KGETRET presumably bails out of this
	 * function on a failed read -- see the macro's definition). */
	KGETRET(VTONFS(vp), &nfsnode, sizeof(nfsnode), "vnode's nfsnode");
	flag = np->n_flag;
	/* Encode each set node flag as a single letter; '-' if none. */
	if (flag & NFLUSHWANT)
		*flags++ = 'W';
	if (flag & NFLUSHINPROG)
		*flags++ = 'P';
	if (flag & NLMODIFIED)
		*flags++ = 'M';
	if (flag & NRMODIFIED)
		*flags++ = 'R';
	if (flag & NWRITEERR)
		*flags++ = 'E';
	if (flag & NQNFSEVICTED)
		*flags++ = 'G';
	if (flag & NACC)
		*flags++ = 'A';
	if (flag & NUPD)
		*flags++ = 'U';
	if (flag & NCHG)
		*flags++ = 'C';
	if (flag & NLOCKED)
		*flags++ = 'L';
	if (flag & NWANTED)
		*flags++ = 'w';
	if (flag == 0)
		*flags++ = '-';
	*flags = '\0';
#define VT np->n_vattr
	printf(" %6ju %5s", (uintmax_t)VT.va_fileid, flagbuf);
	type = VT.va_mode & S_IFMT;
	/* Devices: print the name when known, else major,minor. */
	if (S_ISCHR(VT.va_mode) || S_ISBLK(VT.va_mode))
		if (usenumflag ||
		    ((name = devname((VT.va_rmajor << 8) | VT.va_rminor,
		    type)) == NULL))
			printf(" %2d,%-2d", VT.va_rmajor, VT.va_rminor);
		else
			printf(" %7s", name);
	else
		printf(" %7ju", (uintmax_t)np->n_size);
	return (0);
}
/*
 * Invalidate both the access and attribute caches for this vnode.
 */
void
ncl_invalcaches(struct vnode *vp)
{
	struct nfsnode *np;
	int slot;

	np = VTONFS(vp);
	mtx_lock(&np->n_mtx);
	/* Zero every access-cache timestamp to force revalidation. */
	for (slot = 0; slot < NFS_ACCESSCACHESIZE; slot++)
		np->n_accesscache[slot].stamp = 0;
	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
	/* Expire the cached attributes as well. */
	np->n_attrstamp = 0;
	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	mtx_unlock(&np->n_mtx);
}
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 * Unhooks the node from the mount's red-black tree and releases all
 * per-node resources: directory cache, file handle, vattr and creds.
 */
int
nfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (prtactive && vp->v_usecount > 1)
		vprint("nfs_reclaim: pushing active", vp);

	/* Remove the node from the per-mount file handle lookup tree. */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	rb_tree_remove_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache != NULL) {
		nfs_invaldircache(vp, NFS_INVALDIRCACHE_FORCE);
		hashdone(np->n_dircache, HASH_LIST, nfsdirhashmask);
	}
	KASSERT(np->n_dirgens == NULL);

	/* Handles above NFS_SMALLFH were separately allocated; smaller
	 * ones presumably live inline in the node -- no free needed. */
	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);
	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);
	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a nfsnode structure is returned via *npp.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct vnode *nvp;
	int rv;

	/* vcache_get() finds or creates the vnode for this handle. */
	rv = vcache_get(mntp, fhp, fhsize, &nvp);
	if (rv != 0)
		return rv;
	rv = vn_lock(nvp, LK_EXCLUSIVE | lkflags);
	if (rv != 0) {
		/* Could not lock it: drop the reference and bail out. */
		vrele(nvp);
		return rv;
	}
	*npp = VTONFS(nvp);
	return 0;
}
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 * Aborts NLM locks, destroys the vm object, detaches from vfs_hash
 * and releases all per-node allocations.
 */
int
nfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p)
		nfs_reclaim_p(ap);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	vfs_hash_remove(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			free((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	/* Handles above NFS_SMALLFH were separately allocated; smaller
	 * ones presumably live inline in the node -- no free needed. */
	if (np->n_fhsize > NFS_SMALLFH) {
		free((caddr_t)np->n_fhp, M_NFSBIGFH);
	}
	mtx_destroy(&np->n_mtx);
	uma_zfree(nfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
/*
 * nfs_inactive: the vnode is now unused.  Finish any pending
 * sillyrename (flush buffers, remove the silly file, free the record)
 * and trim the node flags down to those still meaningful while idle.
 */
int
nfs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;

#ifdef DIAGNOSTIC
	if (prtactive && ap->a_vp->v_usecount != 0)
		vprint("nfs_inactive: pushing active", ap->a_vp);
#endif
	if (ap->a_vp->v_flag & VLARVAL)
		/*
		 * vnode was incompletely set up, just return
		 * as we are throwing it away.
		 */
		return(0);
#ifdef DIAGNOSTIC
	if (ap->a_vp->v_data == NULL)
		panic("NULL v_data (no nfsnode set up?) in vnode %p",
		    ap->a_vp);
#endif
	np = VTONFS(ap->a_vp);
	/* Directories never carry a sillyrename. */
	if (ap->a_vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp) {
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, curproc);
		nfs_removeit(sp);
		crfree(sp->s_cred);
		vrele(sp->s_dvp);
		free(sp, M_NFSREQ, sizeof(*sp));
	}
	/* Keep only the flags valid for an inactive node. */
	np->n_flag &= (NMODIFIED | NFLUSHINPROG | NFLUSHWANT);
	VOP_UNLOCK(ap->a_vp, ap->a_p);
	return (0);
}
/*
 * ncl_inactive: the vnode has become unused.  For NFSv4 regular files,
 * write back dirty pages/buffers and perform the delayed Close RPC;
 * then release any pending sillyrename and prune n_flag to NMODIFIED.
 */
int
ncl_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np;
	boolean_t retv;

	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until now. Any dirty
		 * buffers/pages must be flushed before the close, so that the
		 * stateid is available for the writes.
		 */
		if (vp->v_object != NULL) {
			VM_OBJECT_WLOCK(vp->v_object);
			retv = vm_object_page_clean(vp->v_object, 0, 0,
			    OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(vp->v_object);
		} else
			retv = TRUE;
		/* Only Close after the page clean fully succeeded. */
		if (retv == TRUE) {
			(void)ncl_flush(vp, MNT_WAIT, NULL, ap->a_td, 1, 0);
			(void)nfsrpc_close(vp, 1, ap->a_td);
		}
	}

	np = VTONFS(vp);
	mtx_lock(&np->n_mtx);
	/* Drops and reacquires n_mtx internally when there is work to do. */
	ncl_releasesillyrename(vp, ap->a_td);

	/*
	 * NMODIFIED means that there might be dirty/stale buffers
	 * associated with the NFS vnode. None of the other flags are
	 * meaningful after the vnode is unused.
	 */
	np->n_flag &= NMODIFIED;
	mtx_unlock(&np->n_mtx);
	return (0);
}
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 * Unlinks the node from the per-mount RB tree, releases cached
 * credentials, purges the name cache and frees the node.
 */
int
nfs_reclaim(void *v)
{
	struct vop_reclaim_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsmount *nmp;
	struct nfsnode *np = VTONFS(vp);

#ifdef DIAGNOSTIC
	if (prtactive && vp->v_usecount != 0)
		vprint("nfs_reclaim: pushing active", vp);
#endif
	if (ap->a_vp->v_flag & VLARVAL)
		/*
		 * vnode was incompletely set up, just return
		 * as we are throwing it away.
		 */
		return(0);
#ifdef DIAGNOSTIC
	if (ap->a_vp->v_data == NULL)
		panic("NULL v_data (no nfsnode set up?) in vnode %p",
		    ap->a_vp);
#endif
	nmp = VFSTONFS(vp->v_mount);
	/* Remove the node from the per-mount lookup tree. */
	rw_enter_write(&nfs_hashlock);
	RB_REMOVE(nfs_nodetree, &nmp->nm_ntree, np);
	rw_exit_write(&nfs_hashlock);

	/* Release cached read/write credentials, if any. */
	if (np->n_rcred)
		crfree(np->n_rcred);
	if (np->n_wcred)
		crfree(np->n_wcred);

	cache_purge(vp);
	pool_put(&nfs_node_pool, vp->v_data);
	vp->v_data = NULL;
	return (0);
}
/*
 * Extract the information needed by the nlm from the nfs vnode.
 * Every out-parameter may be NULL when the caller does not need it.
 */
static void
nfs_getnlminfo(struct vnode *vp, uint8_t *fhp, size_t *fhlenp,
    struct sockaddr_storage *sp, int *is_v3p, off_t *sizep,
    struct timeval *timeop)
{
	struct nfsnode *node = VTONFS(vp);
	struct nfsmount *mnt = VFSTONFS(vp->v_mount);

	if (fhlenp != NULL)
		*fhlenp = (size_t)node->n_fhsize;
	if (fhp != NULL)
		bcopy(node->n_fhp, fhp, node->n_fhsize);
	if (sp != NULL)
		bcopy(mnt->nm_nam, sp,
		    min(mnt->nm_nam->sa_len, sizeof(*sp)));
	if (is_v3p != NULL)
		*is_v3p = NFS_ISV3(vp);
	if (sizep != NULL)
		*sizep = node->n_size;
	if (timeop != NULL) {
		/* Convert the mount timeout from NFS_HZ ticks. */
		timeop->tv_sec = mnt->nm_timeo / NFS_HZ;
		timeop->tv_usec = (mnt->nm_timeo % NFS_HZ) *
		    (1000000 / NFS_HZ);
	}
}
* if there is no nfsowner table yet, allocate one. */ if (p->p_nlminfo == NULL) { MALLOC(p->p_nlminfo, struct nlminfo *, sizeof(struct nlminfo), M_LOCKF, M_WAITOK | M_ZERO); p->p_nlminfo->pid_start = p->p_stats->p_start; } msg.lm_msg_ident.pid_start = p->p_nlminfo->pid_start; msg.lm_msg_ident.msg_seq = ++(p->p_nlminfo->msg_seq); msg.lm_fl = *fl; msg.lm_wait = ap->a_flags & F_WAIT; msg.lm_getlk = ap->a_op == F_GETLK; bcopy(VFSTONFS(vp->v_mount)->nm_nam, &msg.lm_addr, min(sizeof msg.lm_addr, VFSTONFS(vp->v_mount)->nm_nam->sa_len)); msg.lm_fh_len = NFS_ISV3(vp) ? VTONFS(vp)->n_fhsize : NFSX_V2FH; bcopy(VTONFS(vp)->n_fhp, msg.lm_fh, msg.lm_fh_len); msg.lm_nfsv3 = NFS_ISV3(vp); cru2x(td->td_ucred, &msg.lm_cred); /* * Open the lock fifo. If for any reason we don't find the fifo, it * means that the lock daemon isn't running. Translate any missing * file error message for the user, otherwise the application will * complain that the user's file is missing, which isn't the case. * Note that we use proc0's cred, so the fifo is opened as root. * * XXX: Note that this behavior is relative to the root directory * of the current process, and this may result in a variety of * {functional, security} problems in chroot() environments. */
/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;
	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	/* Build a temporary nfsfh just for the hash lookup. */
	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags, td, &nvp,
	    newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		/* Found an existing node: return it. */
		*npp = VTONFS(nvp);
		return (0);
	}

	/* Not cached: allocate and initialize a new nfsnode + vnode. */
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);
	error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert
	 * happened to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct
	 */
	if ((fhsize == nmp->nm_fhsize) && !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}

	/* Attach a private copy of the file handle to the node. */
	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		/* NOTE(review): assumes insmntque() disposes of vp on
		 * failure, so only our own allocations are undone here
		 * -- confirm. */
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags, td, &nvp,
	    newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		/* Lost an insertion race: return the winner instead. */
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
/*
 * nfs statvfs call
 *
 * Issues an NFS FSSTAT RPC against the mount's root vnode and converts
 * the reply into *sbp.  NOTE: the cp/tl/t1/t2/bpos/dpos/cp2/mreq/md/mb
 * locals are presumably read and written by the nfsm_* request macros
 * (which also provide the nfsmout label via nfsm_reqdone), so they must
 * stay declared even though they look unused here -- see the macro
 * definitions.
 */
int
nfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	struct lwp *l = curlwp;
	struct vnode *vp;
	struct nfs_statfs *sfp;
	char *cp;
	u_int32_t *tl;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	struct nfsmount *nmp = VFSTONFS(mp);
	int error = 0, retattr;
#ifdef NFS_V2_ONLY
	const int v3 = 0;
#else
	int v3 = (nmp->nm_flag & NFSMNT_NFSV3);
#endif
	struct mbuf *mreq, *mrep = NULL, *md, *mb;
	kauth_cred_t cred;
	u_quad_t tquad;
	struct nfsnode *np;

#ifndef nolint
	sfp = (struct nfs_statfs *)0;
#endif
	vp = nmp->nm_vnode;
	np = VTONFS(vp);
	cred = kauth_cred_alloc();
#ifndef NFS_V2_ONLY
	/* Fetch fsinfo once per mount before the first v3 FSSTAT. */
	if (v3 && (nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	nfsstats.rpccnt[NFSPROC_FSSTAT]++;
	nfsm_reqhead(np, NFSPROC_FSSTAT, NFSX_FH(v3));
	nfsm_fhtom(np, v3);
	nfsm_request(np, NFSPROC_FSSTAT, l, cred);
	if (v3)
		nfsm_postop_attr(vp, retattr, 0);
	if (error) {
		if (mrep != NULL) {
			if (mrep->m_next != NULL)
				printf("nfs_vfsops: nfs_statvfs would lose buffers\n");
			m_freem(mrep);
		}
		goto nfsmout;
	}
	nfsm_dissect(sfp, struct nfs_statfs *, NFSX_STATFS(v3));
	sbp->f_flag = nmp->nm_flag;
	sbp->f_iosize = min(nmp->nm_rsize, nmp->nm_wsize);
	if (v3) {
		/* v3 reports byte counts; convert into NFS_FABLKSIZE
		 * fragment-sized blocks. */
		sbp->f_frsize = sbp->f_bsize = NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_tbytes);
		sbp->f_blocks = ((quad_t)tquad / (quad_t)NFS_FABLKSIZE);
		tquad = fxdr_hyper(&sfp->sf_fbytes);
		sbp->f_bfree = ((quad_t)tquad / (quad_t)NFS_FABLKSIZE);
		tquad = fxdr_hyper(&sfp->sf_abytes);
		tquad = ((quad_t)tquad / (quad_t)NFS_FABLKSIZE);
		/* Reserved = free-to-root minus available-to-others. */
		sbp->f_bresvd = sbp->f_bfree - tquad;
		sbp->f_bavail = tquad;
		/* Handle older NFS servers returning negative values */
		if ((quad_t)sbp->f_bavail < 0)
			sbp->f_bavail = 0;
		tquad = fxdr_hyper(&sfp->sf_tfiles);
		sbp->f_files = tquad;
		tquad = fxdr_hyper(&sfp->sf_ffiles);
		sbp->f_ffree = tquad;
		sbp->f_favail = tquad;
		sbp->f_fresvd = 0;
		sbp->f_namemax = NFS_MAXNAMLEN;
	} else {
		/* v2 reports block counts directly; file counts are
		 * not available. */
		sbp->f_bsize = NFS_FABLKSIZE;
		sbp->f_frsize = fxdr_unsigned(int32_t, sfp->sf_bsize);
		sbp->f_blocks = fxdr_unsigned(int32_t, sfp->sf_blocks);
		sbp->f_bfree = fxdr_unsigned(int32_t, sfp->sf_bfree);
		sbp->f_bavail = fxdr_unsigned(int32_t, sfp->sf_bavail);
		sbp->f_fresvd = 0;
		sbp->f_files = 0;
		sbp->f_ffree = 0;
		sbp->f_favail = 0;
		sbp->f_fresvd = 0;
		sbp->f_namemax = NFS_MAXNAMLEN;
	}
	copy_statvfs_info(sbp, mp);
	nfsm_reqdone;
	kauth_cred_free(cred);
	return (error);
}
/*
 * nfs version 3 fsinfo rpc call
 *
 * Queries the server's transfer-size limits and clamps the mount's
 * write/read/readdir sizes and maximum file size accordingly.  The
 * cp/t1/t2/tl/bpos/dpos/cp2/mreq/mrep/md/mb locals are presumably used
 * by the nfsm_* request macros -- see their definitions.
 */
int
nfs_fsinfo(struct nfsmount *nmp, struct vnode *vp, kauth_cred_t cred,
    struct lwp *l)
{
	struct nfsv3_fsinfo *fsp;
	char *cp;
	int32_t t1, t2;
	u_int32_t *tl, pref, xmax;
	char *bpos, *dpos, *cp2;
	int error = 0, retattr;
	struct mbuf *mreq, *mrep, *md, *mb;
	u_int64_t maxfsize;
	struct nfsnode *np = VTONFS(vp);

	nfsstats.rpccnt[NFSPROC_FSINFO]++;
	nfsm_reqhead(np, NFSPROC_FSINFO, NFSX_FH(1));
	nfsm_fhtom(np, 1);
	nfsm_request(np, NFSPROC_FSINFO, l, cred);
	nfsm_postop_attr(vp, retattr, 0);
	if (!error) {
		nfsm_dissect(fsp, struct nfsv3_fsinfo *, NFSX_V3FSINFO);
		/* Round the preferred write size into FABLKSIZE units,
		 * unless the user fixed wsize at mount time. */
		pref = fxdr_unsigned(u_int32_t, fsp->fs_wtpref);
		if ((nmp->nm_flag & NFSMNT_WSIZE) == 0 &&
		    pref < nmp->nm_wsize && pref >= NFS_FABLKSIZE)
			nmp->nm_wsize = (pref + NFS_FABLKSIZE - 1) &
			    ~(NFS_FABLKSIZE - 1);
		/* Never exceed the server's hard write limit. */
		xmax = fxdr_unsigned(u_int32_t, fsp->fs_wtmax);
		if (xmax < nmp->nm_wsize && xmax > 0) {
			nmp->nm_wsize = xmax & ~(NFS_FABLKSIZE - 1);
			if (nmp->nm_wsize == 0)
				nmp->nm_wsize = xmax;
		}
		/* Same dance for the read size. */
		pref = fxdr_unsigned(u_int32_t, fsp->fs_rtpref);
		if ((nmp->nm_flag & NFSMNT_RSIZE) == 0 &&
		    pref < nmp->nm_rsize && pref >= NFS_FABLKSIZE)
			nmp->nm_rsize = (pref + NFS_FABLKSIZE - 1) &
			    ~(NFS_FABLKSIZE - 1);
		xmax = fxdr_unsigned(u_int32_t, fsp->fs_rtmax);
		if (xmax < nmp->nm_rsize && xmax > 0) {
			nmp->nm_rsize = xmax & ~(NFS_FABLKSIZE - 1);
			if (nmp->nm_rsize == 0)
				nmp->nm_rsize = xmax;
		}
		pref = fxdr_unsigned(u_int32_t, fsp->fs_dtpref);
		if (pref < nmp->nm_readdirsize && pref >= NFS_DIRFRAGSIZ)
			nmp->nm_readdirsize = (pref + NFS_DIRFRAGSIZ - 1) &
			    ~(NFS_DIRFRAGSIZ - 1);
		/*
		 * NOTE: xmax still holds the server's rtmax here --
		 * presumably intentional, since the NFSv3 FSINFO reply
		 * has no separate dtmax field and READDIR replies are
		 * bounded by the read transfer limit.
		 */
		if (xmax < nmp->nm_readdirsize && xmax > 0) {
			nmp->nm_readdirsize = xmax & ~(NFS_DIRFRAGSIZ - 1);
			if (nmp->nm_readdirsize == 0)
				nmp->nm_readdirsize = xmax;
		}
		/* XXX */
		nmp->nm_maxfilesize = (u_int64_t)0x80000000 * DEV_BSIZE - 1;
		maxfsize = fxdr_hyper(&fsp->fs_maxfilesize);
		if (maxfsize > 0 && maxfsize < nmp->nm_maxfilesize)
			nmp->nm_maxfilesize = maxfsize;
		nmp->nm_mountp->mnt_fs_bshift =
		    ffs(MIN(nmp->nm_rsize, nmp->nm_wsize)) - 1;
		nmp->nm_iflag |= NFSMNT_GOTFSINFO;
	}
	nfsm_reqdone;
	return (error);
}
/* * Mount a remote root fs via. nfs. This depends on the info in the * nfs_diskless structure that has been filled in properly by some primary * bootstrap. * It goes something like this: * - do enough of "ifconfig" by calling ifioctl() so that the system * can talk to the server * - If nfs_diskless.mygateway is filled in, use that address as * a default gateway. * - build the rootfs mount point and call mountnfs() to do the rest. */ int nfs_mountroot(struct mount *mp) { struct mount *swap_mp; struct nfsv3_diskless *nd = &nfsv3_diskless; struct socket *so; struct vnode *vp; struct thread *td = curthread; /* XXX */ int error, i; u_long l; char buf[128]; #if defined(BOOTP_NFSROOT) && defined(BOOTP) bootpc_init(); /* use bootp to get nfs_diskless filled in */ #endif /* * XXX time must be non-zero when we init the interface or else * the arp code will wedge... */ while (mycpu->gd_time_seconds == 0) tsleep(mycpu, 0, "arpkludge", 10); /* * The boot code may have passed us a diskless structure. */ kprintf("DISKLESS %d\n", nfs_diskless_valid); if (nfs_diskless_valid == 1) nfs_convert_diskless(); /* * NFSv3 is required. */ nd->root_args.flags |= NFSMNT_NFSV3 | NFSMNT_RDIRPLUS; nd->swap_args.flags |= NFSMNT_NFSV3; #define SINP(sockaddr) ((struct sockaddr_in *)(sockaddr)) kprintf("nfs_mountroot: interface %s ip %s", nd->myif.ifra_name, inet_ntoa(SINP(&nd->myif.ifra_addr)->sin_addr)); kprintf(" bcast %s", inet_ntoa(SINP(&nd->myif.ifra_broadaddr)->sin_addr)); kprintf(" mask %s\n", inet_ntoa(SINP(&nd->myif.ifra_mask)->sin_addr)); #undef SINP /* * XXX splnet, so networks will receive... */ crit_enter(); /* * BOOTP does not necessarily have to be compiled into the kernel * for an NFS root to work. If we inherited the network * configuration for PXEBOOT then pxe_setup_nfsdiskless() has figured * out our interface for us and all we need to do is ifconfig the * interface. We only do this if the interface has not already been * ifconfig'd by e.g. BOOTP. 
*/ error = socreate(nd->myif.ifra_addr.sa_family, &so, SOCK_DGRAM, 0, td); if (error) { panic("nfs_mountroot: socreate(%04x): %d", nd->myif.ifra_addr.sa_family, error); } error = ifioctl(so, SIOCAIFADDR, (caddr_t)&nd->myif, proc0.p_ucred); if (error) panic("nfs_mountroot: SIOCAIFADDR: %d", error); soclose(so, FNONBLOCK); /* * If the gateway field is filled in, set it as the default route. */ if (nd->mygateway.sin_len != 0) { struct sockaddr_in mask, sin; bzero((caddr_t)&mask, sizeof(mask)); sin = mask; sin.sin_family = AF_INET; sin.sin_len = sizeof(sin); kprintf("nfs_mountroot: gateway %s\n", inet_ntoa(nd->mygateway.sin_addr)); error = rtrequest_global(RTM_ADD, (struct sockaddr *)&sin, (struct sockaddr *)&nd->mygateway, (struct sockaddr *)&mask, RTF_UP | RTF_GATEWAY); if (error) kprintf("nfs_mountroot: unable to set gateway, error %d, continuing anyway\n", error); } /* * Create the rootfs mount point. */ nd->root_args.fh = nd->root_fh; nd->root_args.fhsize = nd->root_fhsize; l = ntohl(nd->root_saddr.sin_addr.s_addr); ksnprintf(buf, sizeof(buf), "%ld.%ld.%ld.%ld:%s", (l >> 24) & 0xff, (l >> 16) & 0xff, (l >> 8) & 0xff, (l >> 0) & 0xff,nd->root_hostnam); kprintf("NFS_ROOT: %s\n",buf); error = nfs_mountdiskless(buf, "/", MNT_RDONLY, &nd->root_saddr, &nd->root_args, td, &vp, &mp); if (error) { mp->mnt_vfc->vfc_refcount--; crit_exit(); return (error); } swap_mp = NULL; if (nd->swap_nblks) { /* Convert to DEV_BSIZE instead of Kilobyte */ nd->swap_nblks *= 2; /* * Create a fake mount point just for the swap vnode so that the * swap file can be on a different server from the rootfs. 
*/ nd->swap_args.fh = nd->swap_fh; nd->swap_args.fhsize = nd->swap_fhsize; l = ntohl(nd->swap_saddr.sin_addr.s_addr); ksnprintf(buf, sizeof(buf), "%ld.%ld.%ld.%ld:%s", (l >> 24) & 0xff, (l >> 16) & 0xff, (l >> 8) & 0xff, (l >> 0) & 0xff,nd->swap_hostnam); kprintf("NFS SWAP: %s\n",buf); error = nfs_mountdiskless(buf, "/swap", 0, &nd->swap_saddr, &nd->swap_args, td, &vp, &swap_mp); if (error) { crit_exit(); return (error); } vfs_unbusy(swap_mp); VTONFS(vp)->n_size = VTONFS(vp)->n_vattr.va_size = nd->swap_nblks * DEV_BSIZE ; /* * Since the swap file is not the root dir of a file system, * hack it to a regular file. */ vclrflags(vp, VROOT); vref(vp); nfs_setvtype(vp, VREG); swaponvp(td, vp, nd->swap_nblks); }
void nfs4_vnop_loadattrcache(struct vnode *vp, struct nfsv4_fattr *fap, struct vattr *vaper) { struct vattr *vap; struct nfsnode *np; int32_t rdev; enum vtype vtyp; u_short vmode; struct timespec mtime; struct timeval tv; microtime(&tv); vtyp = nv3tov_type[fap->fa4_type & 0x7]; vmode = (fap->fa4_valid & FA4V_MODE) ? fap->fa4_mode : 0777; rdev = (fap->fa4_valid & FA4V_RDEV) ? makeudev(fap->fa4_rdev_major, fap->fa4_rdev_minor) : 0; if (fap->fa4_valid & FA4V_MTIME) mtime = fap->fa4_mtime; else bzero(&mtime, sizeof mtime); /* * If v_type == VNON it is a new node, so fill in the v_type, * n_mtime fields. Check to see if it represents a special * device, and if so, check for a possible alias. Once the * correct vnode has been obtained, fill in the rest of the * information. */ np = VTONFS(vp); vap = &np->n_vattr; if (vp->v_type != vtyp || np->n_mtime == 0) { bzero(vap, sizeof *vap); vp->v_type = vtyp; np->n_mtime = mtime.tv_sec; } vap->va_type = vtyp; vap->va_mode = (vmode & 07777); vap->va_rdev = rdev; vap->va_mtime = mtime; vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; if (fap->fa4_valid & FA4V_NLINK) vap->va_nlink = fap->fa4_nlink; if (fap->fa4_valid & FA4V_UID) vap->va_uid = fap->fa4_uid; if (fap->fa4_valid & FA4V_GID) vap->va_gid = fap->fa4_gid; vap->va_size = fap->fa4_size; vap->va_blocksize = NFS_FABLKSIZE; vap->va_bytes = fap->fa4_size; if (fap->fa4_valid & FA4V_FILEID) vap->va_fileid = nfs_v4fileid4_to_fileid(fap->fa4_fileid); if (fap->fa4_valid & FA4V_ATIME) vap->va_atime = fap->fa4_atime; if (fap->fa4_valid & FA4V_CTIME) vap->va_ctime = fap->fa4_ctime; vap->va_flags = 0; vap->va_filerev = 0; /* XXX dontshrink flag? 
*/ if (vap->va_size != np->n_size) { if (vap->va_type == VREG) { if (np->n_flag & NMODIFIED) { if (vap->va_size < np->n_size) vap->va_size = np->n_size; else np->n_size = vap->va_size; } else np->n_size = vap->va_size; vnode_pager_setsize(vp, np->n_size); } else np->n_size = vap->va_size; } np->n_attrstamp = tv.tv_sec; if (vaper != NULL) { bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap)); if (np->n_flag & NCHG) { if (np->n_flag & NACC) vaper->va_atime = np->n_atim; if (np->n_flag & NUPD) vaper->va_mtime = np->n_mtim; } } }
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * mntp   - NFS mount to look up / create the node in
 * fhp    - server file handle identifying the node
 * fhsize - length of *fhp in bytes
 * npp    - out: the nfsnode (set to NULL on error and while searching)
 * flags  - locking flags passed through to vfs_hash_get()/vfs_hash_insert()
 *
 * Returns 0 on success or an errno value.
 */
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfs_vncmp ncmp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;
	/*
	 * FNV hash of the raw handle bytes; nfs_vncmpf() with ncmp does the
	 * exact file-handle comparison on hash collisions.
	 */
	hash = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);
	ncmp.fhsize = fhsize;
	ncmp.fh = fhp;
	error = vfs_hash_get(mntp, hash, flags, td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		/* Found an existing node for this handle; hand it back. */
		*npp = VTONFS(nvp);
		return (0);
	}
	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if zalloc should block.
	 */
	np = uma_zalloc(nfsnode_zone, M_WAITOK | M_ZERO);
	error = getnewvnode("nfs", mntp, &nfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	vp->v_bufobj.bo_ops = &buf_ops_nfs;
	/* Cross-link the vnode and nfsnode. */
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert happened
	 * to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NFSnode lock", NULL, MTX_DEF);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/* Big handles get malloc'ed storage; small ones fit inside the node. */
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = malloc(fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		/*
		 * NOTE(review): only our own state (big fh buffer, mutex,
		 * nfsnode) is released here; insmntque() presumably disposes
		 * of the vnode itself on failure -- confirm against its
		 * contract.
		 */
		*npp = NULL;
		if (np->n_fhsize > NFS_SMALLFH) {
			free((caddr_t)np->n_fhp, M_NFSBIGFH);
		}
		mtx_destroy(&np->n_mtx);
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, flags, td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		/*
		 * Another thread inserted the same handle first; return the
		 * winner.  vfs_hash_insert() vput()'s the losing vnode.
		 */
		*npp = VTONFS(nvp);
		return (0);
	}
	*npp = np;
	return (0);
}
/* * Check the time stamp * If the cache is valid, copy contents to *vap and return 0 * otherwise return an error */ int ncl_getattrcache(struct vnode *vp, struct vattr *vaper) { struct nfsnode *np; struct vattr *vap; struct nfsmount *nmp; int timeo, mustflush; np = VTONFS(vp); vap = &np->n_vattr.na_vattr; nmp = VFSTONFS(vp->v_mount); mustflush = nfscl_mustflush(vp); /* must be before mtx_lock() */ mtx_lock(&np->n_mtx); /* XXX n_mtime doesn't seem to be updated on a miss-and-reload */ timeo = (time_second - np->n_mtime.tv_sec) / 10; #ifdef NFS_ACDEBUG if (nfs_acdebug>1) printf("ncl_getattrcache: initial timeo = %d\n", timeo); #endif if (vap->va_type == VDIR) { if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin) timeo = nmp->nm_acdirmin; else if (timeo > nmp->nm_acdirmax) timeo = nmp->nm_acdirmax; } else { if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin) timeo = nmp->nm_acregmin; else if (timeo > nmp->nm_acregmax) timeo = nmp->nm_acregmax; } #ifdef NFS_ACDEBUG if (nfs_acdebug > 2) printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n", nmp->nm_acregmin, nmp->nm_acregmax, nmp->nm_acdirmin, nmp->nm_acdirmax); if (nfs_acdebug) printf("ncl_getattrcache: age = %d; final timeo = %d\n", (time_second - np->n_attrstamp), timeo); #endif if ((time_second - np->n_attrstamp) >= timeo && (mustflush != 0 || np->n_attrstamp == 0)) { nfsstatsv1.attrcache_misses++; mtx_unlock(&np->n_mtx); KDTRACE_NFS_ATTRCACHE_GET_MISS(vp); return( ENOENT); } nfsstatsv1.attrcache_hits++; if (vap->va_size != np->n_size) { if (vap->va_type == VREG) { if (np->n_flag & NMODIFIED) { if (vap->va_size < np->n_size) vap->va_size = np->n_size; else np->n_size = vap->va_size; } else { np->n_size = vap->va_size; } vnode_pager_setsize(vp, np->n_size); } else { np->n_size = vap->va_size; } } bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr)); if (np->n_flag & NCHG) { if (np->n_flag & NACC) vaper->va_atime = np->n_atim; if (np->n_flag & NUPD) vaper->va_mtime = np->n_mtim; } 
mtx_unlock(&np->n_mtx); KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap); return (0); }
/* * Look up a vnode/nfsnode by file handle. * Callers must check for mount points!! * In all cases, a pointer to a * nfsnode structure is returned. * This variant takes a "struct nfsfh *" as second argument and uses * that structure up, either by hanging off the nfsnode or FREEing it. */ int nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp, struct componentname *cnp, struct thread *td, struct nfsnode **npp, void *stuff, int lkflags) { struct nfsnode *np, *dnp; struct vnode *vp, *nvp; struct nfsv4node *newd, *oldd; int error; u_int hash; struct nfsmount *nmp; nmp = VFSTONFS(mntp); dnp = VTONFS(dvp); *npp = NULL; hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT); error = vfs_hash_get(mntp, hash, lkflags, td, &nvp, newnfs_vncmpf, nfhp); if (error == 0 && nvp != NULL) { /* * I believe there is a slight chance that vgonel() could * get called on this vnode between when NFSVOPLOCK() drops * the VI_LOCK() and vget() acquires it again, so that it * hasn't yet had v_usecount incremented. If this were to * happen, the VI_DOOMED flag would be set, so check for * that here. Since we now have the v_usecount incremented, * we should be ok until we vrele() it, if the VI_DOOMED * flag isn't set now. */ VI_LOCK(nvp); if ((nvp->v_iflag & VI_DOOMED)) { VI_UNLOCK(nvp); vrele(nvp); error = ENOENT; } else { VI_UNLOCK(nvp); } } if (error) { FREE((caddr_t)nfhp, M_NFSFH); return (error); } if (nvp != NULL) { np = VTONFS(nvp); /* * For NFSv4, check to see if it is the same name and * replace the name, if it is different. 
*/ oldd = newd = NULL; if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL && nvp->v_type == VREG && (np->n_v4->n4_namelen != cnp->cn_namelen || NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), cnp->cn_namelen) || dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen || NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, dnp->n_fhp->nfh_len))) { MALLOC(newd, struct nfsv4node *, sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len + + cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK); NFSLOCKNODE(np); if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG && (np->n_v4->n4_namelen != cnp->cn_namelen || NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), cnp->cn_namelen) || dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen || NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, dnp->n_fhp->nfh_len))) { oldd = np->n_v4; np->n_v4 = newd; newd = NULL; np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len; np->n_v4->n4_namelen = cnp->cn_namelen; NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data, dnp->n_fhp->nfh_len); NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4), cnp->cn_namelen); } NFSUNLOCKNODE(np); }