Example #1
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 * This variant takes a "struct nfsfh *" as second argument and uses
 * that structure up, either by hanging off the nfsnode or FREEing it.
 */
int
nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
    struct componentname *cnp, struct thread *td, struct nfsnode **npp,
    void *stuff, int lkflags)
{
	struct nfsnode *np, *dnp;
	struct vnode *vp, *nvp;
	struct nfsv4node *newd, *oldd;
	int error;
	u_int hash;
	struct nfsmount *nmp;

	nmp = VFSTONFS(mntp);
	dnp = VTONFS(dvp);
	*npp = NULL;

	hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT);

	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	if (error == 0 && nvp != NULL) {
		/*
		 * I believe there is a slight chance that vgonel() could
		 * get called on this vnode between when NFSVOPLOCK() drops
		 * the VI_LOCK() and vget() acquires it again, so that it
		 * hasn't yet had v_usecount incremented. If this were to
		 * happen, the VI_DOOMED flag would be set, so check for
		 * that here. Since we now have the v_usecount incremented,
		 * we should be ok until we vrele() it, if the VI_DOOMED
		 * flag isn't set now.
		 */
		VI_LOCK(nvp);
		if ((nvp->v_iflag & VI_DOOMED)) {
			VI_UNLOCK(nvp);
			vrele(nvp);
			error = ENOENT;
		} else {
			VI_UNLOCK(nvp);
		}
	}
	if (error) {
		FREE((caddr_t)nfhp, M_NFSFH);
		return (error);
	}
	if (nvp != NULL) {
		np = VTONFS(nvp);
		/*
		 * For NFSv4, check to see if it is the same name and
		 * replace the name, if it is different.
		 */
		oldd = newd = NULL;
		if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL &&
		    nvp->v_type == VREG &&
		    (np->n_v4->n4_namelen != cnp->cn_namelen ||
		     NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
		     cnp->cn_namelen) ||
		     dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
		     NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
		     dnp->n_fhp->nfh_len))) {
		    MALLOC(newd, struct nfsv4node *,
			sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len +
			cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK);
		    NFSLOCKNODE(np);
		    if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG
			&& (np->n_v4->n4_namelen != cnp->cn_namelen ||
			 NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
			 cnp->cn_namelen) ||
			 dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
			 NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			 dnp->n_fhp->nfh_len))) {
			oldd = np->n_v4;
			np->n_v4 = newd;
			newd = NULL;
			np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
			np->n_v4->n4_namelen = cnp->cn_namelen;
			NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			    dnp->n_fhp->nfh_len);
			NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
			    cnp->cn_namelen);
		    }
		    NFSUNLOCKNODE(np);
		}
Example #2
/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 */
int
ncl_getattrcache(struct vnode *vp, struct vattr *vaper)
{
	struct nfsnode *np;
	struct vattr *vap;
	struct nfsmount *nmp;
	int timeo, mustflush;
	
	np = VTONFS(vp);
	vap = &np->n_vattr.na_vattr;
	nmp = VFSTONFS(vp->v_mount);
	mustflush = nfscl_mustflush(vp);	/* must be before mtx_lock() */
#ifdef NFS_ACDEBUG
	mtx_lock(&Giant);	/* ncl_printf() */
#endif
	mtx_lock(&np->n_mtx);
	/* XXX n_mtime doesn't seem to be updated on a miss-and-reload */
	timeo = (time_second - np->n_mtime.tv_sec) / 10;

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 1)
		ncl_printf("nfs_getattrcache: initial timeo = %d\n", timeo);
#endif

	if (vap->va_type == VDIR) {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin)
			timeo = nmp->nm_acdirmin;
		else if (timeo > nmp->nm_acdirmax)
			timeo = nmp->nm_acdirmax;
	} else {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin)
			timeo = nmp->nm_acregmin;
		else if (timeo > nmp->nm_acregmax)
			timeo = nmp->nm_acregmax;
	}

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 2)
		ncl_printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n",
			   nmp->nm_acregmin, nmp->nm_acregmax,
			   nmp->nm_acdirmin, nmp->nm_acdirmax);

	if (nfs_acdebug)
		ncl_printf("nfs_getattrcache: age = %d; final timeo = %d\n",
			   (time_second - np->n_attrstamp), timeo);
#endif

	if ((time_second - np->n_attrstamp) >= timeo &&
	    (mustflush != 0 || np->n_attrstamp == 0)) {
		newnfsstats.attrcache_misses++;
		mtx_unlock(&np->n_mtx);
#ifdef NFS_ACDEBUG
		mtx_unlock(&Giant);	/* ncl_printf() */
#endif
		KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);
		return (ENOENT);
	}
	newnfsstats.attrcache_hits++;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else {
				np->n_size = vap->va_size;
			}
			vnode_pager_setsize(vp, np->n_size);
		} else {
			np->n_size = vap->va_size;
		}
	}
	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC)
			vaper->va_atime = np->n_atim;
		if (np->n_flag & NUPD)
			vaper->va_mtime = np->n_mtim;
	}
	mtx_unlock(&np->n_mtx);
#ifdef NFS_ACDEBUG
	mtx_unlock(&Giant);	/* ncl_printf() */
#endif
	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap);
	return (0);
}
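
The timeout clamping in ncl_getattrcache() is easy to misread: a node with NMODIFIED set always falls back to the minimum timeout, even when its computed age exceeds the maximum. A minimal sketch of the same rule factored into one helper (the function name is hypothetical, not part of the source):

static int
example_clamp_timeo(int timeo, int modified, int acmin, int acmax)
{
	/* A modified node never trusts the cache longer than the minimum. */
	if (modified || timeo < acmin)
		return (acmin);
	if (timeo > acmax)
		return (acmax);
	return (timeo);
}

It would be called with (timeo, np->n_flag & NMODIFIED, nmp->nm_acdirmin, nmp->nm_acdirmax) for directories and with the acreg pair for everything else.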
Example #3
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep;
	u_int32_t *tl;
	struct nfsmount *nmp;
	struct mbuf *md;
	time_t waituntil;
	caddr_t dpos;
	int error = 0, timeo;
	AUTH *auth = NULL;
	enum nfs_rto_timer_t timer;
	struct nfs_feedback_arg nf;
	struct rpc_callextra ext;
	enum clnt_stat stat;
	struct timeval timo;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mreq);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	bzero(&nf, sizeof(struct nfs_feedback_arg));
	nf.nf_mount = nmp;
	nf.nf_td = td;
	nf.nf_lastmsg = time_uptime -
	    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));

	/*
	 * XXX if not already connected call nfs_connect now.  Longer
	 * term, change nfs_mount to call nfs_connect unconditionally
	 * and let clnt_reconnect_create handle reconnects.
	 */
	if (!nmp->nm_client)
		nfs_connect(nmp);

	auth = nfs_getauth(nmp, cred);
	if (!auth) {
		m_freem(mreq);
		return (EACCES);
	}
	bzero(&ext, sizeof(ext));
	ext.rc_auth = auth;

	ext.rc_feedback = nfs_feedback;
	ext.rc_feedback_arg = &nf;

	/*
	 * Use a conservative timeout for RPCs other than getattr,
	 * lookup, read or write.  The justification for doing "other"
	 * this way is that these RPCs happen so infrequently that
	 * timer est. would probably be stale.  Also, since many of
	 * these RPCs are non-idempotent, a conservative timeout is
	 * desired.
	 */
	timer = nfs_rto_timer(procnum);
	if (timer != NFS_DEFAULT_TIMER)
		ext.rc_timers = &nmp->nm_timers[timer - 1];
	else
		ext.rc_timers = NULL;

#ifdef KDTRACE_HOOKS
	if (dtrace_nfsclient_nfs23_start_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nmp->nm_flag & NFSMNT_NFSV3) {
			probe_id = nfsclient_nfs3_start_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfsclient_nfs2_start_probes[procnum];
			probe_procnum = nfsv2_procid[procnum];
		}
		if (probe_id != 0)
			(dtrace_nfsclient_nfs23_start_probe)(probe_id, vp,
			    mreq, cred, probe_procnum);
	}
#endif

	nfsstats.rpcrequests++;
tryagain:
	/*
	 * This timeout specifies when a new socket should be created,
	 * along with new xid values. For UDP, this should be done
	 * infrequently, since retransmits of RPC requests should normally
	 * use the same xid.
	 */
	if (nmp->nm_sotype == SOCK_DGRAM) {
		if ((nmp->nm_flag & NFSMNT_SOFT) != 0) {
			/*
			 * CLSET_RETRIES is set to 2, so this should be half
			 * of the total timeout required.
			 */
			timeo = nmp->nm_retry * nmp->nm_timeo / 2;
			if (timeo < 1)
				timeo = 1;
			timo.tv_sec = timeo / NFS_HZ;
			timo.tv_usec = (timeo % NFS_HZ) * 1000000 / NFS_HZ;
		} else {
			/* For UDP hard mounts, use a large value. */
			timo.tv_sec = NFS_MAXTIMEO / NFS_HZ;
			timo.tv_usec = 0;
		}
	} else {
		timo.tv_sec = nmp->nm_timeo / NFS_HZ;
		timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
	}
	mrep = NULL;
	stat = CLNT_CALL_MBUF(nmp->nm_client, &ext,
	    (nmp->nm_flag & NFSMNT_NFSV3) ? procnum : nfsv2_procid[procnum],
	    mreq, &mrep, timo);

	/*
	 * Map the RPC status returned by CLNT_CALL_MBUF() to an
	 * errno value.
	 */
	if (stat == RPC_SUCCESS)
		error = 0;
	else if (stat == RPC_TIMEDOUT) {
		nfsstats.rpctimeouts++;
		error = ETIMEDOUT;
	} else if (stat == RPC_VERSMISMATCH) {
		nfsstats.rpcinvalid++;
		error = EOPNOTSUPP;
	} else if (stat == RPC_PROGVERSMISMATCH) {
		nfsstats.rpcinvalid++;
		error = EPROTONOSUPPORT;
	} else if (stat == RPC_INTR) {
		error = EINTR;
	} else {
		nfsstats.rpcinvalid++;
		error = EACCES;
	}
	if (error)
		goto nfsmout;

	KASSERT(mrep != NULL, ("mrep shouldn't be NULL if no error\n"));

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	error = nfs_realign(&mrep, M_NOWAIT);
	if (error == ENOMEM) {
		m_freem(mrep);
		AUTH_DESTROY(auth);
		nfsstats.rpcinvalid++;
		return (error);
	}

	md = mrep;
	dpos = mtod(mrep, caddr_t);
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	if (*tl != 0) {
		error = fxdr_unsigned(int, *tl);
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    error == NFSERR_TRYLATER) {
			m_freem(mrep);
			error = 0;
			waituntil = time_second + nfs3_jukebox_delay;
			while (time_second < waituntil)
				(void)tsleep(&fake_wchan, PSOCK, "nqnfstry",
				    hz);
			goto tryagain;
		}
		/*
		 * Make sure NFSERR_RETERR isn't bogusly set by a server
		 * such as amd. (No actual NFS error has bit 31 set.)
		 */
		error &= ~NFSERR_RETERR;

		/*
		 * If the File Handle was stale, invalidate the lookup
		 * cache, just in case.
		 */
		if (error == ESTALE)
			nfs_purgecache(vp);
		/*
		 * Skip wcc data on non-ENOENT NFS errors for now.
		 * NetApp filers return corrupt postop attrs in the
		 * wcc data for NFS err EROFS.  Not sure if they could
		 * return corrupt postop attrs for other errors.
		 * Blocking ENOENT post-op attributes breaks negative
		 * name caching, so always allow it through.
		 */
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    (!nfs_skip_wcc_data_onerr || error == ENOENT)) {
			*mrp = mrep;
			*mdp = md;
			*dposp = dpos;
			error |= NFSERR_RETERR;
		} else
			m_freem(mrep);
		goto nfsmout;
	}
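
The middle of nfs_request() translates the enum clnt_stat result of CLNT_CALL_MBUF() into an errno. A sketch of that mapping as a standalone helper, using exactly the cases coded above (the helper name is hypothetical, and the nfsstats counters are omitted):

static int
example_rpcstat_to_errno(enum clnt_stat stat)
{
	switch (stat) {
	case RPC_SUCCESS:
		return (0);
	case RPC_TIMEDOUT:
		return (ETIMEDOUT);
	case RPC_VERSMISMATCH:
		return (EOPNOTSUPP);
	case RPC_PROGVERSMISMATCH:
		return (EPROTONOSUPPORT);
	case RPC_INTR:
		return (EINTR);
	default:
		return (EACCES);
	}
}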
Example #4
/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/* 
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert
	 * happened to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/* 
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct 
	 */
	if ((fhsize == nmp->nm_fhsize) &&
	    !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}
	
	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags, 
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
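
Since the comment restricts ncl_nget() to the root directory, its natural caller is the VFS root lookup. A minimal sketch of such a caller, assuming the conventions visible elsewhere in these examples (the function name is hypothetical):

static int
example_nfs_root(struct mount *mp, int lkflags, struct vnode **vpp)
{
	struct nfsmount *nmp = VFSTONFS(mp);
	struct nfsnode *np;
	int error;

	/* nm_fh/nm_fhsize hold the root file handle saved at mount time. */
	error = ncl_nget(mp, nmp->nm_fh, nmp->nm_fhsize, &np, lkflags);
	if (error != 0)
		return (error);
	*vpp = NFSTOV(np);
	return (0);
}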
Example #5
/*
 * nfs statfs call
 */
int
nfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct vnode *vp;
	struct nfs_statfs *sfp;
	struct nfsmount *nmp = VFSTONFS(mp);
	thread_t td = curthread;
	int error = 0, retattr;
	struct nfsnode *np;
	u_quad_t tquad;
	struct nfsm_info info;

	info.mrep = NULL;
	info.v3 = (nmp->nm_flag & NFSMNT_NFSV3);

	lwkt_gettoken(&nmp->nm_token);

#ifndef nolint
	sfp = NULL;
#endif
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, NULL);
	if (error) {
		lwkt_reltoken(&nmp->nm_token);
		return (error);
	}
	vp = NFSTOV(np);
	/* ignore the passed cred */
	cred = crget();
	cred->cr_ngroups = 1;
	if (info.v3 && (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);
	nfsstats.rpccnt[NFSPROC_FSSTAT]++;
	nfsm_reqhead(&info, vp, NFSPROC_FSSTAT, NFSX_FH(info.v3));
	ERROROUT(nfsm_fhtom(&info, vp));
	NEGKEEPOUT(nfsm_request(&info, vp, NFSPROC_FSSTAT, td, cred, &error));
	if (info.v3) {
		ERROROUT(nfsm_postop_attr(&info, vp, &retattr,
					 NFS_LATTR_NOSHRINK));
	}
	if (error) {
		if (info.mrep != NULL)
			m_freem(info.mrep);
		goto nfsmout;
	}
	NULLOUT(sfp = nfsm_dissect(&info, NFSX_STATFS(info.v3)));
	sbp->f_flags = nmp->nm_flag;

	if (info.v3) {
		sbp->f_bsize = NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_tbytes);
		sbp->f_blocks = (long)(tquad / ((u_quad_t)NFS_FABLKSIZE));
		tquad = fxdr_hyper(&sfp->sf_fbytes);
		sbp->f_bfree = (long)(tquad / ((u_quad_t)NFS_FABLKSIZE));
		tquad = fxdr_hyper(&sfp->sf_abytes);
		sbp->f_bavail = (long)(tquad / ((u_quad_t)NFS_FABLKSIZE));
		sbp->f_files = (fxdr_unsigned(int32_t,
		    sfp->sf_tfiles.nfsuquad[1]) & 0x7fffffff);
		sbp->f_ffree = (fxdr_unsigned(int32_t,
		    sfp->sf_ffiles.nfsuquad[1]) & 0x7fffffff);
	} else {
		sbp->f_bsize = fxdr_unsigned(int32_t, sfp->sf_bsize);
		sbp->f_blocks = fxdr_unsigned(int32_t, sfp->sf_blocks);
		sbp->f_bfree = fxdr_unsigned(int32_t, sfp->sf_bfree);
		sbp->f_bavail = fxdr_unsigned(int32_t, sfp->sf_bavail);
		sbp->f_files = 0;
		sbp->f_ffree = 0;
	}

	/*
	 * Some values are pre-set in mnt_stat.  Note in particular f_iosize
	 * cannot be changed once the filesystem is mounted as it is used
	 * as the basis for BIOs.
	 */
	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
		sbp->f_iosize = mp->mnt_stat.f_iosize;
	}
	m_freem(info.mrep);
	info.mrep = NULL;
nfsmout:
	vput(vp);
	crfree(cred);
	lwkt_reltoken(&nmp->nm_token);
	return (error);
}
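
The v3 branch converts 64-bit byte counts from the wire into NFS_FABLKSIZE blocks via fxdr_hyper(). As a rough user-space stand-in for that decode, assuming the standard XDR layout of a hyper (two big-endian 32-bit words, most significant first; this is an illustration, not the kernel macro):

#include <stdint.h>
#include <arpa/inet.h>

static uint64_t
example_fxdr_hyper(const uint32_t wire[2])
{
	return (((uint64_t)ntohl(wire[0]) << 32) | ntohl(wire[1]));
}

The "& 0x7fffffff" masking of nfsuquad[1] above then makes sense: the legacy statfs fields are 32-bit longs, so only the low-order word of the file counts is kept and the sign bit is cleared.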
Example #6
static int
nfs_statvfs(struct mount *mp, struct statvfs *sbp, struct ucred *cred)
{
	struct vnode *vp;
	struct nfs_statfs *sfp;
	struct nfsmount *nmp = VFSTONFS(mp);
	thread_t td = curthread;
	int error = 0, retattr;
	struct nfsnode *np;
	struct nfsm_info info;

	info.mrep = NULL;
	info.v3 = (nmp->nm_flag & NFSMNT_NFSV3);
	lwkt_gettoken(&nmp->nm_token);

#ifndef nolint
	sfp = NULL;
#endif
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, NULL);
	if (error) {
		lwkt_reltoken(&nmp->nm_token);
		return (error);
	}
	vp = NFSTOV(np);
	/* ignore the passed cred */
	cred = crget();
	cred->cr_ngroups = 1;
	if (info.v3 && (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);
	nfsstats.rpccnt[NFSPROC_FSSTAT]++;
	nfsm_reqhead(&info, vp, NFSPROC_FSSTAT, NFSX_FH(info.v3));
	ERROROUT(nfsm_fhtom(&info, vp));
	NEGKEEPOUT(nfsm_request(&info, vp, NFSPROC_FSSTAT, td, cred, &error));
	if (info.v3) {
		ERROROUT(nfsm_postop_attr(&info, vp, &retattr,
					 NFS_LATTR_NOSHRINK));
	}
	if (error) {
		if (info.mrep != NULL)
			m_freem(info.mrep);
		goto nfsmout;
	}
	NULLOUT(sfp = nfsm_dissect(&info, NFSX_STATFS(info.v3)));
	sbp->f_flag = nmp->nm_flag;
	sbp->f_owner = nmp->nm_cred->cr_ruid;

	if (info.v3) {
		sbp->f_bsize = NFS_FABLKSIZE;
		sbp->f_frsize = NFS_FABLKSIZE;
		sbp->f_blocks = (fxdr_hyper(&sfp->sf_tbytes) /
				((u_quad_t)NFS_FABLKSIZE));
		sbp->f_bfree = (fxdr_hyper(&sfp->sf_fbytes) /
				((u_quad_t)NFS_FABLKSIZE));
		sbp->f_bavail = (fxdr_hyper(&sfp->sf_abytes) /
				((u_quad_t)NFS_FABLKSIZE));
		sbp->f_files = fxdr_hyper(&sfp->sf_tfiles);
		sbp->f_ffree = fxdr_hyper(&sfp->sf_ffiles);
		sbp->f_favail = fxdr_hyper(&sfp->sf_afiles);
	} else {
		sbp->f_bsize = fxdr_unsigned(int32_t, sfp->sf_bsize);
		sbp->f_blocks = fxdr_unsigned(int32_t, sfp->sf_blocks);
		sbp->f_bfree = fxdr_unsigned(int32_t, sfp->sf_bfree);
		sbp->f_bavail = fxdr_unsigned(int32_t, sfp->sf_bavail);
		sbp->f_files = 0;
		sbp->f_ffree = 0;
		sbp->f_favail = 0;
	}
	sbp->f_syncreads = 0;
	sbp->f_syncwrites = 0;
	sbp->f_asyncreads = 0;
	sbp->f_asyncwrites = 0;
	sbp->f_type = mp->mnt_vfc->vfc_typenum;

	m_freem(info.mrep);
	info.mrep = NULL;
nfsmout:
	vput(vp);
	crfree(cred);
	lwkt_reltoken(&nmp->nm_token);
	return (error);
}
Example #7
/* ARGSUSED */
static int
nfs_mount(struct mount *mp)
{
	struct nfs_args args = {
	    .version = NFS_ARGSVERSION,
	    .addr = NULL,
	    .addrlen = sizeof (struct sockaddr_in),
	    .sotype = SOCK_STREAM,
	    .proto = 0,
	    .fh = NULL,
	    .fhsize = 0,
	    .flags = NFSMNT_RESVPORT,
	    .wsize = NFS_WSIZE,
	    .rsize = NFS_RSIZE,
	    .readdirsize = NFS_READDIRSIZE,
	    .timeo = 10,
	    .retrans = NFS_RETRANS,
	    .maxgrouplist = NFS_MAXGRPS,
	    .readahead = NFS_DEFRAHEAD,
	    .wcommitsize = 0,			/* was: NQ_DEFLEASE */
	    .deadthresh = NFS_MAXDEADTHRESH,	/* was: NQ_DEADTHRESH */
	    .hostname = NULL,
	    /* args version 4 */
	    .acregmin = NFS_MINATTRTIMO,
	    .acregmax = NFS_MAXATTRTIMO,
	    .acdirmin = NFS_MINDIRATTRTIMO,
	    .acdirmax = NFS_MAXDIRATTRTIMO,
	};
	int error, ret, has_nfs_args_opt;
	int has_addr_opt, has_fh_opt, has_hostname_opt;
	struct sockaddr *nam;
	struct vnode *vp;
	char hst[MNAMELEN];
	size_t len;
	u_char nfh[NFSX_V3FHMAX];
	char *opt;
	int nametimeo = NFS_DEFAULT_NAMETIMEO;
	int negnametimeo = NFS_DEFAULT_NEGNAMETIMEO;

	has_nfs_args_opt = 0;
	has_addr_opt = 0;
	has_fh_opt = 0;
	has_hostname_opt = 0;

	if (vfs_filteropt(mp->mnt_optnew, nfs_opts)) {
		error = EINVAL;
		goto out;
	}

	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_UPDATE)) == MNT_ROOTFS) {
		error = nfs_mountroot(mp);
		goto out;
	}

	/*
	 * The old mount_nfs program passed the struct nfs_args
	 * from userspace to kernel.  The new mount_nfs program
	 * passes string options via nmount() from userspace to kernel
	 * and we populate the struct nfs_args in the kernel.
	 */
	if (vfs_getopt(mp->mnt_optnew, "nfs_args", NULL, NULL) == 0) {
		error = vfs_copyopt(mp->mnt_optnew, "nfs_args", &args,
		    sizeof args);
		if (error)
			goto out;

		if (args.version != NFS_ARGSVERSION) {
			error = EPROGMISMATCH;
			goto out;
		}
		has_nfs_args_opt = 1;
	}

	if (vfs_getopt(mp->mnt_optnew, "dumbtimer", NULL, NULL) == 0)
		args.flags |= NFSMNT_DUMBTIMR;
	if (vfs_getopt(mp->mnt_optnew, "noconn", NULL, NULL) == 0)
		args.flags |= NFSMNT_NOCONN;
	if (vfs_getopt(mp->mnt_optnew, "conn", NULL, NULL) == 0)
		args.flags |= NFSMNT_NOCONN;
	if (vfs_getopt(mp->mnt_optnew, "nolockd", NULL, NULL) == 0)
		args.flags |= NFSMNT_NOLOCKD;
	if (vfs_getopt(mp->mnt_optnew, "lockd", NULL, NULL) == 0)
		args.flags &= ~NFSMNT_NOLOCKD;
	if (vfs_getopt(mp->mnt_optnew, "intr", NULL, NULL) == 0)
		args.flags |= NFSMNT_INT;
	if (vfs_getopt(mp->mnt_optnew, "rdirplus", NULL, NULL) == 0)
		args.flags |= NFSMNT_RDIRPLUS;
	if (vfs_getopt(mp->mnt_optnew, "resvport", NULL, NULL) == 0)
		args.flags |= NFSMNT_RESVPORT;
	if (vfs_getopt(mp->mnt_optnew, "noresvport", NULL, NULL) == 0)
		args.flags &= ~NFSMNT_RESVPORT;
	if (vfs_getopt(mp->mnt_optnew, "soft", NULL, NULL) == 0)
		args.flags |= NFSMNT_SOFT;
	if (vfs_getopt(mp->mnt_optnew, "hard", NULL, NULL) == 0)
		args.flags &= ~NFSMNT_SOFT;
	if (vfs_getopt(mp->mnt_optnew, "mntudp", NULL, NULL) == 0)
		args.sotype = SOCK_DGRAM;
	if (vfs_getopt(mp->mnt_optnew, "udp", NULL, NULL) == 0)
		args.sotype = SOCK_DGRAM;
	if (vfs_getopt(mp->mnt_optnew, "tcp", NULL, NULL) == 0)
		args.sotype = SOCK_STREAM;
	if (vfs_getopt(mp->mnt_optnew, "nfsv3", NULL, NULL) == 0)
		args.flags |= NFSMNT_NFSV3;
	if (vfs_getopt(mp->mnt_optnew, "nocto", NULL, NULL) == 0)
		args.flags |= NFSMNT_NOCTO;
	if (vfs_getopt(mp->mnt_optnew, "readdirsize", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal readdirsize");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.readdirsize);
		if (ret != 1 || args.readdirsize <= 0) {
			vfs_mount_error(mp, "illegal readdirsize: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_READDIRSIZE;
	}
	if (vfs_getopt(mp->mnt_optnew, "readahead", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal readahead");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.readahead);
		if (ret != 1 || args.readahead <= 0) {
			vfs_mount_error(mp, "illegal readahead: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_READAHEAD;
	}
	if (vfs_getopt(mp->mnt_optnew, "wsize", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal wsize");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.wsize);
		if (ret != 1 || args.wsize <= 0) {
			vfs_mount_error(mp, "illegal wsize: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_WSIZE;
	}
	if (vfs_getopt(mp->mnt_optnew, "rsize", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal rsize");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.rsize);
		if (ret != 1 || args.rsize <= 0) {
			vfs_mount_error(mp, "illegal rsize: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_RSIZE;
	}
	if (vfs_getopt(mp->mnt_optnew, "retrans", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal retrans");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.retrans);
		if (ret != 1 || args.retrans <= 0) {
			vfs_mount_error(mp, "illegal retrans: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_RETRANS;
	}
	if (vfs_getopt(mp->mnt_optnew, "acregmin", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.acregmin);
		if (ret != 1 || args.acregmin < 0) {
			vfs_mount_error(mp, "illegal acregmin: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_ACREGMIN;
	}
	if (vfs_getopt(mp->mnt_optnew, "acregmax", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.acregmax);
		if (ret != 1 || args.acregmax < 0) {
			vfs_mount_error(mp, "illegal acregmax: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_ACREGMAX;
	}
	if (vfs_getopt(mp->mnt_optnew, "acdirmin", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.acdirmin);
		if (ret != 1 || args.acdirmin < 0) {
			vfs_mount_error(mp, "illegal acdirmin: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_ACDIRMIN;
	}
	if (vfs_getopt(mp->mnt_optnew, "acdirmax", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.acdirmax);
		if (ret != 1 || args.acdirmax < 0) {
			vfs_mount_error(mp, "illegal acdirmax: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_ACDIRMAX;
	}
	if (vfs_getopt(mp->mnt_optnew, "wcommitsize", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.wcommitsize);
		if (ret != 1 || args.wcommitsize < 0) {
			vfs_mount_error(mp, "illegal wcommitsize: %s", opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_WCOMMITSIZE;
	}
	if (vfs_getopt(mp->mnt_optnew, "deadthresh", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.deadthresh);
		if (ret != 1 || args.deadthresh <= 0) {
			vfs_mount_error(mp, "illegal deadthresh: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_DEADTHRESH;
	}
	if (vfs_getopt(mp->mnt_optnew, "timeout", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.timeo);
		if (ret != 1 || args.timeo <= 0) {
			vfs_mount_error(mp, "illegal timeout: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_TIMEO;
	}
	if (vfs_getopt(mp->mnt_optnew, "maxgroups", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.maxgrouplist);
		if (ret != 1 || args.maxgrouplist <= 0) {
			vfs_mount_error(mp, "illegal maxgroups: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_MAXGRPS;
	}
	if (vfs_getopt(mp->mnt_optnew, "nametimeo", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &nametimeo);
		if (ret != 1 || nametimeo < 0) {
			vfs_mount_error(mp, "illegal nametimeo: %s", opt);
			error = EINVAL;
			goto out;
		}
	}
	if (vfs_getopt(mp->mnt_optnew, "negnametimeo", (void **)&opt, NULL)
	    == 0) {
		ret = sscanf(opt, "%d", &negnametimeo);
		if (ret != 1 || negnametimeo < 0) {
			vfs_mount_error(mp, "illegal negnametimeo: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
	}
	if (vfs_getopt(mp->mnt_optnew, "addr", (void **)&args.addr,
		&args.addrlen) == 0) {
		has_addr_opt = 1;
		if (args.addrlen > SOCK_MAXADDRLEN) {
			error = ENAMETOOLONG;
			goto out;
		}
		nam = malloc(args.addrlen, M_SONAME,
		    M_WAITOK);
		bcopy(args.addr, nam, args.addrlen);
		nam->sa_len = args.addrlen;
	}
	if (vfs_getopt(mp->mnt_optnew, "fh", (void **)&args.fh,
		&args.fhsize) == 0) {
		has_fh_opt = 1;
	}
	if (vfs_getopt(mp->mnt_optnew, "hostname", (void **)&args.hostname,
		NULL) == 0) {
		has_hostname_opt = 1;
	}
	if (args.hostname == NULL) {
		vfs_mount_error(mp, "Invalid hostname");
		error = EINVAL;
		goto out;
	}
	if (args.fhsize < 0 || args.fhsize > NFSX_V3FHMAX) {
		vfs_mount_error(mp, "Bad file handle");
		error = EINVAL;
		goto out;
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		struct nfsmount *nmp = VFSTONFS(mp);

		if (nmp == NULL) {
			error = EIO;
			goto out;
		}

		/*
		 * If a change from TCP->UDP is done and there are thread(s)
		 * that have I/O RPC(s) in progress with a transfer size
		 * greater than NFS_MAXDGRAMDATA, those thread(s) will be
		 * hung, retrying the RPC(s) forever. Usually these threads
		 * will be seen doing an uninterruptible sleep on wait channel
		 * "newnfsreq" (truncated to "newnfsre" by procstat).
		 */
		if (args.sotype == SOCK_DGRAM && nmp->nm_sotype == SOCK_STREAM)
			tprintf(curthread->td_proc, LOG_WARNING,
	"Warning: mount -u that changes TCP->UDP can result in hung threads\n");

		/*
		 * When doing an update, we can't change from or to
		 * v3, switch lockd strategies or change cookie translation
		 */
		args.flags = (args.flags &
		    ~(NFSMNT_NFSV3 | NFSMNT_NOLOCKD /*|NFSMNT_XLATECOOKIE*/)) |
		    (nmp->nm_flag &
			(NFSMNT_NFSV3 | NFSMNT_NOLOCKD /*|NFSMNT_XLATECOOKIE*/));
		nfs_decode_args(mp, nmp, &args, NULL);
		goto out;
	}

	/*
	 * Make the nfs_ip_paranoia sysctl serve as the default connection
	 * or no-connection mode for those protocols that support 
	 * no-connection mode (the flag will be cleared later for protocols
	 * that do not support no-connection mode).  This will allow a client
	 * to receive replies from a different IP than the request was
	 * sent to.  Note: default value for nfs_ip_paranoia is 1 (paranoid),
	 * not 0.
	 */
	if (nfs_ip_paranoia == 0)
		args.flags |= NFSMNT_NOCONN;

	if (has_nfs_args_opt) {
		/*
		 * In the 'nfs_args' case, the pointers in the args
		 * structure are in userland - we copy them in here.
		 */
		if (!has_fh_opt) {
			error = copyin((caddr_t)args.fh, (caddr_t)nfh,
			    args.fhsize);
			if (error) {
				goto out;
			}
			args.fh = nfh;
		}
		if (!has_hostname_opt) {
			error = copyinstr(args.hostname, hst, MNAMELEN-1, &len);
			if (error) {
				goto out;
			}
			bzero(&hst[len], MNAMELEN - len);
			args.hostname = hst;
		}
		if (!has_addr_opt) {
			/* sockargs() call must be after above copyin() calls */
			error = getsockaddr(&nam, (caddr_t)args.addr,
			    args.addrlen);
			if (error) {
				goto out;
			}
		}
	} else if (has_addr_opt == 0) {
		vfs_mount_error(mp, "No server address");
		error = EINVAL;
		goto out;
	}
	error = mountnfs(&args, mp, nam, args.hostname, &vp,
	    curthread->td_ucred, nametimeo, negnametimeo);
out:
	if (!error) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
		MNT_IUNLOCK(mp);
	}
	return (error);
}


/*
 * VFS Operations.
 *
 * mount system call
 * It seems a bit dumb to copyinstr() the host and path here and then
 * bcopy() them in mountnfs(), but I wanted to detect errors before
 * doing the sockargs() call because sockargs() allocates an mbuf and
 * an error after that means that I have to release the mbuf.
 */
/* ARGSUSED */
static int
nfs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	int error;
	struct nfs_args args;

	error = copyin(data, &args, sizeof (struct nfs_args));
	if (error)
		return error;

	ma = mount_arg(ma, "nfs_args", &args, sizeof args);

	error = kernel_mount(ma, flags);
	return (error);
}
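
nfs_mount() repeats the same vfs_getopt()/sscanf() validation for every numeric option, which invites copy-and-paste slips in the error strings. A sketch of how the pattern could be factored, reusing only calls that already appear in the function (the helper name and the ENOENT return for "option absent" are assumptions):

static int
example_parse_int_opt(struct mount *mp, const char *name, int *valp,
    int minval)
{
	char *opt;

	if (vfs_getopt(mp->mnt_optnew, name, (void **)&opt, NULL) != 0)
		return (ENOENT);	/* option not supplied */
	if (opt == NULL || sscanf(opt, "%d", valp) != 1 || *valp < minval) {
		vfs_mount_error(mp, "illegal %s: %s", name,
		    opt == NULL ? "(null)" : opt);
		return (EINVAL);
	}
	return (0);
}

With it, the wsize block would reduce to calling example_parse_int_opt(mp, "wsize", &args.wsize, 1) and, on success, setting NFSMNT_WSIZE.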
Example #8
/*
 * nfs statvfs call
 */
int
nfs_statvfs(struct mount *mp, struct statvfs *sbp)
{
	struct lwp *l = curlwp;
	struct vnode *vp;
	struct nfs_statfs *sfp;
	char *cp;
	u_int32_t *tl;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	struct nfsmount *nmp = VFSTONFS(mp);
	int error = 0, retattr;
#ifdef NFS_V2_ONLY
	const int v3 = 0;
#else
	int v3 = (nmp->nm_flag & NFSMNT_NFSV3);
#endif
	struct mbuf *mreq, *mrep = NULL, *md, *mb;
	kauth_cred_t cred;
	u_quad_t tquad;
	struct nfsnode *np;

#ifndef nolint
	sfp = (struct nfs_statfs *)0;
#endif
	vp = nmp->nm_vnode;
	np = VTONFS(vp);
	cred = kauth_cred_alloc();
#ifndef NFS_V2_ONLY
	if (v3 && (nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	nfsstats.rpccnt[NFSPROC_FSSTAT]++;
	nfsm_reqhead(np, NFSPROC_FSSTAT, NFSX_FH(v3));
	nfsm_fhtom(np, v3);
	nfsm_request(np, NFSPROC_FSSTAT, l, cred);
	if (v3)
		nfsm_postop_attr(vp, retattr, 0);
	if (error) {
		if (mrep != NULL) {
			if (mrep->m_next != NULL)
				printf("nfs_vfsops: nfs_statvfs would lose buffers\n");
			m_freem(mrep);
		}
		goto nfsmout;
	}
	nfsm_dissect(sfp, struct nfs_statfs *, NFSX_STATFS(v3));
	sbp->f_flag = nmp->nm_flag;
	sbp->f_iosize = min(nmp->nm_rsize, nmp->nm_wsize);
	if (v3) {
		sbp->f_frsize = sbp->f_bsize = NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_tbytes);
		sbp->f_blocks = ((quad_t)tquad / (quad_t)NFS_FABLKSIZE);
		tquad = fxdr_hyper(&sfp->sf_fbytes);
		sbp->f_bfree = ((quad_t)tquad / (quad_t)NFS_FABLKSIZE);
		tquad = fxdr_hyper(&sfp->sf_abytes);
		tquad = ((quad_t)tquad / (quad_t)NFS_FABLKSIZE);
		sbp->f_bresvd = sbp->f_bfree - tquad;
		sbp->f_bavail = tquad;
		/* Handle older NFS servers returning negative values */
		if ((quad_t)sbp->f_bavail < 0)
			sbp->f_bavail = 0;
		tquad = fxdr_hyper(&sfp->sf_tfiles);
		sbp->f_files = tquad;
		tquad = fxdr_hyper(&sfp->sf_ffiles);
		sbp->f_ffree = tquad;
		sbp->f_favail = tquad;
		sbp->f_fresvd = 0;
		sbp->f_namemax = NFS_MAXNAMLEN;
	} else {
		sbp->f_bsize = NFS_FABLKSIZE;
		sbp->f_frsize = fxdr_unsigned(int32_t, sfp->sf_bsize);
		sbp->f_blocks = fxdr_unsigned(int32_t, sfp->sf_blocks);
		sbp->f_bfree = fxdr_unsigned(int32_t, sfp->sf_bfree);
		sbp->f_bavail = fxdr_unsigned(int32_t, sfp->sf_bavail);
		sbp->f_fresvd = 0;
		sbp->f_files = 0;
		sbp->f_ffree = 0;
		sbp->f_favail = 0;
		sbp->f_fresvd = 0;
		sbp->f_namemax = NFS_MAXNAMLEN;
	}
	copy_statvfs_info(sbp, mp);
	nfsm_reqdone;
	kauth_cred_free(cred);
	return (error);
}
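
This NetBSD variant is the only one here that fills in f_bresvd, and it also guards against older servers reporting "available" as a negative value. The arithmetic in isolation (a sketch with a hypothetical name; note the reserved count is derived before the clamp, exactly as above):

static void
example_fill_v3_blocks(struct statvfs *sbp, u_quad_t tbytes,
    u_quad_t fbytes, u_quad_t abytes)
{
	sbp->f_blocks = tbytes / NFS_FABLKSIZE;
	sbp->f_bfree = fbytes / NFS_FABLKSIZE;
	sbp->f_bavail = abytes / NFS_FABLKSIZE;
	/* Space that is free but reserved to the superuser. */
	sbp->f_bresvd = sbp->f_bfree - sbp->f_bavail;
	/* Handle older NFS servers returning negative values. */
	if ((quad_t)sbp->f_bavail < 0)
		sbp->f_bavail = 0;
}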
Example #9
static int
nfs_sysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{
	struct nfsmount *nmp = VFSTONFS(mp);
	struct vfsquery vq;
	int error;

	bzero(&vq, sizeof(vq));
	switch (op) {
#if 0
	case VFS_CTL_NOLOCKS:
		val = (nmp->nm_flag & NFSMNT_NOLOCKS) ? 1 : 0;
 		if (req->oldptr != NULL) {
 			error = SYSCTL_OUT(req, &val, sizeof(val));
 			if (error)
 				return (error);
 		}
 		if (req->newptr != NULL) {
 			error = SYSCTL_IN(req, &val, sizeof(val));
 			if (error)
 				return (error);
			if (val)
				nmp->nm_flag |= NFSMNT_NOLOCKS;
			else
				nmp->nm_flag &= ~NFSMNT_NOLOCKS;
 		}
		break;
#endif
	case VFS_CTL_QUERY:
		mtx_lock(&nmp->nm_mtx);
		if (nmp->nm_state & NFSSTA_TIMEO)
			vq.vq_flags |= VQ_NOTRESP;
		mtx_unlock(&nmp->nm_mtx);
#if 0
		if (!(nmp->nm_flag & NFSMNT_NOLOCKS) &&
		    (nmp->nm_state & NFSSTA_LOCKTIMEO))
			vq.vq_flags |= VQ_NOTRESPLOCK;
#endif
		error = SYSCTL_OUT(req, &vq, sizeof(vq));
		break;
 	case VFS_CTL_TIMEO:
 		if (req->oldptr != NULL) {
 			error = SYSCTL_OUT(req, &nmp->nm_tprintf_initial_delay,
 			    sizeof(nmp->nm_tprintf_initial_delay));
 			if (error)
 				return (error);
 		}
 		if (req->newptr != NULL) {
			error = vfs_suser(mp, req->td);
			if (error)
				return (error);
 			error = SYSCTL_IN(req, &nmp->nm_tprintf_initial_delay,
 			    sizeof(nmp->nm_tprintf_initial_delay));
 			if (error)
 				return (error);
 			if (nmp->nm_tprintf_initial_delay < 0)
 				nmp->nm_tprintf_initial_delay = 0;
 		}
		break;
	default:
		return (ENOTSUP);
	}
	return (0);
}
Example #10
/*
 * nfs statfs call
 */
static int
nfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct vnode *vp;
	struct thread *td;
	struct nfs_statfs *sfp;
	caddr_t bpos, dpos;
	struct nfsmount *nmp = VFSTONFS(mp);
	int error = 0, v3 = (nmp->nm_flag & NFSMNT_NFSV3), retattr;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsnode *np;
	u_quad_t tquad;

	td = curthread;
#ifndef nolint
	sfp = NULL;
#endif
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error)
		return (error);
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
	if (error) {
		vfs_unbusy(mp);
		return (error);
	}
	vp = NFSTOV(np);
	mtx_lock(&nmp->nm_mtx);
	if (v3 && (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);		
		(void)nfs_fsinfo(nmp, vp, td->td_ucred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);
	nfsstats.rpccnt[NFSPROC_FSSTAT]++;
	mreq = nfsm_reqhead(vp, NFSPROC_FSSTAT, NFSX_FH(v3));
	mb = mreq;
	bpos = mtod(mb, caddr_t);
	nfsm_fhtom(vp, v3);
	nfsm_request(vp, NFSPROC_FSSTAT, td, td->td_ucred);
	if (v3)
		nfsm_postop_attr(vp, retattr);
	if (error) {
		if (mrep != NULL)
			m_freem(mrep);
		goto nfsmout;
	}
	sfp = nfsm_dissect(struct nfs_statfs *, NFSX_STATFS(v3));
	mtx_lock(&nmp->nm_mtx);
	sbp->f_iosize = nfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);
	if (v3) {
		sbp->f_bsize = NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_tbytes);
		sbp->f_blocks = tquad / NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_fbytes);
		sbp->f_bfree = tquad / NFS_FABLKSIZE;
		tquad = fxdr_hyper(&sfp->sf_abytes);
		sbp->f_bavail = tquad / NFS_FABLKSIZE;
		sbp->f_files = (fxdr_unsigned(int32_t,
		    sfp->sf_tfiles.nfsuquad[1]) & 0x7fffffff);
		sbp->f_ffree = (fxdr_unsigned(int32_t,
		    sfp->sf_ffiles.nfsuquad[1]) & 0x7fffffff);
	} else {
		sbp->f_bsize = fxdr_unsigned(int32_t, sfp->sf_bsize);
		sbp->f_blocks = fxdr_unsigned(int32_t, sfp->sf_blocks);
		sbp->f_bfree = fxdr_unsigned(int32_t, sfp->sf_bfree);
		sbp->f_bavail = fxdr_unsigned(int32_t, sfp->sf_bavail);
		sbp->f_files = 0;
		sbp->f_ffree = 0;
	}
	m_freem(mrep);
nfsmout:
	vput(vp);
	vfs_unbusy(mp);
	return (error);
}
Example #11
/*
 * Common code for mount and mountroot
 */
static int
mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam,
    char *hst, struct vnode **vpp, struct ucred *cred, int nametimeo,
    int negnametimeo)
{
	struct nfsmount *nmp;
	struct nfsnode *np;
	int error;
	struct vattr attrs;

	if (mp->mnt_flag & MNT_UPDATE) {
		nmp = VFSTONFS(mp);
		printf("%s: MNT_UPDATE is no longer handled here\n", __func__);
		free(nam, M_SONAME);
		return (0);
	} else {
		nmp = uma_zalloc(nfsmount_zone, M_WAITOK);
		bzero((caddr_t)nmp, sizeof (struct nfsmount));
		TAILQ_INIT(&nmp->nm_bufq);
		mp->mnt_data = nmp;
		nmp->nm_getinfo = nfs_getnlminfo;
		nmp->nm_vinvalbuf = nfs_vinvalbuf;
	}
	vfs_getnewfsid(mp);
	nmp->nm_mountp = mp;
	mtx_init(&nmp->nm_mtx, "NFSmount lock", NULL, MTX_DEF);			

	/*
	 * V2 can only handle 32 bit filesizes.  A 4GB-1 limit may be too
	 * high, depending on whether we end up with negative offsets in
	 * the client or server somewhere.  2GB-1 may be safer.
	 *
	 * For V3, nfs_fsinfo will adjust this as necessary.  Assume maximum
	 * that we can handle until we find out otherwise.
	 */
	if ((argp->flags & NFSMNT_NFSV3) == 0)
		nmp->nm_maxfilesize = 0xffffffffLL;
	else
		nmp->nm_maxfilesize = OFF_MAX;

	nmp->nm_timeo = NFS_TIMEO;
	nmp->nm_retry = NFS_RETRANS;
	if ((argp->flags & NFSMNT_NFSV3) && argp->sotype == SOCK_STREAM) {
		nmp->nm_wsize = nmp->nm_rsize = NFS_MAXDATA;
	} else {
		nmp->nm_wsize = NFS_WSIZE;
		nmp->nm_rsize = NFS_RSIZE;
	}
	nmp->nm_wcommitsize = hibufspace / (desiredvnodes / 1000);
	nmp->nm_readdirsize = NFS_READDIRSIZE;
	nmp->nm_numgrps = NFS_MAXGRPS;
	nmp->nm_readahead = NFS_DEFRAHEAD;
	nmp->nm_deadthresh = NFS_MAXDEADTHRESH;
	nmp->nm_nametimeo = nametimeo;
	nmp->nm_negnametimeo = negnametimeo;
	nmp->nm_tprintf_delay = nfs_tprintf_delay;
	if (nmp->nm_tprintf_delay < 0)
		nmp->nm_tprintf_delay = 0;
	nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
	if (nmp->nm_tprintf_initial_delay < 0)
		nmp->nm_tprintf_initial_delay = 0;
	nmp->nm_fhsize = argp->fhsize;
	bcopy((caddr_t)argp->fh, (caddr_t)nmp->nm_fh, argp->fhsize);
	bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN);
	nmp->nm_nam = nam;
	/* Set up the sockets and per-host congestion */
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;
	nmp->nm_rpcops = &nfs_rpcops;

	nfs_decode_args(mp, nmp, argp, hst);

	/*
	 * For Connection based sockets (TCP,...) defer the connect until
	 * the first request, in case the server is not responding.
	 */
	if (nmp->nm_sotype == SOCK_DGRAM &&
		(error = nfs_connect(nmp)))
		goto bad;

	/*
	 * This is silly, but it has to be set so that vinifod() works.
	 * We do not want to do an nfs_statfs() here since we can get
	 * stuck on a dead server and we are holding a lock on the mount
	 * point.
	 */
	mtx_lock(&nmp->nm_mtx);
	mp->mnt_stat.f_iosize = nfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);
	/*
	 * A reference count is needed on the nfsnode representing the
	 * remote root.  If this object is not persistent, then backward
	 * traversals of the mount point (i.e. "..") will not work if
	 * the nfsnode gets flushed out of the cache. Ufs does not have
	 * this problem, because one can identify root inodes by their
	 * number == ROOTINO (2).
	 */
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
	if (error)
		goto bad;
	*vpp = NFSTOV(np);

	/*
	 * Get file attributes and transfer parameters for the
	 * mountpoint.  This has the side effect of filling in
	 * (*vpp)->v_type with the correct value.
	 */
	if (argp->flags & NFSMNT_NFSV3)
		nfs_fsinfo(nmp, *vpp, curthread->td_ucred, curthread);
	else
		VOP_GETATTR(*vpp, &attrs, curthread->td_ucred);

	/*
	 * Lose the lock but keep the ref.
	 */
	VOP_UNLOCK(*vpp, 0);

	return (0);
bad:
	nfs_disconnect(nmp);
	mtx_destroy(&nmp->nm_mtx);
	uma_zfree(nfsmount_zone, nmp);
	free(nam, M_SONAME);
	return (error);
}
Example #12
/*
 * Look up a vnode/nfsnode by file handle and store the pointer in *npp.
 * Callers must check for mount points!!
 * An error number is returned.
 */
int
nfs_nget(struct mount *mnt, nfsfh_t *fh, int fhsize, struct nfsnode **npp)
{
	struct nfsmount		*nmp;
	struct nfsnode		*np, find, *np2;
	struct vnode		*vp, *nvp;
	struct proc		*p = curproc;		/* XXX */
	int			 error;

	nmp = VFSTONFS(mnt);

loop:
	rw_enter_write(&nfs_hashlock);
	find.n_fhp = fh;
	find.n_fhsize = fhsize;
	np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
	if (np != NULL) {
		rw_exit_write(&nfs_hashlock);
		vp = NFSTOV(np);
		error = vget(vp, LK_EXCLUSIVE, p);
		if (error)
			goto loop;
		*npp = np;
		return (0);
	}

	/*
	 * getnewvnode() could recycle a vnode, potentially formerly
	 * owned by NFS. This will cause a VOP_RECLAIM() to happen,
	 * which will cause recursive locking, so we unlock before
	 * calling getnewvnode() and lock again afterwards, but must check
	 * to see if this nfsnode has been added while we did not hold
	 * the lock.
	 */
	rw_exit_write(&nfs_hashlock);
	error = getnewvnode(VT_NFS, mnt, &nfs_vops, &nvp);
	/* note that we don't have this vnode set up completely yet */
	rw_enter_write(&nfs_hashlock);
	if (error) {
		*npp = NULL;
		rw_exit_write(&nfs_hashlock);
		return (error);
	}
	nvp->v_flag |= VLARVAL;
	np = RB_FIND(nfs_nodetree, &nmp->nm_ntree, &find);
	if (np != NULL) {
		vgone(nvp);
		rw_exit_write(&nfs_hashlock);
		goto loop;
	}

	vp = nvp;
	np = pool_get(&nfs_node_pool, PR_WAITOK | PR_ZERO);
	vp->v_data = np;
	/* we now have an nfsnode on this vnode */
	vp->v_flag &= ~VLARVAL;
	np->n_vnode = vp;

	rw_init(&np->n_commitlock, "nfs_commitlk");

	/* 
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct 
	 */
	if ((fhsize == nmp->nm_fhsize) && !bcmp(fh, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_flag |= VROOT;
	}

	np->n_fhp = &np->n_fh;
	bcopy(fh, np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	np2 = RB_INSERT(nfs_nodetree, &nmp->nm_ntree, np);
	KASSERT(np2 == NULL);
	np->n_accstamp = -1;
	rw_exit(&nfs_hashlock);
	*npp = np;

	return (0);
}
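
The unlock/allocate/re-check dance in this nfs_nget() is a general pattern: drop the table lock around an allocation that may block or re-enter the subsystem, then look again for a racing insert before publishing the new node. A portable user-space analogue, assuming pthreads (in the kernel the motivation is stronger, since getnewvnode() can trigger VOP_RECLAIM() back into NFS, as the comment above explains; calloc() merely stands in for the blocking allocation):

#include <pthread.h>
#include <stdlib.h>

struct node {
	int		 key;
	struct node	*next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table_head;

static struct node *
find_locked(int key)
{
	struct node *n;

	for (n = table_head; n != NULL; n = n->next)
		if (n->key == key)
			return (n);
	return (NULL);
}

struct node *
get_or_create(int key)
{
	struct node *n, *fresh;

	pthread_mutex_lock(&table_lock);
	n = find_locked(key);
	pthread_mutex_unlock(&table_lock);
	if (n != NULL)
		return (n);

	fresh = calloc(1, sizeof(*fresh));	/* may block; lock is dropped */
	if (fresh == NULL)
		return (NULL);
	fresh->key = key;

	pthread_mutex_lock(&table_lock);
	n = find_locked(key);			/* re-check for a racing insert */
	if (n != NULL) {
		pthread_mutex_unlock(&table_lock);
		free(fresh);			/* lost the race; discard ours */
		return (n);
	}
	fresh->next = table_head;
	table_head = fresh;
	pthread_mutex_unlock(&table_lock);
	return (fresh);
}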
Example #13
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (np != NULL) {
		vp = NFSTOV(np);
		mutex_enter(vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	VOP_LOCK(vp, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
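
Several of these implementations share the NFS_SMALLFH convention: a file handle that fits is stored inline in the nfsnode (n_fh), while a larger one gets a separate allocation that must also be freed on every error path. A sketch of that pair of rules using the NetBSD allocators seen above (the helper names are hypothetical):

static void
example_fh_attach(struct nfsnode *np, const void *fhp, int fhsize)
{
	if (fhsize > NFS_SMALLFH)
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	else
		np->n_fhp = &np->n_fh;		/* embedded storage */
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
}

static void
example_fh_detach(struct nfsnode *np)
{
	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);
}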
Example #14
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp)
{
	struct nfsnode *np, *np2;
	struct nfsnodehashhead *nhpp;
	struct vnode *vp;
	int error;
	int lkflags;
	struct nfsmount *nmp;

	/*
	 * Calculate nfs mount point and figure out whether the rslock should
	 * be interruptable or not.
	 */
	nmp = VFSTONFS(mntp);
	if (nmp->nm_flag & NFSMNT_INT)
		lkflags = LK_PCATCH;
	else
		lkflags = 0;

	lwkt_gettoken(&nfsnhash_token);

retry:
	nhpp = NFSNOHASH(fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT));
loop:
	for (np = nhpp->lh_first; np; np = np->n_hash.le_next) {
		if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize ||
		    bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize)) {
			continue;
		}
		vp = NFSTOV(np);
		if (vget(vp, LK_EXCLUSIVE))
			goto loop;
		for (np = nhpp->lh_first; np; np = np->n_hash.le_next) {
			if (mntp == NFSTOV(np)->v_mount &&
			    np->n_fhsize == fhsize &&
			    bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize) == 0
			) {
				break;
			}
		}
		if (np == NULL || NFSTOV(np) != vp) {
			vput(vp);
			goto loop;
		}
		*npp = np;
		lwkt_reltoken(&nfsnhash_token);
		return(0);
	}

	/*
	 * Obtain a lock to prevent a race condition if the getnewvnode()
	 * or MALLOC() below happens to block.
	 */
	if (lockmgr(&nfsnhash_lock, LK_EXCLUSIVE | LK_SLEEPFAIL))
		goto loop;

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if objcache should block.
	 */
	np = objcache_get(nfsnode_objcache, M_WAITOK);
		
	error = getnewvnode(VT_NFS, mntp, &vp, 0, 0);
	if (error) {
		lockmgr(&nfsnhash_lock, LK_RELEASE);
		*npp = NULL;
		objcache_put(nfsnode_objcache, np);
		lwkt_reltoken(&nfsnhash_token);
		return (error);
	}

	/*
	 * Initialize most of (np).
	 */
	bzero(np, sizeof (*np));
	if (fhsize > NFS_SMALLFH) {
		MALLOC(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK);
	} else {
Example #15
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
nfs_nget(
	mount_t mp,
	nfsnode_t dnp,
	struct componentname *cnp,
	u_char *fhp,
	int fhsize,
	struct nfs_vattr *nvap,
	u_int64_t *xidp,
	uint32_t auth,
	int flags,
	nfsnode_t *npp)
{
	nfsnode_t np;
	struct nfsnodehashhead *nhpp;
	vnode_t vp;
	int error, nfsvers;
	mount_t mp2;
	struct vnode_fsparam vfsp;
	uint32_t vid;

	FSDBG_TOP(263, mp, dnp, flags, npp);

	/* Check for unmount in progress */
	if (!mp || vfs_isforce(mp)) {
		*npp = NULL;
		error = ENXIO;
		FSDBG_BOT(263, mp, dnp, 0xd1e, error);
		return (error);
	}
	nfsvers = VFSTONFS(mp)->nm_vers;

	nhpp = NFSNOHASH(nfs_hash(fhp, fhsize));
loop:
	lck_mtx_lock(nfs_node_hash_mutex);
	for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
		mp2 = (np->n_hflag & NHINIT) ? np->n_mount : NFSTOMP(np);
		if (mp != mp2 || np->n_fhsize != fhsize ||
		    bcmp(fhp, np->n_fhp, fhsize))
			continue;
		if (nvap && (nvap->nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) &&
		    cnp && (cnp->cn_namelen > (fhsize - (int)sizeof(dnp)))) {
			/* The name was too long to fit in the file handle.  Check it against the node's name. */
			int namecmp = 0;
			const char *vname = vnode_getname(NFSTOV(np));
			if (vname) {
				if (cnp->cn_namelen != (int)strlen(vname))
					namecmp = 1;
				else
					namecmp = strncmp(vname, cnp->cn_nameptr, cnp->cn_namelen);
				vnode_putname(vname);
			}
			if (namecmp)  /* full name didn't match */
				continue;
		}
		FSDBG(263, dnp, np, np->n_flag, 0xcace0000);
		/* if the node is locked, sleep on it */
		if ((np->n_hflag & NHLOCKED) && !(flags & NG_NOCREATE)) {
			np->n_hflag |= NHLOCKWANT;
			FSDBG(263, dnp, np, np->n_flag, 0xcace2222);
			msleep(np, nfs_node_hash_mutex, PDROP | PINOD, "nfs_nget", NULL);
			FSDBG(263, dnp, np, np->n_flag, 0xcace3333);
			goto loop;
		}
		vp = NFSTOV(np);
		vid = vnode_vid(vp);
		lck_mtx_unlock(nfs_node_hash_mutex);
		if ((error = vnode_getwithvid(vp, vid))) {
			/*
			 * If vnode is being reclaimed or has already
			 * changed identity, no need to wait.
			 */
			FSDBG_BOT(263, dnp, *npp, 0xcace0d1e, error);
			return (error);
		}
		if ((error = nfs_node_lock(np))) {
			/* this only fails if the node is now unhashed */
			/* so let's see if we can find/create it again */
			FSDBG(263, dnp, *npp, 0xcaced1e2, error);
			vnode_put(vp);
			if (flags & NG_NOCREATE) {
				*npp = 0;
				FSDBG_BOT(263, dnp, *npp, 0xcaced1e0, ENOENT);
				return (ENOENT);
			}
			goto loop;
		}
		/* update attributes */
		if (nvap)
			error = nfs_loadattrcache(np, nvap, xidp, 0);
		if (error) {
			nfs_node_unlock(np);
			vnode_put(vp);
		} else {
			if (dnp && cnp && (flags & NG_MAKEENTRY))
				cache_enter(NFSTOV(dnp), vp, cnp);
			/*
			 * Update the vnode if the name and/or the parent has
			 * changed. We need to do this so that if getattrlist is
			 * called asking for ATTR_CMN_NAME, that the "most"
			 * correct name is being returned. In addition for
			 * monitored vnodes we need to kick the vnode out of the
			 * name cache. We do this so that if there are hard
			 * links in the same directory the link will not be
			 * found and a lookup will get us here to return the
			 * name of the current link. In addition by removing the
			 * name from the name cache the old name will not be
			 * found after a rename done on another client or the
			 * server.  The principal reason to do this is because
			 * Finder is asking for notifications on a directory.
			 * The directory changes, Finder gets notified, reads
			 * the directory (which we have purged) and for each
			 * entry returned calls getattrlist with the name
			 * returned from readdir. getattrlist has to call
			 * namei/lookup to resolve the name, because it's not in
			 * the cache we end up here. We need to update the name
			 * so Finder will get the name it called us with.
			 *
			 * We had an imperfect solution with respect to case
			 * sensitivity.  There is a test that is run in
			 * FileBuster that does renames from some name to
			 * another name differing only in case. It then reads
			 * the directory looking for the new name, after it
			 * finds that new name, it asks getattrlist to verify
			 * that the name is the new name.  Usually that works,
			 * but renames generate fsevents and fseventsd will do a
			 * lookup on the name via lstat. Since that test renames
			 * old name to new name back and forth there is a race
			 * that an fsevent will be behind and will access the
			 * file by the old name, on a case insensitive file
			 * system that will work. Problem is if we do a case
			 * sensitive compare, we're going to change the name,
			 * which the test's getattrlist verification step is
			 * going to fail. So we will check the case sensitivity
			 * of the file system and do the appropriate compare. In
			 * a rare instance for non homogeneous file systems
			 * w.r.t. pathconf we will use case sensitive compares.
			 * That could break if the file system is actually case
			 * insensitive.
			 *
			 * Note that V2 does not know the case, so we just
			 * assume case sensitivity. 
			 *
			 * This is clearly not perfect due to races, but this is
			 * as good as it's going to get. You can defeat the
			 * handling of hard links simply by doing:
			 *
			 *	while :; do ls -l > /dev/null; done
			 *
			 * in a terminal window. Even a single ls -l can cause a
			 * race.
			 *
			 * <rant>What we really need is for the caller, that
			 * knows the name being used is valid since it got it
			 * from a readdir to use that name and not ask for the
			 * ATTR_CMN_NAME</rant>
			 */
			if (dnp && cnp && (vp != NFSTOV(dnp))) {
				int update_flags = (vnode_ismonitored((NFSTOV(dnp)))) ? VNODE_UPDATE_CACHE : 0;
				int (*cmp)(const char *s1, const char *s2, size_t n);

				cmp = nfs_case_insensitive(mp) ? strncasecmp : strncmp;

				if (vp->v_name && cnp->cn_namelen && (*cmp)(cnp->cn_nameptr, vp->v_name, cnp->cn_namelen))
					update_flags |= VNODE_UPDATE_NAME;
				if ((vp->v_name == NULL && cnp->cn_namelen != 0) || (vp->v_name != NULL && cnp->cn_namelen == 0))
					update_flags |= VNODE_UPDATE_NAME;
				if (vnode_parent(vp) != NFSTOV(dnp))
					update_flags |= VNODE_UPDATE_PARENT;
				if (update_flags) {
					NFS_NODE_DBG("vnode_update_identity old name %s new name %.*s update flags = %x\n",
						     vp->v_name, cnp->cn_namelen, cnp->cn_nameptr ? cnp->cn_nameptr : "", update_flags);
					vnode_update_identity(vp, NFSTOV(dnp), cnp->cn_nameptr, cnp->cn_namelen, 0, update_flags);
				}
			}

			*npp = np;
		}
		FSDBG_BOT(263, dnp, *npp, 0xcace0000, error);
		return(error);
	}

	FSDBG(263, mp, dnp, npp, 0xaaaaaaaa);

	if (flags & NG_NOCREATE) {
		lck_mtx_unlock(nfs_node_hash_mutex);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOENT);
		return (ENOENT);
	}

	/*
	 * allocate and initialize nfsnode and stick it in the hash
	 * before calling getnewvnode().  Anyone finding it in the
	 * hash before initialization is complete will wait for it.
	 */
	MALLOC_ZONE(np, nfsnode_t, sizeof *np, M_NFSNODE, M_WAITOK);
	if (!np) {
		lck_mtx_unlock(nfs_node_hash_mutex);
		*npp = 0;
		FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOMEM);
		return (ENOMEM);
	}
	bzero(np, sizeof *np);
	np->n_hflag |= (NHINIT | NHLOCKED);
	np->n_mount = mp;
	np->n_auth = auth;
	TAILQ_INIT(&np->n_opens);
	TAILQ_INIT(&np->n_lock_owners);
	TAILQ_INIT(&np->n_locks);
	np->n_dlink.tqe_next = NFSNOLIST;
	np->n_dreturn.tqe_next = NFSNOLIST;
	np->n_monlink.le_next = NFSNOLIST;

	/* ugh... need to keep track of ".zfs" directories to workaround server bugs */
	if ((nvap->nva_type == VDIR) && cnp && (cnp->cn_namelen == 4) &&
	    (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == 'z') &&
	    (cnp->cn_nameptr[2] == 'f') && (cnp->cn_nameptr[3] == 's'))
		np->n_flag |= NISDOTZFS;
	if (dnp && (dnp->n_flag & NISDOTZFS))
		np->n_flag |= NISDOTZFSCHILD;

	if (dnp && cnp && ((cnp->cn_namelen != 2) ||
	    (cnp->cn_nameptr[0] != '.') || (cnp->cn_nameptr[1] != '.'))) {
		vnode_t dvp = NFSTOV(dnp);
		if (!vnode_get(dvp)) {
			if (!vnode_ref(dvp))
				np->n_parent = dvp;
			vnode_put(dvp);
		}
	}

	/* setup node's file handle */
	if (fhsize > NFS_SMALLFH) {
		MALLOC_ZONE(np->n_fhp, u_char *,
				fhsize, M_NFSBIGFH, M_WAITOK);
		if (!np->n_fhp) {
			lck_mtx_unlock(nfs_node_hash_mutex);
			FREE_ZONE(np, sizeof *np, M_NFSNODE);
			*npp = 0;
			FSDBG_BOT(263, dnp, *npp, 0x80000002, ENOMEM);
			return (ENOMEM);
		}
	} else {
Example #16
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfs_vncmp ncmp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);
	ncmp.fhsize = fhsize;
	ncmp.fh = fhp;

	error = vfs_hash_get(mntp, hash, flags,
	    td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if zalloc should block.
	 */
	np = uma_zalloc(nfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("nfs", mntp, &nfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	vp->v_bufobj.bo_ops = &buf_ops_nfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/* 
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert happened
	 * to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NFSnode lock", NULL, MTX_DEF);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = malloc(fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		if (np->n_fhsize > NFS_SMALLFH) {
			free((caddr_t)np->n_fhp, M_NFSBIGFH);
		}
		mtx_destroy(&np->n_mtx);
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, flags, 
	    td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
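
vfs_hash_get() and vfs_hash_insert() identify an existing vnode through the nfs_vncmpf callback and the struct nfs_vncmp key built at the top of the function. The comparator itself is not shown on this page; a plausible reconstruction, under FreeBSD's convention that the callback returns zero on a match (the body is an assumption, not the verbatim source):

static int
example_vncmpf(struct vnode *vp, void *arg)
{
	struct nfs_vncmp *ncmp = arg;
	struct nfsnode *np = VTONFS(vp);

	if (np->n_fhsize != ncmp->fhsize)
		return (1);
	return (bcmp(np->n_fhp, ncmp->fh, ncmp->fhsize));
}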
Example #17
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep;
	u_int32_t *tl;
	struct nfsmount *nmp;
	struct mbuf *md;
	time_t waituntil;
	caddr_t dpos;
	int error = 0;
	struct timeval now;
	AUTH *auth = NULL;
	enum nfs_rto_timer_t timer;
	struct nfs_feedback_arg nf;
	struct rpc_callextra ext;
	enum clnt_stat stat;
	struct timeval timo;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mreq);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	bzero(&nf, sizeof(struct nfs_feedback_arg));
	nf.nf_mount = nmp;
	nf.nf_td = td;
	getmicrouptime(&now);
	nf.nf_lastmsg = now.tv_sec -
		((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));

	/*
	 * XXX if not already connected call nfs_connect now. Longer
	 * term, change nfs_mount to call nfs_connect unconditionally
	 * and let clnt_reconnect_create handle reconnects.
	 */
	if (!nmp->nm_client)
		nfs_connect(nmp);

	auth = nfs_getauth(nmp, cred);
	if (!auth) {
		m_freem(mreq);
		return (EACCES);
	}
	bzero(&ext, sizeof(ext));
	ext.rc_auth = auth;

	ext.rc_feedback = nfs_feedback;
	ext.rc_feedback_arg = &nf;

	/*
	 * Use a conservative timeout for RPCs other than getattr,
	 * lookup, read or write. The justification for doing "other"
	 * this way is that these RPCs happen so infrequently that
	 * timer est. would probably be stale.  Also, since many of
	 * these RPCs are non-idempotent, a conservative timeout is
	 * desired.
	 */
	timer = nfs_rto_timer(procnum);
	if (timer != NFS_DEFAULT_TIMER) {
		ext.rc_timers = &nmp->nm_timers[timer - 1];
	} else {
		ext.rc_timers = NULL;
	}

#ifdef KDTRACE_HOOKS
	if (dtrace_nfsclient_nfs23_start_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nmp->nm_flag & NFSMNT_NFSV3) {
			probe_id = nfsclient_nfs3_start_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfsclient_nfs2_start_probes[procnum];
			probe_procnum = nfsv2_procid[procnum];
		}
		if (probe_id != 0)
			(dtrace_nfsclient_nfs23_start_probe)(probe_id, vp,
			    mreq, cred, probe_procnum);
	}
#endif

	nfsstats.rpcrequests++;
tryagain:
	timo.tv_sec = nmp->nm_timeo / NFS_HZ;
	timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
	mrep = NULL;
	stat = CLNT_CALL_MBUF(nmp->nm_client, &ext,
	    (nmp->nm_flag & NFSMNT_NFSV3) ? procnum : nfsv2_procid[procnum],
	    mreq, &mrep, timo);

	/*
	 * Map the RPC status returned by CLNT_CALL_MBUF() to an
	 * errno value.
	 */
	if (stat == RPC_SUCCESS) {
		error = 0;
	} else if (stat == RPC_TIMEDOUT) {
		error = ETIMEDOUT;
	} else if (stat == RPC_VERSMISMATCH) {
		error = EOPNOTSUPP;
	} else if (stat == RPC_PROGVERSMISMATCH) {
		error = EPROTONOSUPPORT;
	} else {
		error = EACCES;
	}
	if (error)
		goto nfsmout;

	KASSERT(mrep != NULL, ("mrep shouldn't be NULL if no error\n"));

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	error = nfs_realign(&mrep, 2 * NFSX_UNSIGNED);
	if (error == ENOMEM) {
		m_freem(mrep);
		AUTH_DESTROY(auth);
		return (error);
	}

	md = mrep;
	dpos = mtod(mrep, caddr_t);
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	if (*tl != 0) {
		error = fxdr_unsigned(int, *tl);
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    error == NFSERR_TRYLATER) {
			m_freem(mrep);
			error = 0;
			waituntil = time_second + nfs3_jukebox_delay;
			while (time_second < waituntil) {
				(void) tsleep(&fake_wchan, PSOCK, "nqnfstry", hz);
			}
			goto tryagain;
		}

		/*
		 * If the File Handle was stale, invalidate the lookup
		 * cache, just in case.
		 */
		if (error == ESTALE)
			nfs_purgecache(vp);
		/*
		 * Skip wcc data on NFS errors for now. NetApp filers
		 * return corrupt postop attrs in the wcc data for NFS
		 * err EROFS. Not sure if they could return corrupt
		 * postop attrs for other errors.
		 */
		if ((nmp->nm_flag & NFSMNT_NFSV3) && !nfs_skip_wcc_data_onerr) {
			*mrp = mrep;
			*mdp = md;
			*dposp = dpos;
			error |= NFSERR_RETERR;
		} else
			m_freem(mrep);
		goto nfsmout;
	}