Example #1
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep;
	u_int32_t *tl;
	struct nfsmount *nmp;
	struct mbuf *md;
	time_t waituntil;
	caddr_t dpos;
	int error = 0;
	struct timeval now;
	AUTH *auth = NULL;
	enum nfs_rto_timer_t timer;
	struct nfs_feedback_arg nf;
	struct rpc_callextra ext;
	enum clnt_stat stat;
	struct timeval timo;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mreq);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	bzero(&nf, sizeof(struct nfs_feedback_arg));
	nf.nf_mount = nmp;
	nf.nf_td = td;
	getmicrouptime(&now);
	nf.nf_lastmsg = now.tv_sec -
	    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));

	/*
	 * XXX if not already connected call nfs_connect now.  Longer
	 * term, change nfs_mount to call nfs_connect unconditionally
	 * and let clnt_reconnect_create handle reconnects.
	 */
	if (!nmp->nm_client)
		nfs_connect(nmp);

	auth = nfs_getauth(nmp, cred);
	if (!auth) {
		m_freem(mreq);
		return (EACCES);
	}
	bzero(&ext, sizeof(ext));
	ext.rc_auth = auth;

	ext.rc_feedback = nfs_feedback;
	ext.rc_feedback_arg = &nf;

	/*
	 * Use a conservative timeout for RPCs other than getattr,
	 * lookup, read or write.  The justification for doing "other"
	 * this way is that these RPCs happen so infrequently that
	 * timer est. would probably be stale.  Also, since many of
	 * these RPCs are non-idempotent, a conservative timeout is
	 * desired.
	 */
	timer = nfs_rto_timer(procnum);
	if (timer != NFS_DEFAULT_TIMER)
		ext.rc_timers = &nmp->nm_timers[timer - 1];
	else
		ext.rc_timers = NULL;

#ifdef KDTRACE_HOOKS
	if (dtrace_nfsclient_nfs23_start_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nmp->nm_flag & NFSMNT_NFSV3) {
			probe_id = nfsclient_nfs3_start_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfsclient_nfs2_start_probes[procnum];
			probe_procnum = nfsv2_procid[procnum];
		}
		if (probe_id != 0)
			(dtrace_nfsclient_nfs23_start_probe)(probe_id, vp,
			    mreq, cred, probe_procnum);
	}
#endif

	nfsstats.rpcrequests++;
tryagain:
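	/*
	 * Convert the per-mount RPC timeout, kept in NFS_HZ ticks, into
	 * a struct timeval for the call below.
	 */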
	timo.tv_sec = nmp->nm_timeo / NFS_HZ;
	timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
	mrep = NULL;
	stat = CLNT_CALL_MBUF(nmp->nm_client, &ext,
	    (nmp->nm_flag & NFSMNT_NFSV3) ? procnum : nfsv2_procid[procnum],
	    mreq, &mrep, timo);

	/*
	 * Map the RPC-level status returned by CLNT_CALL_MBUF() to an
	 * errno value.
	 */
	if (stat == RPC_SUCCESS)
		error = 0;
	else if (stat == RPC_TIMEDOUT)
		error = ETIMEDOUT;
	else if (stat == RPC_VERSMISMATCH)
		error = EOPNOTSUPP;
	else if (stat == RPC_PROGVERSMISMATCH)
		error = EPROTONOSUPPORT;
	else
		error = EACCES;
	if (error)
		goto nfsmout;

	KASSERT(mrep != NULL, ("mrep shouldn't be NULL if no error\n"));

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	error = nfs_realign(&mrep, M_DONTWAIT);
	if (error == ENOMEM) {
		m_freem(mrep);
		AUTH_DESTROY(auth);
		return (error);
	}

	md = mrep;
	dpos = mtod(mrep, caddr_t);
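	/*
	 * The first XDR word of the reply body is the NFS status; a
	 * non-zero value is an NFS error number from the server.
	 */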
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	if (*tl != 0) {
		error = fxdr_unsigned(int, *tl);
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    error == NFSERR_TRYLATER) {
			m_freem(mrep);
			error = 0;
			waituntil = time_second + nfs3_jukebox_delay;
			while (time_second < waituntil)
				(void)tsleep(&fake_wchan, PSOCK, "nqnfstry",
				    hz);
			goto tryagain;
		}

		/*
		 * If the File Handle was stale, invalidate the lookup
		 * cache, just in case.
		 */
		if (error == ESTALE)
			nfs_purgecache(vp);
		/*
		 * Skip wcc data on NFS errors for now.  NetApp filers
		 * return corrupt postop attrs in the wcc data for NFS
		 * err EROFS.  Not sure if they could return corrupt
		 * postop attrs for other errors.
		 */
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    !nfs_skip_wcc_data_onerr) {
			*mrp = mrep;
			*mdp = md;
			*dposp = dpos;
			error |= NFSERR_RETERR;
		} else
			m_freem(mrep);
		goto nfsmout;
	}
Example #2
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
int
nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep;
	u_int32_t *tl;
	struct nfsmount *nmp;
	struct mbuf *md;
	time_t waituntil;
	caddr_t dpos;
	int error = 0, timeo;
	AUTH *auth = NULL;
	enum nfs_rto_timer_t timer;
	struct nfs_feedback_arg nf;
	struct rpc_callextra ext;
	enum clnt_stat stat;
	struct timeval timo;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mreq);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	bzero(&nf, sizeof(struct nfs_feedback_arg));
	nf.nf_mount = nmp;
	nf.nf_td = td;
	nf.nf_lastmsg = time_uptime -
	    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));

	/*
	 * XXX if not already connected call nfs_connect now.  Longer
	 * term, change nfs_mount to call nfs_connect unconditionally
	 * and let clnt_reconnect_create handle reconnects.
	 */
	if (!nmp->nm_client)
		nfs_connect(nmp);

	auth = nfs_getauth(nmp, cred);
	if (!auth) {
		m_freem(mreq);
		return (EACCES);
	}
	bzero(&ext, sizeof(ext));
	ext.rc_auth = auth;

	ext.rc_feedback = nfs_feedback;
	ext.rc_feedback_arg = &nf;

	/*
	 * Use a conservative timeout for RPCs other than getattr,
	 * lookup, read or write.  The justification for doing "other"
	 * this way is that these RPCs happen so infrequently that
	 * timer est. would probably be stale.  Also, since many of
	 * these RPCs are non-idempotent, a conservative timeout is
	 * desired.
	 */
	timer = nfs_rto_timer(procnum);
	if (timer != NFS_DEFAULT_TIMER)
		ext.rc_timers = &nmp->nm_timers[timer - 1];
	else
		ext.rc_timers = NULL;

#ifdef KDTRACE_HOOKS
	if (dtrace_nfsclient_nfs23_start_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nmp->nm_flag & NFSMNT_NFSV3) {
			probe_id = nfsclient_nfs3_start_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfsclient_nfs2_start_probes[procnum];
			probe_procnum = nfsv2_procid[procnum];
		}
		if (probe_id != 0)
			(dtrace_nfsclient_nfs23_start_probe)(probe_id, vp,
			    mreq, cred, probe_procnum);
	}
#endif

	nfsstats.rpcrequests++;
tryagain:
	/*
	 * This timeout specifies when a new socket should be created,
	 * along with new xid values. For UDP, this should be done
	 * infrequently, since retransmits of RPC requests should normally
	 * use the same xid.
	 */
	if (nmp->nm_sotype == SOCK_DGRAM) {
		if ((nmp->nm_flag & NFSMNT_SOFT) != 0) {
			/*
			 * CLSET_RETRIES is set to 2, so this should be half
			 * of the total timeout required.
			 */
			timeo = nmp->nm_retry * nmp->nm_timeo / 2;
			if (timeo < 1)
				timeo = 1;
			timo.tv_sec = timeo / NFS_HZ;
			timo.tv_usec = (timeo % NFS_HZ) * 1000000 / NFS_HZ;
		} else {
			/* For UDP hard mounts, use a large value. */
			timo.tv_sec = NFS_MAXTIMEO / NFS_HZ;
			timo.tv_usec = 0;
		}
	} else {
		timo.tv_sec = nmp->nm_timeo / NFS_HZ;
		timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
	}
	mrep = NULL;
	stat = CLNT_CALL_MBUF(nmp->nm_client, &ext,
	    (nmp->nm_flag & NFSMNT_NFSV3) ? procnum : nfsv2_procid[procnum],
	    mreq, &mrep, timo);

	/*
	 * Map the RPC-level status returned by CLNT_CALL_MBUF() to an
	 * errno value.
	 */
	if (stat == RPC_SUCCESS)
		error = 0;
	else if (stat == RPC_TIMEDOUT) {
		nfsstats.rpctimeouts++;
		error = ETIMEDOUT;
	} else if (stat == RPC_VERSMISMATCH) {
		nfsstats.rpcinvalid++;
		error = EOPNOTSUPP;
	} else if (stat == RPC_PROGVERSMISMATCH) {
		nfsstats.rpcinvalid++;
		error = EPROTONOSUPPORT;
	} else if (stat == RPC_INTR) {
		error = EINTR;
	} else {
		nfsstats.rpcinvalid++;
		error = EACCES;
	}
	if (error)
		goto nfsmout;

	KASSERT(mrep != NULL, ("mrep shouldn't be NULL if no error\n"));

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	error = nfs_realign(&mrep, M_NOWAIT);
	if (error == ENOMEM) {
		m_freem(mrep);
		AUTH_DESTROY(auth);
		nfsstats.rpcinvalid++;
		return (error);
	}

	md = mrep;
	dpos = mtod(mrep, caddr_t);
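	/*
	 * The first XDR word of the reply body is the NFS status; a
	 * non-zero value is an NFS error number from the server.
	 */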
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	if (*tl != 0) {
		error = fxdr_unsigned(int, *tl);
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    error == NFSERR_TRYLATER) {
			m_freem(mrep);
			error = 0;
			waituntil = time_second + nfs3_jukebox_delay;
			while (time_second < waituntil)
				(void)tsleep(&fake_wchan, PSOCK, "nqnfstry",
				    hz);
			goto tryagain;
		}
		/*
		 * Make sure NFSERR_RETERR isn't bogusly set by a server
		 * such as amd. (No actual NFS error has bit 31 set.)
		 */
		error &= ~NFSERR_RETERR;

		/*
		 * If the File Handle was stale, invalidate the lookup
		 * cache, just in case.
		 */
		if (error == ESTALE)
			nfs_purgecache(vp);
		/*
		 * Skip wcc data on non-ENOENT NFS errors for now.
		 * NetApp filers return corrupt postop attrs in the
		 * wcc data for NFS err EROFS.  Not sure if they could
		 * return corrupt postop attrs for other errors.
		 * Blocking ENOENT post-op attributes breaks negative
		 * name caching, so always allow it through.
		 */
		if ((nmp->nm_flag & NFSMNT_NFSV3) &&
		    (!nfs_skip_wcc_data_onerr || error == ENOENT)) {
			*mrp = mrep;
			*mdp = md;
			*dposp = dpos;
			error |= NFSERR_RETERR;
		} else
			m_freem(mrep);
		goto nfsmout;
	}
Example #3
/*
 * Common code for mount and mountroot
 */
static int
mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam,
    char *hst, struct vnode **vpp, struct ucred *cred, int nametimeo,
    int negnametimeo)
{
	struct nfsmount *nmp;
	struct nfsnode *np;
	int error;
	struct vattr attrs;

	if (mp->mnt_flag & MNT_UPDATE) {
		nmp = VFSTONFS(mp);
		printf("%s: MNT_UPDATE is no longer handled here\n", __func__);
		free(nam, M_SONAME);
		return (0);
	} else {
		nmp = uma_zalloc(nfsmount_zone, M_WAITOK);
		bzero((caddr_t)nmp, sizeof (struct nfsmount));
		TAILQ_INIT(&nmp->nm_bufq);
		mp->mnt_data = nmp;
		nmp->nm_getinfo = nfs_getnlminfo;
		nmp->nm_vinvalbuf = nfs_vinvalbuf;
	}
	vfs_getnewfsid(mp);
	nmp->nm_mountp = mp;
	mtx_init(&nmp->nm_mtx, "NFSmount lock", NULL, MTX_DEF);

	/*
	 * V2 can only handle 32 bit filesizes.  A 4GB-1 limit may be too
	 * high, depending on whether we end up with negative offsets in
	 * the client or server somewhere.  2GB-1 may be safer.
	 *
	 * For V3, nfs_fsinfo will adjust this as necessary.  Assume maximum
	 * that we can handle until we find out otherwise.
	 */
	if ((argp->flags & NFSMNT_NFSV3) == 0)
		nmp->nm_maxfilesize = 0xffffffffLL;
	else
		nmp->nm_maxfilesize = OFF_MAX;

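	/*
	 * Establish defaults for the timeout, transfer-size and caching
	 * parameters; nfs_decode_args() below may override them from the
	 * mount arguments.
	 */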
	nmp->nm_timeo = NFS_TIMEO;
	nmp->nm_retry = NFS_RETRANS;
	if ((argp->flags & NFSMNT_NFSV3) && argp->sotype == SOCK_STREAM) {
		nmp->nm_wsize = nmp->nm_rsize = NFS_MAXDATA;
	} else {
		nmp->nm_wsize = NFS_WSIZE;
		nmp->nm_rsize = NFS_RSIZE;
	}
	nmp->nm_wcommitsize = hibufspace / (desiredvnodes / 1000);
	nmp->nm_readdirsize = NFS_READDIRSIZE;
	nmp->nm_numgrps = NFS_MAXGRPS;
	nmp->nm_readahead = NFS_DEFRAHEAD;
	nmp->nm_deadthresh = NFS_MAXDEADTHRESH;
	nmp->nm_nametimeo = nametimeo;
	nmp->nm_negnametimeo = negnametimeo;
	nmp->nm_tprintf_delay = nfs_tprintf_delay;
	if (nmp->nm_tprintf_delay < 0)
		nmp->nm_tprintf_delay = 0;
	nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
	if (nmp->nm_tprintf_initial_delay < 0)
		nmp->nm_tprintf_initial_delay = 0;
	nmp->nm_fhsize = argp->fhsize;
	bcopy((caddr_t)argp->fh, (caddr_t)nmp->nm_fh, argp->fhsize);
	bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN);
	nmp->nm_nam = nam;
	/* Set up the sockets and per-host congestion */
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;
	nmp->nm_rpcops = &nfs_rpcops;

	nfs_decode_args(mp, nmp, argp, hst);

	/*
	 * For Connection based sockets (TCP,...) defer the connect until
	 * the first request, in case the server is not responding.
	 */
	if (nmp->nm_sotype == SOCK_DGRAM &&
		(error = nfs_connect(nmp)))
		goto bad;

	/*
	 * This is silly, but it has to be set so that vinifod() works.
	 * We do not want to do an nfs_statfs() here since we can get
	 * stuck on a dead server and we are holding a lock on the mount
	 * point.
	 */
	mtx_lock(&nmp->nm_mtx);
	mp->mnt_stat.f_iosize = nfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);
	/*
	 * A reference count is needed on the nfsnode representing the
	 * remote root.  If this object is not persistent, then backward
	 * traversals of the mount point (i.e. "..") will not work if
	 * the nfsnode gets flushed out of the cache. Ufs does not have
	 * this problem, because one can identify root inodes by their
	 * number == ROOTINO (2).
	 */
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
	if (error)
		goto bad;
	*vpp = NFSTOV(np);

	/*
	 * Get file attributes and transfer parameters for the
	 * mountpoint.  This has the side effect of filling in
	 * (*vpp)->v_type with the correct value.
	 */
	if (argp->flags & NFSMNT_NFSV3)
		nfs_fsinfo(nmp, *vpp, curthread->td_ucred, curthread);
	else
		VOP_GETATTR(*vpp, &attrs, curthread->td_ucred);

	/*
	 * Lose the lock but keep the ref.
	 */
	VOP_UNLOCK(*vpp, 0);

	return (0);
bad:
	nfs_disconnect(nmp);
	mtx_destroy(&nmp->nm_mtx);
	uma_zfree(nfsmount_zone, nmp);
	free(nam, M_SONAME);
	return (error);
}
Example #4
static void
nfs_decode_args(struct mount *mp, struct nfsmount *nmp, struct nfs_args *argp,
	const char *hostname)
{
	int s;
	int adjsock;
	int maxio;
	char *p;
	char *secname;
	char *principal;

	s = splnet();

	/*
	 * Set read-only flag if requested; otherwise, clear it if this is
	 * an update.  If this is not an update, then either the read-only
	 * flag is already clear, or this is a root mount and it was set
	 * intentionally at some previous point.
	 */
	if (vfs_getopt(mp->mnt_optnew, "ro", NULL, NULL) == 0) {
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_RDONLY;
		MNT_IUNLOCK(mp);
	} else if (mp->mnt_flag & MNT_UPDATE) {
		MNT_ILOCK(mp);
		mp->mnt_flag &= ~MNT_RDONLY;
		MNT_IUNLOCK(mp);
	}

	/*
	 * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes
	 * no sense in that context.  Also, set up appropriate retransmit
	 * and soft timeout behavior.
	 */
	if (argp->sotype == SOCK_STREAM) {
		nmp->nm_flag &= ~NFSMNT_NOCONN;
		nmp->nm_flag |= NFSMNT_DUMBTIMR;
		nmp->nm_timeo = NFS_MAXTIMEO;
		nmp->nm_retry = NFS_RETRANS_TCP;
	}

	/* Also clear RDIRPLUS if not NFSv3, it crashes some servers */
	if ((argp->flags & NFSMNT_NFSV3) == 0)
		nmp->nm_flag &= ~NFSMNT_RDIRPLUS;

	/* Re-bind if rsrvd port requested and wasn't on one */
	adjsock = !(nmp->nm_flag & NFSMNT_RESVPORT)
		  && (argp->flags & NFSMNT_RESVPORT);
	/* Also re-bind if we're switching to/from a connected UDP socket */
	adjsock |= ((nmp->nm_flag & NFSMNT_NOCONN) !=
		    (argp->flags & NFSMNT_NOCONN));

	/* Update flags atomically.  Don't change the lock bits. */
	nmp->nm_flag = argp->flags | nmp->nm_flag;
	splx(s);

	if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) {
		nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10;
		if (nmp->nm_timeo < NFS_MINTIMEO)
			nmp->nm_timeo = NFS_MINTIMEO;
		else if (nmp->nm_timeo > NFS_MAXTIMEO)
			nmp->nm_timeo = NFS_MAXTIMEO;
	}

	if ((argp->flags & NFSMNT_RETRANS) && argp->retrans > 1) {
		nmp->nm_retry = argp->retrans;
		if (nmp->nm_retry > NFS_MAXREXMIT)
			nmp->nm_retry = NFS_MAXREXMIT;
	}

	if (argp->flags & NFSMNT_NFSV3) {
		if (argp->sotype == SOCK_DGRAM)
			maxio = NFS_MAXDGRAMDATA;
		else
			maxio = NFS_MAXDATA;
	} else
		maxio = NFS_V2MAXDATA;

	if ((argp->flags & NFSMNT_WSIZE) && argp->wsize > 0) {
		nmp->nm_wsize = argp->wsize;
		/* Round down to multiple of blocksize */
		nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_wsize <= 0)
			nmp->nm_wsize = NFS_FABLKSIZE;
	}
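	/* Clamp the write size to the protocol maximum and to MAXBSIZE. */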
	if (nmp->nm_wsize > maxio)
		nmp->nm_wsize = maxio;
	if (nmp->nm_wsize > MAXBSIZE)
		nmp->nm_wsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_RSIZE) && argp->rsize > 0) {
		nmp->nm_rsize = argp->rsize;
		/* Round down to multiple of blocksize */
		nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_rsize <= 0)
			nmp->nm_rsize = NFS_FABLKSIZE;
	}
	if (nmp->nm_rsize > maxio)
		nmp->nm_rsize = maxio;
	if (nmp->nm_rsize > MAXBSIZE)
		nmp->nm_rsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_READDIRSIZE) && argp->readdirsize > 0) {
		nmp->nm_readdirsize = argp->readdirsize;
	}
	if (nmp->nm_readdirsize > maxio)
		nmp->nm_readdirsize = maxio;
	if (nmp->nm_readdirsize > nmp->nm_rsize)
		nmp->nm_readdirsize = nmp->nm_rsize;

	if ((argp->flags & NFSMNT_ACREGMIN) && argp->acregmin >= 0)
		nmp->nm_acregmin = argp->acregmin;
	else
		nmp->nm_acregmin = NFS_MINATTRTIMO;
	if ((argp->flags & NFSMNT_ACREGMAX) && argp->acregmax >= 0)
		nmp->nm_acregmax = argp->acregmax;
	else
		nmp->nm_acregmax = NFS_MAXATTRTIMO;
	if ((argp->flags & NFSMNT_ACDIRMIN) && argp->acdirmin >= 0)
		nmp->nm_acdirmin = argp->acdirmin;
	else
		nmp->nm_acdirmin = NFS_MINDIRATTRTIMO;
	if ((argp->flags & NFSMNT_ACDIRMAX) && argp->acdirmax >= 0)
		nmp->nm_acdirmax = argp->acdirmax;
	else
		nmp->nm_acdirmax = NFS_MAXDIRATTRTIMO;
	if (nmp->nm_acdirmin > nmp->nm_acdirmax)
		nmp->nm_acdirmin = nmp->nm_acdirmax;
	if (nmp->nm_acregmin > nmp->nm_acregmax)
		nmp->nm_acregmin = nmp->nm_acregmax;

	if ((argp->flags & NFSMNT_MAXGRPS) && argp->maxgrouplist >= 0) {
		if (argp->maxgrouplist <= NFS_MAXGRPS)
			nmp->nm_numgrps = argp->maxgrouplist;
		else
			nmp->nm_numgrps = NFS_MAXGRPS;
	}
	if ((argp->flags & NFSMNT_READAHEAD) && argp->readahead >= 0) {
		if (argp->readahead <= NFS_MAXRAHEAD)
			nmp->nm_readahead = argp->readahead;
		else
			nmp->nm_readahead = NFS_MAXRAHEAD;
	}
	if ((argp->flags & NFSMNT_WCOMMITSIZE) && argp->wcommitsize >= 0) {
		if (argp->wcommitsize < nmp->nm_wsize)
			nmp->nm_wcommitsize = nmp->nm_wsize;
		else
			nmp->nm_wcommitsize = argp->wcommitsize;
	}
	if ((argp->flags & NFSMNT_DEADTHRESH) && argp->deadthresh >= 0) {
		if (argp->deadthresh <= NFS_MAXDEADTHRESH)
			nmp->nm_deadthresh = argp->deadthresh;
		else
			nmp->nm_deadthresh = NFS_MAXDEADTHRESH;
	}

	adjsock |= ((nmp->nm_sotype != argp->sotype) ||
		    (nmp->nm_soproto != argp->proto));
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;

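	/*
	 * If the socket binding, type or protocol changed while a
	 * connection exists, drop it; for UDP, reconnect immediately.
	 */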
	if (nmp->nm_client && adjsock) {
		nfs_safedisconnect(nmp);
		if (nmp->nm_sotype == SOCK_DGRAM)
			while (nfs_connect(nmp)) {
				printf("nfs_args: retrying connect\n");
				(void) tsleep(&fake_wchan, PSOCK, "nfscon", hz);
			}
	}

	if (hostname) {
		strlcpy(nmp->nm_hostname, hostname,
		    sizeof(nmp->nm_hostname));
		p = strchr(nmp->nm_hostname, ':');
		if (p)
			*p = '\0';
	}

	if (vfs_getopt(mp->mnt_optnew, "sec",
		(void **) &secname, NULL) == 0) {
		nmp->nm_secflavor = nfs_sec_name_to_num(secname);
	} else {
		nmp->nm_secflavor = AUTH_SYS;
	}

	if (vfs_getopt(mp->mnt_optnew, "principal",
		(void **) &principal, NULL) == 0) {
		strlcpy(nmp->nm_principal, principal,
		    sizeof(nmp->nm_principal));
	} else {
		snprintf(nmp->nm_principal, sizeof(nmp->nm_principal),
		    "nfs@%s", nmp->nm_hostname);
	}
}
Example #5
void
nfs_decode_args(struct nfsmount *nmp, struct nfs_args *argp, struct lwp *l)
{
	int s;
	int adjsock;
	int maxio;

	s = splsoftnet();

	/*
	 * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes
	 * no sense in that context.
	 */
	if (argp->sotype == SOCK_STREAM)
		argp->flags &= ~NFSMNT_NOCONN;

	/*
	 * Cookie translation is not needed for v2, silently ignore it.
	 */
	if ((argp->flags & (NFSMNT_XLATECOOKIE|NFSMNT_NFSV3)) ==
	    NFSMNT_XLATECOOKIE)
		argp->flags &= ~NFSMNT_XLATECOOKIE;

	/* Re-bind if rsrvd port requested and wasn't on one */
	adjsock = !(nmp->nm_flag & NFSMNT_RESVPORT)
		  && (argp->flags & NFSMNT_RESVPORT);
	/* Also re-bind if we're switching to/from a connected UDP socket */
	adjsock |= ((nmp->nm_flag & NFSMNT_NOCONN) !=
		    (argp->flags & NFSMNT_NOCONN));

	/* Update flags. */
	nmp->nm_flag = argp->flags;
	splx(s);

	if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) {
		nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10;
		if (nmp->nm_timeo < NFS_MINTIMEO)
			nmp->nm_timeo = NFS_MINTIMEO;
		else if (nmp->nm_timeo > NFS_MAXTIMEO)
			nmp->nm_timeo = NFS_MAXTIMEO;
	}

	if ((argp->flags & NFSMNT_RETRANS) && argp->retrans > 1) {
		nmp->nm_retry = argp->retrans;
		if (nmp->nm_retry > NFS_MAXREXMIT)
			nmp->nm_retry = NFS_MAXREXMIT;
	}

#ifndef NFS_V2_ONLY
	if (argp->flags & NFSMNT_NFSV3) {
		if (argp->sotype == SOCK_DGRAM)
			maxio = NFS_MAXDGRAMDATA;
		else
			maxio = NFS_MAXDATA;
	} else
#endif
		maxio = NFS_V2MAXDATA;

	if ((argp->flags & NFSMNT_WSIZE) && argp->wsize > 0) {
		int osize = nmp->nm_wsize;
		nmp->nm_wsize = argp->wsize;
		/* Round down to multiple of blocksize */
		nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_wsize <= 0)
			nmp->nm_wsize = NFS_FABLKSIZE;
		adjsock |= (nmp->nm_wsize != osize);
	}
	if (nmp->nm_wsize > maxio)
		nmp->nm_wsize = maxio;
	if (nmp->nm_wsize > MAXBSIZE)
		nmp->nm_wsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_RSIZE) && argp->rsize > 0) {
		int osize = nmp->nm_rsize;
		nmp->nm_rsize = argp->rsize;
		/* Round down to multiple of blocksize */
		nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_rsize <= 0)
			nmp->nm_rsize = NFS_FABLKSIZE;
		adjsock |= (nmp->nm_rsize != osize);
	}
	if (nmp->nm_rsize > maxio)
		nmp->nm_rsize = maxio;
	if (nmp->nm_rsize > MAXBSIZE)
		nmp->nm_rsize = MAXBSIZE;

	if ((argp->flags & NFSMNT_READDIRSIZE) && argp->readdirsize > 0) {
		nmp->nm_readdirsize = argp->readdirsize;
		/* Round down to multiple of minimum blocksize */
		nmp->nm_readdirsize &= ~(NFS_DIRFRAGSIZ - 1);
		if (nmp->nm_readdirsize < NFS_DIRFRAGSIZ)
			nmp->nm_readdirsize = NFS_DIRFRAGSIZ;
		/* Bigger than buffer size makes no sense */
		if (nmp->nm_readdirsize > NFS_DIRBLKSIZ)
			nmp->nm_readdirsize = NFS_DIRBLKSIZ;
	} else if (argp->flags & NFSMNT_RSIZE)
		nmp->nm_readdirsize = nmp->nm_rsize;

	if (nmp->nm_readdirsize > maxio)
		nmp->nm_readdirsize = maxio;

	if ((argp->flags & NFSMNT_MAXGRPS) && argp->maxgrouplist >= 0 &&
		argp->maxgrouplist <= NFS_MAXGRPS)
		nmp->nm_numgrps = argp->maxgrouplist;
	if ((argp->flags & NFSMNT_READAHEAD) && argp->readahead >= 0 &&
		argp->readahead <= NFS_MAXRAHEAD)
		nmp->nm_readahead = argp->readahead;
	if ((argp->flags & NFSMNT_DEADTHRESH) && argp->deadthresh >= 1 &&
		argp->deadthresh <= NFS_NEVERDEAD)
		nmp->nm_deadthresh = argp->deadthresh;

	adjsock |= ((nmp->nm_sotype != argp->sotype) ||
		    (nmp->nm_soproto != argp->proto));
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;

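	/*
	 * If the socket binding, type or protocol changed while a
	 * connection exists, drop it; for UDP, reconnect immediately.
	 */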
	if (nmp->nm_so && adjsock) {
		nfs_safedisconnect(nmp);
		if (nmp->nm_sotype == SOCK_DGRAM)
			while (nfs_connect(nmp, (struct nfsreq *)0, l)) {
				printf("nfs_args: retrying connect\n");
				kpause("nfscn3", false, hz, NULL);
			}
	}
}