Example #1
/*
 * NLM_UNSHARE, NLM4_UNSHARE
 *
 * Release a DOS-style share reservation
 */
void
nlm_do_unshare(nlm4_shareargs *argp, nlm4_shareres *resp, struct svc_req *sr)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct netbuf *addr;
	vnode_t *vp = NULL;
	char *netid;
	int error;
	struct shrlock shr;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_find(g, netid, addr);
	if (host == NULL) {
		resp->stat = nlm4_denied_nolocks;
		return;
	}

	DTRACE_PROBE3(unshare__start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_shareargs *, argp);

	if (NLM_IN_GRACE(g)) {
		resp->stat = nlm4_denied_grace_period;
		goto out;
	}

	vp = nlm_fh_to_vp(&argp->share.fh);
	if (vp == NULL) {
		resp->stat = nlm4_stale_fh;
		goto out;
	}

	/* Convert to local form. */
	nlm_init_shrlock(&shr, &argp->share, host);
	error = VOP_SHRLOCK(vp, F_UNSHARE, &shr,
	    FREAD | FWRITE, CRED(), NULL);

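	/*
	 * NLM_UNSHARE has no status value for reporting a local
	 * unshare failure, so ignore the VOP_SHRLOCK result and
	 * always reply nlm4_granted (the grace-period case was
	 * handled above).
	 */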
	(void) error;
	resp->stat = nlm4_granted;

out:
	DTRACE_PROBE3(unshare__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_shareres *, resp);

	if (vp != NULL)
		VN_RELE(vp);

	nlm_host_release(g, host);
}
Example #2
/*
 * NLM_UNLOCK, NLM_UNLOCK_MSG,
 * NLM4_UNLOCK, NLM4_UNLOCK_MSG,
 * Client removes one of their locks.
 */
void
nlm_do_unlock(nlm4_unlockargs *argp, nlm4_res *resp,
    struct svc_req *sr, nlm_res_cb cb)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct netbuf *addr;
	nlm_rpc_t *rpcp = NULL;
	vnode_t *vp = NULL;
	char *netid;
	char *name;
	int error;
	struct flock64 fl;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);
	name = argp->alock.caller_name;

	/*
	 * The NLM_UNLOCK operation doesn't have an error code
	 * denoting that the operation failed, so we always
	 * return nlm4_granted except when the server is
	 * in a grace period.
	 */
	resp->stat.stat = nlm4_granted;

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL)
		return;

	if (cb != NULL) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0)
			goto out;
	}

	DTRACE_PROBE3(start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_unlockargs *, argp);

	if (NLM_IN_GRACE(g)) {
		resp->stat.stat = nlm4_denied_grace_period;
		goto out;
	}

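	/*
	 * A stale file handle means there is nothing left to
	 * unlock, so the status stays nlm4_granted.
	 */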
	vp = nlm_fh_to_vp(&argp->alock.fh);
	if (vp == NULL)
		goto out;

	/* Convert to local form. */
	error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers, F_UNLCK);
	if (error)
		goto out;

	/* BSD: VOP_ADVLOCK(nv->nv_vp, NULL, F_UNLCK, &fl, F_REMOTE); */
	error = nlm_vop_frlock(vp, F_SETLK, &fl,
	    F_REMOTELOCK | FREAD | FWRITE,
	    (u_offset_t)0, NULL, CRED(), NULL);

	DTRACE_PROBE1(unlock__res, int, error);
out:
	/*
	 * If we have a callback function, use that to
	 * deliver the response via another RPC call.
	 */
	if (cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("unlock", rpcp, resp, cb);

	DTRACE_PROBE3(unlock__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_res *, resp);

	if (vp != NULL)
		VN_RELE(vp);
	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);

	nlm_host_release(g, host);
}
Example #3
/*
 * NLM_CANCEL, NLM_CANCEL_MSG,
 * NLM4_CANCEL, NLM4_CANCEL_MSG,
 * Client gives up waiting for a blocking lock.
 */
void
nlm_do_cancel(nlm4_cancargs *argp, nlm4_res *resp,
    struct svc_req *sr, nlm_res_cb cb)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct netbuf *addr;
	struct nlm_vhold *nvp = NULL;
	nlm_rpc_t *rpcp = NULL;
	char *netid;
	char *name;
	int error;
	struct flock64 fl;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);
	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);
	name = argp->alock.caller_name;

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL) {
		resp->stat.stat = nlm4_denied_nolocks;
		return;
	}
	if (cb != NULL) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0) {
			resp->stat.stat = nlm4_denied_nolocks;
			goto out;
		}
	}

	DTRACE_PROBE3(start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_cancargs *, argp);

	if (NLM_IN_GRACE(g)) {
		resp->stat.stat = nlm4_denied_grace_period;
		goto out;
	}

	nvp = nlm_fh_to_vhold(host, &argp->alock.fh);
	if (nvp == NULL) {
		resp->stat.stat = nlm4_stale_fh;
		goto out;
	}

	/* Convert to local form. */
	error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
	    (argp->exclusive) ? F_WRLCK : F_RDLCK);
	if (error) {
		resp->stat.stat = nlm4_failed;
		goto out;
	}

	error = nlm_slreq_unregister(host, nvp, &fl);
	if (error != 0) {
		/*
		 * There's no sleeping lock request corresponding
		 * to this lock, i.e. the requested sleeping lock
		 * doesn't exist, so there is nothing to cancel.
		 */
		resp->stat.stat = nlm4_denied;
		goto out;
	}

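	/*
	 * The sleeping request was found and removed.  Also undo
	 * any lock that was already set for this range.
	 */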
	fl.l_type = F_UNLCK;
	error = nlm_vop_frlock(nvp->nv_vp, F_SETLK, &fl,
	    F_REMOTELOCK | FREAD | FWRITE,
	    (u_offset_t)0, NULL, CRED(), NULL);

	resp->stat.stat = (error == 0) ?
	    nlm4_granted : nlm4_denied;

out:
	/*
	 * If we have a callback function, use that to
	 * deliver the response via another RPC call.
	 */
	if (cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("cancel", rpcp, resp, cb);

	DTRACE_PROBE3(cancel__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_res *, resp);

	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);

	nlm_vhold_release(host, nvp);
	nlm_host_release(g, host);
}
Example #4
/*
 * NLM_LOCK, NLM_LOCK_MSG, NLM_NM_LOCK
 * NLM4_LOCK, NLM4_LOCK_MSG, NLM4_NM_LOCK
 *
 * Client request to set a lock, possibly blocking.
 *
 * If the lock needs to block, we return status blocked to
 * this RPC call, and then later call back the client with
 * a "granted" callback.  Tricky aspects of this include:
 * sending a reply before this function returns, and then
 * borrowing this thread from the RPC service pool for the
 * wait on the lock and doing the later granted callback.
 *
 * We also have to keep a list of locks (pending + granted)
 * both to handle retransmitted requests, and to keep the
 * vnodes for those locks active.
 */
void
nlm_do_lock(nlm4_lockargs *argp, nlm4_res *resp, struct svc_req *sr,
    nlm_reply_cb reply_cb, nlm_res_cb res_cb, nlm_testargs_cb grant_cb)
{
	struct nlm_globals *g;
	struct flock64 fl;
	struct nlm_host *host = NULL;
	struct netbuf *addr;
	struct nlm_vhold *nvp = NULL;
	nlm_rpc_t *rpcp = NULL;
	char *netid;
	char *name;
	int error, flags;
	bool_t do_blocking = FALSE;
	bool_t do_mon_req = FALSE;
	enum nlm4_stats status;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	name = argp->alock.caller_name;
	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL) {
		DTRACE_PROBE4(no__host, struct nlm_globals *, g,
		    char *, name, char *, netid, struct netbuf *, addr);
		status = nlm4_denied_nolocks;
		goto doreply;
	}

	DTRACE_PROBE3(start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_lockargs *, argp);

	/*
	 * If this may be a _msg_ call that needs an RPC
	 * callback, get the RPC client handle now, so we
	 * know whether we can bind to the NLM service on
	 * this client.
	 *
	 * Note: the host object carries the transport type.
	 * One client using multiple transports gets a
	 * separate sysid for each of its transports.
	 */
	if (res_cb != NULL || (grant_cb != NULL && argp->block == TRUE)) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0) {
			status = nlm4_denied_nolocks;
			goto doreply;
		}
	}

	/*
	 * During the "grace period", only allow reclaim.
	 */
	if (argp->reclaim == 0 && NLM_IN_GRACE(g)) {
		status = nlm4_denied_grace_period;
		goto doreply;
	}

	/*
	 * Check whether we missed a host shutdown event.
	 */
	if (nlm_host_get_state(host) != argp->state)
		nlm_host_notify_server(host, argp->state);

	/*
	 * Get a hold on the vnode for a lock operation.
	 * Only lock() and share() need vhold objects.
	 */
	nvp = nlm_fh_to_vhold(host, &argp->alock.fh);
	if (nvp == NULL) {
		status = nlm4_stale_fh;
		goto doreply;
	}

	/* Convert to local form. */
	error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
	    (argp->exclusive) ? F_WRLCK : F_RDLCK);
	if (error) {
		status = nlm4_failed;
		goto doreply;
	}

	/*
	 * Try to lock non-blocking first.  If we succeed
	 * getting the lock, we can reply with the granted
	 * status directly and avoid the complications of
	 * making the "granted" RPC callback later.
	 *
	 * This also lets us find out now about some
	 * possible errors like EROFS, etc.
	 */
	flags = F_REMOTELOCK | FREAD | FWRITE;
	error = nlm_vop_frlock(nvp->nv_vp, F_SETLK, &fl, flags,
	    (u_offset_t)0, NULL, CRED(), NULL);

	DTRACE_PROBE3(setlk__res, struct flock64 *, &fl,
	    int, flags, int, error);

	switch (error) {
	case 0:
		/* Got it without waiting! */
		status = nlm4_granted;
		do_mon_req = TRUE;
		break;

	/* EINPROGRESS too? */
	case EAGAIN:
		/* We did not get the lock. Should we block? */
		if (argp->block == FALSE || grant_cb == NULL) {
			status = nlm4_denied;
			break;
		}
		/*
		 * Should block.  Try to reserve this thread
		 * so we can use it to wait for the lock and
		 * later send the granted message.  If this
		 * reservation fails, say "no resources".
		 */
		if (!svc_reserve_thread(sr->rq_xprt)) {
			status = nlm4_denied_nolocks;
			break;
		}
		/*
		 * OK, can detach this thread, so this call
		 * will block below (after we reply).
		 */
		status = nlm4_blocked;
		do_blocking = TRUE;
		do_mon_req = TRUE;
		break;

	case ENOLCK:
		/* Failed for lack of resources. */
		status = nlm4_denied_nolocks;
		break;

	case EROFS:
		/* read-only file system */
		status = nlm4_rofs;
		break;

	case EFBIG:
		/* file too big */
		status = nlm4_fbig;
		break;

	case EDEADLK:
		/* dead lock condition */
		status = nlm4_deadlck;
		break;

	default:
		status = nlm4_denied;
		break;
	}

doreply:
	resp->stat.stat = status;

	/*
	 * We get one of two function pointers: one for a
	 * normal RPC reply, and another for doing an RPC
	 * "callback" _res reply for a _msg function.
	 * Use either of those to send the reply now.
	 *
	 * If sending this reply fails, just leave the
	 * lock in the list for retransmitted requests.
	 * Cleanup is via unlock or host rele (statmon).
	 */
	if (reply_cb != NULL) {
		/* i.e. nlm_lock_1_reply */
		if (!(*reply_cb)(sr->rq_xprt, resp))
			svcerr_systemerr(sr->rq_xprt);
	}
	if (res_cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("lock", rpcp, resp, res_cb);

	/*
	 * The reply has been sent to the client.
	 * Start monitoring this client (maybe).
	 *
	 * Note that the non-monitored (NM) calls pass grant_cb=NULL
	 * indicating that the client doesn't support RPC callbacks.
	 * No monitoring for these (lame) clients.
	 */
	if (do_mon_req && grant_cb != NULL)
		nlm_host_monitor(g, host, argp->state);

	if (do_blocking) {
		/*
		 * We need to block on this lock, and when that
		 * completes, do the granted RPC call. Note that
		 * we "reserved" this thread above, so we can now
		 * "detach" it from the RPC SVC pool, allowing it
		 * to block indefinitely if needed.
		 */
		ASSERT(rpcp != NULL);
		(void) svc_detach_thread(sr->rq_xprt);
		nlm_block(argp, host, nvp, rpcp, &fl, grant_cb);
	}

	DTRACE_PROBE3(lock__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_res *, resp);

	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);

	nlm_vhold_release(host, nvp);
	nlm_host_release(g, host);
}
Example #5
/*
 * NLM_TEST, NLM_TEST_MSG,
 * NLM4_TEST, NLM4_TEST_MSG,
 * Client inquiry about locks, non-blocking.
 */
void
nlm_do_test(nlm4_testargs *argp, nlm4_testres *resp,
    struct svc_req *sr, nlm_testres_cb cb)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct nlm4_holder *lh;
	struct nlm_owner_handle *oh;
	nlm_rpc_t *rpcp = NULL;
	vnode_t *vp = NULL;
	struct netbuf *addr;
	char *netid;
	char *name;
	int error;
	struct flock64 fl;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	name = argp->alock.caller_name;
	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL) {
		resp->stat.stat = nlm4_denied_nolocks;
		return;
	}
	if (cb != NULL) {
		error = nlm_host_get_rpc(host, sr->rq_vers, &rpcp);
		if (error != 0) {
			resp->stat.stat = nlm4_denied_nolocks;
			goto out;
		}
	}

	vp = nlm_fh_to_vp(&argp->alock.fh);
	if (vp == NULL) {
		resp->stat.stat = nlm4_stale_fh;
		goto out;
	}

	if (NLM_IN_GRACE(g)) {
		resp->stat.stat = nlm4_denied_grace_period;
		goto out;
	}

	/* Convert to local form. */
	error = nlm_init_flock(&fl, &argp->alock, host, sr->rq_vers,
	    (argp->exclusive) ? F_WRLCK : F_RDLCK);
	if (error) {
		resp->stat.stat = nlm4_failed;
		goto out;
	}

	/* BSD: VOP_ADVLOCK(nv->nv_vp, NULL, F_GETLK, &fl, F_REMOTE); */
	error = nlm_vop_frlock(vp, F_GETLK, &fl,
	    F_REMOTELOCK | FREAD | FWRITE,
	    (u_offset_t)0, NULL, CRED(), NULL);
	if (error) {
		resp->stat.stat = nlm4_failed;
		goto out;
	}

	if (fl.l_type == F_UNLCK) {
		resp->stat.stat = nlm4_granted;
		goto out;
	}
	resp->stat.stat = nlm4_denied;

	/*
	 * This lock "test" fails due to a conflicting lock.
	 *
	 * If this is a v1 client, make sure the conflicting
	 * lock range we report can be expressed with 32-bit
	 * offsets.  The lock range requested was expressed
	 * as 32-bit offset and length, so at least part of
	 * the conflicting lock should lie below MAX_UOFF32.
	 * If the conflicting lock extends past that, we'll
	 * trim the range to end at MAX_UOFF32 so this lock
	 * can be represented in a 32-bit response.  Check
	 * the start also (paranoid, but a low cost check).
	 */
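	/*
	 * For example, a conflicting lock at l_start 0xFFFFF000
	 * with l_len 0x10000 would be trimmed to l_len 0x1000 so
	 * that it ends exactly at MAX_UOFF32.
	 */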
	if (sr->rq_vers < NLM4_VERS) {
		uint64 maxlen;
		if (fl.l_start > MAX_UOFF32)
			fl.l_start = MAX_UOFF32;
		maxlen = MAX_UOFF32 + 1 - fl.l_start;
		if (fl.l_len > maxlen)
			fl.l_len = maxlen;
	}

	/*
	 * Build the nlm4_holder result structure.
	 *
	 * Note that lh->oh is freed via xdr_free,
	 * xdr_nlm4_holder, xdr_netobj, xdr_bytes.
	 */
	oh = kmem_zalloc(sizeof (*oh), KM_SLEEP);
	oh->oh_sysid = (sysid_t)fl.l_sysid;
	lh = &resp->stat.nlm4_testrply_u.holder;
	lh->exclusive = (fl.l_type == F_WRLCK);
	lh->svid = fl.l_pid;
	lh->oh.n_len = sizeof (*oh);
	lh->oh.n_bytes = (void *)oh;
	lh->l_offset = fl.l_start;
	lh->l_len = fl.l_len;

out:
	/*
	 * If we have a callback function, use that to
	 * deliver the response via another RPC call.
	 */
	if (cb != NULL && rpcp != NULL)
		NLM_INVOKE_CALLBACK("test", rpcp, resp, cb);

	if (vp != NULL)
		VN_RELE(vp);
	if (rpcp != NULL)
		nlm_host_rele_rpc(host, rpcp);

	nlm_host_release(g, host);
}
Example #6
/*
 * NLM_SHARE, NLM4_SHARE
 *
 * Request a DOS-style share reservation
 */
void
nlm_do_share(nlm4_shareargs *argp, nlm4_shareres *resp, struct svc_req *sr)
{
	struct nlm_globals *g;
	struct nlm_host *host;
	struct netbuf *addr;
	struct nlm_vhold *nvp = NULL;
	char *netid;
	char *name;
	int error;
	struct shrlock shr;

	nlm_copy_netobj(&resp->cookie, &argp->cookie);

	name = argp->share.caller_name;
	netid = svc_getnetid(sr->rq_xprt);
	addr = svc_getrpccaller(sr->rq_xprt);

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, name, netid, addr);
	if (host == NULL) {
		resp->stat = nlm4_denied_nolocks;
		return;
	}

	DTRACE_PROBE3(share__start, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_shareargs *, argp);

	if (argp->reclaim == 0 && NLM_IN_GRACE(g)) {
		resp->stat = nlm4_denied_grace_period;
		goto out;
	}

	/*
	 * Get a hold on the vnode for the share operation.
	 * Only lock() and share() need vhold objects.
	 */
	nvp = nlm_fh_to_vhold(host, &argp->share.fh);
	if (nvp == NULL) {
		resp->stat = nlm4_stale_fh;
		goto out;
	}

	/* Convert to local form. */
	nlm_init_shrlock(&shr, &argp->share, host);
	error = VOP_SHRLOCK(nvp->nv_vp, F_SHARE, &shr,
	    FREAD | FWRITE, CRED(), NULL);

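	/*
	 * On success, start monitoring this client (via the
	 * status monitor) so we are notified if it reboots.
	 */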
	if (error == 0) {
		resp->stat = nlm4_granted;
		nlm_host_monitor(g, host, 0);
	} else {
		resp->stat = nlm4_denied;
	}

out:
	DTRACE_PROBE3(share__end, struct nlm_globals *, g,
	    struct nlm_host *, host, nlm4_shareres *, resp);

	nlm_vhold_release(host, nvp);
	nlm_host_release(g, host);
}
Example #7
/*
 * Get the access information from the cache or callup to the mountd
 * to get and cache the access information in the kernel.
 */
int
nfsauth_cache_get(struct exportinfo *exi, struct svc_req *req, int flavor)
{
	struct netbuf		  addr;
	struct netbuf		 *claddr;
	struct auth_cache	**head;
	struct auth_cache	 *ap;
	int			  access;
	varg_t			  varg = {0};
	nfsauth_res_t		  res = {0};
	XDR			  xdrs_a;
	XDR			  xdrs_r;
	size_t			  absz;
	caddr_t			  abuf;
	size_t			  rbsz = (size_t)(BYTES_PER_XDR_UNIT * 2);
	char			  result[BYTES_PER_XDR_UNIT * 2] = {0};
	caddr_t			  rbuf = (caddr_t)&result;
	int			  last = 0;
	door_arg_t		  da;
	door_info_t		  di;
	door_handle_t		  dh;
	uint_t			  ntries = 0;

	/*
	 * Now check whether this client already
	 * has an entry for this flavor in the cache
	 * for this export.
	 * Get the caller's address, mask off the
	 * parts of the address that do not identify
	 * the host (port number, etc), and then hash
	 * it to find the chain of cache entries.
	 */

	claddr = svc_getrpccaller(req->rq_xprt);
	addr = *claddr;
	addr.buf = kmem_alloc(addr.len, KM_SLEEP);
	bcopy(claddr->buf, addr.buf, claddr->len);
	addrmask(&addr, svc_getaddrmask(req->rq_xprt));
	head = &exi->exi_cache[hash(&addr)];

	rw_enter(&exi->exi_cache_lock, RW_READER);
	for (ap = *head; ap; ap = ap->auth_next) {
		if (EQADDR(&addr, &ap->auth_addr) && flavor == ap->auth_flavor)
			break;
	}
	if (ap) {				/* cache hit */
		access = ap->auth_access;
		ap->auth_time = gethrestime_sec();
		nfsauth_cache_hit++;
	}

	rw_exit(&exi->exi_cache_lock);

	if (ap) {
		kmem_free(addr.buf, addr.len);
		return (access);
	}

	nfsauth_cache_miss++;

	/*
	 * No entry in the cache for this client/flavor
	 * so we need to call the nfsauth service in the
	 * mount daemon.
	 */
retry:
	mutex_enter(&mountd_lock);
	dh = mountd_dh;
	if (dh)
		door_ki_hold(dh);
	mutex_exit(&mountd_lock);

	if (dh == NULL) {
		/*
		 * The rendezvous point has not been established yet!
		 * This could mean either that mountd(1m) has not yet
		 * been started or that _this_ routine nuked the door
		 * handle after receiving an EINTR for a REVOKED door.
		 *
		 * Returning NFSAUTH_DROP will cause the NFS client
		 * to retransmit the request, so let's try to be more
		 * resilient and retry a few times before we bail.
		 */
		if (++ntries % NFSAUTH_DR_TRYCNT) {
			delay(hz);
			goto retry;
		}
		sys_log("nfsauth: mountd has not established door");
		kmem_free(addr.buf, addr.len);
		return (NFSAUTH_DROP);
	}
	ntries = 0;
	varg.vers = V_PROTO;
	varg.arg_u.arg.cmd = NFSAUTH_ACCESS;
	varg.arg_u.arg.areq.req_client.n_len = addr.len;
	varg.arg_u.arg.areq.req_client.n_bytes = addr.buf;
	varg.arg_u.arg.areq.req_netid = svc_getnetid(req->rq_xprt);
	varg.arg_u.arg.areq.req_path = exi->exi_export.ex_path;
	varg.arg_u.arg.areq.req_flavor = flavor;

	/*
	 * Set up the XDR stream for encoding the arguments. Notice that
	 * in addition to the args having variable fields (req_netid and
	 * req_path), the argument data structure is itself versioned,
	 * so we need to make sure we can size the arguments buffer
	 * appropriately to encode all the args. If we can't get sizing
	 * info _or_ properly encode the arguments, there's really no
	 * point in continuing, so we fail the request.
	 */
	DTRACE_PROBE1(nfsserv__func__nfsauth__varg, varg_t *, &varg);
	if ((absz = xdr_sizeof(xdr_varg, (void *)&varg)) == 0) {
		door_ki_rele(dh);
		kmem_free(addr.buf, addr.len);
		return (NFSAUTH_DENIED);
	}
	abuf = (caddr_t)kmem_alloc(absz, KM_SLEEP);
	xdrmem_create(&xdrs_a, abuf, absz, XDR_ENCODE);
	if (!xdr_varg(&xdrs_a, &varg)) {
		door_ki_rele(dh);
		goto fail;
	}
	XDR_DESTROY(&xdrs_a);

	/*
	 * The result (nfsauth_res_t) is always two int's, so we don't
	 * have to dynamically size (or allocate) the results buffer.
	 * Now that we've got what we need, we prep the door arguments
	 * and place the call.
	 */
	da.data_ptr = (char *)abuf;
	da.data_size = absz;
	da.desc_ptr = NULL;
	da.desc_num = 0;
	da.rbuf = (char *)rbuf;
	da.rsize = rbsz;

	switch (door_ki_upcall_limited(dh, &da, NULL, SIZE_MAX, 0)) {
		case 0:				/* Success */
			if (da.data_ptr != da.rbuf && da.data_size == 0) {
				/*
				 * The door_return that contained the data
				 * failed! We're here because of the 2nd
				 * door_return (w/o data) such that we can
				 * get control of the thread (and exit
				 * gracefully).
				 */
				DTRACE_PROBE1(nfsserv__func__nfsauth__door__nil,
				    door_arg_t *, &da);
				door_ki_rele(dh);
				goto fail;

			} else if (rbuf != da.rbuf) {