/*
 * This the routine has the same definition as clnt_create_vers(),
 * except it takes an additional timeout parameter - a pointer to
 * a timeval structure.  A NULL value for the pointer indicates
 * that the default timeout value should be used.
 *
 * Creates a client handle for [vers_low_in, vers_high_in], probing
 * downward from the highest version via NULLPROC pings until the
 * server accepts one.  On success *vers_out holds the negotiated
 * version; on failure NULL is returned and rpc_createerr is filled in.
 */
CLIENT *
clnt_create_vers_timed(const char *hostname, const rpcprog_t prog,
    rpcvers_t *vers_out, const rpcvers_t vers_low_in,
    const rpcvers_t vers_high_in, const char *nettype,
    const struct timeval *tp)
{
	CLIENT *clnt;
	struct timeval to;
	enum clnt_stat rpc_stat;
	struct rpc_err rpcerr;
	/* local working copies - the negotiation loop narrows this range */
	rpcvers_t vers_high = vers_high_in;
	rpcvers_t vers_low = vers_low_in;

	/* tp only bounds handle creation; the ping below uses its own timeout */
	clnt = clnt_create_timed(hostname, prog, vers_high, nettype, tp);
	if (clnt == NULL) {
		return (NULL);
	}
	to.tv_sec = 10;		/* fixed 10s timeout for each NULLPROC probe */
	to.tv_usec = 0;
	rpc_stat = clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void,
	    (char *)NULL, (xdrproc_t)xdr_void, (char *)NULL, to);
	if (rpc_stat == RPC_SUCCESS) {
		/* server speaks the highest requested version */
		*vers_out = vers_high;
		return (clnt);
	}
	while (rpc_stat == RPC_PROGVERSMISMATCH && vers_high > vers_low) {
		unsigned int minvers, maxvers;

		/* the mismatch reply carries the server's supported range */
		clnt_geterr(clnt, &rpcerr);
		minvers = rpcerr.re_vers.low;
		maxvers = rpcerr.re_vers.high;
		if (maxvers < vers_high)
			vers_high = maxvers;
		else
			vers_high--;	/* server lied or range overlaps; step down */
		if (minvers > vers_low)
			vers_low = minvers;
		if (vers_low > vers_high) {
			/* ranges are disjoint - no common version exists */
			goto error;
		}
		CLNT_CONTROL(clnt, CLSET_VERS, (char *)&vers_high);
		rpc_stat = clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void,
		    (char *)NULL, (xdrproc_t)xdr_void, (char *)NULL, to);
		if (rpc_stat == RPC_SUCCESS) {
			*vers_out = vers_high;
			return (clnt);
		}
	}
	/* non-mismatch failure: capture the final error before reporting */
	clnt_geterr(clnt, &rpcerr);

error:
	/* publish the failure through the global create-error cell */
	rpc_createerr.cf_stat = rpc_stat;
	rpc_createerr.cf_error = rpcerr;
	clnt_destroy(clnt);
	return (NULL);
}
/*
 * This has the same definition as clnt_tp_create(), except it
 * takes an additional parameter - a pointer to a timeval structure.
 * A NULL value for the timeout pointer indicates that the default
 * value for the timeout should be used.
 *
 * Looks up the server's address via rpcbind (which may hand back a
 * reusable CLIENT handle) and either builds a fresh TLI client or
 * repoints the reused handle at the new address/prog/vers.
 */
CLIENT *
clnt_tp_create_timed(const char *hostname, rpcprog_t prog, rpcvers_t vers,
    const struct netconfig *nconf, const struct timeval *tp)
{
	struct netbuf *svcaddr;			/* servers address */
	CLIENT *cl = NULL;			/* client handle */

	if (nconf == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
		return (NULL);
	}

	/*
	 * Get the address of the server.  __rpcb_findaddr_timed() may also
	 * return an already-connected handle through &cl for reuse.
	 */
	if ((svcaddr = __rpcb_findaddr_timed(prog, vers,
	    (struct netconfig *)nconf, (char *)hostname, &cl,
	    (struct timeval *)tp)) == NULL) {
		/* appropriate error number is set by rpcbind libraries */
		return (NULL);
	}
	if (cl == NULL) {
		/* no reusable handle - create one from scratch */
		cl = clnt_tli_create(RPC_ANYFD, nconf, svcaddr,
		    prog, vers, 0, 0);
	} else {
		/* Reuse the CLIENT handle and change the appropriate fields */
		if (CLNT_CONTROL(cl, CLSET_SVC_ADDR, (void *)svcaddr) == TRUE) {
			/* fill in netid/device only if still unset */
			if (cl->cl_netid == NULL)
				cl->cl_netid = strdup(nconf->nc_netid);
			if (cl->cl_tp == NULL)
				cl->cl_tp = strdup(nconf->nc_device);
			(void) CLNT_CONTROL(cl, CLSET_PROG, (void *)&prog);
			(void) CLNT_CONTROL(cl, CLSET_VERS, (void *)&vers);
		} else {
			/* transport refused the new address - start over */
			CLNT_DESTROY(cl);
			cl = clnt_tli_create(RPC_ANYFD, nconf, svcaddr,
			    prog, vers, 0, 0);
		}
	}
	/* the netbuf from rpcbind is ours to free */
	free(svcaddr->buf);
	free(svcaddr);
	return (cl);
}
/*
 * Build a UDP client handle with caller-specified send/receive buffer
 * sizes, then install the supplied retransmission interval on it.
 * Returns NULL (with rpc_createerr set by clnt_com_create) on failure.
 */
CLIENT *
clntudp_bufcreate(struct sockaddr_in *raddr, rpcprog_t prog, rpcvers_t vers,
    struct timeval wait, int *sockp, uint_t sendsz, uint_t recvsz)
{
	CLIENT *client;

	client = clnt_com_create(raddr, prog, vers, sockp, sendsz,
	    recvsz, "udp");
	if (client != NULL) {
		/* apply the caller's per-retry timeout */
		(void) CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT,
		    (char *)&wait);
	}
	return (client);
}
/*
 * Compatibility wrapper: create a buffered UDP client from the old
 * u_long prog/vers interface and set its retry timeout.  Returns NULL
 * on failure, leaving rpc_createerr as clnt_com_create set it.
 */
CLIENT *
clntudp_bufcreate(struct sockaddr_in *raddr, u_long prog, u_long vers,
    struct timeval wait, int *sockp, u_int sendsz, u_int recvsz)
{
	CLIENT *handle = clnt_com_create(raddr, (rpcprog_t)prog,
	    (rpcvers_t)vers, sockp, sendsz, recvsz, "udp");

	if (handle == NULL)
		return (NULL);

	/* install the caller's retransmission interval */
	(void) CLNT_CONTROL(handle, CLSET_RETRY_TIMEOUT, &wait);
	return (handle);
}
/*
 * One-shot authenticated UDP RPC call helper.
 *
 * Creates a UDP client to `host' for prognum/versnum, attaches default
 * AUTH_UNIX credentials, and calls `procnum' with a 6 second retry
 * interval and a 25 second overall timeout.
 *
 * Returns the clnt_call() status, or rpc_createerr.cf_stat if the
 * client handle could not be created.
 *
 * Fixes over the previous version:
 *  - returned rpc_createerr.cf_stat directly instead of casting it
 *    through int in a function declared to return enum clnt_stat;
 *  - the CLIENT handle and its auth were never destroyed, leaking a
 *    socket and heap memory on every invocation.
 */
static enum clnt_stat
callaurpc(char *host, int prognum, int versnum, int procnum,
    xdrproc_t inproc, char *in, xdrproc_t outproc, char *out)
{
	enum clnt_stat clnt_stat;
	struct timeval timeout, tottimeout;
	CLIENT *client = NULL;

	client = clnt_create(host, prognum, versnum, "udp");
	if (client == NULL)
		return (rpc_createerr.cf_stat);

	/* per-retransmit interval for the datagram transport */
	timeout.tv_usec = 0;
	timeout.tv_sec = 6;
	CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, (char *)(void *)&timeout);

	client->cl_auth = authunix_create_default();

	/* total time budget for the call */
	tottimeout.tv_sec = 25;
	tottimeout.tv_usec = 0;
	clnt_stat = clnt_call(client, procnum, inproc, in, outproc, out,
	    tottimeout);

	/* release the auth and the handle - results are already in `out' */
	auth_destroy(client->cl_auth);
	clnt_destroy(client);
	return (clnt_stat);
}
/*
 * Return a client handle, either from a the small
 * rfs4_client_t cache or one that we just created.
 * A cached handle gets its XID reset before reuse.
 */
static CLIENT *
rfs4_cb_getch(rfs4_cbinfo_t *cbp)
{
	uint32_t reset_xid = 0;
	CLIENT *handle;

	mutex_enter(cbp->cb_lock);

	if (cbp->cb_chc_free == 0) {
		/* cache exhausted - build a fresh handle */
		mutex_exit(cbp->cb_lock);
		return (rfs4_cbch_init(cbp));
	}

	/* pop the most recently returned handle off the cache */
	cbp->cb_chc_free--;
	handle = cbp->cb_chc[cbp->cb_chc_free];
	mutex_exit(cbp->cb_lock);

	/* force a new transaction id for the reused handle */
	(void) CLNT_CONTROL(handle, CLSET_XID, (char *)&reset_xid);
	return (handle);
}
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 *
 * Builds an auto-reconnecting RPC client for the mount, sized from the
 * sysctl-driven buffer reservation, and installs it as nmp->nm_client
 * unless another thread won the race.
 */
int
nfs_connect(struct nfsmount *nmp)
{
	int rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct ucred *origcred;
	struct thread *td = curthread;
	CLIENT *client;
	struct netconfig *nconf;
	rpcvers_t vers;
	int one = 1, retries;
	struct timeval timo;

	/*
	 * We need to establish the socket using the credentials of
	 * the mountpoint.  Some parts of this process (such as
	 * sobind() and soconnect()) will use the curent thread's
	 * credential instead of the socket credential.  To work
	 * around this, temporarily change the current thread's
	 * credential to that of the mountpoint.
	 *
	 * XXX: It would be better to explicitly pass the correct
	 * credential to sobind() and soconnect().
	 */
	origcred = td->td_ucred;
	td->td_ucred = nmp->nm_mountp->mnt_cred;
	saddr = nmp->nm_nam;

	/* pick the RPC version from the mount flags (default NFSv2) */
	vers = NFS_VER2;
	if (nmp->nm_flag & NFSMNT_NFSV3)
		vers = NFS_VER3;
	else if (nmp->nm_flag & NFSMNT_NFSV4)
		vers = NFS_VER4;

	/*
	 * Select netconfig by address family and socket type.
	 * NOTE(review): getnetconfigent() result is not checked here;
	 * a NULL nconf would be passed on - presumably cannot happen
	 * for these fixed netids, but worth confirming.
	 */
	if (saddr->sa_family == AF_INET)
		if (nmp->nm_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp");
		else
			nconf = getnetconfigent("tcp");
	else
		if (nmp->nm_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp6");
		else
			nconf = getnetconfigent("tcp6");

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;
	mtx_lock(&nmp->nm_mtx);
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		/* stream transports carry a 4-byte record mark per packet */
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	mtx_unlock(&nmp->nm_mtx);

	client = clnt_reconnect_create(nconf, saddr, NFS_PROG, vers,
	    sndreserve, rcvreserve);
	/* msleep() wait channel name for threads blocked on this client */
	CLNT_CONTROL(client, CLSET_WAITCHAN, "nfsreq");
	if (nmp->nm_flag & NFSMNT_INT)
		CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
	if (nmp->nm_flag & NFSMNT_RESVPORT)
		CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
	if ((nmp->nm_flag & NFSMNT_SOFT) != 0) {
		if (nmp->nm_sotype == SOCK_DGRAM)
			/*
			 * For UDP, the large timeout for a reconnect will
			 * be set to "nm_retry * nm_timeo / 2", so we only
			 * want to do 2 reconnect timeout retries.
			 */
			retries = 2;
		else
			retries = nmp->nm_retry;
	} else
		retries = INT_MAX;	/* hard mounts retry forever */
	CLNT_CONTROL(client, CLSET_RETRIES, &retries);

	/*
	 * For UDP, there are 2 timeouts:
	 * - CLSET_RETRY_TIMEOUT sets the initial timeout for the timer
	 *   that does a retransmit of an RPC request using the same socket
	 *   and xid. This is what you normally want to do, since NFS
	 *   servers depend on "same xid" for their Duplicate Request Cache.
	 * - timeout specified in CLNT_CALL_MBUF(), which specifies when
	 *   retransmits on the same socket should fail and a fresh socket
	 *   created. Each of these timeouts counts as one CLSET_RETRIES,
	 *   as set above.
	 * Set the initial retransmit timeout for UDP. This timeout doesn't
	 * exist for TCP and the following call just fails, which is ok.
	 */
	timo.tv_sec = nmp->nm_timeo / NFS_HZ;
	timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
	CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, &timo);

	mtx_lock(&nmp->nm_mtx);
	if (nmp->nm_client) {
		/*
		 * Someone else already connected.
		 */
		CLNT_RELEASE(client);
	} else {
		nmp->nm_client = client;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (!(nmp->nm_flag & NFSMNT_NOCONN)) {
		mtx_unlock(&nmp->nm_mtx);
		CLNT_CONTROL(client, CLSET_CONNECT, &one);
	} else {
		mtx_unlock(&nmp->nm_mtx);
	}

	/* Restore current thread's credentials. */
	td->td_ucred = origcred;

	mtx_lock(&nmp->nm_mtx);
	/* Initialize other non-zero congestion variables. */
	nfs_init_rtt(nmp);
	mtx_unlock(&nmp->nm_mtx);
	return (0);
}
/*
 * This is the simplified interface to the client rpc layer.
 * The client handle is not destroyed here and is reused for
 * the future calls to same prog, vers, host and nettype combination.
 *
 * The total time available is 25 seconds.
 *
 * The cached handle lives either in the main thread's global slot or
 * in thread-specific data, and is invalidated whenever the call fails
 * or the (pid, prog, vers, host, nettype) tuple changes.
 */
enum clnt_stat
rpc_call(const char *host,		/* host name */
    rpcprog_t prognum,			/* program number */
    rpcvers_t versnum,			/* version number */
    rpcproc_t procnum,			/* procedure number */
    xdrproc_t inproc, const char *in,
    xdrproc_t outproc,			/* in/out XDR procedures */
    char *out,				/* recv/send data */
    const char *nettype)		/* nettype */
{
	struct rpc_call_private *rcp = NULL;
	enum clnt_stat clnt_stat;
	struct timeval timeout, tottimeout;
	static thread_key_t rpc_call_key;
	int main_thread = 1;

	/* locate (or later create) this thread's private cache slot */
	if ((main_thread = thr_main())) {
		rcp = rpc_call_private_main;
	} else {
		/* double-checked key creation guarded by tsd_lock */
		if (rpc_call_key == 0) {
			mutex_lock(&tsd_lock);
			if (rpc_call_key == 0)
				thr_keycreate(&rpc_call_key,
				    rpc_call_destroy);
			mutex_unlock(&tsd_lock);
		}
		rcp = (struct rpc_call_private *)
		    thr_getspecific(rpc_call_key);
	}
	if (rcp == NULL) {
		/* first call on this thread - allocate the cache slot */
		rcp = malloc(sizeof (*rcp));
		if (rcp == NULL) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			return (rpc_createerr.cf_stat);
		}
		if (main_thread)
			rpc_call_private_main = rcp;
		else
			thr_setspecific(rpc_call_key, (void *) rcp);
		rcp->valid = 0;
		rcp->client = NULL;
	}
	if ((nettype == NULL) || (nettype[0] == 0))
		nettype = "netpath";
	/* reuse the cached handle only if every cache key still matches */
	if (!(rcp->valid && rcp->pid == getpid() &&
	    (rcp->prognum == prognum) &&
	    (rcp->versnum == versnum) &&
	    (!strcmp(rcp->host, host)) &&
	    (!strcmp(rcp->nettype, nettype)))) {
		int fd;

		rcp->valid = 0;
		if (rcp->client)
			CLNT_DESTROY(rcp->client);
		/*
		 * Using the first successful transport for that type
		 */
		rcp->client = clnt_create(host, prognum, versnum, nettype);
		rcp->pid = getpid();
		if (rcp->client == NULL) {
			return (rpc_createerr.cf_stat);
		}
		/*
		 * Set time outs for connectionless case.  Do it
		 * unconditionally.  Faster than doing a t_getinfo()
		 * and then doing the right thing.
		 */
		timeout.tv_usec = 0;
		timeout.tv_sec = 5;
		CLNT_CONTROL(rcp->client,
		    CLSET_RETRY_TIMEOUT, (char *)(void *)&timeout);
		if (CLNT_CONTROL(rcp->client, CLGET_FD,
		    (char *)(void *)&fd))
			_fcntl(fd, F_SETFD, 1);	/* make it "close on exec" */
		rcp->prognum = prognum;
		rcp->versnum = versnum;
		/* only cache host/nettype when they fit the fixed buffers */
		if ((strlen(host) < (size_t)MAXHOSTNAMELEN) &&
		    (strlen(nettype) < (size_t)NETIDLEN)) {
			strcpy(rcp->host, host);
			strcpy(rcp->nettype, nettype);
			rcp->valid = 1;
		} else {
			rcp->valid = 0;
		}
	}	/* else reuse old client */
	tottimeout.tv_sec = 25;
	tottimeout.tv_usec = 0;
	/*LINTED const castaway*/
	clnt_stat = CLNT_CALL(rcp->client, procnum, inproc, (char *) in,
	    outproc, out, tottimeout);
	/*
	 * if call failed, empty cache
	 */
	if (clnt_stat != RPC_SUCCESS)
		rcp->valid = 0;
	return (clnt_stat);
}
/*
 * Generic client creation:  returns client handle.
 * Default options are set, which the user can
 * change using the rpc equivalent of _ioctl()'s : clnt_control().
 * If fd is RPC_ANYFD, it will be opened using nconf.
 * It will be bound if not so.
 * If sizes are 0; appropriate defaults will be chosen.
 */
CLIENT *
clnt_tli_create(int fd, const struct netconfig *nconf,
    struct netbuf *svcaddr, rpcprog_t prog, rpcvers_t vers,
    uint sendsz, uint recvsz)
{
	CLIENT *cl;			/* client handle */
	bool_t madefd = FALSE;		/* whether fd opened here */
	long servtype;
	int one = 1;
	struct __rpc_sockinfo si;
	extern int __rpc_minfd;

	if (fd == RPC_ANYFD) {
		if (nconf == NULL) {
			rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
			return (NULL);
		}

		fd = __rpc_nconf2fd(nconf);

		if (fd == -1)
			goto err;
		/* keep the descriptor above the "reserved" low fds */
		if (fd < __rpc_minfd)
			fd = __rpc_raise_fd(fd);
		madefd = TRUE;
		servtype = nconf->nc_semantics;
		if (!__rpc_fd2sockinfo(fd, &si))
			goto err;

		bindresvport(fd, NULL);
	} else {
		/* caller-supplied fd: derive service semantics from it */
		if (!__rpc_fd2sockinfo(fd, &si))
			goto err;
		servtype = __rpc_socktype2seman(si.si_socktype);
		if (servtype == -1) {
			rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
			return (NULL);
		}
	}

	/* the fd's address family must match the server address */
	if (si.si_af != ((struct sockaddr *)svcaddr->buf)->sa_family) {
		rpc_createerr.cf_stat = RPC_UNKNOWNHOST;	/* XXX */
		goto err1;
	}

	/* dispatch on transport semantics */
	switch (servtype) {
	case NC_TPI_COTS:
		cl = clnt_vc_create(fd, svcaddr, prog, vers, sendsz, recvsz);
		break;
	case NC_TPI_COTS_ORD:
		if (nconf &&
		    ((strcmp(nconf->nc_protofmly, "inet") == 0) ||
		    (strcmp(nconf->nc_protofmly, "inet6") == 0))) {
			setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one,
			    sizeof (one));
		}
		cl = clnt_vc_create(fd, svcaddr, prog, vers, sendsz, recvsz);
		break;
	case NC_TPI_CLTS:
		cl = clnt_dg_create(fd, svcaddr, prog, vers, sendsz, recvsz);
		break;
	default:
		goto err;
	}

	if (cl == NULL)
		goto err1; /* borrow errors from clnt_dg/vc creates */
	if (nconf) {
		cl->cl_netid = strdup(nconf->nc_netid);
		cl->cl_tp = strdup(nconf->nc_device);
	} else {
		cl->cl_netid = "";
		cl->cl_tp = "";
	}
	if (madefd) {
		/* we own the fd, so have the handle close it on destroy */
		(void) CLNT_CONTROL(cl, CLSET_FD_CLOSE, NULL);
/*		(void) CLNT_CONTROL(cl, CLSET_POP_TIMOD, NULL);  */
	};

	return (cl);

err:
	/* system-level failure: record errno; falls through to cleanup */
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
err1:	if (madefd)
		(void)close(fd);
	return (NULL);
}
/*
 * Given the information in the callback info struct, create a client
 * handle that can be used by the server for its callback path.
 *
 * Maps the client's registered netid to a knetconfig, resolves the
 * transport device node, converts the universal address into a
 * sockaddr, and builds a kernel RPC handle for the NFSv4 callback
 * program.  Returns NULL (with cb_lock released) on any failure.
 *
 * Fix: the previous version called CLNT_CONTROL() on `ch' even when
 * clnt_tli_kcreate() had failed and `ch' was NULL, dereferencing a
 * NULL client handle.  The control call is now guarded.
 */
static CLIENT *
rfs4_cbch_init(rfs4_cbinfo_t *cbp)
{
	struct knetconfig knc;
	vnode_t *vp;
	struct sockaddr_in addr4;
	struct sockaddr_in6 addr6;
	void *addr, *taddr;
	in_port_t *pp;
	int af;
	char *devnam;
	struct netbuf nb;
	int size;
	CLIENT *ch = NULL;
	int useresvport = 0;

	mutex_enter(cbp->cb_lock);

	if (cbp->cb_callback.cb_location.r_netid == NULL ||
	    cbp->cb_callback.cb_location.r_addr == NULL) {
		goto cb_init_out;
	}

	/* translate the registered netid into a kernel netconfig */
	if (strcmp(cbp->cb_callback.cb_location.r_netid, "tcp") == 0) {
		knc.knc_semantics = NC_TPI_COTS;
		knc.knc_protofmly = "inet";
		knc.knc_proto = "tcp";
		devnam = "/dev/tcp";
		af = AF_INET;
	} else if (strcmp(cbp->cb_callback.cb_location.r_netid, "udp")
	    == 0) {
		knc.knc_semantics = NC_TPI_CLTS;
		knc.knc_protofmly = "inet";
		knc.knc_proto = "udp";
		devnam = "/dev/udp";
		af = AF_INET;
	} else if (strcmp(cbp->cb_callback.cb_location.r_netid, "tcp6")
	    == 0) {
		knc.knc_semantics = NC_TPI_COTS;
		knc.knc_protofmly = "inet6";
		knc.knc_proto = "tcp";
		devnam = "/dev/tcp6";
		af = AF_INET6;
	} else if (strcmp(cbp->cb_callback.cb_location.r_netid, "udp6")
	    == 0) {
		knc.knc_semantics = NC_TPI_CLTS;
		knc.knc_protofmly = "inet6";
		knc.knc_proto = "udp";
		devnam = "/dev/udp6";
		af = AF_INET6;
	} else {
		/* unsupported transport */
		goto cb_init_out;
	}

	/* resolve the transport device and make sure it's a char device */
	if (lookupname(devnam, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp) != 0) {
		goto cb_init_out;
	}

	if (vp->v_type != VCHR) {
		VN_RELE(vp);
		goto cb_init_out;
	}

	knc.knc_rdev = vp->v_rdev;

	VN_RELE(vp);

	/* prepare a sockaddr of the right family for uaddr conversion */
	if (af == AF_INET) {
		size = sizeof (addr4);
		bzero(&addr4, size);
		addr4.sin_family = (sa_family_t)af;
		addr = &addr4.sin_addr;
		pp = &addr4.sin_port;
		taddr = &addr4;
	} else /* AF_INET6 */ {
		size = sizeof (addr6);
		bzero(&addr6, size);
		addr6.sin6_family = (sa_family_t)af;
		addr = &addr6.sin6_addr;
		pp = &addr6.sin6_port;
		taddr = &addr6;
	}

	/* parse the universal address ("h1.h2.h3.h4.p1.p2") into addr/port */
	if (uaddr2sockaddr(af,
	    cbp->cb_callback.cb_location.r_addr, addr, pp)) {
		goto cb_init_out;
	}

	nb.maxlen = nb.len = size;
	nb.buf = (char *)taddr;

	if (clnt_tli_kcreate(&knc, &nb, cbp->cb_callback.cb_program,
	    NFS_CB, 0, 0, curthread->t_cred, &ch)) {
		ch = NULL;
	}

	/* turn off reserved port usage (only if we actually got a handle) */
	if (ch != NULL) {
		(void) CLNT_CONTROL(ch, CLSET_BINDRESVPORT,
		    (char *)&useresvport);
	}

cb_init_out:
	mutex_exit(cbp->cb_lock);
	return (ch);
}
/*
 * A common clnt create routine
 *
 * Opens (or reuses) a TLI endpoint for the given IP transport, resolves
 * the server port through the portmapper if needed, and hands off to
 * clnt_tli_create().  rpcsoc_lock serializes the whole sequence except
 * around pmap_getport(), which re-enters this code.
 */
static CLIENT *
clnt_com_create(struct sockaddr_in *raddr, rpcprog_t prog, rpcvers_t vers,
    int *sockp, uint_t sendsz, uint_t recvsz, char *tp)
{
	CLIENT *cl;
	int madefd = FALSE;		/* did we open the endpoint? */
	int fd = *sockp;
	struct t_info tinfo;
	struct netconfig *nconf;
	int port;
	struct netbuf bindaddr;
	bool_t locked = TRUE;		/* do we still hold rpcsoc_lock? */

	(void) mutex_lock(&rpcsoc_lock);
	if ((nconf = __rpc_getconfip(tp)) == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
		(void) mutex_unlock(&rpcsoc_lock);
		return (NULL);
	}
	if (fd == RPC_ANYSOCK) {
		fd = t_open(nconf->nc_device, O_RDWR, &tinfo);
		if (fd == -1)
			goto syserror;
		RPC_RAISEFD(fd);
		madefd = TRUE;
	} else {
		if (t_getinfo(fd, &tinfo) == -1)
			goto syserror;
	}

	if (raddr->sin_port == 0) {
		uint_t proto;
		ushort_t sport;

		/* pmap_getport is recursive */
		(void) mutex_unlock(&rpcsoc_lock);
		proto = strcmp(tp, "udp") == 0 ? IPPROTO_UDP : IPPROTO_TCP;
		sport = pmap_getport(raddr, prog, vers, proto);
		if (sport == 0) {
			/* lock already dropped; don't unlock again in err */
			locked = FALSE;
			goto err;
		}
		raddr->sin_port = htons(sport);
		/* pmap_getport is recursive */
		(void) mutex_lock(&rpcsoc_lock);
	}

	/* Transform sockaddr_in to netbuf */
	bindaddr.maxlen = bindaddr.len = __rpc_get_a_size(tinfo.addr);
	bindaddr.buf = (char *)raddr;

	/* best-effort reserved-port bind; failure is not fatal */
	(void) __rpc_bindresvport(fd, NULL, &port, 0);
	cl = clnt_tli_create(fd, nconf, &bindaddr, prog, vers,
	    sendsz, recvsz);
	if (cl) {
		if (madefd == TRUE) {
			/*
			 * The fd should be closed while destroying the handle.
			 */
			(void) CLNT_CONTROL(cl, CLSET_FD_CLOSE, NULL);
			*sockp = fd;
		}
		(void) freenetconfigent(nconf);
		(void) mutex_unlock(&rpcsoc_lock);
		return (cl);
	}
	goto err;

syserror:
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;
	rpc_createerr.cf_error.re_terrno = t_errno;

err:	if (madefd == TRUE)
		(void) t_close(fd);
	(void) freenetconfigent(nconf);
	if (locked == TRUE)
		(void) mutex_unlock(&rpcsoc_lock);
	return (NULL);
}
/**
 * FSAL_proxy_setclientid_force:
 * Client ID negociation
 *
 * Performs the two-step NFSv4 SETCLIENTID / SETCLIENTID_CONFIRM
 * exchange under fsal_clientid_mutex_renew and stores the confirmed
 * clientid both globally (fsal_clientid) and in the context.
 *
 * \param p_context (input):
 *        Authentication context for the operation (user,...).
 *
 * \return Major error codes :
 *      - ERR_FSAL_NO_ERROR (no error)
 *      - ERR_FSAL_FAULT (a NULL pointer was passed as mandatory argument)
 *      - Other error codes can be returned :
 *        ERR_FSAL_ACCESS, ERR_FSAL_IO, ...
 */
static fsal_status_t FSAL_proxy_setclientid_force(proxyfsal_op_context_t * p_context)
{
  int rc;

  COMPOUND4args argnfs4;
  COMPOUND4res resnfs4;

#define FSAL_CLIENTID_NB_OP_ALLOC 1
  nfs_argop4 argoparray[FSAL_CLIENTID_NB_OP_ALLOC];
  nfs_resop4 resoparray[FSAL_CLIENTID_NB_OP_ALLOC];

  nfs_client_id4 nfsclientid;
  cb_client4 cbproxy;
  char clientid_name[MAXNAMLEN+1];
  char cbaddr[MAXNAMLEN+1];
  char cbnetid[MAXNAMLEN+1];
  /* NOTE(review): `timeout' is initialized but never used below -
   * presumably left over from an earlier direct clnt_call(); confirm. */
  struct timeval timeout = TIMEOUTRPC;
  int fd;
  struct sockaddr_in sin;
  socklen_t l = sizeof(sin);

  LogEvent( COMPONENT_FSAL, "Negotiating a new ClientId with the remote server" ) ;

  /* sanity checks. */
  if(!p_context)
    ReturnCode(ERR_FSAL_FAULT, 0);

  /* the local socket address is used to build a unique clientid string */
  if(!CLNT_CONTROL(p_context->rpc_client, CLGET_FD, &fd))
    ReturnCode(ERR_FSAL_FAULT, EBADF);
  /* NOTE(review): &sin is a sockaddr_in*, getsockname() expects
   * struct sockaddr* - relies on implicit conversion; confirm it
   * compiles cleanly on all targets. */
  if(getsockname(fd, &sin, &l))
    ReturnCode(ERR_FSAL_FAULT, errno);

  /* Client id negociation is to be done only one time for the whole FSAL */
  P(fsal_clientid_mutex_renew);

  /* Setup results structures */
  argnfs4.argarray.argarray_val = argoparray;
  resnfs4.resarray.resarray_val = resoparray;
  argnfs4.minorversion = 0;
  argnfs4.tag.utf8string_val = NULL;
  argnfs4.tag.utf8string_len = 0;
  argnfs4.argarray.argarray_len = 0;

  /* clientid string: "<local-ip>(<pid>) - GANESHA NFSv4 Proxy"
   * (cbaddr is borrowed as scratch for inet_ntop, then overwritten) */
  snprintf(clientid_name, MAXNAMLEN, "%s(%d) - GANESHA NFSv4 Proxy",
           inet_ntop(AF_INET, &sin.sin_addr, cbaddr, sizeof(cbaddr)),
           getpid());

  nfsclientid.id.id_len = strlen(clientid_name);
  nfsclientid.id.id_val = clientid_name;
  /* verifier derived from server boot time */
  snprintf(nfsclientid.verifier, NFS4_VERIFIER_SIZE, "%x",
           (int)ServerBootTime);

  /* dummy callback location - callbacks are not actually served */
  cbproxy.cb_program = 0;
  strncpy(cbnetid, "tcp", MAXNAMLEN);
  strncpy(cbaddr, "127.0.0.1", MAXNAMLEN);
  cbproxy.cb_location.r_netid = cbnetid;
  cbproxy.cb_location.r_addr = cbaddr;

  COMPOUNDV4_ARG_ADD_OP_SETCLIENTID(argnfs4, nfsclientid, cbproxy);

  TakeTokenFSCall();

  /* negotiate as root */
  p_context->credential.user = 0;
  p_context->credential.group = 0;
  p_context->credential.nbgroups = 0;

  /* Call the NFSv4 function */
  rc = COMPOUNDV4_EXECUTE_SIMPLE(p_context, argnfs4, resnfs4);
  if(rc != RPC_SUCCESS)
    {
      ReleaseTokenFSCall();

      V(fsal_clientid_mutex_renew);

      ReturnCode(ERR_FSAL_IO, rc);
    }

  ReleaseTokenFSCall();

  if(resnfs4.status != NFS4_OK)
    {
      V(fsal_clientid_mutex_renew);
      return fsal_internal_proxy_error_convert(resnfs4.status,
                                               INDEX_FSAL_InitClientContext);
    }

  /* Step 2: Confirm the client id */
  argnfs4.minorversion = 0;
  argnfs4.tag.utf8string_val = NULL;
  argnfs4.tag.utf8string_len = 0;
  argnfs4.argarray.argarray_len = 0;

  /* echo back the clientid and confirm verifier from step 1's reply */
  argnfs4.argarray.argarray_val[0].argop = NFS4_OP_SETCLIENTID_CONFIRM;
  argnfs4.argarray.argarray_val[0].nfs_argop4_u.opsetclientid_confirm.clientid =
      resnfs4.resarray.resarray_val[0].nfs_resop4_u.opsetclientid.SETCLIENTID4res_u.
      resok4.clientid;
  memcpy((char *)argnfs4.argarray.argarray_val[0].nfs_argop4_u.opsetclientid_confirm.
         setclientid_confirm,
         (char *)resnfs4.resarray.resarray_val[0].nfs_resop4_u.opsetclientid.
         SETCLIENTID4res_u.resok4.setclientid_confirm,
         NFS4_VERIFIER_SIZE);
  argnfs4.argarray.argarray_len = 1;

  /* Call the NFSv4 function */
  TakeTokenFSCall();
  rc = COMPOUNDV4_EXECUTE_SIMPLE(p_context, argnfs4, resnfs4);
  if(rc != RPC_SUCCESS)
    {
      ReleaseTokenFSCall();

      V(fsal_clientid_mutex_renew);

      ReturnCode(ERR_FSAL_IO, rc);
    }

  ReleaseTokenFSCall();

  if(resnfs4.status != NFS4_OK)
    {
      V(fsal_clientid_mutex_renew);
      return fsal_internal_proxy_error_convert(resnfs4.status,
                                               INDEX_FSAL_InitClientContext);
    }

  /* Keep the confirmed client id */
  fsal_clientid =
      argnfs4.argarray.argarray_val[0].nfs_argop4_u.opsetclientid_confirm.clientid;
  clientid_renewed = time( NULL ) ;

  V(fsal_clientid_mutex_renew);

  p_context->clientid = fsal_clientid;
  p_context->last_lease_renewal = 0;    /* Needs to be renewed */

  ReturnCode(ERR_FSAL_NO_ERROR, 0);
}                               /* FSAL_proxy_setclientid_force */
/*
 * CLNT_CONTROL() on the client returned by clnt_reconnect_create() must
 * always be called before CLNT_CALL_MBUF() by a single thread only.
 *
 * Settings are stored in the rc_data wrapper so they survive reconnects;
 * where appropriate they are also pushed down to the currently-attached
 * underlying client (rc_client), if one exists.
 */
static bool_t
clnt_reconnect_control(CLIENT *cl, u_int request, void *info)
{
	struct rc_data *rc = (struct rc_data *)cl->cl_private;
	SVCXPRT *xprt;

	/* every supported request reads or writes through info */
	if (info == NULL) {
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		rc->rc_timeout = *(struct timeval *)info;
		if (rc->rc_client)
			CLNT_CONTROL(rc->rc_client, request, info);
		break;

	case CLGET_TIMEOUT:
		*(struct timeval *)info = rc->rc_timeout;
		break;

	case CLSET_RETRY_TIMEOUT:
		rc->rc_retry = *(struct timeval *)info;
		if (rc->rc_client)
			CLNT_CONTROL(rc->rc_client, request, info);
		break;

	case CLGET_RETRY_TIMEOUT:
		*(struct timeval *)info = rc->rc_retry;
		break;

	case CLGET_VERS:
		*(uint32_t *)info = rc->rc_vers;
		break;

	case CLSET_VERS:
		rc->rc_vers = *(uint32_t *) info;
		if (rc->rc_client)
			CLNT_CONTROL(rc->rc_client, CLSET_VERS, info);
		break;

	case CLGET_PROG:
		*(uint32_t *)info = rc->rc_prog;
		break;

	case CLSET_PROG:
		rc->rc_prog = *(uint32_t *) info;
		if (rc->rc_client)
			CLNT_CONTROL(rc->rc_client, request, info);
		break;

	case CLSET_WAITCHAN:
		rc->rc_waitchan = (char *)info;
		if (rc->rc_client)
			CLNT_CONTROL(rc->rc_client, request, info);
		break;

	case CLGET_WAITCHAN:
		*(const char **) info = rc->rc_waitchan;
		break;

	case CLSET_INTERRUPTIBLE:
		rc->rc_intr = *(int *) info;
		if (rc->rc_client)
			CLNT_CONTROL(rc->rc_client, request, info);
		break;

	case CLGET_INTERRUPTIBLE:
		*(int *) info = rc->rc_intr;
		break;

	/* retries/privport only affect future (re)connects - wrapper only */
	case CLSET_RETRIES:
		rc->rc_retries = *(int *) info;
		break;

	case CLGET_RETRIES:
		*(int *) info = rc->rc_retries;
		break;

	case CLSET_PRIVPORT:
		rc->rc_privport = *(int *) info;
		break;

	case CLGET_PRIVPORT:
		*(int *) info = rc->rc_privport;
		break;

	case CLSET_BACKCHANNEL:
		/* take a reference on the backchannel xprt and register it */
		xprt = (SVCXPRT *)info;
		SVC_ACQUIRE(xprt);
		xprt_register(xprt);
		rc->rc_backchannel = info;
		break;

	default:
		return (FALSE);
	}

	return (TRUE);
}
/*
 * (Re)establish the underlying transport client for a reconnect-style
 * CLIENT.  rc_connecting serializes concurrent connection attempts:
 * waiters sleep on the rc_data until the winner finishes, and only one
 * thread ever assigns rc_client.  The socket is created with the
 * credentials saved at create time, not the current thread's.
 */
static enum clnt_stat
clnt_reconnect_connect(CLIENT *cl)
{
	struct thread *td = curthread;
	struct rc_data *rc = (struct rc_data *)cl->cl_private;
	struct socket *so;
	enum clnt_stat stat;
	int error;
	int one = 1;
	struct ucred *oldcred;
	CLIENT *newclient = NULL;

	mtx_lock(&rc->rc_lock);
	/* wait for any in-flight connect attempt to finish */
	while (rc->rc_connecting) {
		error = msleep(rc, &rc->rc_lock,
		    rc->rc_intr ? PCATCH : 0, "rpcrecon", 0);
		if (error) {
			mtx_unlock(&rc->rc_lock);
			return (RPC_INTR);
		}
	}
	if (rc->rc_closed) {
		mtx_unlock(&rc->rc_lock);
		return (RPC_CANTSEND);
	}
	if (rc->rc_client) {
		/* someone else connected while we slept */
		mtx_unlock(&rc->rc_lock);
		return (RPC_SUCCESS);
	}

	/*
	 * My turn to attempt a connect. The rc_connecting variable
	 * serializes the following code sequence, so it is guaranteed
	 * that rc_client will still be NULL after it is re-locked below,
	 * since that is the only place it is set non-NULL.
	 */
	rc->rc_connecting = TRUE;
	mtx_unlock(&rc->rc_lock);

	/* use the creating process's credentials for the socket */
	oldcred = td->td_ucred;
	td->td_ucred = rc->rc_ucred;
	so = __rpc_nconf2socket(rc->rc_nconf);
	if (!so) {
		stat = rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		td->td_ucred = oldcred;
		goto out;
	}

	if (rc->rc_privport)
		bindresvport(so, NULL);

	/* datagram vs virtual-circuit transport */
	if (rc->rc_nconf->nc_semantics == NC_TPI_CLTS)
		newclient = clnt_dg_create(so,
		    (struct sockaddr *) &rc->rc_addr, rc->rc_prog,
		    rc->rc_vers, rc->rc_sendsz, rc->rc_recvsz);
	else
		newclient = clnt_vc_create(so,
		    (struct sockaddr *) &rc->rc_addr, rc->rc_prog,
		    rc->rc_vers, rc->rc_sendsz, rc->rc_recvsz, rc->rc_intr);
	td->td_ucred = oldcred;

	if (!newclient) {
		soclose(so);
		rc->rc_err = rpc_createerr.cf_error;
		stat = rpc_createerr.cf_stat;
		goto out;
	}

	/* replay the wrapper's cached settings onto the new client */
	CLNT_CONTROL(newclient, CLSET_FD_CLOSE, 0);
	CLNT_CONTROL(newclient, CLSET_CONNECT, &one);
	CLNT_CONTROL(newclient, CLSET_TIMEOUT, &rc->rc_timeout);
	CLNT_CONTROL(newclient, CLSET_RETRY_TIMEOUT, &rc->rc_retry);
	CLNT_CONTROL(newclient, CLSET_WAITCHAN, rc->rc_waitchan);
	CLNT_CONTROL(newclient, CLSET_INTERRUPTIBLE, &rc->rc_intr);
	if (rc->rc_backchannel != NULL)
		CLNT_CONTROL(newclient, CLSET_BACKCHANNEL, rc->rc_backchannel);
	stat = RPC_SUCCESS;

out:
	mtx_lock(&rc->rc_lock);
	KASSERT(rc->rc_client == NULL, ("rc_client not null"));
	if (!rc->rc_closed) {
		rc->rc_client = newclient;
		newclient = NULL;
	}
	rc->rc_connecting = FALSE;
	wakeup(rc);
	mtx_unlock(&rc->rc_lock);

	if (newclient) {
		/*
		 * It has been closed, so discard the new client.
		 * nb: clnt_[dg|vc]_close()/clnt_[dg|vc]_destroy() cannot
		 * be called with the rc_lock mutex held, since they may
		 * msleep() while holding a different mutex.
		 */
		CLNT_CLOSE(newclient);
		CLNT_RELEASE(newclient);
	}

	return (stat);
}
/* get the best possible NFS version for a host and transport */
/*
 * Creates a client at high_version and pings it with NULLPROC; on
 * RPC_PROGVERSMISMATCH the server's advertised range narrows
 * [low_version, high_version] until a version succeeds or the ranges
 * become disjoint.  On success *out_version is set and the handle is
 * returned; otherwise NULL, with rpc_createerr filled in.
 */
static CLIENT *
amu_clnt_create_best_vers(const char *hostname, u_long program,
    u_long *out_version, u_long low_version, u_long high_version,
    const char *nettype)
{
  CLIENT *clnt;
  enum clnt_stat rpc_stat;
  struct rpc_err rpcerr;
  struct timeval tv;
  u_long lo, hi;

  /* 3 seconds is more than enough for a LAN */
  tv.tv_sec = 3;
  tv.tv_usec = 0;

#ifdef HAVE_CLNT_CREATE_TIMED
  clnt = clnt_create_timed(hostname, program, high_version, nettype, &tv);
  if (!clnt) {
    plog(XLOG_INFO, "failed to create RPC client to \"%s\" after %d seconds",
	 hostname, (int) tv.tv_sec);
    return NULL;
  }
#else /* not HAVE_CLNT_CREATE_TIMED */
  /* Solaris 2.3 and earlier didn't have clnt_create_timed() */
  clnt = clnt_create(hostname, program, high_version, nettype);
  if (!clnt) {
    plog(XLOG_INFO, "failed to create RPC client to \"%s\"", hostname);
    return NULL;
  }
#endif /* not HAVE_CLNT_CREATE_TIMED */

  /* probe the highest version first */
  rpc_stat = clnt_call(clnt, NULLPROC, (XDRPROC_T_TYPE) xdr_void, NULL,
		       (XDRPROC_T_TYPE) xdr_void, NULL, tv);
  if (rpc_stat == RPC_SUCCESS) {
    *out_version = high_version;
    return clnt;
  }
  while (low_version < high_version) {
    if (rpc_stat != RPC_PROGVERSMISMATCH)
      break;
    /* the mismatch error carries the server's supported range */
    clnt_geterr(clnt, &rpcerr);
    lo = rpcerr.re_vers.low;
    hi = rpcerr.re_vers.high;
    if (hi < high_version)
      high_version = hi;
    else
      high_version--;
    if (lo > low_version)
      low_version = lo;
    if (low_version > high_version)
      goto out;			/* no overlap - give up */

    /*
     * NOTE(review): CLSET_VERS traditionally expects an rpcvers_t
     * (u_int32); passing &high_version (u_long) assumes matching
     * width - confirm on LP64 platforms.
     */
    CLNT_CONTROL(clnt, CLSET_VERS, (char *) &high_version);
    rpc_stat = clnt_call(clnt, NULLPROC, (XDRPROC_T_TYPE) xdr_void, NULL,
			 (XDRPROC_T_TYPE) xdr_void, NULL, tv);
    if (rpc_stat == RPC_SUCCESS) {
      *out_version = high_version;
      return clnt;
    }
  }
  clnt_geterr(clnt, &rpcerr);

out:
  /* record the failure for the caller via rpc_createerr */
  rpc_createerr.cf_stat = rpc_stat;
  rpc_createerr.cf_error = rpcerr;
  clnt_destroy(clnt);
  return NULL;
}
/*
 * A common clnt create routine
 *
 * Opens (or reuses) a socket for the given IP transport, resolves the
 * server port through the portmapper when sin_port is 0, and hands off
 * to clnt_tli_create().  rpcsoc_lock serializes the sequence except
 * around pmap_getport(), which re-enters this code.
 *
 * Fixes over the previous version:
 *  - when pmap_getport() failed, `goto err' ran mutex_unlock() on a
 *    mutex that had already been unlocked before the portmapper call
 *    (undefined behavior); a `locked' flag now tracks lock ownership,
 *    mirroring the companion TLI implementation;
 *  - `bindaddr.buf = raddr' assigned a struct sockaddr_in * to the
 *    netbuf's char * member without a cast (incompatible pointer
 *    types); the assignment is now properly cast.
 */
static CLIENT *
clnt_com_create(struct sockaddr_in *raddr, rpcprog_t prog, rpcvers_t vers,
    int *sockp, u_int sendsz, u_int recvsz, const char *tp)
{
	CLIENT *cl;
	int madefd = FALSE;	/* did we open the socket ourselves? */
	int fd;
	struct netconfig *nconf;
	struct netbuf bindaddr;
	bool_t locked = TRUE;	/* do we currently hold rpcsoc_lock? */

	_DIAGASSERT(raddr != NULL);
	_DIAGASSERT(sockp != NULL);
	_DIAGASSERT(tp != NULL);

	fd = *sockp;

	mutex_lock(&rpcsoc_lock);
	if ((nconf = __rpc_getconfip(tp)) == NULL) {
		rpc_createerr.cf_stat = RPC_UNKNOWNPROTO;
		mutex_unlock(&rpcsoc_lock);
		return (NULL);
	}
	if (fd == RPC_ANYSOCK) {
		fd = __rpc_nconf2fd(nconf);
		if (fd == -1)
			goto syserror;
		madefd = TRUE;
	}

	if (raddr->sin_port == 0) {
		u_int proto;
		u_short sport;

		mutex_unlock(&rpcsoc_lock);	/* pmap_getport is recursive */
		proto = strcmp(tp, "udp") == 0 ? IPPROTO_UDP : IPPROTO_TCP;
		sport = pmap_getport(raddr, (u_long)prog, (u_long)vers,
		    proto);
		if (sport == 0) {
			/* lock already dropped - skip the unlock in err */
			locked = FALSE;
			goto err;
		}
		raddr->sin_port = htons(sport);
		mutex_lock(&rpcsoc_lock);	/* pmap_getport is recursive */
	}

	/* Transform sockaddr_in to netbuf */
	bindaddr.maxlen = bindaddr.len = sizeof (struct sockaddr_in);
	bindaddr.buf = (char *)(void *)raddr;

	/* best-effort reserved-port bind; failure is not fatal */
	(void)bindresvport(fd, NULL);
	cl = clnt_tli_create(fd, nconf, &bindaddr, prog, vers,
	    sendsz, recvsz);
	if (cl) {
		if (madefd == TRUE) {
			/*
			 * The fd should be closed while destroying the handle.
			 */
			(void) CLNT_CONTROL(cl, CLSET_FD_CLOSE, NULL);
			*sockp = fd;
		}
		(void) freenetconfigent(nconf);
		mutex_unlock(&rpcsoc_lock);
		return (cl);
	}
	goto err;

syserror:
	rpc_createerr.cf_stat = RPC_SYSTEMERROR;
	rpc_createerr.cf_error.re_errno = errno;

err:
	if (madefd == TRUE)
		(void)close(fd);
	(void) freenetconfigent(nconf);
	if (locked == TRUE)
		mutex_unlock(&rpcsoc_lock);
	return (NULL);
}
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 *
 * Builds an auto-reconnecting RPC client for the mount, sized from the
 * sysctl-driven buffer reservation, and installs it as nmp->nm_client
 * unless another thread won the race.
 */
int
nfs_connect(struct nfsmount *nmp)
{
	int rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct ucred *origcred;
	struct thread *td = curthread;
	CLIENT *client;
	struct netconfig *nconf;
	rpcvers_t vers;
	int one = 1, retries;

	/*
	 * We need to establish the socket using the credentials of
	 * the mountpoint.  Some parts of this process (such as
	 * sobind() and soconnect()) will use the curent thread's
	 * credential instead of the socket credential.  To work
	 * around this, temporarily change the current thread's
	 * credential to that of the mountpoint.
	 *
	 * XXX: It would be better to explicitly pass the correct
	 * credential to sobind() and soconnect().
	 */
	origcred = td->td_ucred;
	td->td_ucred = nmp->nm_mountp->mnt_cred;
	saddr = nmp->nm_nam;

	/* pick the RPC version from the mount flags (default NFSv2) */
	vers = NFS_VER2;
	if (nmp->nm_flag & NFSMNT_NFSV3)
		vers = NFS_VER3;
	else if (nmp->nm_flag & NFSMNT_NFSV4)
		vers = NFS_VER4;

	/*
	 * Select netconfig by address family and socket type.
	 * NOTE(review): getnetconfigent() result is not NULL-checked -
	 * presumably cannot fail for these fixed netids; confirm.
	 */
	if (saddr->sa_family == AF_INET)
		if (nmp->nm_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp");
		else
			nconf = getnetconfigent("tcp");
	else
		if (nmp->nm_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp6");
		else
			nconf = getnetconfigent("tcp6");

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;
	mtx_lock(&nmp->nm_mtx);
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * pktscale;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		/* stream transports carry a 4-byte record mark per packet */
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * pktscale;
	}
	mtx_unlock(&nmp->nm_mtx);

	client = clnt_reconnect_create(nconf, saddr, NFS_PROG, vers,
	    sndreserve, rcvreserve);
	/* msleep() wait channel name for threads blocked on this client */
	CLNT_CONTROL(client, CLSET_WAITCHAN, "nfsreq");
	if (nmp->nm_flag & NFSMNT_INT)
		CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
	if (nmp->nm_flag & NFSMNT_RESVPORT)
		CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
	if (nmp->nm_flag & NFSMNT_SOFT)
		retries = nmp->nm_retry;
	else
		retries = INT_MAX;	/* hard mounts retry forever */
	CLNT_CONTROL(client, CLSET_RETRIES, &retries);

	mtx_lock(&nmp->nm_mtx);
	if (nmp->nm_client) {
		/*
		 * Someone else already connected.
		 */
		CLNT_RELEASE(client);
	} else {
		nmp->nm_client = client;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (!(nmp->nm_flag & NFSMNT_NOCONN)) {
		mtx_unlock(&nmp->nm_mtx);
		CLNT_CONTROL(client, CLSET_CONNECT, &one);
	} else {
		mtx_unlock(&nmp->nm_mtx);
	}

	/* Restore current thread's credentials. */
	td->td_ucred = origcred;

	mtx_lock(&nmp->nm_mtx);
	/* Initialize other non-zero congestion variables. */
	nfs_init_rtt(nmp);
	mtx_unlock(&nmp->nm_mtx);
	return (0);
}
/* * This is the simplified interface to the client rpc layer. * The client handle is not destroyed here and is reused for * the future calls to same prog, vers, host and nettype combination. * * The total time available is 25 seconds. */ enum clnt_stat rpc_call(const char *host, const rpcprog_t prognum, const rpcvers_t versnum, const rpcproc_t procnum, const xdrproc_t inproc, const char *in, const xdrproc_t outproc, char *out, const char *netclass) { struct rpc_call_private *rcp; enum clnt_stat clnt_stat; struct timeval timeout, tottimeout; static pthread_key_t rpc_call_key = PTHREAD_ONCE_KEY_NP; char nettype_array[NETIDLEN]; char *nettype = &nettype_array[0]; if (netclass == NULL) nettype = NULL; else { size_t len = strlen(netclass); if (len >= sizeof (nettype_array)) { rpc_createerr.cf_stat = RPC_UNKNOWNPROTO; return (rpc_createerr.cf_stat); } (void) strcpy(nettype, netclass); } rcp = thr_get_storage(&rpc_call_key, sizeof (*rcp), rpc_call_destroy); if (rcp == NULL) { rpc_createerr.cf_stat = RPC_SYSTEMERROR; rpc_createerr.cf_error.re_errno = errno; return (rpc_createerr.cf_stat); } if ((nettype == NULL) || (nettype[0] == NULL)) nettype = "netpath"; if (!(rcp->valid && rcp->pid == getpid() && rcp->prognum == prognum && rcp->versnum == versnum && strcmp(rcp->host, host) == 0 && strcmp(rcp->nettype, nettype) == 0)) { int fd; rcp->valid = 0; if (rcp->client) CLNT_DESTROY(rcp->client); /* * Using the first successful transport for that type */ rcp->client = clnt_create(host, prognum, versnum, nettype); rcp->pid = getpid(); if (rcp->client == NULL) return (rpc_createerr.cf_stat); /* * Set time outs for connectionless case. Do it * unconditionally. Faster than doing a t_getinfo() * and then doing the right thing. 
*/ timeout.tv_usec = 0; timeout.tv_sec = 5; (void) CLNT_CONTROL(rcp->client, CLSET_RETRY_TIMEOUT, (char *)&timeout); if (CLNT_CONTROL(rcp->client, CLGET_FD, (char *)&fd)) (void) fcntl(fd, F_SETFD, 1); /* close on exec */ rcp->prognum = prognum; rcp->versnum = versnum; if ((strlen(host) < (size_t)MAXHOSTNAMELEN) && (strlen(nettype) < (size_t)NETIDLEN)) { (void) strcpy(rcp->host, host); (void) strcpy(rcp->nettype, nettype); rcp->valid = 1; } else { rcp->valid = 0; } } /* else reuse old client */ tottimeout.tv_sec = 25; tottimeout.tv_usec = 0; clnt_stat = CLNT_CALL(rcp->client, procnum, inproc, (char *)in, outproc, out, tottimeout); /* * if call failed, empty cache */ if (clnt_stat != RPC_SUCCESS) rcp->valid = 0; return (clnt_stat); }