/*
 * Return TRUE if 'o1' and 'o2' are the same, FALSE otherwise.
 * We perform the comparison by XDR encoding the objects, and then
 * checking the XDR buffers for equality. However, we don't want to
 * include the zo_oid (i.e., ctime and mtime) in the comparison.
 */
bool_t
sameNisPlusObj(nis_object *o1, nis_object *o2)
{
	XDR		x1, x2;
	void		*b1, *b2;
	int		l1, l2;
	bool_t		ret;
	nis_object	obj1, obj2;
	char		*myself = "sameNisPlusObj";

	/* Same pointer => trivially equal; exactly one NULL => unequal. */
	if (o1 == o2)
		return (TRUE);
	else if (o1 == 0 || o2 == 0)
		return (FALSE);

	/*
	 * We want to exclude the zo_oid from the comparison. In order
	 * not to modify the objects (even very briefly), we do this by
	 * making copies (nis_object itself only, not the underlying
	 * structures accessed through pointers), and setting the zo_oid
	 * to zero in the copies.
	 */
	obj1 = *o1;
	obj2 = *o2;
	obj1.zo_oid.ctime = obj1.zo_oid.mtime = 0;
	obj2.zo_oid.ctime = obj2.zo_oid.mtime = 0;

	/* Different encoded sizes => the objects cannot be equal. */
	l1 = xdr_sizeof(xdr_nis_object, &obj1);
	l2 = xdr_sizeof(xdr_nis_object, &obj2);
	if (l1 != l2)
		return (FALSE);

	/*
	 * am() is a project allocator (takes the caller name for
	 * diagnostics); sfree() below tolerates NULL, so a single
	 * cleanup path covers partial allocation failure.
	 */
	b1 = am(myself, l1);
	b2 = am(myself, l2);
	if (b1 == 0 || b2 == 0) {
		sfree(b1);
		sfree(b2);
		return (FALSE);
	}

	xdrmem_create(&x1, (char *)b1, l1, XDR_ENCODE);
	xdrmem_create(&x2, (char *)b2, l2, XDR_ENCODE);

	/* Encode both copies and compare the raw XDR bytes. */
	if (xdr_nis_object(&x1, &obj1) && xdr_nis_object(&x2, &obj2)) {
		ret = (memcmp(b1, b2, l1) == 0);
	} else {
		logmsg(MSG_NOTIMECHECK, LOG_WARNING,
			"%s: xdr_nis_object() error", myself);
		ret = FALSE;
	}

	sfree(b1);
	sfree(b2);

	return (ret);
}
/*
 * Deep-copy '*src' into '*dst' by round-tripping through XDR: encode
 * 'src' into a scratch buffer with 'proc', then decode that buffer
 * back out into 'dst'.  Returns 'dst' on success, NULL on allocation
 * or XDR failure.
 */
char *
__nis_xdr_dup(xdrproc_t proc, char *src, char *dst)
{
	XDR	stream;
	uint_t	len;
	char	*scratch;
	char	*result = NULL;

	len = xdr_sizeof(proc, src);
	scratch = malloc(len);
	if (scratch == NULL)
		return (NULL);

	/* Serialize the source into the scratch buffer ... */
	xdrmem_create(&stream, scratch, len, XDR_ENCODE);
	if (proc(&stream, src)) {
		/* ... then deserialize it into the destination. */
		xdrmem_create(&stream, scratch, len, XDR_DECODE);
		if (proc(&stream, dst))
			result = dst;
	}

	free(scratch);
	return (result);
}
/*
 * Allocate a message of type 'ty' large enough to hold the XDR
 * encoding of 'payload' plus 'extra_len' trailing bytes, encode the
 * payload into it, and return a pointer to the trailing space via
 * '*extra'.  Returns ERR_PTR(ENOMEM) on allocation failure and
 * ERR_PTR(EINVAL) on XDR encoding failure.
 */
struct msg*
msg_xdr_extalloc(uint16_t ty, xdrproc_t xdrproc, void *payload,
    size_t extra_len, void **extra)
{
	struct msg *m;
	size_t xl;
	uint32_t len;
	XDR xdrs;

	xl = xdr_sizeof(xdrproc, payload);
	len = sizeof(struct msg) + xl + extra_len;
	m = calloc(1, len);
	if (!m)
		return ERR_PTR(ENOMEM);

	/* Wire header: big-endian length/type, refcount starts at 1. */
	pack_to_be32(&m->len, len);
	pack_to_be16(&m->ty, ty);
	pack_to_8(&m->refcnt, 1);

	xdrmem_create(&xdrs, (void*)&m->data, xl, XDR_ENCODE);
	if (!xdrproc(&xdrs, (void*)payload)) {
		/*
		 * Fix: tear down the XDR stream *before* freeing the
		 * buffer it points into.  The original called free(m)
		 * first and then xdr_destroy(&xdrs), leaving the stream
		 * referencing freed memory during destruction.
		 */
		xdr_destroy(&xdrs);
		free(m);
		return ERR_PTR(EINVAL);
	}
	xdr_destroy(&xdrs);

	/* Caller's extra space sits immediately after the XDR payload. */
	*extra = m->data + xl;
	return m;
}
/*
 * After a successful authentication, request the access token.
 *
 * Encodes the token (ctx->ctx_token) into ctx->ctx_orawbuf, growing
 * the buffer if needed.  Returns 0 on success or an NT_STATUS error.
 */
static int
smbd_authsvc_gettoken(authsvc_context_t *ctx)
{
	XDR xdrs;
	smb_token_t *token = NULL;
	void *newbuf;
	int rc = 0;
	int len;

	if ((token = ctx->ctx_token) == NULL)
		return (NT_STATUS_ACCESS_DENIED);

	/*
	 * Encode the token response.
	 */
	len = xdr_sizeof(smb_token_xdr, token);
	if (len > ctx->ctx_orawlen) {
		/*
		 * Fix: grow the buffer via a temporary pointer.  The
		 * original assigned realloc()'s result directly to
		 * ctx->ctx_orawbuf, so a failed realloc leaked the old
		 * buffer and left ctx_orawbuf NULL while ctx_orawlen
		 * still claimed the old size.
		 */
		newbuf = realloc(ctx->ctx_orawbuf, len);
		if (newbuf == NULL)
			return (NT_STATUS_INTERNAL_ERROR);
		ctx->ctx_orawbuf = newbuf;
	}

	ctx->ctx_orawtype = LSA_MTYPE_TOKEN;
	ctx->ctx_orawlen = len;
	xdrmem_create(&xdrs, ctx->ctx_orawbuf, len, XDR_ENCODE);
	if (!smb_token_xdr(&xdrs, token))
		rc = NT_STATUS_INTERNAL_ERROR;
	xdr_destroy(&xdrs);

	return (rc);
}
/*
 * Serialize the door call header (and optional request payload) held
 * in 'da' into a freshly allocated buffer, then fill in da->da_arg so
 * the door call can be placed.  Returns 0 on success; -1 with errno
 * set to EPROTO on encoding failure, or -1 on allocation failure.
 */
static int
smb_door_encode(smb_doorarg_t *da, uint32_t cmd)
{
	XDR xdrs;
	char *packed;
	uint32_t nbytes;

	/* Header size first; add the payload size when one is present. */
	nbytes = xdr_sizeof(smb_doorhdr_xdr, &da->da_hdr);
	if (da->da_req_xdr != NULL)
		nbytes += xdr_sizeof(da->da_req_xdr, da->da_req_data);

	smb_door_sethdr(&da->da_hdr, cmd, nbytes);

	if ((packed = malloc(nbytes)) == NULL)
		return (-1);

	xdrmem_create(&xdrs, packed, nbytes, XDR_ENCODE);

	if (!smb_doorhdr_xdr(&xdrs, &da->da_hdr))
		goto encode_err;

	if (da->da_req_xdr != NULL &&
	    !da->da_req_xdr(&xdrs, da->da_req_data))
		goto encode_err;

	/* The same buffer doubles as the door reply area. */
	da->da_arg.data_ptr = packed;
	da->da_arg.data_size = nbytes;
	da->da_arg.desc_ptr = NULL;
	da->da_arg.desc_num = 0;
	da->da_arg.rbuf = packed;
	da->da_arg.rsize = nbytes;

	xdr_destroy(&xdrs);
	return (0);

encode_err:
	errno = EPROTO;
	free(packed);
	xdr_destroy(&xdrs);
	return (-1);
}
/*
 * XDR-encode 'input' into a newly allocated buffer using 'pack_func'.
 * On success '*output' points to the encoded bytes (caller frees) and
 * '*output_size' holds their length; returns 0.  Returns -1 on
 * allocation or encoding failure, with *output NULL and *output_size 0.
 */
static int buffer_pack(void *input, void **output, int32_t *output_size,
    xdrproc_t pack_func)
{
	XDR pack_xdrs;

	*output_size = xdr_sizeof(pack_func, input);
	*output = malloc(*output_size);
	if (*output == NULL) {
		/* Fix: the original never checked malloc(). */
		*output_size = 0;
		return (-1);
	}

	xdrmem_create(&pack_xdrs, (caddr_t)*output, *output_size, XDR_ENCODE);
	if (!pack_func(&pack_xdrs, input)) {
		/*
		 * Fix: the original ignored the encoder's result and
		 * returned success, handing back a half-filled buffer.
		 */
		xdr_destroy(&pack_xdrs);
		free(*output);
		*output = NULL;
		*output_size = 0;
		return (-1);
	}
	xdr_destroy(&pack_xdrs);

	return (0);
}
size_t Message::getXDRSize(void) { ulong lenght; //length calculated xdrproc_t proc; //pointer to the filter proc = (xdrproc_t)xdr_int; //set filter to xdr_int //use size of to work out size of encoded data //note must pass void* pointer to data lenght = xdr_sizeof(proc,(void*)&type); return lenght; }
/*
 * Serialize the reply structure 'arg' into an iobuf drawn from the
 * request's iobuf pool, describing the encoded region in 'outmsg'.
 * Returns the iobuf (caller owns the reference) or NULL on failure.
 */
struct iobuf *
quotad_serialize_reply (rpcsvc_request_t *req, void *arg,
                        struct iovec *outmsg, xdrproc_t xdrproc)
{
        struct iobuf            *iob = NULL;
        ssize_t                  retlen = 0;
        ssize_t                  xdr_size = 0;

        GF_VALIDATE_OR_GOTO ("server", req, ret);

        /* First, get the io buffer into which the reply in arg will
         * be serialized. */
        if (arg && xdrproc) {
                xdr_size = xdr_sizeof (xdrproc, arg);
                iob = iobuf_get2 (req->svc->ctx->iobuf_pool, xdr_size);
                if (!iob) {
                        gf_log_callingfn (THIS->name, GF_LOG_ERROR,
                                          "Failed to get iobuf");
                        goto ret;
                };

                iobuf_to_iovec (iob, outmsg);
                /* Use the given serializer to translate the give C structure
                 * in arg to XDR format which will be written into the buffer
                 * in outmsg. */
                /* retlen is used to received the error since size_t is
                 * unsigned and we need -1 for error notification during
                 * encoding. */
                retlen = xdr_serialize_generic (*outmsg, arg, xdrproc);
                if (retlen == -1) {
                        /* Failed to Encode 'GlusterFS' msg in RPC is not
                         * exactly failure of RPC return values.. client
                         * should get notified about this, so there are no
                         * missing frames.  Deliberately keep the iobuf and
                         * report GARBAGE_ARGS instead of dropping. */
                        gf_log_callingfn ("", GF_LOG_ERROR,
                                          "Failed to encode message");
                        req->rpc_err = GARBAGE_ARGS;
                        retlen = 0;
                }
        }
        /* NOTE(review): retlen is reset to 0 on encode failure above, so
         * the retlen == -1 cleanup below only triggers when the initial
         * validation jumps here -- confirm this is intended. */
        outmsg->iov_len = retlen;
ret:
        if (retlen == -1) {
                iobuf_unref (iob);
                iob = NULL;
        }

        return iob;
}
/*
 * Duplicate 'src' into 'dest' (or into a freshly calloc'd nis_result
 * when 'dest' is NULL) by encoding it into a temporary XDR buffer and
 * decoding that buffer back out.  Returns the clone, or NULL on
 * allocation/XDR failure.
 */
nis_result *
nis_clone_result (const nis_result *src, nis_result *dest)
{
  XDR stream;
  unsigned int len;
  char *scratch;
  nis_result *clone;

  if (src == NULL)
    return (NULL);

  len = xdr_sizeof ((xdrproc_t)_xdr_nis_result, (char *)src);
  scratch = calloc (1, len);
  if (scratch == NULL)
    return NULL;

  /* Serialize the source object into the scratch buffer. */
  xdrmem_create (&stream, scratch, len, XDR_ENCODE);
  if (!_xdr_nis_result (&stream, (nis_result *)src))
    {
      xdr_destroy (&stream);
      free (scratch);
      return NULL;
    }
  xdr_destroy (&stream);

  /* Pick, or allocate, the destination object. */
  if (dest == NULL)
    {
      clone = calloc (1, sizeof (nis_result));
      if (clone == NULL)
        {
          free (scratch);
          return NULL;
        }
    }
  else
    clone = dest;

  /* Deserialize the buffer into the destination. */
  xdrmem_create (&stream, scratch, len, XDR_DECODE);
  if (!_xdr_nis_result (&stream, clone))
    {
      xdr_destroy (&stream);
      if (clone != dest)
        free (clone);
      free (scratch);
      return NULL;
    }
  xdr_destroy (&stream);

  free (scratch);
  return clone;
}
/*
 * Duplicate 'src' into 'dest' (or into a freshly calloc'd
 * directory_obj when 'dest' is NULL) via an XDR encode/decode
 * round trip.  Returns the clone, or NULL on failure.
 */
directory_obj *
nis_clone_directory (const directory_obj *src, directory_obj *dest)
{
  XDR stream;
  unsigned int len;
  char *scratch;
  directory_obj *clone;

  if (src == NULL)
    return NULL;

  len = xdr_sizeof ((xdrproc_t)_xdr_directory_obj, (char *)src);
  scratch = calloc (1, len);
  if (scratch == NULL)
    return NULL;

  /* Serialize the source object into the scratch buffer. */
  xdrmem_create (&stream, scratch, len, XDR_ENCODE);
  if (!_xdr_directory_obj (&stream, (directory_obj *)src))
    {
      xdr_destroy (&stream);
      free (scratch);
      return NULL;
    }
  xdr_destroy (&stream);

  /* Pick, or allocate, the destination object. */
  if (dest == NULL)
    {
      clone = calloc (1, sizeof (directory_obj));
      if (clone == NULL)
        {
          free (scratch);
          return NULL;
        }
    }
  else
    clone = dest;

  /* Deserialize the buffer into the destination. */
  xdrmem_create (&stream, scratch, len, XDR_DECODE);
  if (!_xdr_directory_obj (&stream, clone))
    {
      xdr_destroy (&stream);
      if (clone != dest)
        free (clone);
      free (scratch);
      return NULL;
    }
  xdr_destroy (&stream);

  free (scratch);
  return clone;
}
/*
 * Duplicate 'src' into 'dest' (or into a freshly calloc'd nis_object
 * when 'dest' is NULL) via an XDR encode/decode round trip.
 * Returns the clone, or NULL on allocation/XDR failure.
 */
nis_object *
nis_clone_object (const nis_object *src, nis_object *dest)
{
  char *addr;
  unsigned int size;
  XDR xdrs;
  nis_object *res = NULL;

  if (src == NULL)
    return (NULL);

  size = xdr_sizeof ((xdrproc_t)_xdr_nis_object, (char *) src);
  if ((addr = calloc (1, size)) == NULL)
    return NULL;

  /* Allocate the result up front so both XDR passes share cleanup. */
  if (dest == NULL)
    {
      if ((res = calloc (1, sizeof (nis_object))) == NULL)
        goto out;
    }
  else
    res = dest;

  xdrmem_create (&xdrs, addr, size, XDR_ENCODE);
  /*
   * NOTE: on encode failure this jumps *into* the decode-failure
   * block below ('out2'), which frees 'res' (only if we allocated
   * it), NULLs the return value, and falls through to the single
   * xdr_destroy.  Order-sensitive; do not restructure casually.
   */
  if (!_xdr_nis_object (&xdrs, (nis_object *) src))
    goto out2;
  xdr_destroy (&xdrs);
  xdrmem_create (&xdrs, addr, size, XDR_DECODE);
  if (!_xdr_nis_object (&xdrs, res))
    {
    out2:
      if (dest == NULL)
        free (res);
      res = NULL;
    }
  xdr_destroy (&xdrs);
 out:
  free (addr);
  return res;
}
/*
 * Serialize 'req' with 'xdrproc' (when non-NULL) into an iobuf and
 * submit it on the global RPC connection 'g_rpc' for program 'prog',
 * procedure 'procnum', with 'cbkfn' invoked on completion.
 * Returns 0 on success, -1 on serialization/allocation failure.
 */
int
lic_submit_request(void *req, call_frame_t *frame, rpc_clnt_prog_t *prog,
                   int procnum, fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
{
	int ret = -1;
	struct iovec iov = {0};
	struct iobuf *iobuf = NULL;
	ssize_t xdr_size = 0;

	if (req) {
		xdr_size = xdr_sizeof(xdrproc, req);
		iobuf = iobuf_get2(THIS->ctx->iobuf_pool, xdr_size);
		if (!iobuf) {
			goto out;
		};

		iov.iov_base = iobuf->ptr;
		iov.iov_len = iobuf_size (iobuf);

		/* Create the xdr payload */
		ret = xdr_serialize_generic (iov, req, xdrproc);
		if (ret == -1)
			goto out;
		/* Trim the iovec to the actual encoded length. */
		iov.iov_len = ret;
	}

	/* Block (up to 60s, presumably) until the connection is up. */
	lic_await_connected(60);

	/* Send the msg */
	ret = rpc_clnt_submit (g_rpc, prog, procnum, cbkfn, &iov, 1, NULL,
	                       0, NULL, frame, NULL, 0, NULL, 0, NULL);
	/*
	 * NOTE(review): rpc_clnt_submit()'s return value is immediately
	 * overwritten with 0, so submit failures are not reported to the
	 * caller -- confirm this fire-and-forget behavior is intended.
	 */
	ret = 0;
out:
	if (iobuf)
		iobuf_unref (iobuf);

	return ret;
}
/*
 * XDR-encode the RPGP product 'prod' into a newly malloc'd buffer.
 * On success stores the buffer in '*serial_data' (caller frees) and
 * returns its length; returns -1 on sizing, allocation or encoding
 * failure, leaving *serial_data NULL.
 */
int
RPGP_product_serialize (void *prod, char **serial_data)
{
	XDR stream;
	char *encoded;
	int len;
	int ok;

	*serial_data = NULL;

	/* Work out how many bytes the encoding needs. */
	len = xdr_sizeof ((xdrproc_t)xdr_RPGP_product_t, prod);
	if (len <= 0)
		return (-1);

	encoded = (char *)malloc (len);
	if (encoded == NULL)
		return (-1);

	xdrmem_create (&stream, encoded, len, XDR_ENCODE);
	ok = xdr_RPGP_product_t (&stream, ( RPGP_product_t *)prod);
	xdr_destroy (&stream);

	if (ok == 0) {
		free (encoded);
		return (-1);
	}

	*serial_data = encoded;
	return (len);
}
/*
 * XDR-encode the namespace pool 'p' and hand it to the kernel via the
 * NFS_SPE system call (opcode SPE_OP_NPOOL_POPULATE).  Returns
 * silently on NULL input or allocation/encoding failure.
 */
void
sped_populate_npools(spe_npool *p)
{
	struct nfsspe_args args;
	XDR xdrs;

	args.nsa_opcode = SPE_OP_NPOOL_POPULATE;
	args.nsa_did = 0xdead4ead;	/* sentinel id, unused by this op */

	if (!p)
		return;

	args.nsa_xdr_len = xdr_sizeof((xdrproc_t)xdr_spe_npool, (void *)p);
	args.nsa_xdr = calloc(args.nsa_xdr_len, sizeof (char));
	if (!args.nsa_xdr)
		return;

	xdrmem_create(&xdrs, args.nsa_xdr, args.nsa_xdr_len, XDR_ENCODE);
	if (!xdr_spe_npool(&xdrs, p)) {
		/*
		 * Fix: destroy the stream before freeing the buffer it
		 * points into (the original freed first, then destroyed).
		 */
		xdr_destroy(&xdrs);
		free(args.nsa_xdr);
		return;
	}

#if 0
	sped_xdr_dump(args.nsa_xdr, args.nsa_xdr_len);
#endif

	(void) _nfssys(NFS_SPE, &args);

	/* Same ordering fix on the success path; also dropped the
	 * original's unused 'buf' and 'len' locals. */
	xdr_destroy(&xdrs);
	free(args.nsa_xdr);
}
/*
 * Build an authentication request message and
 * send it to the local logon service.
 *
 * Encodes an smb_logon_t assembled from the session-setup info into a
 * kernel-allocated buffer, ships it over the auth socket, and maps the
 * reply message type to an NT status.  Returns 0 on success.
 */
static uint32_t
smb_auth_do_oldreq(smb_request_t *sr)
{
	smb_lsa_msg_hdr_t msg_hdr;
	smb_logon_t	user_info;
	XDR		xdrs;
	smb_arg_sessionsetup_t *sinfo = sr->sr_ssetup;
	smb_user_t	*user = sr->uid_user;
	void		*sbuf = NULL;
	void		*rbuf = NULL;
	uint32_t	slen = 0;
	uint32_t	rlen = 0;
	uint32_t	status;
	bool_t		ok;

	bzero(&user_info, sizeof (smb_logon_t));

	/* Populate the logon request from the session and setup args. */
	user_info.lg_level = NETR_NETWORK_LOGON;
	user_info.lg_username = sinfo->ssi_user;
	user_info.lg_domain = sinfo->ssi_domain;
	user_info.lg_workstation = sr->session->workstation;
	user_info.lg_clnt_ipaddr = sr->session->ipaddr;
	user_info.lg_local_ipaddr = sr->session->local_ipaddr;
	user_info.lg_local_port = sr->session->s_local_port;
	user_info.lg_challenge_key.val = sr->session->challenge_key;
	user_info.lg_challenge_key.len = sr->session->challenge_len;
	user_info.lg_nt_password.val = sinfo->ssi_ntpwd;
	user_info.lg_nt_password.len = sinfo->ssi_ntpwlen;
	user_info.lg_lm_password.val = sinfo->ssi_lmpwd;
	user_info.lg_lm_password.len = sinfo->ssi_lmpwlen;
	user_info.lg_native_os = sr->session->native_os;
	user_info.lg_native_lm = sr->session->native_lm;
	/* lg_flags? */

	/* Size, allocate, and XDR-encode the request (sleeping alloc). */
	slen = xdr_sizeof(smb_logon_xdr, &user_info);
	sbuf = kmem_alloc(slen, KM_SLEEP);
	xdrmem_create(&xdrs, sbuf, slen, XDR_ENCODE);
	ok = smb_logon_xdr(&xdrs, &user_info);
	xdr_destroy(&xdrs);
	if (!ok) {
		status = RPC_NT_BAD_STUB_DATA;
		goto out;
	}

	msg_hdr.lmh_msgtype = LSA_MTYPE_OLDREQ;
	msg_hdr.lmh_msglen = slen;

	/* msg_hdr is updated in place with the reply's type/length. */
	status = smb_authsock_sendrecv(user, &msg_hdr, sbuf, &rbuf);
	if (status != 0)
		goto out;
	rlen = msg_hdr.lmh_msglen;
	/* Request buffer is no longer needed once the reply is in. */
	kmem_free(sbuf, slen);
	sbuf = NULL;

	/*
	 * Decode the response message.
	 */
	switch (msg_hdr.lmh_msgtype) {
	case LSA_MTYPE_OK:
		status = 0;
		break;

	case LSA_MTYPE_ERROR:
		/* Error replies carry an smb_lsa_eresp_t with the status. */
		if (rlen == sizeof (smb_lsa_eresp_t)) {
			smb_lsa_eresp_t *ler = rbuf;
			status = ler->ler_ntstatus;
			break;
		}
		/* FALLTHROUGH */

	default:
		/* Bogus message type */
		status = NT_STATUS_INTERNAL_ERROR;
		break;
	}

out:
	/* rlen/slen track whichever buffers are still outstanding. */
	if (rbuf != NULL)
		kmem_free(rbuf, rlen);
	if (sbuf != NULL)
		kmem_free(sbuf, slen);
	return (status);
}
/*
 * Input: A (nis_object *), and (optionally) an (entry_obj *) array.
 * Output: Pointer to an XDR:ed version of an (xdr_nis_object_t),
 * allocated via am(); the encoded length is returned through *xdrLenP
 * when non-NULL.  Returns NULL (0) on any failure.
 */
void *
xdrNisObject(nis_object *obj, entry_obj **ea, int numEa, int *xdrLenP)
{
	xdr_nis_object_t	xno;
	void			*buf;
	int			xdrLen;
	XDR			xdrs;
	bool_t			xret;
	char			*myself = "xdrNisObject";

	if (obj == 0)
		return (0);

	/*
	 * The version tells us what the XDR:ed buffer contains.
	 * Should be incremented whenever xdr_nis_object_t changes
	 * incompatibly.
	 */
	xno.xversion = 1;
	xno.obj = obj;

	if (obj->zo_data.zo_type == NIS_DIRECTORY_OBJ && ea != 0 &&
			numEa > 0) {
		int	i;

		/*
		 * The ea[] array is expected to contain the kind of
		 * pseudo-entry object stored in the nisdb incarnation
		 * of a NIS+ directory. Column zero contains the XDR:ed
		 * directory entry object (which we ignore), while column
		 * one contains the name of said entry. It's the latter
		 * that we borrow for use in the dirEntry[] list of the
		 * xdr_nis_object_t.
		 */
		xno.dirEntry.dirEntry_len = 0;
		xno.dirEntry.dirEntry_val =
			am(myself, numEa * sizeof (xno.dirEntry.dirEntry_val[0]));
		if (xno.dirEntry.dirEntry_val == 0)
			return (0);
		for (i = 0; i < numEa; i++) {
			/* Skip malformed entries (wrong arity, empty name). */
			if (ea[i] == 0 || ea[i]->en_cols.en_cols_val == 0 ||
				ea[i]->en_cols.en_cols_len != 2 ||
				ea[i]->en_cols.en_cols_val[1].
					ec_value.ec_value_len == 0)
				continue;
			/*
			 * Yes, there's a NUL at the end of the dir entry
			 * name.
			 */
			xno.dirEntry.dirEntry_val[xno.dirEntry.dirEntry_len] =
				ea[i]->en_cols.en_cols_val[1].
					ec_value.ec_value_val;
			xno.dirEntry.dirEntry_len++;
		}
	} else {
		/* No directory entries */
		xno.dirEntry.dirEntry_len = 0;
		xno.dirEntry.dirEntry_val = 0;
	}

	xdrLen = xdr_sizeof(xdr_xdr_nis_object_t, &xno);
	buf = am(myself, xdrLen);
	if (buf == 0) {
		/*
		 * Fix: the dirEntry name array was leaked on this path.
		 * sfree() of NULL is safe (used that way elsewhere in
		 * this file), so no guard is needed.
		 */
		sfree(xno.dirEntry.dirEntry_val);
		return (0);
	}

	xdrmem_create(&xdrs, (char *)buf, xdrLen, XDR_ENCODE);
	xret = xdr_xdr_nis_object_t(&xdrs, &xno);
	/* The name array only held borrowed pointers; free the array. */
	sfree(xno.dirEntry.dirEntry_val);
	if (!xret) {
		sfree(buf);
		return (0);
	}

	if (xdrLenP != 0)
		*xdrLenP = xdrLen;

	return (buf);
}
/*
 * Given an input NIS+ object, create the kind
 * of pseudo-entry_obj (with an XDR-encoded nis_object in the
 * first column) that's stored in the DB. Note that:
 *
 *	If the input object is an entry, it's assumed to have the
 *	columns moved up one step (col 0 in en_cols.en_cols_val[1],
 *	etc.). en_cols.en_cols_val[0] will be overwritten. The
 *	input object will be changed (some pointers set to zero,
 *	etc.) on exit.
 *
 *	'eo' is assumed to be a pointer to an empty entry_obj (or, at
 *	least, one that can be overwritten). It must not be a pointer
 *	to the entry_obj in 'obj'. If the input object is of a type
 *	other than entry, the 'eo' pointer must have
 *	en_cols.en_cols_val appropriately initialized to an array of
 *	(at least) length one.
 *
 *	'tobj' is a pointer to the table object for the table for
 *	which the entry_obj is destined. It's needed for entry objects,
 *	but unused for other object types.
 */
entry_obj *
makePseudoEntryObj(nis_object *obj, entry_obj *eo, nis_object *tobj)
{
	int		bufsize;
	char		*buf;
	XDR		xdrs;
	bool_t		xret;
	uint_t		ecl;
	entry_col	*ecv;
	char		*myself = "makePseudoEntryObj";

	if (obj == 0 || eo == 0)
		return (0);

	if (obj->zo_data.zo_type == NIS_ENTRY_OBJ) {
		*eo = obj->zo_data.objdata_u.en_data;
		eo->en_type = 0;
		/*
		 * To prevent the XDR function from making a copy of
		 * the entry columns, we set the columns structure to
		 * 0 (ie no column data).  ecl/ecv stash the originals
		 * for restoration below; they are only set and read
		 * under this same zo_type condition, so they are never
		 * used uninitialized.
		 */
		ecl = obj->EN_data.en_cols.en_cols_len;
		ecv = obj->EN_data.en_cols.en_cols_val;
		obj->EN_data.en_cols.en_cols_len = 0;
		obj->EN_data.en_cols.en_cols_val = 0;
	} else {
		eo->en_type = (char *)in_directory;
	}

	bufsize = xdr_sizeof(xdr_nis_object, obj);
	buf = am(myself, bufsize);
	if (buf == 0) {
		/* Undo the column hijack before bailing out. */
		if (obj->zo_data.zo_type == NIS_ENTRY_OBJ) {
			obj->EN_data.en_cols.en_cols_len = ecl;
			obj->EN_data.en_cols.en_cols_val = ecv;
		}
		return (0);
	}

	xdrmem_create(&xdrs, (char *)buf, bufsize, XDR_ENCODE);
	/* Entries go through the "fetus" encoder, which needs the table. */
	if (obj->zo_data.zo_type == NIS_ENTRY_OBJ) {
		xret = xdr_nis_fetus_object(&xdrs, obj, tobj);
	} else {
		xret = xdr_nis_object(&xdrs, obj);
	}

	/* Restore the 'obj' */
	if (obj->zo_data.zo_type == NIS_ENTRY_OBJ) {
		obj->EN_data.en_cols.en_cols_len = ecl;
		obj->EN_data.en_cols.en_cols_val = ecv;
	}

	if (!xret) {
		logmsg(MSG_NOTIMECHECK, LOG_ERR,
			"%s: XDR encode failure", myself);
		sfree(buf);
		return (0);
	}

	/* Column 0 holds the XDR blob; actual length via xdr_getpos(). */
	eo->en_cols.en_cols_val[0].ec_value.ec_value_val = buf;
	eo->en_cols.en_cols_val[0].ec_value.ec_value_len = xdr_getpos(&xdrs);
	eo->en_cols.en_cols_val[0].ec_flags = EN_BINARY+EN_XDR;

	return (eo);
}
/*
 * Sign this brick out of the glusterd portmapper.  Builds a
 * pmap_signout_req from the command-line args (or the explicit
 * 'brickname'), XDR-serializes it into an iobuf, and submits it on
 * the management RPC connection.  Returns the serializer/submit
 * result; 0 also when the required arguments are absent.
 */
int
rpc_clnt_mgmt_pmap_signout (glusterfs_ctx_t *ctx, char *brickname)
{
        int                 ret = 0;
        pmap_signout_req    req = {0, };
        call_frame_t       *frame = NULL;
        cmd_args_t         *cmd_args = NULL;
        char                brick_name[PATH_MAX] = {0,};
        struct iovec        iov = {0, };
        struct iobuf       *iobuf = NULL;
        struct iobref      *iobref = NULL;
        ssize_t             xdr_size = 0;

        /* NOTE(review): 'frame' is created here but never destroyed on
         * the early-out paths -- confirm rpc_clnt_submit() (or its
         * callback) takes ownership in all cases. */
        frame = create_frame (THIS, ctx->pool);
        cmd_args = &ctx->cmd_args;

        if (!cmd_args->brick_port && (!cmd_args->brick_name || !brickname)) {
                gf_log ("fsd-mgmt", GF_LOG_DEBUG,
                        "portmapper signout arguments not given");
                goto out;
        }

        /* RDMA transports register under a ".rdma"-suffixed name. */
        if (cmd_args->volfile_server_transport &&
            !strcmp(cmd_args->volfile_server_transport, "rdma")) {
                snprintf (brick_name, sizeof(brick_name), "%s.rdma",
                          cmd_args->brick_name);
                req.brick = brick_name;
        } else {
                if (brickname)
                        req.brick = brickname;
                else
                        req.brick = cmd_args->brick_name;
        }

        req.port = cmd_args->brick_port;
        req.rdma_port = cmd_args->brick_port2;

        /* mgmt_submit_request is not available in libglusterfs.
         * Need to serialize and submit manually. */
        iobref = iobref_new ();
        if (!iobref) {
                goto out;
        }

        xdr_size = xdr_sizeof ((xdrproc_t)xdr_pmap_signout_req, &req);
        iobuf = iobuf_get2 (ctx->iobuf_pool, xdr_size);
        if (!iobuf) {
                goto out;
        };
        iobref_add (iobref, iobuf);

        iov.iov_base = iobuf->ptr;
        iov.iov_len = iobuf_pagesize (iobuf);

        /* Create the xdr payload */
        ret = xdr_serialize_generic (iov, &req,
                                     (xdrproc_t)xdr_pmap_signout_req);
        if (ret == -1) {
                gf_log (THIS->name, GF_LOG_WARNING,
                        "failed to create XDR payload");
                goto out;
        }
        /* Trim the iovec to the actual encoded length. */
        iov.iov_len = ret;

        ret = rpc_clnt_submit (ctx->mgmt, &clnt_pmap_signout_prog,
                               GF_PMAP_SIGNOUT, mgmt_pmap_signout_cbk,
                               &iov, 1,
                               NULL, 0, iobref, frame, NULL, 0,
                               NULL, 0, NULL);
out:
        /* Drop our references; the transport holds its own if needed. */
        if (iobref)
                iobref_unref (iobref);

        if (iobuf)
                iobuf_unref (iobuf);
        return ret;
}
/* * Get the access information from the cache or callup to the mountd * to get and cache the access information in the kernel. */ int nfsauth_cache_get(struct exportinfo *exi, struct svc_req *req, int flavor) { struct netbuf addr; struct netbuf *claddr; struct auth_cache **head; struct auth_cache *ap; int access; varg_t varg = {0}; nfsauth_res_t res = {0}; XDR xdrs_a; XDR xdrs_r; size_t absz; caddr_t abuf; size_t rbsz = (size_t)(BYTES_PER_XDR_UNIT * 2); char result[BYTES_PER_XDR_UNIT * 2] = {0}; caddr_t rbuf = (caddr_t)&result; int last = 0; door_arg_t da; door_info_t di; door_handle_t dh; uint_t ntries = 0; /* * Now check whether this client already * has an entry for this flavor in the cache * for this export. * Get the caller's address, mask off the * parts of the address that do not identify * the host (port number, etc), and then hash * it to find the chain of cache entries. */ claddr = svc_getrpccaller(req->rq_xprt); addr = *claddr; addr.buf = kmem_alloc(addr.len, KM_SLEEP); bcopy(claddr->buf, addr.buf, claddr->len); addrmask(&addr, svc_getaddrmask(req->rq_xprt)); head = &exi->exi_cache[hash(&addr)]; rw_enter(&exi->exi_cache_lock, RW_READER); for (ap = *head; ap; ap = ap->auth_next) { if (EQADDR(&addr, &ap->auth_addr) && flavor == ap->auth_flavor) break; } if (ap) { /* cache hit */ access = ap->auth_access; ap->auth_time = gethrestime_sec(); nfsauth_cache_hit++; } rw_exit(&exi->exi_cache_lock); if (ap) { kmem_free(addr.buf, addr.len); return (access); } nfsauth_cache_miss++; /* * No entry in the cache for this client/flavor * so we need to call the nfsauth service in the * mount daemon. */ retry: mutex_enter(&mountd_lock); dh = mountd_dh; if (dh) door_ki_hold(dh); mutex_exit(&mountd_lock); if (dh == NULL) { /* * The rendezvous point has not been established yet ! * This could mean that either mountd(1m) has not yet * been started or that _this_ routine nuked the door * handle after receiving an EINTR for a REVOKED door. 
* * Returning NFSAUTH_DROP will cause the NFS client * to retransmit the request, so let's try to be more * rescillient and attempt for ntries before we bail. */ if (++ntries % NFSAUTH_DR_TRYCNT) { delay(hz); goto retry; } sys_log("nfsauth: mountd has not established door"); kmem_free(addr.buf, addr.len); return (NFSAUTH_DROP); } ntries = 0; varg.vers = V_PROTO; varg.arg_u.arg.cmd = NFSAUTH_ACCESS; varg.arg_u.arg.areq.req_client.n_len = addr.len; varg.arg_u.arg.areq.req_client.n_bytes = addr.buf; varg.arg_u.arg.areq.req_netid = svc_getnetid(req->rq_xprt); varg.arg_u.arg.areq.req_path = exi->exi_export.ex_path; varg.arg_u.arg.areq.req_flavor = flavor; /* * Setup the XDR stream for encoding the arguments. Notice that * in addition to the args having variable fields (req_netid and * req_path), the argument data structure is itself versioned, * so we need to make sure we can size the arguments buffer * appropriately to encode all the args. If we can't get sizing * info _or_ properly encode the arguments, there's really no * point in continuting, so we fail the request. */ DTRACE_PROBE1(nfsserv__func__nfsauth__varg, varg_t *, &varg); if ((absz = xdr_sizeof(xdr_varg, (void *)&varg)) == 0) { door_ki_rele(dh); kmem_free(addr.buf, addr.len); return (NFSAUTH_DENIED); } abuf = (caddr_t)kmem_alloc(absz, KM_SLEEP); xdrmem_create(&xdrs_a, abuf, absz, XDR_ENCODE); if (!xdr_varg(&xdrs_a, &varg)) { door_ki_rele(dh); goto fail; } XDR_DESTROY(&xdrs_a); /* * The result (nfsauth_res_t) is always two int's, so we don't * have to dynamically size (or allocate) the results buffer. * Now that we've got what we need, we prep the door arguments * and place the call. */ da.data_ptr = (char *)abuf; da.data_size = absz; da.desc_ptr = NULL; da.desc_num = 0; da.rbuf = (char *)rbuf; da.rsize = rbsz; switch (door_ki_upcall_limited(dh, &da, NULL, SIZE_MAX, 0)) { case 0: /* Success */ if (da.data_ptr != da.rbuf && da.data_size == 0) { /* * The door_return that contained the data * failed ! 
We're here because of the 2nd * door_return (w/o data) such that we can * get control of the thread (and exit * gracefully). */ DTRACE_PROBE1(nfsserv__func__nfsauth__door__nil, door_arg_t *, &da); door_ki_rele(dh); goto fail; } else if (rbuf != da.rbuf) {
/*
 * Add an update to the log. The update's kdb_entry_sno and kdb_time fields
 * must already be set. The layout of the update log looks like:
 *
 * header log -> [ update header -> xdr(kdb_incr_update_t) ], ...
 */
static krb5_error_code
store_update(kdb_log_context *log_ctx, kdb_incr_update_t *upd)
{
    XDR xdrs;
    kdb_ent_header_t *indx_log;
    unsigned int i, recsize;
    unsigned long upd_size;
    krb5_error_code retval;
    kdb_hlog_t *ulog = log_ctx->ulog;
    uint32_t ulogentries = log_ctx->ulogentries;

    upd_size = xdr_sizeof((xdrproc_t)xdr_kdb_incr_update_t, upd);

    /* Grow the per-entry block size if this record won't fit. */
    recsize = sizeof(kdb_ent_header_t) + upd_size;
    if (recsize > ulog->kdb_block) {
        retval = resize(ulog, ulogentries, log_ctx->ulogfd, recsize);
        if (retval)
            return retval;
    }

    /* Mark the log unstable for the duration of the write. */
    ulog->kdb_state = KDB_UNSTABLE;

    /* Serial numbers map onto the circular entry array 1-based. */
    i = (upd->kdb_entry_sno - 1) % ulogentries;

    indx_log = INDEX(ulog, i);
    memset(indx_log, 0, ulog->kdb_block);
    indx_log->kdb_umagic = KDB_ULOG_MAGIC;
    indx_log->kdb_entry_size = upd_size;
    indx_log->kdb_entry_sno = upd->kdb_entry_sno;
    indx_log->kdb_time = upd->kdb_time;
    indx_log->kdb_commit = FALSE;

    /* Encode the update in place, directly into the mapped entry. */
    xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                  indx_log->kdb_entry_size, XDR_ENCODE);
    if (!xdr_kdb_incr_update_t(&xdrs, upd))
        return KRB5_LOG_CONV;

    /* Entry is complete; commit it and force it out. */
    indx_log->kdb_commit = TRUE;
    retval = sync_update(ulog, indx_log);
    if (retval)
        return retval;

    /* Modify the ulog header to reflect the new update. */
    ulog->kdb_last_sno = upd->kdb_entry_sno;
    ulog->kdb_last_time = upd->kdb_time;
    if (ulog->kdb_num == 0) {
        /* First entry ever written to this log. */
        ulog->kdb_num = 1;
        ulog->kdb_first_sno = upd->kdb_entry_sno;
        ulog->kdb_first_time = upd->kdb_time;
    } else if (ulog->kdb_num < ulogentries) {
        ulog->kdb_num++;
    } else {
        /* We are circling; set kdb_first_sno and time to the next update. */
        i = upd->kdb_entry_sno % ulogentries;
        indx_log = INDEX(ulog, i);
        ulog->kdb_first_sno = indx_log->kdb_entry_sno;
        ulog->kdb_first_time = indx_log->kdb_time;
    }

    ulog->kdb_state = KDB_STABLE;
    sync_header(ulog);
    return 0;
}
/*
 * Adds an entry to the update log.
 * The layout of the update log looks like:
 *
 * header log -> [ update header -> xdr(kdb_incr_update_t) ], ...
 *
 * Assigns the next serial number to 'upd', XDR-encodes it into the
 * circular ulog entry array, syncs the entry, and updates the log
 * header bookkeeping.
 */
krb5_error_code
ulog_add_update(krb5_context context, kdb_incr_update_t *upd)
{
	XDR		xdrs;
	kdbe_time_t	ktime;
	struct timeval	timestamp;
	kdb_ent_header_t *indx_log;
	uint_t		i, recsize;
	ulong_t		upd_size;
	krb5_error_code	retval;
	kdb_sno_t	cur_sno;
	kdb_log_context	*log_ctx;
	kdb_hlog_t	*ulog = NULL;
	uint32_t	ulogentries;
	int		ulogfd;

	INIT_ULOG(context);
	ulogentries = log_ctx->ulogentries;
	ulogfd = log_ctx->ulogfd;

	if (upd == NULL)
		return (KRB5_LOG_ERROR);

	/*
	 * Fix: this call was corrupted to "gettimeofday(&times;tamp, ...)"
	 * by an HTML-entity mangling of '&timestamp'; restored.
	 */
	(void) gettimeofday(&timestamp, NULL);
	ktime.seconds = timestamp.tv_sec;
	ktime.useconds = timestamp.tv_usec;

	upd_size = xdr_sizeof((xdrproc_t)xdr_kdb_incr_update_t, upd);

	/* Grow the per-entry block size if this record won't fit. */
	recsize = sizeof (kdb_ent_header_t) + upd_size;

	if (recsize > ulog->kdb_block) {
		if ((retval = ulog_resize(ulog, ulogentries, ulogfd,
		    recsize))) {
			/* Resize element array failed */
			return (retval);
		}
	}

	cur_sno = ulog->kdb_last_sno;

	/*
	 * We need to overflow our sno, replicas will do full
	 * resyncs once they see their sno > than the masters.
	 */
	if (cur_sno == (kdb_sno_t)-1)
		cur_sno = 1;
	else
		cur_sno++;

	/*
	 * We squirrel this away for finish_update() to index
	 */
	upd->kdb_entry_sno = cur_sno;

	i = (cur_sno - 1) % ulogentries;

	indx_log = (kdb_ent_header_t *)INDEX(ulog, i);

	(void) memset(indx_log, 0, ulog->kdb_block);
	indx_log->kdb_umagic = KDB_ULOG_MAGIC;
	indx_log->kdb_entry_size = upd_size;
	indx_log->kdb_entry_sno = cur_sno;
	indx_log->kdb_time = upd->kdb_time = ktime;
	indx_log->kdb_commit = upd->kdb_commit = FALSE;

	/* Unstable while the entry and header are being written. */
	ulog->kdb_state = KDB_UNSTABLE;

	/* Encode the update directly into the mapped entry. */
	xdrmem_create(&xdrs, (char *)indx_log->entry_data,
	    indx_log->kdb_entry_size, XDR_ENCODE);
	if (!xdr_kdb_incr_update_t(&xdrs, upd))
		return (KRB5_LOG_CONV);

	if ((retval = ulog_sync_update(ulog, indx_log)))
		return (retval);

	if (ulog->kdb_num < ulogentries)
		ulog->kdb_num++;

	ulog->kdb_last_sno = cur_sno;
	ulog->kdb_last_time = ktime;

	/*
	 * Since this is a circular array, once we circled, kdb_first_sno is
	 * always kdb_entry_sno + 1.
	 */
	if (cur_sno > ulogentries) {
		i = upd->kdb_entry_sno % ulogentries;
		indx_log = (kdb_ent_header_t *)INDEX(ulog, i);
		ulog->kdb_first_sno = indx_log->kdb_entry_sno;
		ulog->kdb_first_time = indx_log->kdb_time;
	} else if (cur_sno == 1) {
		/* First entry: it is also the oldest one. */
		ulog->kdb_first_sno = 1;
		ulog->kdb_first_time = indx_log->kdb_time;
	}

	ulog_sync_header(ulog);

	return (0);
}
/*
 * Add an entry to the update log. The layout of the update log looks like:
 *
 * header log -> [ update header -> xdr(kdb_incr_update_t) ], ...
 *
 * Assigns the next serial number to 'upd', XDR-encodes it into the
 * circular ulog entry array, syncs the entry, and updates the log
 * header bookkeeping.
 */
krb5_error_code
ulog_add_update(krb5_context context, kdb_incr_update_t *upd)
{
    XDR xdrs;
    kdbe_time_t ktime;
    kdb_ent_header_t *indx_log;
    unsigned int i, recsize;
    unsigned long upd_size;
    krb5_error_code retval;
    kdb_sno_t cur_sno;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    uint32_t ulogentries;
    int ulogfd;

    INIT_ULOG(context);
    ulogentries = log_ctx->ulogentries;
    ulogfd = log_ctx->ulogfd;

    if (upd == NULL)
        return KRB5_LOG_ERROR;

    time_current(&ktime);

    upd_size = xdr_sizeof((xdrproc_t)xdr_kdb_incr_update_t, upd);

    /* Grow the per-entry block size if this record won't fit. */
    recsize = sizeof(kdb_ent_header_t) + upd_size;
    if (recsize > ulog->kdb_block) {
        retval = resize(ulog, ulogentries, ulogfd, recsize);
        if (retval)
            return retval;
    }

    /* If we have reached the last possible serial number, reinitialize the
     * ulog and start over.  Slaves will do a full resync. */
    if (ulog->kdb_last_sno == (kdb_sno_t)-1)
        reset_header(ulog);

    /* Get the next serial number and save it for finish_update() to index. */
    cur_sno = ulog->kdb_last_sno + 1;
    upd->kdb_entry_sno = cur_sno;

    /* Serial numbers map onto the circular entry array 1-based. */
    i = (cur_sno - 1) % ulogentries;
    indx_log = INDEX(ulog, i);
    memset(indx_log, 0, ulog->kdb_block);
    indx_log->kdb_umagic = KDB_ULOG_MAGIC;
    indx_log->kdb_entry_size = upd_size;
    indx_log->kdb_entry_sno = cur_sno;
    indx_log->kdb_time = upd->kdb_time = ktime;
    indx_log->kdb_commit = upd->kdb_commit = FALSE;

    /* Unstable while the entry and header are being written. */
    ulog->kdb_state = KDB_UNSTABLE;

    /* Encode the update directly into the mapped entry. */
    xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                  indx_log->kdb_entry_size, XDR_ENCODE);
    if (!xdr_kdb_incr_update_t(&xdrs, upd))
        return KRB5_LOG_CONV;

    retval = sync_update(ulog, indx_log);
    if (retval)
        return retval;

    if (ulog->kdb_num < ulogentries)
        ulog->kdb_num++;

    ulog->kdb_last_sno = cur_sno;
    ulog->kdb_last_time = ktime;

    if (cur_sno > ulogentries) {
        /* Once we've circled, kdb_first_sno is the sno of the next entry. */
        i = upd->kdb_entry_sno % ulogentries;
        indx_log = INDEX(ulog, i);
        ulog->kdb_first_sno = indx_log->kdb_entry_sno;
        ulog->kdb_first_time = indx_log->kdb_time;
    } else if (cur_sno == 1) {
        /* This is the first update. */
        ulog->kdb_first_sno = 1;
        ulog->kdb_first_time = indx_log->kdb_time;
    }

    ulog_sync_header(ulog);
    return 0;
}
/* ARGSUSED */ static enum clnt_stat clnt_rdma_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args, caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp, struct timeval wait) { cku_private_t *p = htop(h); int try_call_again; int refresh_attempt = AUTH_REFRESH_COUNT; int status; int msglen; XDR *call_xdrp, callxdr; /* for xdrrdma encoding the RPC call */ XDR *reply_xdrp, replyxdr; /* for xdrrdma decoding the RPC reply */ XDR *rdmahdr_o_xdrs, *rdmahdr_i_xdrs; struct rpc_msg reply_msg; rdma_registry_t *m; struct clist *cl_sendlist; struct clist *cl_recvlist; struct clist *cl; struct clist *cl_rpcmsg; struct clist *cl_rdma_reply; struct clist *cl_rpcreply_wlist; struct clist *cl_long_reply; rdma_buf_t rndup; uint_t vers; uint_t op; uint_t off; uint32_t seg_array_len; uint_t long_reply_len; uint_t rpcsec_gss; uint_t gss_i_or_p; CONN *conn = NULL; rdma_buf_t clmsg; rdma_buf_t rpcmsg; rdma_chunkinfo_lengths_t rcil; clock_t ticks; bool_t wlist_exists_reply; uint32_t rdma_credit = rdma_bufs_rqst; RCSTAT_INCR(rccalls); call_again: bzero(&clmsg, sizeof (clmsg)); bzero(&rpcmsg, sizeof (rpcmsg)); bzero(&rndup, sizeof (rndup)); try_call_again = 0; cl_sendlist = NULL; cl_recvlist = NULL; cl = NULL; cl_rpcmsg = NULL; cl_rdma_reply = NULL; call_xdrp = NULL; reply_xdrp = NULL; wlist_exists_reply = FALSE; cl_rpcreply_wlist = NULL; cl_long_reply = NULL; rcil.rcil_len = 0; rcil.rcil_len_alt = 0; long_reply_len = 0; rw_enter(&rdma_lock, RW_READER); m = (rdma_registry_t *)p->cku_rd_handle; if (m->r_mod_state == RDMA_MOD_INACTIVE) { /* * If we didn't find a matching RDMA module in the registry * then there is no transport. 
*/ rw_exit(&rdma_lock); p->cku_err.re_status = RPC_CANTSEND; p->cku_err.re_errno = EIO; ticks = clnt_rdma_min_delay * drv_usectohz(1000000); if (h->cl_nosignal == TRUE) { delay(ticks); } else { if (delay_sig(ticks) == EINTR) { p->cku_err.re_status = RPC_INTR; p->cku_err.re_errno = EINTR; } } return (RPC_CANTSEND); } /* * Get unique xid */ if (p->cku_xid == 0) p->cku_xid = alloc_xid(); status = RDMA_GET_CONN(p->cku_rd_mod->rdma_ops, &p->cku_srcaddr, &p->cku_addr, p->cku_addrfmly, p->cku_rd_handle, &conn); rw_exit(&rdma_lock); /* * If there is a problem with the connection reflect the issue * back to the higher level to address, we MAY delay for a short * period so that we are kind to the transport. */ if (conn == NULL) { /* * Connect failed to server. Could be because of one * of several things. In some cases we don't want * the caller to retry immediately - delay before * returning to caller. */ switch (status) { case RDMA_TIMEDOUT: /* * Already timed out. No need to delay * some more. */ p->cku_err.re_status = RPC_TIMEDOUT; p->cku_err.re_errno = ETIMEDOUT; break; case RDMA_INTR: /* * Failed because of an signal. Very likely * the caller will not retry. */ p->cku_err.re_status = RPC_INTR; p->cku_err.re_errno = EINTR; break; default: /* * All other failures - server down or service * down or temporary resource failure. Delay before * returning to caller. 
*/ ticks = clnt_rdma_min_delay * drv_usectohz(1000000); p->cku_err.re_status = RPC_CANTCONNECT; p->cku_err.re_errno = EIO; if (h->cl_nosignal == TRUE) { delay(ticks); } else { if (delay_sig(ticks) == EINTR) { p->cku_err.re_status = RPC_INTR; p->cku_err.re_errno = EINTR; } } break; } return (p->cku_err.re_status); } if (p->cku_srcaddr.maxlen < conn->c_laddr.len) { if ((p->cku_srcaddr.maxlen != 0) && (p->cku_srcaddr.buf != NULL)) kmem_free(p->cku_srcaddr.buf, p->cku_srcaddr.maxlen); p->cku_srcaddr.buf = kmem_zalloc(conn->c_laddr.maxlen, KM_SLEEP); p->cku_srcaddr.maxlen = conn->c_laddr.maxlen; } p->cku_srcaddr.len = conn->c_laddr.len; bcopy(conn->c_laddr.buf, p->cku_srcaddr.buf, conn->c_laddr.len); clnt_check_credit(conn); status = CLNT_RDMA_FAIL; rpcsec_gss = gss_i_or_p = FALSE; if (IS_RPCSEC_GSS(h)) { rpcsec_gss = TRUE; if (rpc_gss_get_service_type(h->cl_auth) == rpc_gss_svc_integrity || rpc_gss_get_service_type(h->cl_auth) == rpc_gss_svc_privacy) gss_i_or_p = TRUE; } /* * Try a regular RDMA message if RPCSEC_GSS is not being used * or if RPCSEC_GSS is being used for authentication only. */ if (rpcsec_gss == FALSE || (rpcsec_gss == TRUE && gss_i_or_p == FALSE)) { /* * Grab a send buffer for the request. Try to * encode it to see if it fits. If not, then it * needs to be sent in a chunk. 
*/ rpcmsg.type = SEND_BUFFER; if (rdma_buf_alloc(conn, &rpcmsg)) { DTRACE_PROBE(krpc__e__clntrdma__callit_nobufs); goto done; } /* First try to encode into regular send buffer */ op = RDMA_MSG; call_xdrp = &callxdr; xdrrdma_create(call_xdrp, rpcmsg.addr, rpcmsg.len, rdma_minchunk, NULL, XDR_ENCODE, conn); status = clnt_compose_rpcmsg(h, procnum, &rpcmsg, call_xdrp, xdr_args, argsp); if (status != CLNT_RDMA_SUCCESS) { /* Clean up from previous encode attempt */ rdma_buf_free(conn, &rpcmsg); XDR_DESTROY(call_xdrp); } else { XDR_CONTROL(call_xdrp, XDR_RDMA_GET_CHUNK_LEN, &rcil); } } /* If the encode didn't work, then try a NOMSG */ if (status != CLNT_RDMA_SUCCESS) { msglen = CKU_HDRSIZE + BYTES_PER_XDR_UNIT + MAX_AUTH_BYTES + xdr_sizeof(xdr_args, argsp); msglen = calc_length(msglen); /* pick up the lengths for the reply buffer needed */ (void) xdrrdma_sizeof(xdr_args, argsp, 0, &rcil.rcil_len, &rcil.rcil_len_alt); /* * Construct a clist to describe the CHUNK_BUFFER * for the rpcmsg. */ cl_rpcmsg = clist_alloc(); cl_rpcmsg->c_len = msglen; cl_rpcmsg->rb_longbuf.type = RDMA_LONG_BUFFER; cl_rpcmsg->rb_longbuf.len = msglen; if (rdma_buf_alloc(conn, &cl_rpcmsg->rb_longbuf)) { clist_free(cl_rpcmsg); goto done; } cl_rpcmsg->w.c_saddr3 = cl_rpcmsg->rb_longbuf.addr; op = RDMA_NOMSG; call_xdrp = &callxdr; xdrrdma_create(call_xdrp, cl_rpcmsg->rb_longbuf.addr, cl_rpcmsg->rb_longbuf.len, 0, cl_rpcmsg, XDR_ENCODE, conn); status = clnt_compose_rpcmsg(h, procnum, &rpcmsg, call_xdrp, xdr_args, argsp); if (status != CLNT_RDMA_SUCCESS) { p->cku_err.re_status = RPC_CANTENCODEARGS; p->cku_err.re_errno = EIO; DTRACE_PROBE(krpc__e__clntrdma__callit__composemsg); goto done; } } /* * During the XDR_ENCODE we may have "allocated" an RDMA READ or * RDMA WRITE clist. * * First pull the RDMA READ chunk list from the XDR private * area to keep it handy. 
*/ XDR_CONTROL(call_xdrp, XDR_RDMA_GET_RLIST, &cl); if (gss_i_or_p) { long_reply_len = rcil.rcil_len + rcil.rcil_len_alt; long_reply_len += MAX_AUTH_BYTES; } else { long_reply_len = rcil.rcil_len; } /* * Update the chunk size information for the Long RPC msg. */ if (cl && op == RDMA_NOMSG) cl->c_len = p->cku_outsz; /* * Prepare the RDMA header. On success xdrs will hold the result * of xdrmem_create() for a SEND_BUFFER. */ status = clnt_compose_rdma_header(conn, h, &clmsg, &rdmahdr_o_xdrs, &op); if (status != CLNT_RDMA_SUCCESS) { p->cku_err.re_status = RPC_CANTSEND; p->cku_err.re_errno = EIO; RCSTAT_INCR(rcnomem); DTRACE_PROBE(krpc__e__clntrdma__callit__nobufs2); goto done; } /* * Now insert the RDMA READ list iff present */ status = clnt_setup_rlist(conn, rdmahdr_o_xdrs, call_xdrp); if (status != CLNT_RDMA_SUCCESS) { DTRACE_PROBE(krpc__e__clntrdma__callit__clistreg); rdma_buf_free(conn, &clmsg); p->cku_err.re_status = RPC_CANTSEND; p->cku_err.re_errno = EIO; goto done; } /* * Setup RDMA WRITE chunk list for nfs read operation * other operations will have a NULL which will result * as a NULL list in the XDR stream. */ status = clnt_setup_wlist(conn, rdmahdr_o_xdrs, call_xdrp, &rndup); if (status != CLNT_RDMA_SUCCESS) { rdma_buf_free(conn, &clmsg); p->cku_err.re_status = RPC_CANTSEND; p->cku_err.re_errno = EIO; goto done; } /* * If NULL call and RPCSEC_GSS, provide a chunk such that * large responses can flow back to the client. * If RPCSEC_GSS with integrity or privacy is in use, get chunk. */ if ((procnum == 0 && rpcsec_gss == TRUE) || (rpcsec_gss == TRUE && gss_i_or_p == TRUE)) long_reply_len += 1024; status = clnt_setup_long_reply(conn, &cl_long_reply, long_reply_len); if (status != CLNT_RDMA_SUCCESS) { rdma_buf_free(conn, &clmsg); p->cku_err.re_status = RPC_CANTSEND; p->cku_err.re_errno = EIO; goto done; } /* * XDR encode the RDMA_REPLY write chunk */ seg_array_len = (cl_long_reply ? 
1 : 0); (void) xdr_encode_reply_wchunk(rdmahdr_o_xdrs, cl_long_reply, seg_array_len); /* * Construct a clist in "sendlist" that represents what we * will push over the wire. * * Start with the RDMA header and clist (if any) */ clist_add(&cl_sendlist, 0, XDR_GETPOS(rdmahdr_o_xdrs), &clmsg.handle, clmsg.addr, NULL, NULL); /* * Put the RPC call message in sendlist if small RPC */ if (op == RDMA_MSG) { clist_add(&cl_sendlist, 0, p->cku_outsz, &rpcmsg.handle, rpcmsg.addr, NULL, NULL); } else { /* Long RPC already in chunk list */ RCSTAT_INCR(rclongrpcs); } /* * Set up a reply buffer ready for the reply */ status = rdma_clnt_postrecv(conn, p->cku_xid); if (status != RDMA_SUCCESS) { rdma_buf_free(conn, &clmsg); p->cku_err.re_status = RPC_CANTSEND; p->cku_err.re_errno = EIO; goto done; } /* * sync the memory for dma */ if (cl != NULL) { status = clist_syncmem(conn, cl, CLIST_REG_SOURCE); if (status != RDMA_SUCCESS) { (void) rdma_clnt_postrecv_remove(conn, p->cku_xid); rdma_buf_free(conn, &clmsg); p->cku_err.re_status = RPC_CANTSEND; p->cku_err.re_errno = EIO; goto done; } } /* * Send the RDMA Header and RPC call message to the server */ status = RDMA_SEND(conn, cl_sendlist, p->cku_xid); if (status != RDMA_SUCCESS) { (void) rdma_clnt_postrecv_remove(conn, p->cku_xid); p->cku_err.re_status = RPC_CANTSEND; p->cku_err.re_errno = EIO; goto done; } /* * RDMA plugin now owns the send msg buffers. * Clear them out and don't free them. 
*/ clmsg.addr = NULL; if (rpcmsg.type == SEND_BUFFER) rpcmsg.addr = NULL; /* * Recv rpc reply */ status = RDMA_RECV(conn, &cl_recvlist, p->cku_xid); /* * Now check recv status */ if (status != 0) { if (status == RDMA_INTR) { p->cku_err.re_status = RPC_INTR; p->cku_err.re_errno = EINTR; RCSTAT_INCR(rcintrs); } else if (status == RPC_TIMEDOUT) { p->cku_err.re_status = RPC_TIMEDOUT; p->cku_err.re_errno = ETIMEDOUT; RCSTAT_INCR(rctimeouts); } else { p->cku_err.re_status = RPC_CANTRECV; p->cku_err.re_errno = EIO; } goto done; } /* * Process the reply message. * * First the chunk list (if any) */ rdmahdr_i_xdrs = &(p->cku_inxdr); xdrmem_create(rdmahdr_i_xdrs, (caddr_t)(uintptr_t)cl_recvlist->w.c_saddr3, cl_recvlist->c_len, XDR_DECODE); /* * Treat xid as opaque (xid is the first entity * in the rpc rdma message). * Skip xid and set the xdr position accordingly. */ XDR_SETPOS(rdmahdr_i_xdrs, sizeof (uint32_t)); (void) xdr_u_int(rdmahdr_i_xdrs, &vers); (void) xdr_u_int(rdmahdr_i_xdrs, &rdma_credit); (void) xdr_u_int(rdmahdr_i_xdrs, &op); (void) xdr_do_clist(rdmahdr_i_xdrs, &cl); clnt_update_credit(conn, rdma_credit); wlist_exists_reply = FALSE; if (! xdr_decode_wlist(rdmahdr_i_xdrs, &cl_rpcreply_wlist, &wlist_exists_reply)) { DTRACE_PROBE(krpc__e__clntrdma__callit__wlist_decode); p->cku_err.re_status = RPC_CANTDECODERES; p->cku_err.re_errno = EIO; goto done; } /* * The server shouldn't have sent a RDMA_SEND that * the client needs to RDMA_WRITE a reply back to * the server. So silently ignoring what the * server returns in the rdma_reply section of the * header. 
*/ (void) xdr_decode_reply_wchunk(rdmahdr_i_xdrs, &cl_rdma_reply); off = xdr_getpos(rdmahdr_i_xdrs); clnt_decode_long_reply(conn, cl_long_reply, cl_rdma_reply, &replyxdr, &reply_xdrp, cl, cl_recvlist, op, off); if (reply_xdrp == NULL) goto done; if (wlist_exists_reply) { XDR_CONTROL(reply_xdrp, XDR_RDMA_SET_WLIST, cl_rpcreply_wlist); } reply_msg.rm_direction = REPLY; reply_msg.rm_reply.rp_stat = MSG_ACCEPTED; reply_msg.acpted_rply.ar_stat = SUCCESS; reply_msg.acpted_rply.ar_verf = _null_auth; /* * xdr_results will be done in AUTH_UNWRAP. */ reply_msg.acpted_rply.ar_results.where = NULL; reply_msg.acpted_rply.ar_results.proc = xdr_void; /* * Decode and validate the response. */ if (xdr_replymsg(reply_xdrp, &reply_msg)) { enum clnt_stat re_status; _seterr_reply(&reply_msg, &(p->cku_err)); re_status = p->cku_err.re_status; if (re_status == RPC_SUCCESS) { /* * Reply is good, check auth. */ if (!AUTH_VALIDATE(h->cl_auth, &reply_msg.acpted_rply.ar_verf)) { p->cku_err.re_status = RPC_AUTHERROR; p->cku_err.re_why = AUTH_INVALIDRESP; RCSTAT_INCR(rcbadverfs); DTRACE_PROBE( krpc__e__clntrdma__callit__authvalidate); } else if (!AUTH_UNWRAP(h->cl_auth, reply_xdrp, xdr_results, resultsp)) { p->cku_err.re_status = RPC_CANTDECODERES; p->cku_err.re_errno = EIO; DTRACE_PROBE( krpc__e__clntrdma__callit__authunwrap); } } else { /* set errno in case we can't recover */ if (re_status != RPC_VERSMISMATCH && re_status != RPC_AUTHERROR && re_status != RPC_PROGVERSMISMATCH) p->cku_err.re_errno = EIO; if (re_status == RPC_AUTHERROR) { if ((refresh_attempt > 0) && AUTH_REFRESH(h->cl_auth, &reply_msg, p->cku_cred)) { refresh_attempt--; try_call_again = 1; goto done; } try_call_again = 0; /* * We have used the client handle to * do an AUTH_REFRESH and the RPC status may * be set to RPC_SUCCESS; Let's make sure to * set it to RPC_AUTHERROR. 
*/ p->cku_err.re_status = RPC_AUTHERROR; /* * Map recoverable and unrecoverable * authentication errors to appropriate * errno */ switch (p->cku_err.re_why) { case AUTH_BADCRED: case AUTH_BADVERF: case AUTH_INVALIDRESP: case AUTH_TOOWEAK: case AUTH_FAILED: case RPCSEC_GSS_NOCRED: case RPCSEC_GSS_FAILED: p->cku_err.re_errno = EACCES; break; case AUTH_REJECTEDCRED: case AUTH_REJECTEDVERF: default: p->cku_err.re_errno = EIO; break; } } DTRACE_PROBE1(krpc__e__clntrdma__callit__rpcfailed, int, p->cku_err.re_why); } } else {
/* * Callup to the mountd to get access information in the kernel. */ static bool_t nfsauth_retrieve(struct exportinfo *exi, char *req_netid, int flavor, struct netbuf *addr, int *access, uid_t clnt_uid, gid_t clnt_gid, uint_t clnt_gids_cnt, const gid_t *clnt_gids, uid_t *srv_uid, gid_t *srv_gid, uint_t *srv_gids_cnt, gid_t **srv_gids) { varg_t varg = {0}; nfsauth_res_t res = {0}; XDR xdrs; size_t absz; caddr_t abuf; int last = 0; door_arg_t da; door_info_t di; door_handle_t dh; uint_t ntries = 0; /* * No entry in the cache for this client/flavor * so we need to call the nfsauth service in the * mount daemon. */ varg.vers = V_PROTO; varg.arg_u.arg.cmd = NFSAUTH_ACCESS; varg.arg_u.arg.areq.req_client.n_len = addr->len; varg.arg_u.arg.areq.req_client.n_bytes = addr->buf; varg.arg_u.arg.areq.req_netid = req_netid; varg.arg_u.arg.areq.req_path = exi->exi_export.ex_path; varg.arg_u.arg.areq.req_flavor = flavor; varg.arg_u.arg.areq.req_clnt_uid = clnt_uid; varg.arg_u.arg.areq.req_clnt_gid = clnt_gid; varg.arg_u.arg.areq.req_clnt_gids.len = clnt_gids_cnt; varg.arg_u.arg.areq.req_clnt_gids.val = (gid_t *)clnt_gids; DTRACE_PROBE1(nfsserv__func__nfsauth__varg, varg_t *, &varg); /* * Setup the XDR stream for encoding the arguments. Notice that * in addition to the args having variable fields (req_netid and * req_path), the argument data structure is itself versioned, * so we need to make sure we can size the arguments buffer * appropriately to encode all the args. If we can't get sizing * info _or_ properly encode the arguments, there's really no * point in continuting, so we fail the request. */ if ((absz = xdr_sizeof(xdr_varg, &varg)) == 0) { *access = NFSAUTH_DENIED; return (FALSE); } abuf = (caddr_t)kmem_alloc(absz, KM_SLEEP); xdrmem_create(&xdrs, abuf, absz, XDR_ENCODE); if (!xdr_varg(&xdrs, &varg)) { XDR_DESTROY(&xdrs); goto fail; } XDR_DESTROY(&xdrs); /* * Prepare the door arguments * * We don't know the size of the message the daemon * will pass back to us. 
By setting rbuf to NULL, * we force the door code to allocate a buf of the * appropriate size. We must set rsize > 0, however, * else the door code acts as if no response was * expected and doesn't pass the data to us. */ da.data_ptr = (char *)abuf; da.data_size = absz; da.desc_ptr = NULL; da.desc_num = 0; da.rbuf = NULL; da.rsize = 1; retry: mutex_enter(&mountd_lock); dh = mountd_dh; if (dh != NULL) door_ki_hold(dh); mutex_exit(&mountd_lock); if (dh == NULL) { /* * The rendezvous point has not been established yet! * This could mean that either mountd(1m) has not yet * been started or that _this_ routine nuked the door * handle after receiving an EINTR for a REVOKED door. * * Returning NFSAUTH_DROP will cause the NFS client * to retransmit the request, so let's try to be more * rescillient and attempt for ntries before we bail. */ if (++ntries % NFSAUTH_DR_TRYCNT) { delay(hz); goto retry; } kmem_free(abuf, absz); sys_log("nfsauth: mountd has not established door"); *access = NFSAUTH_DROP; return (FALSE); } ntries = 0; /* * Now that we've got what we need, place the call. */ switch (door_ki_upcall_limited(dh, &da, NULL, SIZE_MAX, 0)) { case 0: /* Success */ door_ki_rele(dh); if (da.data_ptr == NULL && da.data_size == 0) { /* * The door_return that contained the data * failed! We're here because of the 2nd * door_return (w/o data) such that we can * get control of the thread (and exit * gracefully). */ DTRACE_PROBE1(nfsserv__func__nfsauth__door__nil, door_arg_t *, &da); goto fail; } break; case EAGAIN: /* * Server out of resources; back off for a bit */ door_ki_rele(dh); delay(hz); goto retry; /* NOTREACHED */ case EINTR: if (!door_ki_info(dh, &di)) { door_ki_rele(dh); if (di.di_attributes & DOOR_REVOKED) { /* * The server barfed and revoked * the (existing) door on us; we * want to wait to give smf(5) a * chance to restart mountd(1m) * and establish a new door handle. 
*/ mutex_enter(&mountd_lock); if (dh == mountd_dh) { door_ki_rele(mountd_dh); mountd_dh = NULL; } mutex_exit(&mountd_lock); delay(hz); goto retry; } /* * If the door was _not_ revoked on us, * then more than likely we took an INTR, * so we need to fail the operation. */ goto fail; } /* * The only failure that can occur from getting * the door info is EINVAL, so we let the code * below handle it. */ /* FALLTHROUGH */ case EBADF: case EINVAL: default: /* * If we have a stale door handle, give smf a last * chance to start it by sleeping for a little bit. * If we're still hosed, we'll fail the call. * * Since we're going to reacquire the door handle * upon the retry, we opt to sleep for a bit and * _not_ to clear mountd_dh. If mountd restarted * and was able to set mountd_dh, we should see * the new instance; if not, we won't get caught * up in the retry/DELAY loop. */ door_ki_rele(dh); if (!last) { delay(hz); last++; goto retry; } sys_log("nfsauth: stale mountd door handle"); goto fail; }
/*ARGSUSED*/
static void
smbd_opipe_dispatch(void *cookie, char *argp, size_t arg_size,
    door_desc_t *dd, uint_t n_desc)
{
	char buf[SMB_OPIPE_DOOR_BUFSIZE];
	smb_doorhdr_t hdr;
	size_t hdr_size;
	uint8_t *data;
	uint32_t datalen;

	/*
	 * Door server procedure for the opipe (over-the-door named pipe)
	 * service.  Decodes the request header from argp, performs the
	 * requested pipe operation and returns an encoded header (plus,
	 * for reads, the pipe data) via smbd_door_return(), which does
	 * not return to the caller.
	 */
	smbd_door_enter(&smbd_opipe_sdh);

	/* Reject all traffic while the daemon is not fully online. */
	if (!smbd_online())
		smbd_door_return(&smbd_opipe_sdh, NULL, 0, NULL, 0);

	bzero(&hdr, sizeof (smb_doorhdr_t));
	hdr_size = xdr_sizeof(smb_doorhdr_xdr, &hdr);

	/* Sanity-check the caller and the argument buffer. */
	if ((cookie != &smbd_opipe_cookie) || (argp == NULL) ||
	    (arg_size < hdr_size)) {
		smbd_door_return(&smbd_opipe_sdh, NULL, 0, NULL, 0);
	}

	if (smb_doorhdr_decode(&hdr, (uint8_t *)argp, hdr_size) == -1)
		smbd_door_return(&smbd_opipe_sdh, NULL, 0, NULL, 0);

	if ((hdr.dh_magic != SMB_OPIPE_HDR_MAGIC) || (hdr.dh_fid == 0))
		smbd_door_return(&smbd_opipe_sdh, NULL, 0, NULL, 0);

	/*
	 * Clamp the client-supplied data length.  The SMB_OPIPE_READ
	 * path stores up to dh_datalen bytes at buf + hdr_size, so the
	 * limit must leave room for the encoded header; clamping to the
	 * full SMB_OPIPE_DOOR_BUFSIZE (as was done previously) would let
	 * ndr_pipe_read() overrun buf by up to hdr_size bytes.
	 */
	if (hdr.dh_datalen > SMB_OPIPE_DOOR_BUFSIZE - hdr_size)
		hdr.dh_datalen = SMB_OPIPE_DOOR_BUFSIZE - hdr_size;

	/*
	 * Inbound payload (if any) immediately follows the header in the
	 * caller's buffer.  NOTE(review): dh_datalen is not checked
	 * against arg_size - hdr_size here, so a short argp with a large
	 * dh_datalen could be over-read by the write/open paths — confirm
	 * the door framework guarantees arg_size covers the payload.
	 */
	data = (uint8_t *)argp + hdr_size;
	datalen = hdr.dh_datalen;

	switch (hdr.dh_op) {
	case SMB_OPIPE_OPEN:
		hdr.dh_door_rc = ndr_pipe_open(hdr.dh_fid, data, datalen);

		hdr.dh_datalen = 0;
		hdr.dh_resid = 0;
		datalen = hdr_size;
		break;

	case SMB_OPIPE_CLOSE:
		hdr.dh_door_rc = ndr_pipe_close(hdr.dh_fid);

		hdr.dh_datalen = 0;
		hdr.dh_resid = 0;
		datalen = hdr_size;
		break;

	case SMB_OPIPE_READ:
		/* Pipe data is returned in buf, after the header. */
		data = (uint8_t *)buf + hdr_size;
		datalen = hdr.dh_datalen;

		hdr.dh_door_rc = ndr_pipe_read(hdr.dh_fid, data, &datalen,
		    &hdr.dh_resid);

		hdr.dh_datalen = datalen;
		datalen += hdr_size;
		break;

	case SMB_OPIPE_WRITE:
		hdr.dh_door_rc = ndr_pipe_write(hdr.dh_fid, data, datalen);

		hdr.dh_datalen = 0;
		hdr.dh_resid = 0;
		datalen = hdr_size;
		break;

	case SMB_OPIPE_EXEC:
		hdr.dh_door_rc = smbd_opipe_exec_async(hdr.dh_fid);

		hdr.dh_datalen = 0;
		hdr.dh_resid = 0;
		datalen = hdr_size;
		break;

	default:
		smbd_door_return(&smbd_opipe_sdh, NULL, 0, NULL, 0);
		break;
	}

	/* Encode the (possibly updated) header and send the reply. */
	(void) smb_doorhdr_encode(&hdr, (uint8_t *)buf, hdr_size);
	smbd_door_return(&smbd_opipe_sdh, buf, datalen, NULL, 0);
}
/*
 * Add an entry to the update log.  The layout of the update log looks like:
 *
 * header log -> [ update header -> xdr(kdb_incr_update_t) ], ...
 *
 * On success the entry is serialized into the ulog slot for the next
 * serial number, the entry is synced to disk, and the ulog header
 * (kdb_num / kdb_last_* / kdb_first_*) is updated and synced.
 * Returns 0 on success, KRB5_LOG_ERROR on a NULL update, KRB5_LOG_CONV
 * on an XDR encoding failure, or an error from ulog_resize()/
 * ulog_sync_update().
 */
krb5_error_code
ulog_add_update(krb5_context context, kdb_incr_update_t *upd)
{
    XDR xdrs;
    kdbe_time_t ktime;
    kdb_ent_header_t *indx_log;
    unsigned int i, recsize;
    unsigned long upd_size;
    krb5_error_code retval;
    kdb_sno_t cur_sno;
    kdb_log_context *log_ctx;
    kdb_hlog_t *ulog = NULL;
    uint32_t ulogentries;
    int ulogfd;

    INIT_ULOG(context);
    ulogentries = log_ctx->ulogentries;
    ulogfd = log_ctx->ulogfd;

    if (upd == NULL)
        return KRB5_LOG_ERROR;

    time_current(&ktime);

    /* Grow the per-entry block size if this update won't fit. */
    upd_size = xdr_sizeof((xdrproc_t)xdr_kdb_incr_update_t, upd);

    recsize = sizeof(kdb_ent_header_t) + upd_size;

    if (recsize > ulog->kdb_block) {
        retval = ulog_resize(ulog, ulogentries, ulogfd, recsize);
        if (retval)
            return retval;
    }

    cur_sno = ulog->kdb_last_sno;

    /*
     * If we need to, wrap our sno around to 1.  A slaves will do a full resync
     * since its sno will be out of range of the ulog (or in extreme cases,
     * its timestamp won't match).
     */
    if (cur_sno == (kdb_sno_t)-1)
        cur_sno = 1;
    else
        cur_sno++;

    /* Squirrel this away for finish_update() to index. */
    upd->kdb_entry_sno = cur_sno;

    i = (cur_sno - 1) % ulogentries;

    indx_log = (kdb_ent_header_t *)INDEX(ulog, i);

    memset(indx_log, 0, ulog->kdb_block);
    indx_log->kdb_umagic = KDB_ULOG_MAGIC;
    indx_log->kdb_entry_size = upd_size;
    indx_log->kdb_entry_sno = cur_sno;
    indx_log->kdb_time = upd->kdb_time = ktime;
    indx_log->kdb_commit = upd->kdb_commit = FALSE;

    /* Mark the header unstable until the entry and header are synced. */
    ulog->kdb_state = KDB_UNSTABLE;

    xdrmem_create(&xdrs, (char *)indx_log->entry_data,
                  indx_log->kdb_entry_size, XDR_ENCODE);
    if (!xdr_kdb_incr_update_t(&xdrs, upd)) {
        /* Destroy the stream on failure too (no-op for xdrmem, but
         * keeps the create/destroy pairing consistent). */
        xdr_destroy(&xdrs);
        return KRB5_LOG_CONV;
    }
    xdr_destroy(&xdrs);

    retval = ulog_sync_update(ulog, indx_log);
    if (retval)
        return retval;

    if (ulog->kdb_num < ulogentries)
        ulog->kdb_num++;

    ulog->kdb_last_sno = cur_sno;
    ulog->kdb_last_time = ktime;

    if (cur_sno > ulogentries) {
        /* Once we've circled, kdb_first_sno is the sno of the next entry. */
        i = upd->kdb_entry_sno % ulogentries;
        indx_log = (kdb_ent_header_t *)INDEX(ulog, i);
        ulog->kdb_first_sno = indx_log->kdb_entry_sno;
        ulog->kdb_first_time = indx_log->kdb_time;
    } else if (cur_sno == 1) {
        /* This is the first update, or we wrapped. */
        ulog->kdb_first_sno = 1;
        ulog->kdb_first_time = indx_log->kdb_time;
    }

    ulog_sync_header(ulog);
    return 0;
}