/**
 * @brief Remove an entry from the ip_name cache.
 *
 * Deletes the entry keyed by the given address from the ip_name hash
 * table and frees the cached name record when one was present.
 *
 * @param ipaddr [IN] the ip address to be uncached
 *
 * @return IP_NAME_SUCCESS when an entry was removed,
 *         IP_NAME_NOT_FOUND when no entry matched the address.
 */
int nfs_ip_name_remove(sockaddr_t *ipaddr)
{
  hash_buffer_t key, removed;
  nfs_ip_name_t *cached;
  char addrstr[SOCK_NAME_MAX];

  sprint_sockaddr(ipaddr, addrstr, sizeof(addrstr));

  key.pdata = (caddr_t) ipaddr;
  key.len = sizeof(sockaddr_t);

  /* Guard clause: bail out early on a cache miss */
  if(HashTable_Del(ht_ip_name, &key, NULL, &removed) != HASHTABLE_SUCCESS)
    {
      LogFullDebug(COMPONENT_DISPATCH, "Cache remove miss for %s", addrstr);
      return IP_NAME_NOT_FOUND;
    }

  cached = (nfs_ip_name_t *) removed.pdata;

  LogFullDebug(COMPONENT_DISPATCH, "Cache remove hit for %s->%s",
               addrstr, cached->hostname);

  gsh_free(cached);
  return IP_NAME_SUCCESS;
}                               /* nfs_ip_name_remove */
/**
 * @brief Display the ip_stats key stored in a hash buffer.
 *
 * Formats the sockaddr key held in the buffer as a printable address.
 * Intended for use as a hash table 'key_to_str' callback.
 *
 * @param pbuff [IN]  buffer holding the sockaddr_t key
 * @param str   [OUT] output string
 *
 * @return number of characters written.
 */
int display_ip_stats_key(hash_buffer_t * pbuff, char *str)
{
  sprint_sockaddr((sockaddr_t *)(pbuff->pdata), str, HASHTABLE_DISPLAY_STRLEN);
  return strlen(str);
}
/**
 * @brief Try to get an entry from the ip_name cache.
 *
 * Looks up the hostname cached for the given address and copies it to
 * the caller's buffer.
 *
 * @param ipaddr   [IN]  the ip address requested
 * @param hostname [OUT] the hostname (buffer of at least MAXHOSTNAMELEN bytes
 *                       — assumed from the copy below; TODO confirm callers)
 *
 * @return IP_NAME_SUCCESS on a cache hit, IP_NAME_NOT_FOUND on a miss.
 */
int nfs_ip_name_get(sockaddr_t *ipaddr, char *hostname)
{
  hash_buffer_t buffkey;
  hash_buffer_t buffval;
  nfs_ip_name_t *nfs_ip_name;
  char ipstring[SOCK_NAME_MAX];

  sprint_sockaddr(ipaddr, ipstring, sizeof(ipstring));

  buffkey.pdata = (caddr_t) ipaddr;
  buffkey.len = sizeof(sockaddr_t);

  if(HashTable_Get(ht_ip_name, &buffkey, &buffval) == HASHTABLE_SUCCESS)
    {
      nfs_ip_name = (nfs_ip_name_t *) buffval.pdata;

      strncpy(hostname, nfs_ip_name->hostname, MAXHOSTNAMELEN);
      /* strncpy does not guarantee NUL-termination when the source is
       * MAXHOSTNAMELEN bytes or longer; terminate explicitly. */
      hostname[MAXHOSTNAMELEN - 1] = '\0';

      LogFullDebug(COMPONENT_DISPATCH,
                   "Cache get hit for %s->%s",
                   ipstring, nfs_ip_name->hostname);

      return IP_NAME_SUCCESS;
    }

  LogFullDebug(COMPONENT_DISPATCH, "Cache get miss for %s", ipstring);

  return IP_NAME_NOT_FOUND;
}                               /* nfs_ip_name_get */
/**
 * @brief Log a duplicate-request event with the client address.
 *
 * @param label   [IN] text describing the event
 * @param addr    [IN] client address the request came from
 * @param xid     [IN] RPC transaction id
 * @param rq_prog [IN] RPC program number
 */
void LogDupReq(const char *label, sockaddr_t *addr, long xid, u_long rq_prog)
{
  char namebuf[SOCK_NAME_MAX];

  sprint_sockaddr(addr, namebuf, sizeof(namebuf));

  /* rq_prog is unsigned (u_long): use %lu, not %ld — a mismatched
   * conversion specifier is undefined behavior per C99 7.19.6.1. */
  LogFullDebug(COMPONENT_DUPREQ,
               "%s addr=%s xid=%ld rq_prog=%lu",
               label, namebuf, xid, rq_prog);
}
/**
 * @brief Display the dupreq value stored in a hash buffer.
 *
 * Formats the duplicate-request entry held in the buffer. This function
 * is to be used as the 'val_to_str' field in the hashtable storing the
 * nfs duplicated requests.
 *
 * @param pbuff [IN]  buffer holding a dupreq_entry_t
 * @param str   [OUT] output string
 *
 * @return number of characters written.
 */
int display_req_val(hash_buffer_t * pbuff, char *str)
{
  dupreq_entry_t *pdupreq = (dupreq_entry_t *)(pbuff->pdata);
  char namebuf[SOCK_NAME_MAX];

  sprint_sockaddr(&pdupreq->addr, namebuf, sizeof(namebuf));

  /* Bug fix: the original call omitted the destination buffer, passing
   * the format string as sprintf's first argument. */
  return sprintf(str,
                 "addr=%s xid=%ld checksum=%d rq_prog=%lu rq_vers=%lu rq_proc=%lu",
                 namebuf, pdupreq->xid, pdupreq->checksum,
                 pdupreq->rq_prog, pdupreq->rq_vers, pdupreq->rq_proc);
}
/**
 * @brief Display the dupreq key stored in a hash buffer.
 *
 * Formats the duplicate-request key held in the buffer. This function
 * is to be used as the 'key_to_str' field in the hashtable storing the
 * nfs duplicated requests.
 *
 * @param pbuff [IN]  buffer holding a dupreq_key_t
 * @param str   [OUT] output string
 *
 * @return number of characters written.
 */
int display_req_key(hash_buffer_t * pbuff, char *str)
{
  dupreq_key_t *pdupkey = (dupreq_key_t *)(pbuff->pdata);
  char namebuf[SOCK_NAME_MAX];

  sprint_sockaddr(&pdupkey->addr, namebuf, sizeof(namebuf));

  /* Bug fix: the original call omitted the destination buffer, passing
   * the format string as sprintf's first argument. */
  return sprintf(str,
                 "addr=%s xid=%ld checksum=%d",
                 namebuf, pdupkey->xid, pdupkey->checksum);
}
/**
 * @brief Display a 9P state owner as a printable string.
 *
 * Writes the owner pointer, client address, proc id and refcount into
 * the caller's buffer; writes "<NULL>" when no owner is supplied.
 *
 * @param pkey [IN]  the owner to display (may be NULL)
 * @param str  [OUT] output string
 *
 * @return number of characters written.
 */
int display_9p_owner(state_owner_t *pkey, char *str)
{
  char *p = str;

  if(pkey == NULL)
    return sprintf(str, "<NULL>");

  p += sprintf(p, "STATE_LOCK_OWNER_9P %p", pkey);

  /* sprint_sockaddr's return value is used here as the number of
   * characters appended, matching the original code's convention. */
  p += sprint_sockaddr((sockaddr_t *)&(pkey->so_owner.so_9p_owner.client_addr),
                       p, SOCK_NAME_MAX);

  p += sprintf(p, " proc_id=%u", pkey->so_owner.so_9p_owner.proc_id);
  p += sprintf(p, " refcount=%d", atomic_fetch_int32_t(&pkey->so_refcount));

  return p - str;
}
/**
 * @brief Build an FSAL operation context from NLM block data.
 *
 * Resolves the export referenced by the NLM file handle embedded in the
 * block data, validates that NFSv3 access is permitted on it, and then
 * acquires FSAL credentials for the stored user/group identity.
 *
 * @param block_data   [IN]  NLM block data carrying the file handle,
 *                           host address and credential to use
 * @param fsal_context [OUT] FSAL context populated on success
 *
 * @return TRUE on success, FALSE when the handle/export is invalid or
 *         credentials could not be acquired.
 */
bool_t nlm_block_data_to_fsal_context(state_block_data_t * block_data,
                                      fsal_op_context_t * fsal_context)
{
  exportlist_t * pexport = NULL;
  short exportid;
  fsal_status_t fsal_status;
  state_nlm_block_data_t * nlm_block_data = &block_data->sbd_block_data.sbd_nlm_block_data;

  /* Get export ID from handle */
  exportid = nlm4_FhandleToExportId(&nlm_block_data->sbd_nlm_fh);

  /* Get export matching export ID; note the ordered checks: a negative
   * exportid means a malformed handle, a NULL lookup means an unknown
   * export, and the option test rejects exports without NFSv3 access. */
  if(exportid < 0 ||
     (pexport = nfs_Get_export_by_id(nfs_param.pexportlist, exportid)) == NULL ||
     (pexport->export_perms.options & EXPORT_OPTION_NFSV3) == 0)
    {
      /* Reject the request for authentication reason (incompatible file handle) */
      if(isInfo(COMPONENT_NLM))
        {
          char dumpfh[1024];
          char *reason;
          char addrbuf[SOCK_NAME_MAX];
          sprint_sockaddr(&nlm_block_data->sbd_nlm_hostaddr,
                          addrbuf, sizeof(addrbuf));
          /* Pick the message matching whichever check above failed */
          if(exportid < 0)
            reason = "has badly formed handle";
          else if(pexport == NULL)
            reason = "has invalid export";
          else
            reason = "V3 not allowed on this export";
          sprint_fhandle_nlm(dumpfh, &nlm_block_data->sbd_nlm_fh);
          LogMajor(COMPONENT_NLM,
                   "NLM4 granted lock from host %s %s, FH=%s",
                   addrbuf, reason, dumpfh);
        }
      return FALSE;
    }

  LogFullDebug(COMPONENT_NLM,
               "Found export entry for path=%s as exportid=%d",
               pexport->fullpath, pexport->id);

  /* Build the credentials from the identity saved in the block data */
  fsal_status = FSAL_GetClientContext(fsal_context,
                                      &pexport->FS_export_context,
                                      block_data->sbd_credential.user,
                                      block_data->sbd_credential.group,
                                      block_data->sbd_credential.alt_groups,
                                      block_data->sbd_credential.nbgroups);

  if(FSAL_IS_ERROR(fsal_status))
    {
      LogEvent(COMPONENT_NLM,
               "Could not get credentials for (uid=%d,gid=%d), fsal error=(%d,%d)",
               block_data->sbd_credential.user,
               block_data->sbd_credential.group,
               fsal_status.major, fsal_status.minor);
      return FALSE;
    }
  else
    LogDebug(COMPONENT_NLM,
             "FSAL Cred acquired for (uid=%d,gid=%d)",
             block_data->sbd_credential.user,
             block_data->sbd_credential.group);

  return TRUE;
}
/**
 * @brief Dump the IP Stats for each client to a file per client.
 *
 * Walks every client present in worker #0's ip_stats hash table,
 * aggregates that client's counters across all workers, and appends the
 * totals to a per-client file under path_stat.
 *
 * @param ht_ip_stats [IN] per-worker hash tables to be dumped
 * @param nb_worker   [IN] number of worker tables to aggregate
 * @param path_stat   [IN] pattern used to build path used for dumping stats
 *
 * @return nothing (void function).
 */
void nfs_ip_stats_dump(hash_table_t ** ht_ip_stats,
                       unsigned int nb_worker,
                       char *path_stat)
{
  struct rbt_node *it;
  struct rbt_head *tete_rbt;
  hash_data_t *pdata = NULL;
  unsigned int i = 0;
  unsigned int j = 0;
  unsigned int k = 0;
  nfs_ip_stats_t *pnfs_ip_stats[NB_MAX_WORKER_THREAD];
  nfs_ip_stats_t ip_stats_aggreg;
  /* enough to hold an IPv4 or IPv6 address as a string */
  char ipaddrbuf[40];
  char ifpathdump[MAXPATHLEN];
  sockaddr_t * ipaddr;
  time_t current_time;
  struct tm current_time_struct;
  char strdate[1024];
  FILE *flushipstat = NULL;

  /* Do nothing if configuration disables IP_Stats */
  if(nfs_param.core_param.dump_stats_per_client == 0)
    return;

  /* Compute the current time.
   * Encoding fix: the original text contained the mis-encoded sequence
   * "¤t_..." where "&current_..." was intended (an HTML-entity
   * mangling of "&curren"); restored here.
   * NOTE(review): localtime() is not thread-safe; presumably this dump
   * runs from a single stats thread — confirm, else use localtime_r. */
  current_time = time(NULL);
  memcpy(&current_time_struct, localtime(&current_time),
         sizeof(current_time_struct));
  snprintf(strdate, 1024, "%u, %.2d/%.2d/%.4d %.2d:%.2d:%.2d ",
           (unsigned int)current_time,
           current_time_struct.tm_mday,
           current_time_struct.tm_mon + 1,
           1900 + current_time_struct.tm_year,
           current_time_struct.tm_hour,
           current_time_struct.tm_min,
           current_time_struct.tm_sec);

  /* All clients are supposed to have call at least one time worker #0
   * we loop on every client in the HashTable */
  for(i = 0; i < ht_ip_stats[0]->parameter.index_size; i++)
    {
      tete_rbt = &((ht_ip_stats[0]->array_rbt)[i]);
      RBT_LOOP(tete_rbt, it)
      {
        pdata = (hash_data_t *) it->rbt_opaq;

        ipaddr = (sockaddr_t *) pdata->buffkey.pdata;

        sprint_sockaddr(ipaddr, ipaddrbuf, sizeof(ipaddrbuf));

        snprintf(ifpathdump, MAXPATHLEN, "%s/stats_nfs-%s",
                 path_stat, ipaddrbuf);

        if((flushipstat = fopen(ifpathdump, "a")) == NULL)
          return;

        /* Collect stats for each worker and aggregate them */
        memset(&ip_stats_aggreg, 0, sizeof(ip_stats_aggreg));
        for(j = 0; j < nb_worker; j++)
          {
            if(nfs_ip_stats_get(ht_ip_stats[j], ipaddr,
                                &pnfs_ip_stats[j]) != IP_STATS_SUCCESS)
              {
                fclose(flushipstat);
                return;
              }

            ip_stats_aggreg.nb_call += (pnfs_ip_stats[j])->nb_call;

            ip_stats_aggreg.nb_req_nfs2 += (pnfs_ip_stats[j])->nb_req_nfs2;
            ip_stats_aggreg.nb_req_nfs3 += (pnfs_ip_stats[j])->nb_req_nfs3;
            ip_stats_aggreg.nb_req_nfs4 += (pnfs_ip_stats[j])->nb_req_nfs4;
            ip_stats_aggreg.nb_req_mnt1 += (pnfs_ip_stats[j])->nb_req_mnt1;
            ip_stats_aggreg.nb_req_mnt3 += (pnfs_ip_stats[j])->nb_req_mnt3;

            for(k = 0; k < MNT_V1_NB_COMMAND; k++)
              ip_stats_aggreg.req_mnt1[k] += (pnfs_ip_stats[j])->req_mnt1[k];

            for(k = 0; k < MNT_V3_NB_COMMAND; k++)
              ip_stats_aggreg.req_mnt3[k] += (pnfs_ip_stats[j])->req_mnt3[k];

            for(k = 0; k < NFS_V2_NB_COMMAND; k++)
              ip_stats_aggreg.req_nfs2[k] += (pnfs_ip_stats[j])->req_nfs2[k];

            for(k = 0; k < NFS_V3_NB_COMMAND; k++)
              ip_stats_aggreg.req_nfs3[k] += (pnfs_ip_stats[j])->req_nfs3[k];
          }

        /* Write stats to file */
        fprintf(flushipstat, "NFS/MOUNT STATISTICS,%s;%u|%u,%u,%u,%u,%u\n",
                strdate,
                ip_stats_aggreg.nb_call,
                ip_stats_aggreg.nb_req_mnt1,
                ip_stats_aggreg.nb_req_mnt3,
                ip_stats_aggreg.nb_req_nfs2,
                ip_stats_aggreg.nb_req_nfs3,
                ip_stats_aggreg.nb_req_nfs4);

        fprintf(flushipstat, "MNT V1 REQUEST,%s;%u|",
                strdate, ip_stats_aggreg.nb_req_mnt1);
        for(k = 0; k < MNT_V1_NB_COMMAND - 1; k++)
          fprintf(flushipstat, "%u,", ip_stats_aggreg.req_mnt1[k]);
        fprintf(flushipstat, "%u\n",
                ip_stats_aggreg.req_mnt1[MNT_V1_NB_COMMAND - 1]);

        fprintf(flushipstat, "MNT V3 REQUEST,%s;%u|",
                strdate, ip_stats_aggreg.nb_req_mnt3);
        for(k = 0; k < MNT_V3_NB_COMMAND - 1; k++)
          fprintf(flushipstat, "%u,", ip_stats_aggreg.req_mnt3[k]);
        fprintf(flushipstat, "%u\n",
                ip_stats_aggreg.req_mnt3[MNT_V3_NB_COMMAND - 1]);

        fprintf(flushipstat, "NFS V2 REQUEST,%s;%u|",
                strdate, ip_stats_aggreg.nb_req_nfs2);
        for(k = 0; k < NFS_V2_NB_COMMAND - 1; k++)
          fprintf(flushipstat, "%u,", ip_stats_aggreg.req_nfs2[k]);
        fprintf(flushipstat, "%u\n",
                ip_stats_aggreg.req_nfs2[NFS_V2_NB_COMMAND - 1]);

        fprintf(flushipstat, "NFS V3 REQUEST,%s;%u|",
                strdate, ip_stats_aggreg.nb_req_nfs3);
        for(k = 0; k < NFS_V3_NB_COMMAND - 1; k++)
          fprintf(flushipstat, "%u,", ip_stats_aggreg.req_nfs3[k]);
        fprintf(flushipstat, "%u\n",
                ip_stats_aggreg.req_nfs3[NFS_V3_NB_COMMAND - 1]);

        fprintf(flushipstat, "END, ----- NO MORE STATS FOR THIS PASS ----\n");

        fflush(flushipstat);

        /* Check next client */
        RBT_INCREMENT(it);

        fclose(flushipstat);
      }
    }
}
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * For UDP requests the shared UDP DRC is referenced. For TCP requests
 * the per-connection DRC cached on the xprt (xp_u2) is reused when
 * present; otherwise a DRC is looked up in the recycle tree by the
 * hashed client address, or a new one is allocated and inserted.
 *
 * On return the chosen DRC carries an additional call-path reference.
 *
 * @param[in] req The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		/* All UDP traffic shares one global DRC */
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		/* Idempotent address, no need for lock;
		 * xprt will be valid as long as svc_req.
		 */
		drc = (drc_t *)req->rq_xprt->xp_u2;
		if (drc) {
			/* found, no danger of removal */
			LogFullDebug(COMPONENT_DUPREQ,
				     "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));
			drc_k.type = dtype;

			/* Since the drc can last longer than the xprt,
			 * copy the address. Read operation of constant data,
			 * no xprt lock required.
			 */
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			/* key the recycle tree by a hash of the client addr */
			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[SOCK_NAME_MAX];

				sprint_sockaddr(&drc_k.d_u.tcp.addr,
						str, sizeof(str));
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc = opr_containerof(ndrc, drc_t,
						       d_u.tcp.recycle_k);
				PTHREAD_MUTEX_lock(&tdrc->mtx);	/* LOCKED */
				/* pull it off the recycle queue if queued */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				PTHREAD_MUTEX_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;

			(void)nfs_dupreq_ref_drc(drc);	/* xprt ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ",
				     drc, drc->refcnt);

			/* Idempotent address, no need for lock;
			 * set once here, never changes.
			 * No other fields are modified.
			 * Assumes address stores are atomic.
			 */
			req->rq_xprt->xp_u2 = (void *)drc;
		}
		break;
	default:
		/* XXX error
		 * NOTE(review): drc stays NULL here, yet the code below
		 * dereferences it (ref + unlock) — looks like a NULL
		 * dereference on this path; confirm dtype can never take
		 * another value, or add an early return. */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	PTHREAD_MUTEX_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

 out:
	return drc;
}
/**
 * @brief Resolve an address to a hostname and cache the result.
 *
 * Performs a reverse DNS lookup (getnameinfo) on the given address,
 * inserts the result into the ip_name cache, and copies the hostname
 * to the caller's buffer. A warning is logged when the DNS query takes
 * one second or more.
 *
 * @param ipaddr   [IN]  the ip address to resolve and cache
 * @param hostname [OUT] the resolved hostname (buffer of at least
 *                       MAXHOSTNAMELEN bytes — assumed; TODO confirm callers)
 *
 * @return IP_NAME_SUCCESS on success,
 *         IP_NAME_INSERT_MALLOC_ERROR on allocation or insert failure,
 *         IP_NAME_NETDB_ERROR when the address cannot be resolved.
 */
int nfs_ip_name_add(sockaddr_t *ipaddr, char *hostname)
{
  hash_buffer_t buffkey;
  hash_buffer_t buffdata;
  nfs_ip_name_t *nfs_ip_name = NULL;
  sockaddr_t *pipaddr = NULL;
  struct timeval tv0, tv1, dur;
  int rc;
  char ipstring[SOCK_NAME_MAX];

  nfs_ip_name = gsh_malloc(sizeof(nfs_ip_name_t));

  if(nfs_ip_name == NULL)
    return IP_NAME_INSERT_MALLOC_ERROR;

  pipaddr = gsh_malloc(sizeof(sockaddr_t));
  if(pipaddr == NULL)
    {
      gsh_free(nfs_ip_name);
      return IP_NAME_INSERT_MALLOC_ERROR;
    }

  /* The key is a copy of the address: the cache can outlive the
   * caller's sockaddr, so it must own its own key storage. */
  memcpy(pipaddr, ipaddr, sizeof(sockaddr_t));
  buffkey.pdata = (caddr_t) pipaddr;
  buffkey.len = sizeof(sockaddr_t);

  /* Time the reverse lookup so slow DNS can be reported */
  gettimeofday(&tv0, NULL);
  rc = getnameinfo((struct sockaddr *)pipaddr, sizeof(sockaddr_t),
                   nfs_ip_name->hostname, sizeof(nfs_ip_name->hostname),
                   NULL, 0, 0);
  gettimeofday(&tv1, NULL);
  timersub(&tv1, &tv0, &dur);

  sprint_sockaddr(pipaddr, ipstring, sizeof(ipstring));

  /* display warning if DNS resolution took more that 1.0s */
  if (dur.tv_sec >= 1)
    {
      LogEvent(COMPONENT_DISPATCH,
               "Warning: long DNS query for %s: %u.%06u sec",
               ipstring,
               (unsigned int)dur.tv_sec,
               (unsigned int)dur.tv_usec);
    }

  /* Ask for the name to be cached */
  if(rc != 0)
    {
      LogEvent(COMPONENT_DISPATCH,
               "Cannot resolve address %s, error %s",
               ipstring, gai_strerror(rc));

      gsh_free(nfs_ip_name);
      gsh_free(pipaddr);
      return IP_NAME_NETDB_ERROR;
    }

  LogDebug(COMPONENT_DISPATCH,
           "Inserting %s->%s to addr cache",
           ipstring, nfs_ip_name->hostname);

  /* I build the data with the request pointer that should be in state 'IN USE' */
  nfs_ip_name->timestamp = time(NULL);

  buffdata.pdata = (caddr_t) nfs_ip_name;
  buffdata.len = sizeof(nfs_ip_name_t);

  if(HashTable_Set(ht_ip_name, &buffkey, &buffdata) != HASHTABLE_SUCCESS)
    {
      /* Bug fix: the entry was not inserted, so the key and value
       * buffers are still owned here — free them to avoid a leak. */
      gsh_free(nfs_ip_name);
      gsh_free(pipaddr);
      return IP_NAME_INSERT_MALLOC_ERROR;
    }

  /* Copy the value for the caller; strncpy does not guarantee
   * NUL-termination, so terminate explicitly. */
  strncpy(hostname, nfs_ip_name->hostname, MAXHOSTNAMELEN);
  hostname[MAXHOSTNAMELEN - 1] = '\0';

  return IP_NAME_SUCCESS;
}                               /* nfs_ip_name_add */
/**
 * @brief Find and reference a DRC to process the supplied svc_req.
 *
 * For UDP requests the shared UDP DRC is referenced. For TCP requests
 * the per-connection DRC cached in the xprt's private data (xu->drc) is
 * reused when present; otherwise a DRC is looked up in the recycle tree
 * by the hashed client address, or a new one is allocated and inserted.
 *
 * On return the chosen DRC carries an additional call-path reference.
 *
 * @param[in] req The svc_req being processed.
 *
 * @return The ref'd DRC if successfully located, else NULL.
 */
static /* inline */ drc_t *
nfs_dupreq_get_drc(struct svc_req *req)
{
	enum drc_type dtype = get_drc_type(req);
	gsh_xprt_private_t *xu = (gsh_xprt_private_t *) req->rq_xprt->xp_u1;
	drc_t *drc = NULL;
	bool drc_check_expired = false;

	switch (dtype) {
	case DRC_UDP_V234:
		/* All UDP traffic shares one global DRC */
		LogFullDebug(COMPONENT_DUPREQ, "ref shared UDP DRC");
		drc = &(drc_st->udp_drc);
		DRC_ST_LOCK();
		(void)nfs_dupreq_ref_drc(drc);
		DRC_ST_UNLOCK();
		goto out;
		break;
	case DRC_TCP_V4:
	case DRC_TCP_V3:
		/* xp_lock protects the xu->drc pointer */
		pthread_mutex_lock(&req->rq_xprt->xp_lock);
		if (xu->drc) {
			drc = xu->drc;
			LogFullDebug(COMPONENT_DUPREQ,
				     "ref DRC=%p for xprt=%p",
				     drc, req->rq_xprt);
			pthread_mutex_lock(&drc->mtx);	/* LOCKED */
		} else {
			drc_t drc_k;
			struct rbtree_x_part *t = NULL;
			struct opr_rbtree_node *ndrc = NULL;
			drc_t *tdrc = NULL;

			memset(&drc_k, 0, sizeof(drc_k));
			drc_k.type = dtype;
			/* the DRC can outlive the xprt, so its key is a
			 * copy of the client address */
			(void)copy_xprt_addr(&drc_k.d_u.tcp.addr, req->rq_xprt);

			/* key the recycle tree by a hash of the client addr */
			drc_k.d_u.tcp.hk =
			    CityHash64WithSeed((char *)&drc_k.d_u.tcp.addr,
					       sizeof(sockaddr_t), 911);
			{
				char str[512];

				sprint_sockaddr(&drc_k.d_u.tcp.addr, str, 512);
				LogFullDebug(COMPONENT_DUPREQ,
					     "get drc for addr: %s", str);
			}

			t = rbtx_partition_of_scalar(&drc_st->tcp_drc_recycle_t,
						     drc_k.d_u.tcp.hk);
			DRC_ST_LOCK();
			ndrc =
			    opr_rbtree_lookup(&t->t, &drc_k.d_u.tcp.recycle_k);
			if (ndrc) {
				/* reuse old DRC */
				tdrc = opr_containerof(ndrc, drc_t,
						       d_u.tcp.recycle_k);
				pthread_mutex_lock(&tdrc->mtx);	/* LOCKED */
				/* pull it off the recycle queue if queued */
				if (tdrc->flags & DRC_FLAG_RECYCLE) {
					TAILQ_REMOVE(&drc_st->tcp_drc_recycle_q,
						     tdrc, d_u.tcp.recycle_q);
					--(drc_st->tcp_drc_recycle_qlen);
					tdrc->flags &= ~DRC_FLAG_RECYCLE;
				}
				drc = tdrc;
				LogFullDebug(COMPONENT_DUPREQ,
					     "recycle TCP DRC=%p for xprt=%p",
					     tdrc, req->rq_xprt);
			}
			if (!drc) {
				drc = alloc_tcp_drc(dtype);
				LogFullDebug(COMPONENT_DUPREQ,
					     "alloc new TCP DRC=%p for xprt=%p",
					     drc, req->rq_xprt);
				/* assign addr */
				memcpy(&drc->d_u.tcp.addr, &drc_k.d_u.tcp.addr,
				       sizeof(sockaddr_t));
				/* assign already-computed hash */
				drc->d_u.tcp.hk = drc_k.d_u.tcp.hk;
				pthread_mutex_lock(&drc->mtx);	/* LOCKED */
				/* xprt ref */
				drc->refcnt = 1;
				/* insert dict */
				opr_rbtree_insert(&t->t,
						  &drc->d_u.tcp.recycle_k);
			}
			DRC_ST_UNLOCK();
			drc->d_u.tcp.recycle_time = 0;
			/* xprt drc */
			(void)nfs_dupreq_ref_drc(drc);	/* xu ref */

			/* try to expire unused DRCs somewhat in proportion to
			 * new connection arrivals */
			drc_check_expired = true;

			LogFullDebug(COMPONENT_DUPREQ,
				     "after ref drc %p refcnt==%u ",
				     drc, drc->refcnt);

			xu->drc = drc;
		}
		pthread_mutex_unlock(&req->rq_xprt->xp_lock);
		break;
	default:
		/* XXX error
		 * NOTE(review): drc stays NULL here, yet the code below
		 * dereferences it (ref + unlock) — looks like a NULL
		 * dereference on this path; confirm dtype can never take
		 * another value, or add an early return. */
		break;
	}

	/* call path ref */
	(void)nfs_dupreq_ref_drc(drc);
	pthread_mutex_unlock(&drc->mtx);

	if (drc_check_expired)
		drc_free_expired();

 out:
	return drc;
}
/**
 * @brief Run the RPCSEC_GSS context-establishment exchange for a request.
 *
 * Deserializes the client's GSS token, feeds it to
 * gss_accept_sec_context(), and on success records the negotiated
 * security parameters in the per-transport GSS data, signs the window
 * value as the verifier, and fills in the init result to return to the
 * client.
 *
 * @param rqst [IN]  the svc_req carrying the GSS init arguments
 * @param gr   [OUT] init result (major/minor status, context, window,
 *                   output token) to be returned to the client
 *
 * @return TRUE when the exchange step succeeded (complete or
 *         continue-needed), FALSE on any failure.
 */
static bool_t
Svcauth_gss_accept_sec_context(struct svc_req *rqst, struct rpc_gss_init_res *gr)
{
  struct svc_rpc_gss_data *gd;
  struct rpc_gss_cred *gc;
  gss_buffer_desc recv_tok, seqbuf;
  gss_OID mech;
  OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq;

  gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth);
  gc = (struct rpc_gss_cred *)rqst->rq_clntcred;
  memset(gr, 0, sizeof(*gr));

  /* Deserialize arguments. */
  memset(&recv_tok, 0, sizeof(recv_tok));

  if(!svc_getargs(rqst->rq_xprt, (xdrproc_t)xdr_rpc_gss_init_args,
                  (caddr_t) & recv_tok))
    return (FALSE);

  gr->gr_major = gss_accept_sec_context(&gr->gr_minor,
                                        &gd->ctx,
                                        svcauth_gss_creds,
                                        &recv_tok,
                                        GSS_C_NO_CHANNEL_BINDINGS,
                                        &gd->client_name,
                                        &mech, &gr->gr_token,
                                        &ret_flags, NULL, NULL);

  /* release the deserialized token now that GSS has consumed it */
  svc_freeargs(rqst->rq_xprt, (xdrproc_t)xdr_rpc_gss_init_args,
               (caddr_t) & recv_tok);

  /* Both COMPLETE and CONTINUE_NEEDED are acceptable outcomes of one
   * round of the exchange; anything else is an authentication failure. */
  if(gr->gr_major != GSS_S_COMPLETE && gr->gr_major != GSS_S_CONTINUE_NEEDED)
    {
      sockaddr_t addr;
      char ipstring[SOCK_NAME_MAX];
      copy_xprt_addr(&addr, rqst->rq_xprt);
      sprint_sockaddr(&addr, ipstring, sizeof(ipstring));

      LogWarn(COMPONENT_RPCSEC_GSS,
              "Bad authentication major=%u minor=%u addr=%s",
              gr->gr_major, gr->gr_minor, ipstring);
      gd->ctx = GSS_C_NO_CONTEXT;
      goto errout;
    }
  /*
   * ANDROS: krb5 mechglue returns ctx of size 8 - two pointers,
   * one to the mechanism oid, one to the internal_ctx_id
   */
  if((gr->gr_ctx.value = Mem_Alloc(sizeof(gss_union_ctx_id_desc))) == NULL)
    {
      LogCrit(COMPONENT_RPCSEC_GSS,
              "svcauth_gss_accept_context: out of memory");
      goto errout;
    }
  memcpy(gr->gr_ctx.value, gd->ctx, sizeof(gss_union_ctx_id_desc));
  gr->gr_ctx.length = sizeof(gss_union_ctx_id_desc);

  /* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version...  */
  gr->gr_win = sizeof(gd->seqmask) * 8;

  /* Save client info. */
  gd->sec.mech = mech;
  gd->sec.qop = GSS_C_QOP_DEFAULT;
  gd->sec.svc = gc->gc_svc;
  gd->seq = gc->gc_seq;
  gd->win = gr->gr_win;

  if(gr->gr_major == GSS_S_COMPLETE)
    {
#ifdef SPKM
      /* spkm3: no src_name (anonymous) */
      if(!g_OID_equal(gss_mech_spkm3, mech))
        {
#endif
          maj_stat = gss_display_name(&min_stat, gd->client_name,
                                      &gd->cname, &gd->sec.mech);
          LogFullDebug(COMPONENT_RPCSEC_GSS,
                       "cname.val: %s  cname.len: %d",
                       (char *)gd->cname.value, (int)gd->cname.length);
#ifdef SPKM
        }
#endif
      /* NOTE(review): this failure check has an empty body — a
       * gss_display_name error is silently ignored; confirm whether
       * that is intentional or a missing goto errout. */
      if(maj_stat != GSS_S_COMPLETE)
        {
        }
#ifdef HAVE_HEIMDAL
#else
      if(isFullDebug(COMPONENT_RPCSEC_GSS))
        {
          gss_buffer_desc mechname;

          gss_oid_to_str(&min_stat, mech, &mechname);

          gss_release_buffer(&min_stat, &mechname);
        }
#endif
      /* Sign the window value; the signature becomes the RPC verifier
       * returned to the client. */
      seq = htonl(gr->gr_win);
      seqbuf.value = &seq;
      seqbuf.length = sizeof(seq);

      gss_release_buffer(&min_stat, &gd->checksum);
      LogFullDebug(COMPONENT_RPCSEC_GSS,
                   "gss_sign in sec_accept_context");
      maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT,
                          &seqbuf, &gd->checksum);

      if(maj_stat != GSS_S_COMPLETE)
        {
          goto errout;
        }

      rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS;
      rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value;
      rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length;
    }
  return (TRUE);
 errout:
  gss_release_buffer(&min_stat, &gr->gr_token);
  return (FALSE);
}