/**
 * @brief Allocate a duplicate request cache
 *
 * @param[in] dtype Type of DRC to allocate (e.g., TCP, by enum drc_type)
 *
 * @return the DRC if successfully allocated, else NULL.
 */
static inline drc_t *alloc_tcp_drc(enum drc_type dtype)
{
	drc_t *drc = pool_alloc(tcp_drc_pool, NULL);
	int ix, code __attribute__ ((unused)) = 0;

	if (unlikely(!drc)) {
		LogCrit(COMPONENT_DUPREQ, "alloc TCP DRC failed");
		goto out;
	}

	drc->type = dtype;	/* DRC_TCP_V3 or DRC_TCP_V4 */
	drc->refcnt = 0;
	drc->retwnd = 0;
	drc->d_u.tcp.recycle_time = 0;
	drc->maxsize = nfs_param.core_param.drc.tcp.size;
	drc->cachesz = nfs_param.core_param.drc.tcp.cachesz;
	drc->npart = nfs_param.core_param.drc.tcp.npart;
	drc->hiwat = nfs_param.core_param.drc.udp.hiwat;

	PTHREAD_MUTEX_init(&drc->mtx, NULL);

	/* init dict */
	code = rbtx_init(&drc->xt, dupreq_tcp_cmpf, drc->npart,
			 RBT_X_FLAG_ALLOC | RBT_X_FLAG_CACHE_WT);
	assert(!code);

	/* completed requests */
	TAILQ_INIT(&drc->dupreq_q);

	/* recycling DRC */
	TAILQ_INIT_ENTRY(drc, d_u.tcp.recycle_q);

	/* init "cache" partition */
	for (ix = 0; ix < drc->npart; ++ix) {
		struct rbtree_x_part *xp = &(drc->xt.tree[ix]);

		drc->xt.cachesz = drc->cachesz;
		xp->cache = gsh_calloc(drc->cachesz,
				       sizeof(struct opr_rbtree_node *));
		if (unlikely(!xp->cache)) {
			LogCrit(COMPONENT_DUPREQ,
				"TCP DRC hash partition allocation failed (ix=%d)",
				ix);
			drc->cachesz = 0;
			break;
		}
	}

 out:
	return drc;
}
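
/*
 * Usage sketch (illustration only, not part of the original source): how a
 * caller might obtain a fresh TCP DRC and take the first reference.  The
 * surrounding lookup and recycling logic of the real caller is omitted.
 */
static inline drc_t *example_get_fresh_tcp_drc(enum drc_type dtype)
{
	drc_t *drc = alloc_tcp_drc(dtype);

	if (unlikely(!drc))
		return NULL;	/* pool allocation failed */

	/* The caller owns the initial reference; the DRC is not yet
	 * visible to any other thread, so no lock is needed. */
	++(drc->refcnt);
	return drc;
}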
/* Allocate and fill in group_data structure */
static struct group_data *uid2grp_allocate_by_name(
		const struct gsh_buffdesc *name)
{
	struct passwd p;
	struct passwd *pp;
	char *namebuff = alloca(name->len + 1);
	struct group_data *gdata = NULL;
	char *buff;
	long buff_size;

	memcpy(namebuff, name->addr, name->len);
	*(namebuff + name->len) = '\0';

	buff_size = sysconf(_SC_GETPW_R_SIZE_MAX);
	if (buff_size == -1) {
		LogMajor(COMPONENT_IDMAPPER, "sysconf failure: %d", errno);
		return NULL;
	}

	buff = alloca(buff_size);
	if ((getpwnam_r(namebuff, &p, buff, buff_size, &pp) != 0)
	    || (pp == NULL)) {
		LogEvent(COMPONENT_IDMAPPER, "getpwnam_r %s failed", namebuff);
		return gdata;
	}

	gdata = gsh_malloc(sizeof(struct group_data) + strlen(p.pw_name));
	if (gdata == NULL) {
		LogEvent(COMPONENT_IDMAPPER, "failed to allocate group data");
		return gdata;
	}

	gdata->uname.len = strlen(p.pw_name);
	gdata->uname.addr = (char *)gdata + sizeof(struct group_data);
	memcpy(gdata->uname.addr, p.pw_name, gdata->uname.len);
	gdata->uid = p.pw_uid;
	gdata->gid = p.pw_gid;

	if (!my_getgrouplist_alloc(p.pw_name, p.pw_gid, gdata)) {
		gsh_free(gdata);
		return NULL;
	}

	PTHREAD_MUTEX_init(&gdata->lock, NULL);
	gdata->epoch = time(NULL);
	gdata->refcount = 0;

	return gdata;
}
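
/*
 * Usage sketch (illustration only, not part of the original source): wrap a
 * NUL-terminated user name in a gsh_buffdesc and allocate its group_data.
 * Real callers additionally insert the result into the uid2grp cache and
 * manage its refcount.
 */
static struct group_data * __attribute__ ((unused))
example_lookup_groups(const char *user)
{
	struct gsh_buffdesc name = {
		.addr = (char *)user,	/* cast away const for the descriptor */
		.len = strlen(user)
	};

	return uid2grp_allocate_by_name(&name);
}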
int nfs4_op_create_session(struct nfs_argop4 *op, compound_data_t *data,
			   struct nfs_resop4 *resp)
{
	/* Result of looking up the clientid in the confirmed ID table */
	nfs_client_id_t *conf = NULL;	/* XXX these are not good names */
	/* Result of looking up the clientid in the unconfirmed ID table */
	nfs_client_id_t *unconf = NULL;
	/* The found clientid (either one of the preceding) */
	nfs_client_id_t *found = NULL;
	/* The found client record */
	nfs_client_record_t *client_record;
	/* The created session */
	nfs41_session_t *nfs41_session = NULL;
	/* Client supplied clientid */
	clientid4 clientid = 0;
	/* The client address as a string, for gratuitous logging */
	const char *str_client_addr = "(unknown)";
	/* The client name, for gratuitous logging */
	char str_client[CLIENTNAME_BUFSIZE];
	/* Display buffer for client name */
	struct display_buffer dspbuf_client = {
		sizeof(str_client), str_client, str_client};
	/* The clientid4 broken down into fields */
	char str_clientid4[DISPLAY_CLIENTID_SIZE];
	/* Display buffer for clientid4 */
	struct display_buffer dspbuf_clientid4 = {
		sizeof(str_clientid4), str_clientid4, str_clientid4};
	/* Return code from clientid calls */
	int i, rc = 0;
	/* Component for logging */
	log_components_t component = COMPONENT_CLIENTID;
	/* Abbreviated alias for arguments */
	CREATE_SESSION4args * const arg_CREATE_SESSION4 =
	    &op->nfs_argop4_u.opcreate_session;
	/* Abbreviated alias for response */
	CREATE_SESSION4res * const res_CREATE_SESSION4 =
	    &resp->nfs_resop4_u.opcreate_session;
	/* Abbreviated alias for successful response */
	CREATE_SESSION4resok * const res_CREATE_SESSION4ok =
	    &res_CREATE_SESSION4->CREATE_SESSION4res_u.csr_resok4;

	/* Make sure str_client is always printable even
	 * if log level changes midstream.
	 */
	display_printf(&dspbuf_client, "(unknown)");
	display_reset_buffer(&dspbuf_client);

	if (op_ctx->client != NULL)
		str_client_addr = op_ctx->client->hostaddr_str;

	if (isDebug(COMPONENT_SESSIONS))
		component = COMPONENT_SESSIONS;

	resp->resop = NFS4_OP_CREATE_SESSION;
	res_CREATE_SESSION4->csr_status = NFS4_OK;
	clientid = arg_CREATE_SESSION4->csa_clientid;

	display_clientid(&dspbuf_clientid4, clientid);

	if (data->minorversion == 0)
		return res_CREATE_SESSION4->csr_status = NFS4ERR_INVAL;

	LogDebug(component,
		 "CREATE_SESSION client addr=%s clientid=%s -------------------",
		 str_client_addr, str_clientid4);

	/* First try to look up an unconfirmed record */
	rc = nfs_client_id_get_unconfirmed(clientid, &unconf);

	if (rc == CLIENT_ID_SUCCESS) {
		client_record = unconf->cid_client_record;
		found = unconf;
	} else {
		rc = nfs_client_id_get_confirmed(clientid, &conf);
		if (rc != CLIENT_ID_SUCCESS) {
			/* No record whatsoever of this clientid */
			LogDebug(component, "%s clientid=%s",
				 clientid_error_to_str(rc), str_clientid4);

			if (rc == CLIENT_ID_EXPIRED)
				rc = CLIENT_ID_STALE;

			res_CREATE_SESSION4->csr_status =
			    clientid_error_to_nfsstat_no_expire(rc);

			return res_CREATE_SESSION4->csr_status;
		}
		client_record = conf->cid_client_record;
		found = conf;
	}

	PTHREAD_MUTEX_lock(&client_record->cr_mutex);

	inc_client_record_ref(client_record);

	if (isFullDebug(component)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_client_record(&dspbuf, client_record);

		LogFullDebug(component,
			     "Client Record %s cr_confirmed_rec=%p cr_unconfirmed_rec=%p",
			     str,
			     client_record->cr_confirmed_rec,
			     client_record->cr_unconfirmed_rec);
	}

	/* At this point one and only one of conf and unconf is
	 * non-NULL, and found also references the single clientid
	 * record that was found.
	 */

	LogDebug(component,
		 "CREATE_SESSION clientid=%s csa_sequence=%" PRIu32
		 " clientid_cs_seq=%" PRIu32 " data_oppos=%d data_use_drc=%d",
		 str_clientid4, arg_CREATE_SESSION4->csa_sequence,
		 found->cid_create_session_sequence, data->oppos,
		 data->use_drc);

	if (isFullDebug(component)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_client_id_rec(&dspbuf, found);
		LogFullDebug(component, "Found %s", str);
	}

	data->use_drc = false;

	if (data->oppos == 0) {
		/* Special case: the request arrived without a preceding
		 * OP_SEQUENCE.
		 */
		if ((arg_CREATE_SESSION4->csa_sequence + 1 ==
		     found->cid_create_session_sequence)
		    && (found->cid_create_session_slot.cache_used)) {
			data->use_drc = true;
			data->cached_res =
			    &found->cid_create_session_slot.cached_result;
			res_CREATE_SESSION4->csr_status = NFS4_OK;

			dec_client_id_ref(found);

			LogDebug(component,
				 "CREATE_SESSION replay=%p special case",
				 data->cached_res);

			goto out;
		} else if (arg_CREATE_SESSION4->csa_sequence !=
			   found->cid_create_session_sequence) {
			res_CREATE_SESSION4->csr_status =
			    NFS4ERR_SEQ_MISORDERED;

			dec_client_id_ref(found);

			LogDebug(component,
				 "CREATE_SESSION returning NFS4ERR_SEQ_MISORDERED");

			goto out;
		}
	}

	if (unconf != NULL) {
		/* First must match principal */
		if (!nfs_compare_clientcred(&unconf->cid_credential,
					    &data->credential)) {
			if (isDebug(component)) {
				char *unconfirmed_addr = "(unknown)";

				if (unconf->gsh_client != NULL)
					unconfirmed_addr =
					    unconf->gsh_client->hostaddr_str;

				LogDebug(component,
					 "Unconfirmed ClientId %s->'%s': Principals do not match... unconfirmed addr=%s Return NFS4ERR_CLID_INUSE",
					 str_clientid4, str_client_addr,
					 unconfirmed_addr);
			}

			dec_client_id_ref(unconf);
			res_CREATE_SESSION4->csr_status = NFS4ERR_CLID_INUSE;
			goto out;
		}
	}

	if (conf != NULL) {
		if (isDebug(component) && conf != NULL)
			display_clientid_name(&dspbuf_client, conf);

		/* First must match principal */
		if (!nfs_compare_clientcred(&conf->cid_credential,
					    &data->credential)) {
			if (isDebug(component)) {
				char *confirmed_addr = "(unknown)";

				if (conf->gsh_client != NULL)
					confirmed_addr =
					    conf->gsh_client->hostaddr_str;

				LogDebug(component,
					 "Confirmed ClientId %s->%s addr=%s: Principals do not match... confirmed addr=%s Return NFS4ERR_CLID_INUSE",
					 str_clientid4, str_client,
					 str_client_addr, confirmed_addr);
			}

			/* Release our reference to the confirmed clientid. */
			dec_client_id_ref(conf);
			res_CREATE_SESSION4->csr_status = NFS4ERR_CLID_INUSE;
			goto out;
		}

		/* In this case, the record was confirmed; proceed with
		 * CREATE_SESSION.
		 */
	}

	/* We don't need to do any further principal checks; we can't
	 * have a confirmed clientid record with a different principal
	 * than the unconfirmed record.
	 */

	/* At this point, we need to try and create the session before
	 * we modify the confirmed and/or unconfirmed clientid
	 * records.
	 */

	/* Check flags value (test CSESS15) */
	if (arg_CREATE_SESSION4->csa_flags &
	    ~(CREATE_SESSION4_FLAG_PERSIST |
	      CREATE_SESSION4_FLAG_CONN_BACK_CHAN |
	      CREATE_SESSION4_FLAG_CONN_RDMA)) {
		LogDebug(component,
			 "Invalid create session flags %" PRIu32,
			 arg_CREATE_SESSION4->csa_flags);
		dec_client_id_ref(found);
		res_CREATE_SESSION4->csr_status = NFS4ERR_INVAL;
		goto out;
	}

	/* Record session related information at the right place */
	nfs41_session = pool_alloc(nfs41_session_pool);

	if (nfs41_session == NULL) {
		LogCrit(component, "Could not allocate memory for a session");
		dec_client_id_ref(found);
		res_CREATE_SESSION4->csr_status = NFS4ERR_SERVERFAULT;
		goto out;
	}

	nfs41_session->clientid = clientid;
	nfs41_session->clientid_record = found;
	nfs41_session->refcount = 2;	/* sentinel ref + call path ref */
	nfs41_session->fore_channel_attrs =
	    arg_CREATE_SESSION4->csa_fore_chan_attrs;
	nfs41_session->back_channel_attrs =
	    arg_CREATE_SESSION4->csa_back_chan_attrs;
	nfs41_session->xprt = data->req->rq_xprt;
	nfs41_session->flags = false;
	nfs41_session->cb_program = 0;
	PTHREAD_MUTEX_init(&nfs41_session->cb_mutex, NULL);
	PTHREAD_COND_init(&nfs41_session->cb_cond, NULL);

	for (i = 0; i < NFS41_NB_SLOTS; i++)
		PTHREAD_MUTEX_init(&nfs41_session->slots[i].lock, NULL);

	/* Take a reference to the clientid record on behalf of the
	 * session.
	 */
	inc_client_id_ref(found);

	/* add to head of session list (encapsulate?) */
	PTHREAD_MUTEX_lock(&found->cid_mutex);
	glist_add(&found->cid_cb.v41.cb_session_list,
		  &nfs41_session->session_link);
	PTHREAD_MUTEX_unlock(&found->cid_mutex);

	/* Set ca_maxrequests */
	nfs41_session->fore_channel_attrs.ca_maxrequests = NFS41_NB_SLOTS;
	nfs41_Build_sessionid(&clientid, nfs41_session->session_id);

	res_CREATE_SESSION4ok->csr_sequence = arg_CREATE_SESSION4->csa_sequence;

	/* Return the input for want of something better (will
	 * change in later versions).
	 */
	res_CREATE_SESSION4ok->csr_fore_chan_attrs =
	    nfs41_session->fore_channel_attrs;
	res_CREATE_SESSION4ok->csr_back_chan_attrs =
	    nfs41_session->back_channel_attrs;
	res_CREATE_SESSION4ok->csr_flags = 0;

	memcpy(res_CREATE_SESSION4ok->csr_sessionid,
	       nfs41_session->session_id,
	       NFS4_SESSIONID_SIZE);

	/* Create Session replay cache */
	data->cached_res = &found->cid_create_session_slot.cached_result;
	found->cid_create_session_slot.cache_used = true;

	LogDebug(component, "CREATE_SESSION replay=%p", data->cached_res);

	if (!nfs41_Session_Set(nfs41_session)) {
		LogDebug(component, "Could not insert session into table");

		/* Release the session resources by dropping our reference
		 * and the sentinel reference.
		 */
		dec_session_ref(nfs41_session);
		dec_session_ref(nfs41_session);

		/* Decrement our reference to the clientid record */
		dec_client_id_ref(found);

		/* Maybe a more precise status would be better */
		res_CREATE_SESSION4->csr_status = NFS4ERR_SERVERFAULT;
		goto out;
	}

	/* Make sure we have a reference to the confirmed clientid
	 * record, if any.
	 */
	if (conf == NULL) {
		conf = client_record->cr_confirmed_rec;

		if (isDebug(component) && conf != NULL)
			display_clientid_name(&dspbuf_client, conf);

		/* Need a reference to the confirmed record for below */
		if (conf != NULL)
			inc_client_id_ref(conf);
	}

	if (conf != NULL && conf->cid_clientid != clientid) {
		/* Old confirmed record - need to expire it */
		if (isDebug(component)) {
			char str[LOG_BUFF_LEN];
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, conf);
			LogDebug(component, "Expiring %s", str);
		}

		/* Expire clientid and release our reference.
		 */
		nfs_client_id_expire(conf, false);
		dec_client_id_ref(conf);
		conf = NULL;
	}

	if (conf != NULL) {
		/* At this point we are updating the confirmed
		 * clientid. Update the confirmed record from the
		 * unconfirmed record.
		 */
		display_clientid(&dspbuf_clientid4, conf->cid_clientid);

		LogDebug(component,
			 "Updating clientid %s->%s cb_program=%u",
			 str_clientid4, str_client,
			 arg_CREATE_SESSION4->csa_cb_program);

		if (unconf != NULL) {
			/* unhash the unconfirmed clientid record */
			remove_unconfirmed_client_id(unconf);
			/* Release our reference to the unconfirmed entry */
			dec_client_id_ref(unconf);
		}

		if (isDebug(component)) {
			char str[LOG_BUFF_LEN];
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, conf);
			LogDebug(component, "Updated %s", str);
		}
	} else {
		/* This is a new clientid */
		if (isFullDebug(component)) {
			char str[LOG_BUFF_LEN];
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, unconf);
			LogFullDebug(component, "Confirming new %s", str);
		}

		rc = nfs_client_id_confirm(unconf, component);

		if (rc != CLIENT_ID_SUCCESS) {
			res_CREATE_SESSION4->csr_status =
			    clientid_error_to_nfsstat_no_expire(rc);

			/* Need to destroy the session */
			if (!nfs41_Session_Del(nfs41_session->session_id))
				LogDebug(component,
					 "Oops nfs41_Session_Del failed");

			/* Release our reference to the unconfirmed record */
			dec_client_id_ref(unconf);
			goto out;
		}

		nfs4_chk_clid(unconf);

		conf = unconf;
		unconf = NULL;

		if (isDebug(component)) {
			char str[LOG_BUFF_LEN];
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, conf);
			LogDebug(component, "Confirmed %s", str);
		}
	}

	conf->cid_create_session_sequence++;

	/* Bump the lease timer */
	conf->cid_last_renew = time(NULL);

	/* Release our reference to the confirmed record */
	dec_client_id_ref(conf);

	if (isFullDebug(component)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_client_record(&dspbuf, client_record);
		LogFullDebug(component,
			     "Client Record %s cr_confirmed_rec=%p cr_unconfirmed_rec=%p",
			     str,
			     client_record->cr_confirmed_rec,
			     client_record->cr_unconfirmed_rec);
	}

	/* Handle the creation of the back channel, if the client
	 * requested one.
	 */
	if (arg_CREATE_SESSION4->csa_flags &
	    CREATE_SESSION4_FLAG_CONN_BACK_CHAN) {
		nfs41_session->cb_program = arg_CREATE_SESSION4->csa_cb_program;
		if (nfs_rpc_create_chan_v41(
			nfs41_session,
			arg_CREATE_SESSION4->csa_sec_parms.csa_sec_parms_len,
			arg_CREATE_SESSION4->csa_sec_parms.csa_sec_parms_val)
		    == 0) {
			res_CREATE_SESSION4ok->csr_flags |=
			    CREATE_SESSION4_FLAG_CONN_BACK_CHAN;
		}
	}

	if (isDebug(component)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_session(&dspbuf, nfs41_session);

		LogDebug(component,
			 "success %s csa_flags 0x%X csr_flags 0x%X",
			 str, arg_CREATE_SESSION4->csa_flags,
			 res_CREATE_SESSION4ok->csr_flags);
	}

	/* Release our reference to the session */
	dec_session_ref(nfs41_session);

	/* Successful exit */
	res_CREATE_SESSION4->csr_status = NFS4_OK;

 out:
	PTHREAD_MUTEX_unlock(&client_record->cr_mutex);
	/* Release our reference to the client record and return */
	dec_client_record_ref(client_record);
	return res_CREATE_SESSION4->csr_status;
}
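
/*
 * Sketch (illustration only, not part of the original source) of the
 * special-case sequence check performed above when CREATE_SESSION arrives
 * without a preceding OP_SEQUENCE (data->oppos == 0): a csa_sequence exactly
 * one behind the stored per-clientid sequence is treated as a replay of the
 * cached result, any other mismatch is NFS4ERR_SEQ_MISORDERED.
 */
static nfsstat4 __attribute__ ((unused))
example_create_session_seq_check(const nfs_client_id_t *clid,
				 uint32_t csa_sequence)
{
	if (csa_sequence + 1 == clid->cid_create_session_sequence &&
	    clid->cid_create_session_slot.cache_used)
		return NFS4_OK;	/* replay: caller serves the cached result */

	if (csa_sequence != clid->cid_create_session_sequence)
		return NFS4ERR_SEQ_MISORDERED;

	return NFS4_OK;		/* in-order request, process normally */
}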
/**
 * @brief Get an NSM client
 *
 * @param[in] care        Care status
 * @param[in] xprt        RPC transport
 * @param[in] caller_name Caller name
 *
 * @return NSM client or NULL.
 */
state_nsm_client_t *get_nsm_client(care_t care, SVCXPRT *xprt,
				   char *caller_name)
{
	state_nsm_client_t key;
	state_nsm_client_t *pclient;
	char str[LOG_BUFF_LEN];
	struct display_buffer dspbuf = {sizeof(str), str, str};
	struct hash_latch latch;
	hash_error_t rc;
	struct gsh_buffdesc buffkey;
	struct gsh_buffdesc buffval;

	if (caller_name == NULL)
		return NULL;

	memset(&key, 0, sizeof(key));

	if (nfs_param.core_param.nsm_use_caller_name) {
		key.ssc_nlm_caller_name_len = strlen(caller_name);

		if (key.ssc_nlm_caller_name_len > LM_MAXSTRLEN)
			return NULL;

		key.ssc_nlm_caller_name = caller_name;
	} else if (op_ctx->client == NULL) {
		LogCrit(COMPONENT_STATE,
			"No gsh_client for caller_name %s", caller_name);
		return NULL;
	} else {
		key.ssc_nlm_caller_name = op_ctx->client->hostaddr_str;
		key.ssc_nlm_caller_name_len = strlen(key.ssc_nlm_caller_name);
		key.ssc_client = op_ctx->client;
	}

	if (isFullDebug(COMPONENT_STATE)) {
		display_nsm_client(&dspbuf, &key);
		LogFullDebug(COMPONENT_STATE, "Find {%s}", str);
	}

	buffkey.addr = &key;
	buffkey.len = sizeof(key);

	rc = hashtable_getlatch(ht_nsm_client, &buffkey, &buffval, true,
				&latch);

	/* If we found it, return it */
	if (rc == HASHTABLE_SUCCESS) {
		pclient = buffval.addr;

		/* Return the found NSM Client */
		if (isFullDebug(COMPONENT_STATE)) {
			display_nsm_client(&dspbuf, pclient);
			LogFullDebug(COMPONENT_STATE, "Found {%s}", str);
		}

		/* Increment refcount under hash latch.
		 * This prevents dec ref from removing this entry from hash
		 * if a race occurs.
		 */
		inc_nsm_client_ref(pclient);

		hashtable_releaselatched(ht_nsm_client, &latch);

		if (care == CARE_MONITOR && !nsm_monitor(pclient)) {
			dec_nsm_client_ref(pclient);
			pclient = NULL;
		}

		return pclient;
	}

	/* An error occurred, return NULL */
	if (rc != HASHTABLE_ERROR_NO_SUCH_KEY) {
		display_nsm_client(&dspbuf, &key);

		LogCrit(COMPONENT_STATE,
			"Error %s, could not find {%s}",
			hash_table_err_to_str(rc), str);

		return NULL;
	}

	/* Not found, but we don't care, return NULL */
	if (care == CARE_NOT) {
		if (isFullDebug(COMPONENT_STATE)) {
			display_nsm_client(&dspbuf, &key);
			LogFullDebug(COMPONENT_STATE, "Ignoring {%s}", str);
		}

		hashtable_releaselatched(ht_nsm_client, &latch);

		return NULL;
	}

	pclient = gsh_malloc(sizeof(*pclient));

	if (pclient == NULL) {
		display_nsm_client(&dspbuf, &key);

		LogCrit(COMPONENT_STATE, "No memory for {%s}", str);

		return NULL;
	}

	/* Copy everything over */
	memcpy(pclient, &key, sizeof(key));

	PTHREAD_MUTEX_init(&pclient->ssc_mutex, NULL);

	pclient->ssc_nlm_caller_name = gsh_strdup(key.ssc_nlm_caller_name);

	if (pclient->ssc_nlm_caller_name == NULL) {
		/* Discard the created client */
		PTHREAD_MUTEX_destroy(&pclient->ssc_mutex);
		free_nsm_client(pclient);
		return NULL;
	}

	glist_init(&pclient->ssc_lock_list);
	glist_init(&pclient->ssc_share_list);
	pclient->ssc_refcount = 1;

	if (op_ctx->client != NULL) {
		pclient->ssc_client = op_ctx->client;
		inc_gsh_client_refcount(op_ctx->client);
	}

	if (isFullDebug(COMPONENT_STATE)) {
		display_nsm_client(&dspbuf, pclient);
		LogFullDebug(COMPONENT_STATE, "New {%s}", str);
	}

	buffkey.addr = pclient;
	buffkey.len = sizeof(*pclient);
	buffval.addr = pclient;
	buffval.len = sizeof(*pclient);

	rc = hashtable_setlatched(ht_nsm_client, &buffkey, &buffval, &latch,
				  false, NULL, NULL);

	/* An error occurred, return NULL */
	if (rc != HASHTABLE_SUCCESS) {
		display_nsm_client(&dspbuf, pclient);

		LogCrit(COMPONENT_STATE, "Error %s, inserting {%s}",
			hash_table_err_to_str(rc), str);

		PTHREAD_MUTEX_destroy(&pclient->ssc_mutex);
		free_nsm_client(pclient);

		return NULL;
	}

	if (care != CARE_MONITOR || nsm_monitor(pclient))
		return pclient;

	/* Failed to monitor, release client reference
	 * and almost certainly remove it from the hash table.
	 */
	dec_nsm_client_ref(pclient);

	return NULL;
}
/**
 * @brief Adds a new state to a file
 *
 * This version of the function does not take the state lock on the
 * entry. It exists to allow callers to integrate state into a larger
 * operation.
 *
 * The caller may have already allocated a state, in which case *state
 * need not be NULL.
 *
 * @note state_lock MUST be held for write
 *
 * @param[in,out] obj         File to operate on
 * @param[in]     state_type  State to be defined
 * @param[in]     state_data  Data related to this state
 * @param[in]     owner_input Related open_owner
 * @param[in,out] state       The new state
 * @param[in]     refer       Reference to compound creating state
 *
 * @return Operation status
 */
state_status_t state_add_impl(struct fsal_obj_handle *obj,
			      enum state_type state_type,
			      union state_data *state_data,
			      state_owner_t *owner_input,
			      state_t **state,
			      struct state_refer *refer)
{
	state_t *pnew_state = *state;
	struct state_hdl *ostate = obj->state_hdl;
	char str[DISPLAY_STATEID_OTHER_SIZE];
	struct display_buffer dspbuf = {sizeof(str), str, str};
	bool str_valid = false;
	bool got_export_ref = false;
	state_status_t status = 0;
	bool mutex_init = false;
	struct state_t *openstate = NULL;
	struct gsh_buffdesc fh_desc;

	if (isFullDebug(COMPONENT_STATE) && pnew_state != NULL) {
		display_stateid(&dspbuf, pnew_state);
		LogFullDebug(COMPONENT_STATE, "pnew_state=%s", str);
		display_reset_buffer(&dspbuf);
	}

	/* Attempt to get a reference to the export. */
	if (!export_ready(op_ctx->ctx_export)) {
		/* If we could not get a reference, return stale.
		 * Release attr_lock
		 */
		LogDebug(COMPONENT_STATE, "Stale export");
		status = STATE_ESTALE;
		goto errout;
	}

	get_gsh_export_ref(op_ctx->ctx_export);

	got_export_ref = true;

	if (pnew_state == NULL) {
		if (state_type == STATE_TYPE_LOCK)
			openstate = state_data->lock.openstate;

		pnew_state = op_ctx->fsal_export->exp_ops.alloc_state(
							op_ctx->fsal_export,
							state_type,
							openstate);
	}

	PTHREAD_MUTEX_init(&pnew_state->state_mutex, NULL);

	mutex_init = true;

	/* Add the stateid.other, this will increment cid_stateid_counter */
	nfs4_BuildStateId_Other(owner_input->so_owner.so_nfs4_owner.
				so_clientrec, pnew_state->stateid_other);

	/* Set the type and data for this state */
	memcpy(&(pnew_state->state_data), state_data, sizeof(*state_data));
	pnew_state->state_type = state_type;
	pnew_state->state_seqid = 0;	/* will be incremented to 1 later */
	pnew_state->state_refcount = 2;	/* sentinel plus returned ref */

	if (refer)
		pnew_state->state_refer = *refer;

	if (isFullDebug(COMPONENT_STATE)) {
		display_stateid_other(&dspbuf, pnew_state->stateid_other);
		str_valid = true;

		LogFullDebug(COMPONENT_STATE,
			     "About to call nfs4_State_Set for %s", str);
	}

	glist_init(&pnew_state->state_list);

	/* We need to initialize state_owner, state_export, and state_obj now
	 * so that the state can be indexed by owner/entry. We don't insert
	 * into lists and take references yet since no one else can see this
	 * state until we are completely done, since we hold the state_lock.
	 * Might as well grab the export now also...
	 */
	pnew_state->state_export = op_ctx->ctx_export;
	pnew_state->state_owner = owner_input;
	fh_desc.addr = &pnew_state->state_obj.digest;
	fh_desc.len = sizeof(pnew_state->state_obj.digest);
	obj->obj_ops.handle_digest(obj, FSAL_DIGEST_NFSV4, &fh_desc);
	pnew_state->state_obj.len = fh_desc.len;

	/* Add the state to the related hashtable */
	if (!nfs4_State_Set(pnew_state)) {
		if (!str_valid)
			display_stateid_other(&dspbuf,
					      pnew_state->stateid_other);

		LogCrit(COMPONENT_STATE,
			"Can't create a new state id %s for the obj %p (F)",
			str, obj);

		/* Return STATE_MALLOC_ERROR since most likely the
		 * nfs4_State_Set failed to allocate memory.
		 */
		status = STATE_MALLOC_ERROR;
		goto errout;
	}

	/* Each of the following blocks takes the state_mutex and releases it
	 * because we always want state_mutex to be the last lock taken.
	 *
	 * NOTE: We don't have to worry about state_del/state_del_locked being
	 * called in the midst of things because the state_lock is held.
	 */

	/* Attach this to an export */
	PTHREAD_RWLOCK_wrlock(&op_ctx->ctx_export->lock);
	PTHREAD_MUTEX_lock(&pnew_state->state_mutex);
	glist_add_tail(&op_ctx->ctx_export->exp_state_list,
		       &pnew_state->state_export_list);
	PTHREAD_MUTEX_unlock(&pnew_state->state_mutex);
	PTHREAD_RWLOCK_unlock(&op_ctx->ctx_export->lock);

	/* Add state to list for file */
	PTHREAD_MUTEX_lock(&pnew_state->state_mutex);
	glist_add_tail(&ostate->file.list_of_states, &pnew_state->state_list);
	PTHREAD_MUTEX_unlock(&pnew_state->state_mutex);

	/* Add state to list for owner */
	PTHREAD_MUTEX_lock(&owner_input->so_mutex);
	PTHREAD_MUTEX_lock(&pnew_state->state_mutex);

	inc_state_owner_ref(owner_input);

	glist_add_tail(&owner_input->so_owner.so_nfs4_owner.so_state_list,
		       &pnew_state->state_owner_list);

	PTHREAD_MUTEX_unlock(&pnew_state->state_mutex);
	PTHREAD_MUTEX_unlock(&owner_input->so_mutex);

#ifdef DEBUG_SAL
	PTHREAD_MUTEX_lock(&all_state_v4_mutex);

	glist_add_tail(&state_v4_all, &pnew_state->state_list_all);

	PTHREAD_MUTEX_unlock(&all_state_v4_mutex);
#endif

	if (pnew_state->state_type == STATE_TYPE_DELEG &&
	    pnew_state->state_data.deleg.sd_type == OPEN_DELEGATE_WRITE)
		ostate->file.write_delegated = true;

	/* Copy the result */
	*state = pnew_state;

	if (str_valid)
		LogFullDebug(COMPONENT_STATE, "Add State: %p: %s",
			     pnew_state, str);

	/* Regular exit */
	status = STATE_SUCCESS;
	return status;

 errout:

	if (mutex_init)
		PTHREAD_MUTEX_destroy(&pnew_state->state_mutex);

	if (pnew_state != NULL) {
		/* Make sure the new state is closed (may have been passed in
		 * with file open).
		 */
		(void) obj->obj_ops.close2(obj, pnew_state);

		pnew_state->state_exp->exp_ops.free_state(pnew_state);
	}

	if (got_export_ref)
		put_gsh_export(op_ctx->ctx_export);

	*state = NULL;

	return status;
}				/* state_add */
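
/*
 * Sketch (illustration only, not part of the original source) of the
 * lock-taking wrapper implied by the "state_lock MUST be held for write"
 * note above.  The field path obj->state_hdl->state_lock is an assumption
 * made for illustration; the real wrapper is state_add().
 */
static state_status_t __attribute__ ((unused))
example_state_add(struct fsal_obj_handle *obj,
		  enum state_type state_type,
		  union state_data *state_data,
		  state_owner_t *owner_input,
		  state_t **state,
		  struct state_refer *refer)
{
	state_status_t status;

	/* assumed lock field, for illustration */
	PTHREAD_RWLOCK_wrlock(&obj->state_hdl->state_lock);
	status = state_add_impl(obj, state_type, state_data, owner_input,
				state, refer);
	PTHREAD_RWLOCK_unlock(&obj->state_hdl->state_lock);

	return status;
}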
void *_9p_socket_thread(void *Arg)
{
	long int tcp_sock = (long int)Arg;
	int rc = -1;
	struct pollfd fds[1];
	int fdcount = 1;
	static char my_name[MAXNAMLEN + 1];
	char strcaller[INET6_ADDRSTRLEN];
	request_data_t *req = NULL;
	int tag;
	unsigned long sequence = 0;
	unsigned int i = 0;
	char *_9pmsg = NULL;
	uint32_t msglen;

	struct _9p_conn _9p_conn;
	socklen_t addrpeerlen;

	int readlen = 0;
	int total_readlen = 0;

	snprintf(my_name, MAXNAMLEN, "9p_sock_mgr#fd=%ld", tcp_sock);
	SetNameFunction(my_name);

	/* Init the struct _9p_conn structure */
	memset(&_9p_conn, 0, sizeof(_9p_conn));
	PTHREAD_MUTEX_init(&_9p_conn.sock_lock, NULL);
	_9p_conn.trans_type = _9P_TCP;
	_9p_conn.trans_data.sockfd = tcp_sock;
	for (i = 0; i < FLUSH_BUCKETS; i++) {
		PTHREAD_MUTEX_init(&_9p_conn.flush_buckets[i].lock, NULL);
		glist_init(&_9p_conn.flush_buckets[i].list);
	}
	atomic_store_uint32_t(&_9p_conn.refcount, 0);

	/* Init the fids pointers array */
	memset(&_9p_conn.fids, 0, _9P_FID_PER_CONN * sizeof(struct _9p_fid *));

	/* Set initial msize.
	 * Client may request a lower value during TVERSION.
	 */
	_9p_conn.msize = _9p_param._9p_tcp_msize;

	if (gettimeofday(&_9p_conn.birth, NULL) == -1)
		LogFatal(COMPONENT_9P, "Cannot get connection's time of birth");

	addrpeerlen = sizeof(_9p_conn.addrpeer);
	rc = getpeername(tcp_sock, (struct sockaddr *)&_9p_conn.addrpeer,
			 &addrpeerlen);
	if (rc == -1) {
		LogMajor(COMPONENT_9P,
			 "Cannot get peername to tcp socket for 9p, error %d (%s)",
			 errno, strerror(errno));

		/* XXX */
		strncpy(strcaller, "(unresolved)", INET6_ADDRSTRLEN);
		strcaller[12] = '\0';
	} else {
		switch (_9p_conn.addrpeer.ss_family) {
		case AF_INET:
			inet_ntop(_9p_conn.addrpeer.ss_family,
				  &((struct sockaddr_in *)&_9p_conn.addrpeer)->
				  sin_addr, strcaller, INET6_ADDRSTRLEN);
			break;
		case AF_INET6:
			inet_ntop(_9p_conn.addrpeer.ss_family,
				  &((struct sockaddr_in6 *)&_9p_conn.addrpeer)->
				  sin6_addr, strcaller, INET6_ADDRSTRLEN);
			break;
		default:
			snprintf(strcaller, INET6_ADDRSTRLEN, "BAD ADDRESS");
			break;
		}

		LogEvent(COMPONENT_9P, "9p socket #%ld is connected to %s",
			 tcp_sock, strcaller);
	}
	_9p_conn.client = get_gsh_client(&_9p_conn.addrpeer, false);

	/* Set up the structure used by poll */
	memset((char *)fds, 0, sizeof(struct pollfd));
	fds[0].fd = tcp_sock;
	fds[0].events =
	    POLLIN | POLLPRI | POLLRDBAND | POLLRDNORM | POLLRDHUP | POLLHUP |
	    POLLERR | POLLNVAL;

	for (;;) {
		total_readlen = 0;	/* new message */
		rc = poll(fds, fdcount, -1);	/* timeout = -1: wait
						 * indefinitely for events */
		if (rc == -1) {
			/* An interruption is not an issue */
			if (errno == EINTR)
				continue;

			LogCrit(COMPONENT_9P,
				"Got error %u (%s) on fd %ld connect to %s while polling on socket",
				errno, strerror(errno), tcp_sock, strcaller);
		}

		if (fds[0].revents & POLLNVAL) {
			LogEvent(COMPONENT_9P,
				 "Client %s on socket %lu produced POLLNVAL",
				 strcaller, tcp_sock);
			goto end;
		}

		if (fds[0].revents & (POLLERR | POLLHUP | POLLRDHUP)) {
			LogEvent(COMPONENT_9P,
				 "Client %s on socket %lu has shut down and closed",
				 strcaller, tcp_sock);
			goto end;
		}

		if (!(fds[0].revents & (POLLIN | POLLRDNORM)))
			continue;

		/* Prepare to read the message */
		_9pmsg = gsh_malloc(_9p_conn.msize);
		if (_9pmsg == NULL) {
			LogCrit(COMPONENT_9P,
				"Could not allocate 9pmsg buffer for client %s on socket %lu",
				strcaller, tcp_sock);
			goto end;
		}

		/* An incoming 9P request: the msg has a 4 byte header
		 * showing the size of the msg including the header
		 */
		readlen = recv(fds[0].fd, _9pmsg, _9P_HDR_SIZE, MSG_WAITALL);
		if (readlen != _9P_HDR_SIZE)
			goto badmsg;

		msglen = *(uint32_t *) _9pmsg;
		if (msglen > _9p_conn.msize) {
			LogCrit(COMPONENT_9P,
				"Message size too big! got %u, max = %u",
				msglen, _9p_conn.msize);
			goto end;
		}

		LogFullDebug(COMPONENT_9P,
			     "Received 9P/TCP message of size %u from client %s on socket %lu",
			     msglen, strcaller, tcp_sock);

		total_readlen += readlen;
		while (total_readlen < msglen) {
			readlen = recv(fds[0].fd,
				       _9pmsg + total_readlen,
				       msglen - total_readlen, 0);

			if (readlen > 0) {
				total_readlen += readlen;
				continue;
			}
			if (readlen == 0 || (readlen < 0 && errno != EINTR))
				goto badmsg;
		}		/* while */

		server_stats_transport_done(_9p_conn.client,
					    total_readlen, 1, 0,
					    0, 0, 0);

		/* Message is good. */
		req = pool_alloc(request_pool, NULL);
		if (req == NULL) {
			LogCrit(COMPONENT_9P,
				"Could not allocate memory from request pool");
			goto end;
		}

		req->rtype = _9P_REQUEST;
		req->r_u._9p._9pmsg = _9pmsg;
		req->r_u._9p.pconn = &_9p_conn;

		/* Add this request to the request list,
		 * should it be flushed later.
		 */
		tag = *(u16 *) (_9pmsg + _9P_HDR_SIZE + _9P_TYPE_SIZE);
		_9p_AddFlushHook(&req->r_u._9p, tag, sequence++);
		LogFullDebug(COMPONENT_9P, "Request tag is %d\n", tag);

		/* Message was OK push it */
		DispatchWork9P(req);

		/* Not our buffer anymore */
		_9pmsg = NULL;
		continue;

badmsg:
		if (readlen == 0)
			LogEvent(COMPONENT_9P,
				 "Premature end for Client %s on socket %lu, total read = %u",
				 strcaller, tcp_sock, total_readlen);
		else if (readlen < 0)
			LogEvent(COMPONENT_9P,
				 "Read error client %s on socket %lu errno=%d, total read = %u",
				 strcaller, tcp_sock, errno, total_readlen);
		else
			LogEvent(COMPONENT_9P,
				 "Header too small! for client %s on socket %lu: readlen=%u expected=%u",
				 strcaller, tcp_sock, readlen, _9P_HDR_SIZE);

		/* Either way, we close the connection.
		 * It is not possible to survive once we get out of sync
		 * in the TCP stream with the client.
		 */
		break;		/* bail out */
	}			/* for( ;; ) */

end:
	LogEvent(COMPONENT_9P, "Closing connection on socket %lu", tcp_sock);
	close(tcp_sock);

	/* Free buffer if we encountered an error
	 * before we could give it to a worker
	 */
	if (_9pmsg)
		gsh_free(_9pmsg);

	while (atomic_fetch_uint32_t(&_9p_conn.refcount)) {
		LogEvent(COMPONENT_9P, "Waiting for workers to release pconn");
		sleep(1);
	}

	_9p_cleanup_fids(&_9p_conn);

	if (_9p_conn.client != NULL)
		put_gsh_client(_9p_conn.client);

	pthread_exit(NULL);
}				/* _9p_socket_thread */