/* One pool can be used for all FSAL_UP used for exports. */ void nfs_Init_FSAL_UP() { /* DEBUGGING */ LogFullDebug(COMPONENT_INIT, "FSAL_UP: Initializing FSAL UP data pool"); /* Allocation of the FSAL UP pool */ fsal_up_event_pool = pool_init("FSAL UP Data Pool", sizeof(fsal_up_event_t), pool_basic_substrate, NULL, NULL, NULL); if(fsal_up_event_pool == NULL) { LogFatal(COMPONENT_INIT, "Error while allocating FSAL UP event pool"); } init_glist(&fsal_up_process_queue); tcb_new(&fsal_up_process_tcb, "FSAL_UP Process Thread"); return; }
/**
 * nfs4_op_lock: Implements the NFS4_OP_LOCK operation.
 *
 * Acquires a byte-range lock on the file designated by the current
 * filehandle.  The lock may be requested by a brand-new lock owner
 * (derived from an open stateid) or by an existing lock owner (derived
 * from a lock stateid).  On success the new/updated lock stateid is
 * returned; on conflict NFS4ERR_DENIED is returned along with a
 * description of the conflicting lock.
 *
 * @param op    [IN]  operation arguments (LOCK4args, via arg_LOCK4 macro)
 * @param data  [IN]  compound request context (current FH, entry, ...)
 * @param resp  [OUT] operation results (LOCK4res, via res_LOCK4 macro)
 *
 * @return NFS4_OK on success, an NFS4ERR_* status otherwise.
 */
int nfs4_op_lock(struct nfs_argop4 *op, compound_data_t * data, struct nfs_resop4 *resp)
{
  char __attribute__ ((__unused__)) funcname[] = "nfs4_op_lock";

#ifndef _WITH_NFSV4_LOCKS
  /* Locks are not supported in this build */
  resp->resop = NFS4_OP_LOCK;
  res_LOCK4.status = NFS4ERR_LOCK_NOTSUPP;
  return res_LOCK4.status;
#else
  state_status_t            state_status;
  state_data_t              candidate_data;
  state_type_t              candidate_type;
  int                       rc = 0;
  seqid4                    seqid;
  state_t                 * plock_state;              /* state for the lock */
  state_t                 * pstate_open;              /* state for the open owner */
  state_owner_t           * plock_owner;
  state_owner_t           * popen_owner;
  state_owner_t           * presp_owner;              /* Owner to store response in */
  state_owner_t           * conflict_owner = NULL;
  state_nfs4_owner_name_t   owner_name;
  nfs_client_id_t           nfs_client_id;
  state_lock_desc_t         lock_desc, conflict_desc;
  state_blocking_t          blocking = STATE_NON_BLOCKING;
  const char              * tag = "LOCK";

  LogDebug(COMPONENT_NFS_V4_LOCK,
           "Entering NFS v4 LOCK handler -----------------------------------------------------");

  /* Initialize to sane starting values */
  resp->resop = NFS4_OP_LOCK;

  /* If there is no FH */
  if(nfs4_Is_Fh_Empty(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_NOFILEHANDLE;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Empty");
      return res_LOCK4.status;
    }

  /* If the filehandle is invalid */
  if(nfs4_Is_Fh_Invalid(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_BADHANDLE;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Invalid");
      return res_LOCK4.status;
    }

  /* Tests if the Filehandle is expired (for volatile filehandle) */
  if(nfs4_Is_Fh_Expired(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_FHEXPIRED;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Expired");
      return res_LOCK4.status;
    }

  /* Lock is done only on a file */
  if(data->current_filetype != REGULAR_FILE)
    {
      /* Type of the entry is not correct */
      switch (data->current_filetype)
        {
        case DIRECTORY:
          res_LOCK4.status = NFS4ERR_ISDIR;
          break;
        default:
          res_LOCK4.status = NFS4ERR_INVAL;
          break;
        }
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed wrong file type");
      return res_LOCK4.status;
    }

  /* Convert lock parameters to internal types.  READW/WRITEW are the
   * "wait" variants, so they are flagged as NFSv4 blocking requests. */
  switch(arg_LOCK4.locktype)
    {
    case READ_LT:
      lock_desc.sld_type = STATE_LOCK_R;
      blocking           = STATE_NON_BLOCKING;
      break;

    case WRITE_LT:
      lock_desc.sld_type = STATE_LOCK_W;
      blocking           = STATE_NON_BLOCKING;
      break;

    case READW_LT:
      lock_desc.sld_type = STATE_LOCK_R;
      blocking           = STATE_NFSV4_BLOCKING;
      break;

    case WRITEW_LT:
      lock_desc.sld_type = STATE_LOCK_W;
      blocking           = STATE_NFSV4_BLOCKING;
      break;
    }

  lock_desc.sld_offset = arg_LOCK4.offset;

  /* Internally a length of 0 means "to end of file" (EOF sentinel) */
  if(arg_LOCK4.length != STATE_LOCK_OFFSET_EOF)
    lock_desc.sld_length = arg_LOCK4.length;
  else
    lock_desc.sld_length = 0;

  if(arg_LOCK4.locker.new_lock_owner)
    {
      /* New lock owner, Find the open owner */
      tag = "LOCK (new owner)";

      /* Check stateid correctness and get pointer to state */
      if((rc = nfs4_Check_Stateid(&arg_LOCK4.locker.locker4_u.open_owner.open_stateid,
                                  data->current_entry,
                                  0LL,
                                  &pstate_open,
                                  data,
                                  STATEID_SPECIAL_FOR_LOCK,
                                  tag)) != NFS4_OK)
        {
          res_LOCK4.status = rc;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs4_Check_Stateid for open owner");
          return res_LOCK4.status;
        }

      popen_owner = pstate_open->state_powner;
      plock_state = NULL;
      plock_owner = NULL;
      presp_owner = popen_owner;
      /* For a new lock owner the seqid to check is the OPEN owner's seqid */
      seqid       = arg_LOCK4.locker.locker4_u.open_owner.open_seqid;

      LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
              "LOCK New lock owner from open owner",
              data->current_entry,
              data->pcontext,
              popen_owner,
              &lock_desc);

      /* Check if the clientid is known or not */
      if(nfs_client_id_get(arg_LOCK4.locker.locker4_u.open_owner.lock_owner.clientid,
                           &nfs_client_id) == CLIENT_ID_NOT_FOUND)
        {
          res_LOCK4.status = NFS4ERR_STALE_CLIENTID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs_client_id_get");
          return res_LOCK4.status;
        }

      /* The related stateid is already stored in pstate_open */

      /* An open state has been found. Check its type */
      if(pstate_open->state_type != STATE_TYPE_SHARE)
        {
          res_LOCK4.status = NFS4ERR_BAD_STATEID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed open stateid is not a SHARE");
          return res_LOCK4.status;
        }

      /* Lock seqid (seqid wanted for new lock) should be 0 (see newpynfs test LOCK8c) */
      if(arg_LOCK4.locker.locker4_u.open_owner.lock_seqid != 0)
        {
          res_LOCK4.status = NFS4ERR_BAD_SEQID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed new lock stateid is not 0");
          return res_LOCK4.status;
        }

      /* Is this lock_owner known ? Build its hash-table key. */
      convert_nfs4_lock_owner(&arg_LOCK4.locker.locker4_u.open_owner.lock_owner,
                              &owner_name);
    }
  else
    {
      /* Existing lock owner
       * Find the lock stateid
       * From that, get the open_owner
       */
      tag = "LOCK (existing owner)";

      /* There was code here before to handle all-0 stateid, but that
       * really doesn't apply - when we handle temporary locks for
       * I/O operations (which is where we will see all-0 or all-1
       * stateid, those will not come in through nfs4_op_lock.
       */

      /* Check stateid correctness and get pointer to state */
      if((rc = nfs4_Check_Stateid(&arg_LOCK4.locker.locker4_u.lock_owner.lock_stateid,
                                  data->current_entry,
                                  0LL,
                                  &plock_state,
                                  data,
                                  STATEID_SPECIAL_FOR_LOCK,
                                  tag)) != NFS4_OK)
        {
          res_LOCK4.status = rc;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs4_Check_Stateid for existing lock owner");
          return res_LOCK4.status;
        }

      /* A lock state has been found. Check its type */
      if(plock_state->state_type != STATE_TYPE_LOCK)
        {
          res_LOCK4.status = NFS4ERR_BAD_STATEID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed existing lock owner, state type is not LOCK");
          return res_LOCK4.status;
        }

      /* Get the old lockowner. We can do the following 'cast', in NFSv4
       * lock_owner4 and open_owner4 are different types but with the
       * same definition */
      plock_owner = plock_state->state_powner;
      popen_owner = plock_owner->so_owner.so_nfs4_owner.so_related_owner;
      pstate_open = plock_state->state_data.lock.popenstate;
      presp_owner = plock_owner;
      /* For an existing lock owner the seqid to check is the LOCK seqid */
      seqid       = arg_LOCK4.locker.locker4_u.lock_owner.lock_seqid;

      LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
              "LOCK Existing lock owner",
              data->current_entry,
              data->pcontext,
              plock_owner,
              &lock_desc);

#ifdef _CONFORM_TO_TEST_LOCK8c
      /* Check validity of the seqid */
      if(arg_LOCK4.locker.locker4_u.lock_owner.lock_seqid != 0)
        {
          res_LOCK4.status = NFS4ERR_BAD_SEQID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed existing lock owner, lock seqid != 0");
          return res_LOCK4.status;
        }
#endif
    }                           /* if( arg_LOCK4.locker.new_lock_owner ) */

  /* Check seqid (lock_seqid or open_seqid) */
  if(!Check_nfs4_seqid(presp_owner, seqid, op, data, resp, tag))
    {
      /* Response is all setup for us and LogDebug told what was wrong */
      return res_LOCK4.status;
    }

  /* Lock length should not be 0 */
  if(arg_LOCK4.length == 0LL)
    {
      res_LOCK4.status = NFS4ERR_INVAL;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed length == 0");

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  /* Check for range overflow.
   * Comparing beyond 2^64 is not possible int 64 bits precision,
   * but off+len > 2^64-1 is equivalent to len > 2^64-1 - off
   */
  if(lock_desc.sld_length > (STATE_LOCK_OFFSET_EOF - lock_desc.sld_offset))
    {
      res_LOCK4.status = NFS4ERR_INVAL;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed length overflow");

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  /* check if open state has correct access for type of lock.
   * Don't need to check for conflicting states since this open
   * state assures there are no conflicting states.
   */
  if(((arg_LOCK4.locktype == WRITE_LT || arg_LOCK4.locktype == WRITEW_LT) &&
      ((pstate_open->state_data.share.share_access & OPEN4_SHARE_ACCESS_WRITE) == 0)) ||
     ((arg_LOCK4.locktype == READ_LT || arg_LOCK4.locktype == READW_LT) &&
      ((pstate_open->state_data.share.share_access & OPEN4_SHARE_ACCESS_READ) == 0)))
    {
      /* The open state doesn't allow access based on the type of lock */
      LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
              "LOCK failed, SHARE doesn't allow access",
              data->current_entry,
              data->pcontext,
              plock_owner,
              &lock_desc);

      res_LOCK4.status = NFS4ERR_OPENMODE;

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  if(arg_LOCK4.locker.new_lock_owner)
    {
      /* A lock owner is always associated with a previously made open
       * which has itself a previously made stateid
       */

      /* Get reference to open owner */
      inc_state_owner_ref(popen_owner);

      if(nfs4_owner_Get_Pointer(&owner_name, &plock_owner))
        {
          /* Lock owner already exists; check lock_seqid if it's not 0 */
          if(!Check_nfs4_seqid(plock_owner,
                               arg_LOCK4.locker.locker4_u.open_owner.lock_seqid,
                               op,
                               data,
                               resp,
                               "LOCK (new owner but owner exists)"))
            {
              /* Response is all setup for us and LogDebug told what was wrong */
              return res_LOCK4.status;
            }
        }
      else
        {
          /* This lock owner is not known yet, allocate and set up a new one */
          plock_owner = create_nfs4_owner(data->pclient,
                                          &owner_name,
                                          STATE_LOCK_OWNER_NFSV4,
                                          popen_owner,
                                          0);

          if(plock_owner == NULL)
            {
              res_LOCK4.status = NFS4ERR_RESOURCE;

              LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
                      "LOCK failed to create new lock owner",
                      data->current_entry,
                      data->pcontext,
                      popen_owner,
                      &lock_desc);

              return res_LOCK4.status;
            }
        }

      /* Prepare state management structure */
      memset(&candidate_type, 0, sizeof(candidate_type));
      candidate_type = STATE_TYPE_LOCK;
      candidate_data.lock.popenstate = pstate_open;

      /* Add the lock state to the lock table */
      if(state_add(data->current_entry,
                   candidate_type,
                   &candidate_data,
                   plock_owner,
                   data->pclient,
                   data->pcontext,
                   &plock_state,
                   &state_status) != STATE_SUCCESS)
        {
          res_LOCK4.status = NFS4ERR_RESOURCE;

          LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
                  "LOCK failed to add new stateid",
                  data->current_entry,
                  data->pcontext,
                  plock_owner,
                  &lock_desc);

          dec_state_owner_ref(plock_owner, data->pclient);

          return res_LOCK4.status;
        }

      init_glist(&plock_state->state_data.lock.state_locklist);

      /* Add lock state to the list of lock states belonging to the open state */
      glist_add_tail(&pstate_open->state_data.share.share_lockstates,
                     &plock_state->state_data.lock.state_sharelist);
    }                           /* if( arg_LOCK4.locker.new_lock_owner ) */

  /* Now we have a lock owner and a stateid.
   * Go ahead and push lock into SAL (and FSAL).
   */
  if(state_lock(data->current_entry,
                data->pcontext,
                plock_owner,
                plock_state,
                blocking,
                NULL,     /* No block data for now */
                &lock_desc,
                &conflict_owner,
                &conflict_desc,
                data->pclient,
                &state_status) != STATE_SUCCESS)
    {
      if(state_status == STATE_LOCK_CONFLICT)
        {
          /* A conflicting lock from a different lock_owner, returns NFS4ERR_DENIED */
          Process_nfs4_conflict(&res_LOCK4.LOCK4res_u.denied,
                                conflict_owner,
                                &conflict_desc,
                                data->pclient);
        }

      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed with status %s",
               state_err_str(state_status));

      res_LOCK4.status = nfs4_Errno_state(state_status);

      /* Save the response in the lock or open owner */
      if(res_LOCK4.status != NFS4ERR_RESOURCE &&
         res_LOCK4.status != NFS4ERR_BAD_STATEID)
        Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      if(arg_LOCK4.locker.new_lock_owner)
        {
          /* Need to destroy lock owner and state */
          if(state_del(plock_state,
                       data->pclient,
                       &state_status) != STATE_SUCCESS)
            LogDebug(COMPONENT_NFS_V4_LOCK,
                     "state_del failed with status %s",
                     state_err_str(state_status));
        }

      return res_LOCK4.status;
    }

  res_LOCK4.status = NFS4_OK;

  /* Handle stateid/seqid for success */
  update_stateid(plock_state,
                 &res_LOCK4.LOCK4res_u.resok4.lock_stateid,
                 data,
                 tag);

  LogFullDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK state_seqid = %u, plock_state = %p",
               plock_state->state_seqid,
               plock_state);

  /* Save the response in the lock or open owner */
  Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

  LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
          "LOCK applied",
          data->current_entry,
          data->pcontext,
          plock_owner,
          &lock_desc);

  return res_LOCK4.status;
#endif
}                               /* nfs4_op_lock */
/**
 * create_nfs4_owner: allocate, initialize and register a new NFSv4
 * state owner (open owner or lock owner).
 *
 * The owner is inserted into the nfs4 owner hash table, linked onto the
 * clientid's open-owner or lock-owner list, and references are taken on
 * the related owner (if any) and the clientid record.
 *
 * @param pname         [IN] hashable owner name (copied)
 * @param pclientid     [IN] clientid record the owner belongs to
 * @param type          [IN] STATE_OPEN_OWNER_NFSV4 or STATE_LOCK_OWNER_NFSV4
 * @param related_owner [IN] open owner a lock owner derives from (may be NULL)
 * @param init_seqid    [IN] initial seqid for the owner
 *
 * @return the new owner (refcount held by hash table + clientid list),
 *         or NULL on allocation/registration failure.
 */
state_owner_t *create_nfs4_owner(state_nfs4_owner_name_t * pname,
                                 nfs_client_id_t         * pclientid,
                                 state_owner_type_t        type,
                                 state_owner_t           * related_owner,
                                 unsigned int              init_seqid)
{
  state_owner_t           * powner;
  state_nfs4_owner_name_t * powner_name;

  /* This owner is not known yet, allocate and set up a new one */
  powner = pool_alloc(state_owner_pool, NULL);

  if(powner == NULL)
    return NULL;

  powner_name = pool_alloc(state_nfs4_owner_name_pool, NULL);

  if(powner_name == NULL)
    {
      pool_free(state_owner_pool, powner);
      return NULL;
    }

  /* Copy the key so the hash table owns its own copy of the name */
  *powner_name = *pname;

  /* set up the content of the open_owner */
  memset(powner, 0, sizeof(*powner));

  powner->so_type                                 = type;
  powner->so_owner.so_nfs4_owner.so_seqid         = init_seqid;
  powner->so_owner.so_nfs4_owner.so_related_owner = related_owner;
  powner->so_owner.so_nfs4_owner.so_clientid      = pname->son_clientid;
  powner->so_owner.so_nfs4_owner.so_pclientid     = pclientid;
  powner->so_owner_len                            = pname->son_owner_len;
  powner->so_owner.so_nfs4_owner.so_resp.resop    = NFS4_OP_ILLEGAL;
  powner->so_owner.so_nfs4_owner.so_args.argop    = NFS4_OP_ILLEGAL;
  powner->so_refcount                             = 1;
#if 0
  /* WAITING FOR COMMUNITY FIX */
  /* setting lock owner confirmed */
  if(type == STATE_LOCK_OWNER_NFSV4)
    powner->so_owner.so_nfs4_owner.so_confirmed = 1;
#endif
  init_glist(&powner->so_lock_list);
  init_glist(&powner->so_owner.so_nfs4_owner.so_state_list);

  memcpy(powner->so_owner_val,
         pname->son_owner_val,
         pname->son_owner_len);

  powner->so_owner_val[powner->so_owner_len] = '\0';

  /* BUGFIX: pthread_mutex_init returns 0 on success and an error NUMBER
   * on failure (it does not return -1), so test for non-zero. */
  if(pthread_mutex_init(&powner->so_mutex, NULL) != 0)
    {
      pool_free(state_owner_pool, powner);
      pool_free(state_nfs4_owner_name_pool, powner_name);
      return NULL;
    }

  if(!nfs4_owner_Set(powner_name, powner))
    {
      /* BUGFIX: destroy the mutex we just initialized before freeing */
      pthread_mutex_destroy(&powner->so_mutex);
      pool_free(state_owner_pool, powner);
      pool_free(state_nfs4_owner_name_pool, powner_name);
      return NULL;
    }

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      DisplayOwner(powner, str);
      LogFullDebug(COMPONENT_STATE,
                   "New Owner %s", str);
    }

  /* Increment refcount on related owner */
  if(related_owner != NULL)
    inc_state_owner_ref(related_owner);

  P(pclientid->cid_mutex);

  if(type == STATE_OPEN_OWNER_NFSV4)
    {
      /* If open owner, add to clientid open owner list */
      powner->so_refcount++;
      glist_add_tail(&pclientid->cid_openowners,
                     &powner->so_owner.so_nfs4_owner.so_perclient);
    }
  else if(type == STATE_LOCK_OWNER_NFSV4)
    {
      /* If lock owner, add to clientid lock owner list */
      powner->so_refcount++;
      glist_add_tail(&pclientid->cid_lockowners,
                     &powner->so_owner.so_nfs4_owner.so_perclient);
    }

  /* Increment reference count for clientid record */
  inc_client_id_ref(pclientid);

  V(pclientid->cid_mutex);

  return powner;
}
/**
 * get_nsm_client: look up (or create) an NSM client record.
 *
 * Looks the client up in ht_nsm_client under a hash latch; if absent
 * and @care requires it, a new record is created and inserted while the
 * latch is still held.  When @care is CARE_MONITOR the client is also
 * registered with rpc.statd via nsm_monitor().
 *
 * @param care        [IN] how much the caller cares about a missing entry
 * @param xprt        [IN] transport to derive the address from (may be NULL)
 * @param caller_name [IN] NLM caller name (must not be NULL)
 *
 * @return referenced client record, or NULL on failure / not-found-and-
 *         don't-care.
 */
state_nsm_client_t *get_nsm_client(care_t    care,
                                   SVCXPRT * xprt,
                                   char    * caller_name)
{
  state_nsm_client_t   key;
  state_nsm_client_t * pclient;
  char                 sock_name[SOCK_NAME_MAX];
  char                 str[HASHTABLE_DISPLAY_STRLEN];
  struct hash_latch    latch;
  hash_error_t         rc;
  hash_buffer_t        buffkey;
  hash_buffer_t        buffval;

  if(caller_name == NULL)
    return NULL;

  memset(&key, 0, sizeof(key));

  if(nfs_param.core_param.nsm_use_caller_name)
    {
      key.ssc_nlm_caller_name_len = strlen(caller_name);

      if(key.ssc_nlm_caller_name_len > LM_MAXSTRLEN)
        return NULL;

      key.ssc_nlm_caller_name = caller_name;
    }
  else if(xprt == NULL)
    {
      /* No transport available, derive the address from caller_name.
       * (renamed from "rc" to avoid shadowing the outer hash_error_t rc) */
      int conv_rc = ipstring_to_sockaddr(caller_name, &key.ssc_client_addr);

      if(conv_rc != 0)
        {
          LogEvent(COMPONENT_STATE,
                   "Error %s, converting caller_name %s to an ipaddress",
                   gai_strerror(conv_rc), caller_name);
          return NULL;
        }

      key.ssc_nlm_caller_name_len = strlen(caller_name);

      if(key.ssc_nlm_caller_name_len > LM_MAXSTRLEN)
        return NULL;

      key.ssc_nlm_caller_name = caller_name;
    }
  else
    {
      key.ssc_nlm_caller_name = sock_name;

      if(copy_xprt_addr(&key.ssc_client_addr, xprt) == 0)
        {
          LogCrit(COMPONENT_STATE,
                  "Error converting caller_name %s to an ipaddress",
                  caller_name);
          return NULL;
        }

      if(sprint_sockip(&key.ssc_client_addr,
                       key.ssc_nlm_caller_name,
                       sizeof(sock_name)) == 0)
        {
          LogCrit(COMPONENT_STATE,
                  "Error converting caller_name %s to an ipaddress",
                  caller_name);
          return NULL;
        }

      key.ssc_nlm_caller_name_len = strlen(key.ssc_nlm_caller_name);
    }

  if(isFullDebug(COMPONENT_STATE))
    {
      display_nsm_client(&key, str);
      LogFullDebug(COMPONENT_STATE,
                   "Find {%s}", str);
    }

  buffkey.pdata = &key;
  buffkey.len   = sizeof(key);

  rc = HashTable_GetLatch(ht_nsm_client, &buffkey, &buffval, TRUE, &latch);

  /* If we found it, return it */
  if(rc == HASHTABLE_SUCCESS)
    {
      pclient = buffval.pdata;

      /* Return the found NSM Client */
      if(isFullDebug(COMPONENT_STATE))
        {
          display_nsm_client(pclient, str);
          LogFullDebug(COMPONENT_STATE,
                       "Found {%s}", str);
        }

      /* Increment refcount under hash latch.
       * This prevents dec ref from removing this entry from hash if a race
       * occurs.
       */
      inc_nsm_client_ref(pclient);

      HashTable_ReleaseLatched(ht_nsm_client, &latch);

      if(care == CARE_MONITOR && !nsm_monitor(pclient))
        {
          dec_nsm_client_ref(pclient);
          pclient = NULL;
        }

      return pclient;
    }

  /* An error occurred, return NULL */
  if(rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      display_nsm_client(&key, str);

      LogCrit(COMPONENT_STATE,
              "Error %s, could not find {%s}",
              hash_table_err_to_str(rc), str);

      return NULL;
    }

  /* From here on the lookup missed with NO_SUCH_KEY, so the latch is
   * held and must be released on every exit path. */

  /* Not found, but we don't care, return NULL */
  if(care == CARE_NOT)
    {
      /* Return the found NSM Client */
      if(isFullDebug(COMPONENT_STATE))
        {
          display_nsm_client(&key, str);
          LogFullDebug(COMPONENT_STATE,
                       "Ignoring {%s}", str);
        }

      HashTable_ReleaseLatched(ht_nsm_client, &latch);

      return NULL;
    }

  pclient = gsh_malloc(sizeof(*pclient));

  if(pclient == NULL)
    {
      display_nsm_client(&key, str);

      LogCrit(COMPONENT_STATE,
              "No memory for {%s}", str);

      /* BUGFIX: release the latch taken by the failed lookup */
      HashTable_ReleaseLatched(ht_nsm_client, &latch);

      return NULL;
    }

  /* Copy everything over */
  memcpy(pclient, &key, sizeof(key));

  /* BUGFIX: pthread_mutex_init returns 0 on success and an error NUMBER
   * on failure (not -1), so test for non-zero. */
  if(pthread_mutex_init(&pclient->ssc_mutex, NULL) != 0)
    {
      /* Mutex initialization failed, free the created client */
      display_nsm_client(&key, str);

      LogCrit(COMPONENT_STATE,
              "Could not init mutex for {%s}", str);

      gsh_free(pclient);

      /* BUGFIX: release the latch taken by the failed lookup */
      HashTable_ReleaseLatched(ht_nsm_client, &latch);

      return NULL;
    }

  pclient->ssc_nlm_caller_name = gsh_strdup(key.ssc_nlm_caller_name);

  if(pclient->ssc_nlm_caller_name == NULL)
    {
      /* Discard the created client */
      free_nsm_client(pclient);

      /* BUGFIX: release the latch taken by the failed lookup */
      HashTable_ReleaseLatched(ht_nsm_client, &latch);

      return NULL;
    }

  init_glist(&pclient->ssc_lock_list);
  init_glist(&pclient->ssc_share_list);

  pclient->ssc_refcount = 1;

  if(isFullDebug(COMPONENT_STATE))
    {
      display_nsm_client(pclient, str);
      LogFullDebug(COMPONENT_STATE,
                   "New {%s}", str);
    }

  buffkey.pdata = pclient;
  buffkey.len   = sizeof(*pclient);
  buffval.pdata = pclient;
  buffval.len   = sizeof(*pclient);

  /* BUGFIX: pass the key buffer as the key argument; the original code
   * passed &buffval for both key and value. */
  rc = HashTable_SetLatched(ht_nsm_client,
                            &buffkey,
                            &buffval,
                            &latch,
                            FALSE,
                            NULL,
                            NULL);

  /* An error occurred, return NULL */
  if(rc != HASHTABLE_SUCCESS)
    {
      display_nsm_client(pclient, str);

      LogCrit(COMPONENT_STATE,
              "Error %s, inserting {%s}",
              hash_table_err_to_str(rc), str);

      free_nsm_client(pclient);

      return NULL;
    }

  if(care != CARE_MONITOR || nsm_monitor(pclient))
    return pclient;

  /* Failed to monitor, release client reference
   * and almost certainly remove it from the hash table.
   */
  dec_nsm_client_ref(pclient);

  return NULL;
}
/* Finish setting up a freshly-created NLM owner: it starts with no NLM
 * shares and holds a reference on its NLM client record. */
static void init_nlm_owner(state_owner_t * powner)
{
  /* No shares are attached to this owner yet */
  init_glist(&powner->so_owner.so_nlm_owner.so_nlm_shares);

  /* The owner keeps its client record alive */
  inc_nlm_client_ref(powner->so_owner.so_nlm_owner.so_client);
}
/**
 * get_nsm_client: look up (or create) an NSM client record keyed by
 * caller name or transport address.
 *
 * @param care        [IN] how much the caller cares about a missing entry
 * @param xprt        [IN] transport to derive the address from
 * @param caller_name [IN] NLM caller name (must not be NULL)
 *
 * @return the client record, or NULL on failure / not-found-and-don't-care.
 */
state_nsm_client_t *get_nsm_client(care_t       care,
                                   SVCXPRT    * xprt,
                                   const char * caller_name)
{
  state_nsm_client_t *pkey, *pclient = NULL;  /* BUGFIX: initialize pclient;
                                               * it is only assigned when the
                                               * lookup succeeds, but was read
                                               * on the CARE_NOT miss path */

  if(caller_name == NULL)
    return NULL;

  pkey = (state_nsm_client_t *)Mem_Alloc(sizeof(*pkey));

  if(pkey == NULL)
    return NULL;

  memset(pkey, 0, sizeof(*pkey));

  pkey->ssc_refcount = 1;

  if(nfs_param.core_param.nsm_use_caller_name)
    {
      pkey->ssc_nlm_caller_name_len = strlen(caller_name);

      if(pkey->ssc_nlm_caller_name_len > LM_MAXSTRLEN)
        {
          /* Discard the key we created */
          free_nsm_client(pkey);
          return NULL;
        }

      pkey->ssc_nlm_caller_name = Mem_Alloc(pkey->ssc_nlm_caller_name_len + 1);

      if(pkey->ssc_nlm_caller_name == NULL)
        {
          /* Discard the key we created */
          free_nsm_client(pkey);
          return NULL;
        }

      memcpy(pkey->ssc_nlm_caller_name,
             caller_name,
             pkey->ssc_nlm_caller_name_len);

      pkey->ssc_nlm_caller_name[pkey->ssc_nlm_caller_name_len] = '\0';
    }
  else
    {
      pkey->ssc_nlm_caller_name_len = SOCK_NAME_MAX;
      pkey->ssc_nlm_caller_name     = Mem_Alloc(SOCK_NAME_MAX);

      if(pkey->ssc_nlm_caller_name == NULL)
        {
          /* Discard the key we created */
          free_nsm_client(pkey);
          return NULL;
        }

      if(copy_xprt_addr(&pkey->ssc_client_addr, xprt) == 0)
        {
          /* Discard the key we created */
          free_nsm_client(pkey);
          return NULL;
        }

      if(sprint_sockip(&pkey->ssc_client_addr,
                       pkey->ssc_nlm_caller_name,
                       SOCK_NAME_MAX) == 0)
        {
          /* Discard the key we created */
          free_nsm_client(pkey);
          return NULL;
        }

      pkey->ssc_nlm_caller_name_len = strlen(pkey->ssc_nlm_caller_name);
    }

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      display_nsm_client(pkey, str);
      LogFullDebug(COMPONENT_STATE,
                   "Find NSM Client pkey {%s}", str);
    }

  /* If we found it, return it, if we don't care, return NULL */
  if(nsm_client_Get_Pointer(pkey, &pclient) == 1 || care == CARE_NOT)
    {
      /* Discard the key we created and return the found NSM Client
       * (pclient is NULL on a CARE_NOT miss) */
      free_nsm_client(pkey);

      if(isFullDebug(COMPONENT_STATE) && pclient != NULL)
        {
          char str[HASHTABLE_DISPLAY_STRLEN];

          display_nsm_client(pclient, str);
          LogFullDebug(COMPONENT_STATE,
                       "Found NSM Client {%s}", str);
        }

      /* CARE_MONITOR can only be reached on a successful lookup, so
       * pclient is valid here */
      if(care == CARE_MONITOR)
        if(!nsm_monitor(pclient))
          {
            dec_nsm_client_ref(pclient);
            return NULL;
          }

      return pclient;
    }

  pclient = (state_nsm_client_t *)Mem_Alloc(sizeof(*pkey));

  if(pclient == NULL)
    {
      free_nsm_client(pkey);
      return NULL;
    }

  /* Copy everything over */
  *pclient = *pkey;

  pclient->ssc_nlm_caller_name = Mem_Alloc(pkey->ssc_nlm_caller_name_len + 1);

  if(pclient->ssc_nlm_caller_name == NULL)
    {
      /* Discard the key and created client */
      free_nsm_client(pkey);
      free_nsm_client(pclient);
      return NULL;
    }

  memcpy(pclient->ssc_nlm_caller_name,
         pkey->ssc_nlm_caller_name,
         pclient->ssc_nlm_caller_name_len);

  pclient->ssc_nlm_caller_name[pclient->ssc_nlm_caller_name_len] = '\0';

  init_glist(&pclient->ssc_lock_list);

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      display_nsm_client(pclient, str);
      LogFullDebug(COMPONENT_STATE,
                   "New NSM Client {%s}", str);
    }

  /* BUGFIX: pthread_mutex_init returns 0 on success and an error NUMBER
   * on failure (not -1), so test for non-zero. */
  if(pthread_mutex_init(&pclient->ssc_mutex, NULL) != 0)
    {
      /* Mutex initialization failed, free the key and created client */
      free_nsm_client(pkey);
      free_nsm_client(pclient);
      return NULL;
    }

  if(nsm_client_Set(pkey, pclient) == 1)
    {
      if(isFullDebug(COMPONENT_STATE))
        {
          char str[HASHTABLE_DISPLAY_STRLEN];

          display_nsm_client(pclient, str);
          LogFullDebug(COMPONENT_STATE,
                       "Set NSM Client {%s}", str);
        }

      if(care != CARE_MONITOR || nsm_monitor(pclient))
        return pclient;

      dec_nsm_client_ref(pclient);
      return NULL;
    }

  free_nsm_client(pkey);
  free_nsm_client(pclient);

  return NULL;
}
/**
 * get_nlm_owner: look up (or create) an NLM lock owner keyed by
 * client, opaque owner handle and svid.
 *
 * @param care    [IN] how much the caller cares about a missing entry
 * @param pclient [IN] NLM client the owner belongs to (must not be NULL)
 * @param oh      [IN] opaque owner handle (must not be NULL, bounded size)
 * @param svid    [IN] NLM svid (per-process lock owner id)
 *
 * @return the owner, or NULL on failure / not-found-and-don't-care.
 */
state_owner_t *get_nlm_owner(care_t               care,
                             state_nlm_client_t * pclient,
                             netobj             * oh,
                             uint32_t             svid)
{
  state_owner_t *pkey, *powner = NULL;  /* BUGFIX: initialize powner; it is
                                         * only assigned when the lookup
                                         * succeeds, but was read on the
                                         * CARE_NOT miss path */

  if(pclient == NULL || oh == NULL || oh->n_len > MAX_NETOBJ_SZ)
    return NULL;

  pkey = (state_owner_t *)Mem_Alloc(sizeof(*pkey));

  if(pkey == NULL)
    return NULL;

  memset(pkey, 0, sizeof(*pkey));

  pkey->so_type                             = STATE_LOCK_OWNER_NLM;
  pkey->so_refcount                         = 1;
  pkey->so_owner.so_nlm_owner.so_client     = pclient;
  pkey->so_owner.so_nlm_owner.so_nlm_svid   = svid;
  pkey->so_owner_len                        = oh->n_len;

  memcpy(pkey->so_owner_val, oh->n_bytes, oh->n_len);

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      display_nlm_owner(pkey, str);
      LogFullDebug(COMPONENT_STATE,
                   "Find NLM Owner KEY {%s}", str);
    }

  /* If we found it, return it, if we don't care, return NULL */
  if(nlm_owner_Get_Pointer(pkey, &powner) == 1 || care == CARE_NOT)
    {
      /* Discard the key we created and return the found NLM Owner
       * (powner is NULL on a CARE_NOT miss) */
      Mem_Free(pkey);

      if(isFullDebug(COMPONENT_STATE) && powner != NULL)
        {
          char str[HASHTABLE_DISPLAY_STRLEN];

          display_nlm_owner(powner, str);
          LogFullDebug(COMPONENT_STATE,
                       "Found {%s}", str);
        }

      return powner;
    }

  powner = (state_owner_t *)Mem_Alloc(sizeof(*pkey));

  if(powner == NULL)
    {
      Mem_Free(pkey);
      return NULL;
    }

  /* Copy everything over */
  *powner = *pkey;

  init_glist(&powner->so_lock_list);

  /* BUGFIX: pthread_mutex_init returns 0 on success and an error NUMBER
   * on failure (not -1), so test for non-zero. */
  if(pthread_mutex_init(&powner->so_mutex, NULL) != 0)
    {
      /* Mutex initialization failed, free the key and created owner */
      Mem_Free(pkey);
      Mem_Free(powner);
      return NULL;
    }

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      display_nlm_owner(powner, str);
      LogFullDebug(COMPONENT_STATE,
                   "New {%s}", str);
    }

  /* Ref count the client as being used by this owner */
  inc_nlm_client_ref(pclient);

  if(nlm_owner_Set(pkey, powner) == 1)
    {
      if(isFullDebug(COMPONENT_STATE))
        {
          char str[HASHTABLE_DISPLAY_STRLEN];

          display_nlm_owner(powner, str);
          LogFullDebug(COMPONENT_STATE,
                       "Set NLM Owner {%s}", str);
        }

      return powner;
    }

  /* Insertion failed: roll back the client reference and the mutex */
  dec_nlm_client_ref(pclient);
  pthread_mutex_destroy(&powner->so_mutex);
  Mem_Free(pkey);
  Mem_Free(powner);

  return NULL;
}
/**
 * get_nlm_client: look up (or create) an NLM client record keyed by
 * caller name.
 *
 * @param care        [IN] how much the caller cares about a missing entry
 * @param caller_name [IN] NLM caller name (must not be NULL)
 *
 * @return the client record, or NULL on failure / not-found-and-don't-care.
 */
state_nlm_client_t *get_nlm_client(care_t care, const char * caller_name)
{
  state_nlm_client_t *pkey, *pclient = NULL;  /* BUGFIX: initialize pclient;
                                               * it is only assigned when the
                                               * lookup succeeds, but was read
                                               * on the CARE_NOT miss path */

  if(caller_name == NULL)
    return NULL;

  pkey = (state_nlm_client_t *)Mem_Alloc(sizeof(*pkey));

  if(pkey == NULL)
    return NULL;

  memset(pkey, 0, sizeof(*pkey));

  pkey->slc_refcount            = 1;
  pkey->slc_nlm_caller_name_len = strlen(caller_name);

  if(pkey->slc_nlm_caller_name_len > LM_MAXSTRLEN)
    {
      /* BUGFIX: free the key before bailing out (was a memory leak) */
      Mem_Free(pkey);
      return NULL;
    }

  memcpy(pkey->slc_nlm_caller_name,
         caller_name,
         pkey->slc_nlm_caller_name_len);

  pkey->slc_nlm_caller_name[pkey->slc_nlm_caller_name_len] = '\0';

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      display_nlm_client(pkey, str);
      LogFullDebug(COMPONENT_STATE,
                   "Find NLM Client pkey {%s}", str);
    }

  /* If we found it, return it, if we don't care, return NULL */
  if(nlm_client_Get_Pointer(pkey, &pclient) == 1 || care == CARE_NOT)
    {
      /* Discard the key we created and return the found NLM Client
       * (pclient is NULL on a CARE_NOT miss) */
      Mem_Free(pkey);

      if(isFullDebug(COMPONENT_STATE) && pclient != NULL)
        {
          char str[HASHTABLE_DISPLAY_STRLEN];

          display_nlm_client(pclient, str);
          LogFullDebug(COMPONENT_STATE,
                       "Found NLM Client pclient=%p {%s}, refcount = %d",
                       pclient, str,
                       pclient != NULL ? pclient->slc_refcount : 0);
        }

      /* CARE_MONITOR can only be reached on a successful lookup, so
       * pclient is valid here */
      if(care == CARE_MONITOR)
        if(!nsm_monitor(pclient))
          {
            dec_nlm_client_ref(pclient);
            return NULL;
          }

      return pclient;
    }

  pclient = (state_nlm_client_t *)Mem_Alloc(sizeof(*pkey));

  if(pclient == NULL)
    {
      Mem_Free(pkey);
      return NULL;
    }

  /* Copy everything over */
  *pclient = *pkey;

  init_glist(&pclient->slc_lock_list);

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      display_nlm_client(pclient, str);
      LogFullDebug(COMPONENT_STATE,
                   "New pclient=%p {%s}", pclient, str);
    }

  /* BUGFIX: pthread_mutex_init returns 0 on success and an error NUMBER
   * on failure (not -1), so test for non-zero. */
  if(pthread_mutex_init(&pclient->slc_mutex, NULL) != 0)
    {
      /* Mutex initialization failed, free the key and created owner */
      Mem_Free(pkey);
      Mem_Free(pclient);
      return NULL;
    }

  if(nlm_client_Set(pkey, pclient) == 1)
    {
      if(isFullDebug(COMPONENT_STATE))
        {
          char str[HASHTABLE_DISPLAY_STRLEN];

          display_nlm_client(pclient, str);
          LogFullDebug(COMPONENT_STATE,
                       "Set NLM Client pclient=%p {%s}, refcount = %d",
                       pclient, str, pclient->slc_refcount);
        }

      if(care != CARE_MONITOR || nsm_monitor(pclient))
        return pclient;

      dec_nlm_client_ref(pclient);
      return NULL;
    }

  /* Insertion failed: roll back the mutex and free both structures */
  pthread_mutex_destroy(&pclient->slc_mutex);
  Mem_Free(pkey);
  Mem_Free(pclient);

  return NULL;
}
/**
 * create_nfs4_owner: allocate, initialize and register a new NFSv4
 * state owner from the per-client resource pools.
 *
 * @param pname         [IN] hashable owner name (copied)
 * @param pclient       [IN] cache inode client providing the pools
 * @param type          [IN] owner type (open owner or lock owner)
 * @param related_owner [IN] open owner a lock owner derives from (may be NULL)
 * @param init_seqid    [IN] initial seqid for the owner
 *
 * @return the new owner (refcount = 1), or NULL on allocation or
 *         registration failure.
 */
state_owner_t *create_nfs4_owner(cache_inode_client_t    * pclient,
                                 state_nfs4_owner_name_t * pname,
                                 state_owner_type_t        type,
                                 state_owner_t           * related_owner,
                                 unsigned int              init_seqid)
{
  state_owner_t           * powner;
  state_nfs4_owner_name_t * powner_name;

  /* This lock owner is not known yet, allocate and set up a new one */
  GetFromPool(powner, &pclient->pool_state_owner, state_owner_t);

  if(powner == NULL)
    return NULL;

  GetFromPool(powner_name, &pclient->pool_nfs4_owner_name, state_nfs4_owner_name_t);

  if(powner_name == NULL)
    {
      ReleaseToPool(powner, &pclient->pool_state_owner);
      return NULL;
    }

  /* Copy the key so the hash table owns its own copy of the name */
  *powner_name = *pname;

  /* set up the content of the open_owner */
  memset(powner, 0, sizeof(*powner));

  powner->so_type                                 = type;
  powner->so_owner.so_nfs4_owner.so_seqid         = init_seqid;
  powner->so_owner.so_nfs4_owner.so_related_owner = related_owner;
  powner->so_owner.so_nfs4_owner.so_clientid      = pname->son_clientid;
  powner->so_owner_len                            = pname->son_owner_len;
  powner->so_owner.so_nfs4_owner.so_resp.resop    = NFS4_OP_ILLEGAL;
  powner->so_owner.so_nfs4_owner.so_args.argop    = NFS4_OP_ILLEGAL;
  powner->so_refcount                             = 1;

  init_glist(&powner->so_lock_list);

  memcpy(powner->so_owner_val,
         pname->son_owner_val,
         pname->son_owner_len);

  powner->so_owner_val[powner->so_owner_len] = '\0';

  /* BUGFIX: pthread_mutex_init returns 0 on success and an error NUMBER
   * on failure (it does not return -1), so test for non-zero. */
  if(pthread_mutex_init(&powner->so_mutex, NULL) != 0)
    {
      ReleaseToPool(powner, &pclient->pool_state_owner);
      ReleaseToPool(powner_name, &pclient->pool_nfs4_owner_name);
      return NULL;
    }

  if(!nfs4_owner_Set(powner_name, powner))
    {
      /* BUGFIX: destroy the mutex we just initialized before releasing */
      pthread_mutex_destroy(&powner->so_mutex);
      ReleaseToPool(powner, &pclient->pool_state_owner);
      ReleaseToPool(powner_name, &pclient->pool_nfs4_owner_name);
      return NULL;
    }

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      DisplayOwner(powner, str);
      LogFullDebug(COMPONENT_STATE,
                   "New Open Owner %s", str);
    }

  return powner;
}
/**
 * GPFSFSAL_BuildExportContext: build the export entry for a GPFS export.
 *
 * Verifies the export path lives on a mounted GPFS filesystem, opens the
 * export root, records the fsid and root handle in the export context,
 * and attaches the export to the (per-filesystem) FSAL UP context,
 * starting the GPFSFSAL_UP_Thread the first time a filesystem is seen.
 *
 * @param export_context      [OUT] export context to fill in
 * @param p_export_path       [IN]  export path to validate
 * @param fs_specific_options [IN]  unused here
 *
 * @return ERR_FSAL_NO_ERROR on success, ERR_FSAL_FAULT/ERR_FSAL_INVAL
 *         otherwise.
 */
fsal_status_t GPFSFSAL_BuildExportContext(fsal_export_context_t *export_context, /* OUT */
                                          fsal_path_t * p_export_path,           /* IN */
                                          char *fs_specific_options              /* IN */
    )
{
  int                  rc, fd, mntexists;
  FILE               * fp;
  struct mntent      * p_mnt;
  char               * mnt_dir = NULL;
  struct statfs        stat_buf;
  gpfs_fsal_up_ctx_t * gpfs_fsal_up_ctx;
  bool_t               start_fsal_up_thread = FALSE;

  fsal_status_t status;
  fsal_op_context_t op_context;
  gpfsfsal_export_context_t *p_export_context =
      (gpfsfsal_export_context_t *)export_context;

  /* Make sure the FSAL UP context list is initialized */
  if(glist_null(&gpfs_fsal_up_ctx_list))
    init_glist(&gpfs_fsal_up_ctx_list);

  /* sanity check */
  if((p_export_context == NULL) || (p_export_path == NULL))
    {
      LogCrit(COMPONENT_FSAL,
              "NULL mandatory argument passed to %s()", __FUNCTION__);
      Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_BuildExportContext);
    }

  /* open mnt file */
  fp = setmntent(MOUNTED, "r");

  if(fp == NULL)
    {
      rc = errno;
      LogCrit(COMPONENT_FSAL,
              "Error %d in setmntent(%s): %s", rc, MOUNTED, strerror(rc));
      Return(posix2fsal_error(rc), rc, INDEX_FSAL_BuildExportContext);
    }

  /* Check if mount point is really a gpfs share. If not, we can't continue.*/
  mntexists = 0;
  while((p_mnt = getmntent(fp)) != NULL)
    if(p_mnt->mnt_dir != NULL && p_mnt->mnt_type != NULL)
      /* There is probably a macro for "gpfs" type ... not sure where it is. */
      if(strncmp(p_mnt->mnt_type, "gpfs", 4) == 0)
        {
          LogFullDebug(COMPONENT_FSAL,
                       "Checking Export Path %s against GPFS fs %s",
                       p_export_path->path, p_mnt->mnt_dir);

          /* If export path is shorter than fs path, then this isn't a match */
          if(strlen(p_export_path->path) < strlen(p_mnt->mnt_dir))
            continue;

          /* If export path doesn't have a path separator after mnt_dir, then it
           * isn't a proper sub-directory of mnt_dir.
           */
          if((p_export_path->path[strlen(p_mnt->mnt_dir)] != '/') &&
             (p_export_path->path[strlen(p_mnt->mnt_dir)] != '\0'))
            continue;

          if(strncmp(p_mnt->mnt_dir, p_export_path->path,
                     strlen(p_mnt->mnt_dir)) == 0)
            {
              mnt_dir = gsh_strdup(p_mnt->mnt_dir);
              mntexists = 1;
              break;
            }
        }

  endmntent(fp);

  if(mntexists == 0)
    {
      LogMajor(COMPONENT_FSAL,
               "GPFS mount point %s does not exist.",
               p_export_path->path);
      gsh_free(mnt_dir);
      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  /* save file descriptor to root of GPFS share */
  fd = open(p_export_path->path, O_RDONLY | O_DIRECTORY);

  if(fd < 0)
    {
      if(errno == ENOENT)
        LogMajor(COMPONENT_FSAL,
                 "GPFS export path %s does not exist.",
                 p_export_path->path);
      else if(errno == ENOTDIR)
        LogMajor(COMPONENT_FSAL,
                 "GPFS export path %s is not a directory.",
                 p_export_path->path);
      else
        LogMajor(COMPONENT_FSAL,
                 "Could not open GPFS export path %s: rc = %d(%s)",
                 p_export_path->path, errno, strerror(errno));

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  p_export_context->mount_root_fd = fd;

  LogFullDebug(COMPONENT_FSAL,
               "GPFSFSAL_BuildExportContext: %d",
               p_export_context->mount_root_fd);

  /* Save pointer to fsal_staticfsinfo_t in export context */
  p_export_context->fe_static_fs_info = &global_fs_info;

  /* save filesystem ID */
  rc = statfs(p_export_path->path, &stat_buf);

  if(rc)
    {
      close(fd);
      LogMajor(COMPONENT_FSAL,
               "statfs call failed on file %s: %d(%s)",
               p_export_path->path, errno, strerror(errno));

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  p_export_context->fsid[0] = stat_buf.f_fsid.__val[0];
  p_export_context->fsid[1] = stat_buf.f_fsid.__val[1];

  /* save file handle to root of GPFS share */
  op_context.export_context = export_context;

  // op_context.credential = ???
  status = fsal_internal_get_handle(&op_context,
                                    p_export_path,
                                    (fsal_handle_t *)(&(p_export_context->mount_root_handle)));

  if(FSAL_IS_ERROR(status))
    {
      close(p_export_context->mount_root_fd);
      LogMajor(COMPONENT_FSAL,
               "FSAL BUILD EXPORT CONTEXT: ERROR: Conversion from gpfs filesystem root path to handle failed : %d",
               status.minor);

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  gpfs_fsal_up_ctx = gpfsfsal_find_fsal_up_context(p_export_context);

  if(gpfs_fsal_up_ctx == NULL)
    {
      gpfs_fsal_up_ctx = gsh_calloc(1, sizeof(gpfs_fsal_up_ctx_t));

      if(gpfs_fsal_up_ctx == NULL || mnt_dir == NULL)
        {
          LogFatal(COMPONENT_FSAL,
                   "Out of memory can not continue.");
        }

      /* Initialize the gpfs_fsal_up_ctx */
      init_glist(&gpfs_fsal_up_ctx->gf_exports);
      gpfs_fsal_up_ctx->gf_fs      = mnt_dir;
      gpfs_fsal_up_ctx->gf_fsid[0] = p_export_context->fsid[0];
      gpfs_fsal_up_ctx->gf_fsid[1] = p_export_context->fsid[1];

      /* Add it to the list of contexts */
      glist_add_tail(&gpfs_fsal_up_ctx_list, &gpfs_fsal_up_ctx->gf_list);

      start_fsal_up_thread = TRUE;
    }
  else
    {
      /* Context already exists; the duplicate mnt_dir is not needed */
      if(mnt_dir != NULL)
        gsh_free(mnt_dir);
    }

  /* Add this export context to the list for it's gpfs_fsal_up_ctx */
  glist_add_tail(&gpfs_fsal_up_ctx->gf_exports, &p_export_context->fe_list);
  p_export_context->fe_fsal_up_ctx = gpfs_fsal_up_ctx;

  if(start_fsal_up_thread)
    {
      pthread_attr_t attr_thr;

      memset(&attr_thr, 0, sizeof(attr_thr));

      /* Initialization of thread attributes borrowed from nfs_init.c */
      if(pthread_attr_init(&attr_thr) != 0)
        LogCrit(COMPONENT_THREAD, "can't init pthread's attributes");

      if(pthread_attr_setscope(&attr_thr, PTHREAD_SCOPE_SYSTEM) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's scope");

      if(pthread_attr_setdetachstate(&attr_thr, PTHREAD_CREATE_JOINABLE) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's join state");

      if(pthread_attr_setstacksize(&attr_thr, 2116488) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's stack size");

      rc = pthread_create(&gpfs_fsal_up_ctx->gf_thread,
                          &attr_thr,
                          GPFSFSAL_UP_Thread,
                          gpfs_fsal_up_ctx);

      if(rc != 0)
        {
          /* BUGFIX: pthread_create returns the error code and does NOT set
           * errno, so report rc rather than errno. */
          LogFatal(COMPONENT_THREAD,
                   "Could not create GPFSFSAL_UP_Thread, error = %d (%s)",
                   rc, strerror(rc));
        }

      /* Attributes are no longer needed once the thread is running */
      pthread_attr_destroy(&attr_thr);
    }

  Return(ERR_FSAL_NO_ERROR, 0, INDEX_FSAL_BuildExportContext);
}