/**
 * Remove an NFSv4 owner from the ht_nfs4_owner hash table and release it.
 *
 * @param pclient  Per-thread cache inode client (provides the pools the
 *                 owner and owner-name records are returned to)
 * @param powner   The owner being removed
 * @param str      Display string for the owner (used only in log messages)
 */
void remove_nfs4_owner(cache_inode_client_t * pclient,
                       state_owner_t        * powner,
                       const char           * str)
{
  hash_buffer_t buffkey, old_key, old_value;
  state_nfs4_owner_name_t oname;
  int rc;

  /* Rebuild the hash key (owner name) from the owner itself. */
  oname.son_clientid  = powner->so_owner.so_nfs4_owner.so_clientid;
  oname.son_owner_len = powner->so_owner_len;
  oname.son_islock    = powner->so_type == STATE_LOCK_OWNER_NFSV4;
  memcpy(oname.son_owner_val, powner->so_owner_val, powner->so_owner_len);

  buffkey.pdata = (caddr_t) &oname;
  /* BUGFIX: the key is oname (state_nfs4_owner_name_t), not the owner
   * structure; sizeof(*powner) overstated the key length, reading past
   * oname's storage and disagreeing with the length used at insert time.
   */
  buffkey.len = sizeof(oname);

  rc = HashTable_DelRef(ht_nfs4_owner,
                        &buffkey,
                        &old_key,
                        &old_value,
                        Hash_dec_state_owner_ref);

  switch(rc)
    {
      case HASHTABLE_SUCCESS:
        /* A lock owner holds a reference on its related open owner;
         * drop it now that the lock owner is gone.
         */
        if(powner->so_type == STATE_LOCK_OWNER_NFSV4)
          dec_state_owner_ref(powner->so_owner.so_nfs4_owner.so_related_owner,
                              pclient);

        /* Release the owner_name (key) and owner (data) back to
         * appropriate pools
         */
        LogFullDebug(COMPONENT_STATE, "Free %s", str);

        nfs4_Compound_FreeOne(&powner->so_owner.so_nfs4_owner.so_resp);
        ReleaseToPool(old_value.pdata, &pclient->pool_state_owner);
        ReleaseToPool(old_key.pdata, &pclient->pool_nfs4_owner_name);
        break;

      case HASHTABLE_NOT_DELETED:
        /* ref count didn't end up at 0, don't free. */
        LogDebug(COMPONENT_STATE,
                 "HashTable_DelRef didn't reduce refcount to 0 for %s",
                 str);
        break;

      default:
        /* some problem occurred */
        LogDebug(COMPONENT_STATE,
                 "HashTable_DelRef failed (%s) for %s",
                 hash_table_err_to_str(rc), str);
        break;
    }
}
/**
 * Fill in a LOCK4denied structure describing a conflicting lock.
 *
 * @param denied    NFS v4 LOCK4denied structure to fill in
 * @param holder    Owner that holds the conflicting lock (may be NULL if
 *                  SAL could not identify one)
 * @param conflict  Description of the conflicting lock
 * @param pclient   Cache inode client used when releasing the holder ref
 */
void Process_nfs4_conflict(LOCK4denied          * denied,
                           state_owner_t        * holder,
                           state_lock_desc_t    * conflict,
                           cache_inode_client_t * pclient)
{
  /* A conflicting lock from a different lock_owner, returns NFS4ERR_DENIED */
  denied->offset = conflict->sld_offset;
  denied->length = conflict->sld_length;

  if(conflict->sld_type == STATE_LOCK_R)
    denied->locktype = READ_LT;
  else
    denied->locktype = WRITE_LT;

  if(holder != NULL && holder->so_owner_len != 0)
    denied->owner.owner.owner_val = Mem_Alloc(holder->so_owner_len);
  else
    denied->owner.owner.owner_val = NULL;

  LogFullDebug(COMPONENT_STATE,
               "denied->owner.owner.owner_val = %p",
               denied->owner.owner.owner_val);

  if(denied->owner.owner.owner_val != NULL)
    {
      denied->owner.owner.owner_len = holder->so_owner_len;
      memcpy(denied->owner.owner.owner_val,
             holder->so_owner_val,
             holder->so_owner_len);
    }
  else
    {
      /* No holder or allocation failed: fall back to the canonical
       * "unknown owner" so the reply is still well formed.
       */
      denied->owner.owner.owner_len = unknown_owner.so_owner_len;
      denied->owner.owner.owner_val = unknown_owner.so_owner_val;
    }

  /* BUGFIX: guard against NULL holder before dereferencing so_type
   * (every other use of holder in this function is NULL-checked).
   */
  if(holder != NULL && holder->so_type == STATE_LOCK_OWNER_NFSV4)
    denied->owner.clientid = holder->so_owner.so_nfs4_owner.so_clientid;
  else
    denied->owner.clientid = 0;

  /* Release any lock owner reference passed back from SAL */
  if(holder != NULL)
    dec_state_owner_ref(holder, pclient);
}
/**
 * Tear down an NFSv4 owner that is being freed: drop its references and
 * unlink it from its client record.
 */
void free_nfs4_owner(state_owner_t * owner)
{
  /* Free the saved compound response attached to this owner. */
  nfs4_Compound_FreeOne(&owner->so_owner.so_nfs4_owner.so_resp);

  /* Drop the reference taken on the related (open) owner, if any. */
  if(owner->so_owner.so_nfs4_owner.so_related_owner != NULL)
    dec_state_owner_ref(owner->so_owner.so_nfs4_owner.so_related_owner);

  /* Unlink from the per-clientid owner list under the client mutex. */
  P(owner->so_owner.so_nfs4_owner.so_clientrec->cid_mutex);

  glist_del(&owner->so_owner.so_nfs4_owner.so_perclient);

  V(owner->so_owner.so_nfs4_owner.so_clientrec->cid_mutex);

  /* Finally release the reference held on the client record. */
  dec_client_id_ref(owner->so_owner.so_nfs4_owner.so_clientrec);
}
/**
 * Fill in a LOCK4denied structure describing a conflicting lock and
 * release the holder reference handed back by SAL.
 *
 * @param denied    LOCK4denied structure to fill in
 * @param holder    Owner holding the conflicting lock (may be NULL)
 * @param conflict  Parameters of the conflicting lock
 */
void Process_nfs4_conflict(LOCK4denied *denied, state_owner_t *holder,
			   fsal_lock_param_t *conflict)
{
	/* A conflicting lock from a different lock_owner, returns
	 * NFS4ERR_DENIED
	 */
	denied->offset = conflict->lock_start;
	denied->length = conflict->lock_length;
	denied->locktype =
	    (conflict->lock_type == FSAL_LOCK_R) ? READ_LT : WRITE_LT;

	if (holder == NULL || holder->so_owner_len == 0)
		denied->owner.owner.owner_val = NULL;
	else
		denied->owner.owner.owner_val =
		    gsh_malloc(holder->so_owner_len);

	LogFullDebug(COMPONENT_STATE,
		     "denied->owner.owner.owner_val = %p",
		     denied->owner.owner.owner_val);

	if (denied->owner.owner.owner_val == NULL) {
		/* No holder (or nothing allocated): report the canonical
		 * unknown owner instead.
		 */
		denied->owner.owner.owner_len = unknown_owner.so_owner_len;
		denied->owner.owner.owner_val = unknown_owner.so_owner_val;
	} else {
		denied->owner.owner.owner_len = holder->so_owner_len;
		memcpy(denied->owner.owner.owner_val,
		       holder->so_owner_val,
		       holder->so_owner_len);
	}

	if (holder != NULL && holder->so_type == STATE_LOCK_OWNER_NFSV4)
		denied->owner.clientid =
		    holder->so_owner.so_nfs4_owner.so_clientid;
	else
		denied->owner.clientid = 0;

	/* Release any lock owner reference passed back from SAL */
	if (holder != NULL)
		dec_state_owner_ref(holder);
}
/**
 * Fill an nlm4_holder from a conflicting lock description and release the
 * holder reference passed back by SAL.
 *
 * @param nlm_holder  Holder structure to fill in
 * @param holder      Owner of the conflicting lock (may be NULL)
 * @param conflict    Conflicting lock description (may be NULL)
 * @param pclient     Cache inode client used when dropping the holder ref
 */
void nlm_process_conflict(nlm4_holder          * nlm_holder,
                          state_owner_t        * holder,
                          state_lock_desc_t    * conflict,
                          cache_inode_client_t * pclient)
{
  if(conflict == NULL)
    {
      /* For some reason, don't have an actual conflict,
       * just make it exclusive over the whole file
       * (which would conflict with any lock requested).
       */
      nlm_holder->exclusive = TRUE;
      nlm_holder->l_offset  = 0;
      nlm_holder->l_len     = 0;
    }
  else
    {
      nlm_holder->exclusive = conflict->sld_type == STATE_LOCK_W;
      nlm_holder->l_offset  = conflict->sld_offset;
      nlm_holder->l_len     = conflict->sld_length;
    }

  if(holder == NULL)
    {
      /* If we don't have an NLM owner, not much we can do. */
      nlm_holder->svid = 0;
      fill_netobj(&nlm_holder->oh,
                  unknown_owner.so_owner_val,
                  unknown_owner.so_owner_len);
    }
  else
    {
      nlm_holder->svid = (holder->so_type == STATE_LOCK_OWNER_NLM)
                           ? holder->so_owner.so_nlm_owner.so_nlm_svid
                           : 0;
      fill_netobj(&nlm_holder->oh,
                  holder->so_owner_val,
                  holder->so_owner_len);
    }

  /* Release any lock owner reference passed back from SAL */
  if(holder != NULL)
    dec_state_owner_ref(holder, pclient);
}
/**
 * Release everything an NFSv4 owner holds before it is freed: the related
 * open owner reference, the saved compound response, its spot on the
 * client's owner list, and the client record reference.
 */
void free_nfs4_owner(state_owner_t *owner)
{
	state_nfs4_owner_t *n4 = &owner->so_owner.so_nfs4_owner;

	/* Drop the reference on the related open owner, if one was
	 * attached.
	 */
	if (n4->so_related_owner != NULL)
		dec_state_owner_ref(n4->so_related_owner);

	/* Free the cached compound response. */
	nfs4_Compound_FreeOne(&n4->so_resp);

	/* Unlink this owner from its client's per-clientid list under the
	 * client mutex.
	 */
	PTHREAD_MUTEX_lock(&n4->so_clientrec->cid_mutex);

	glist_del(&n4->so_perclient);

	PTHREAD_MUTEX_unlock(&n4->so_clientrec->cid_mutex);

	/* Finally, release the reference held on the client record. */
	dec_client_id_ref(n4->so_clientrec);
}
/**
 * @brief Create an NFSv4 state owner
 *
 * Looks up (and, depending on @p care, creates) a state owner keyed by
 * (clientid, owner name, type).  If a related (open) owner is supplied it
 * is attached to the found/created owner; a mismatch with an already
 * attached related owner is an error and returns NULL.
 *
 * @param[in]  name          Owner name
 * @param[in]  clientid      Client record
 * @param[in]  type          Owner type
 * @param[in]  related_owner For lock owners, the related open owner
 * @param[in]  init_seqid    The starting seqid (for NFSv4.0)
 * @param[out] pisnew        Whether the owner actually is new
 * @param[in]  care          Care flag (to unify v3/v4 owners?)
 *
 * @return A new state owner or NULL.
 */
state_owner_t *create_nfs4_owner(state_nfs4_owner_name_t *name,
				 nfs_client_id_t *clientid,
				 state_owner_type_t type,
				 state_owner_t *related_owner,
				 unsigned int init_seqid,
				 bool_t *pisnew,
				 care_t care)
{
	state_owner_t key;
	state_owner_t *owner;
	bool_t isnew;

	/* set up the content of the open_owner */
	memset(&key, 0, sizeof(key));

	key.so_type = type;
	key.so_owner.so_nfs4_owner.so_seqid = init_seqid;
	key.so_owner.so_nfs4_owner.so_related_owner = related_owner;
	key.so_owner.so_nfs4_owner.so_clientid = clientid->cid_clientid;
	key.so_owner.so_nfs4_owner.so_clientrec = clientid;
	key.so_owner_len = name->son_owner_len;
	key.so_owner_val = name->son_owner_val;
	/* Mark the saved args/response slots as unused until a real
	 * operation is recorded for seqid replay.
	 */
	key.so_owner.so_nfs4_owner.so_resp.resop = NFS4_OP_ILLEGAL;
	key.so_owner.so_nfs4_owner.so_args.argop = NFS4_OP_ILLEGAL;
	key.so_refcount = 1;
#if 0
	/* WAITING FOR COMMUNITY FIX */
	/* setting lock owner confirmed */
	if (type == STATE_LOCK_OWNER_NFSV4)
		key.so_owner.so_nfs4_owner.so_confirmed = 1;
#endif

	if (isFullDebug(COMPONENT_STATE)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_owner(&dspbuf, &key);

		LogFullDebug(COMPONENT_STATE, "Key=%s", str);
	}

	/* Find or insert the owner; isnew tells us which happened. */
	owner = get_state_owner(care, &key, init_nfs4_owner, &isnew);

	if (owner != NULL && related_owner != NULL) {
		PTHREAD_MUTEX_lock(&owner->so_mutex);

		/* Related owner already exists. */
		if (owner->so_owner.so_nfs4_owner.so_related_owner == NULL) {
			/* Attach related owner to owner now that we know it.
			 */
			inc_state_owner_ref(related_owner);
			owner->so_owner.so_nfs4_owner.so_related_owner =
			    related_owner;
		} else if (owner->so_owner.so_nfs4_owner.so_related_owner !=
			   related_owner) {
			/* An existing owner of this name is already bound to
			 * a DIFFERENT open owner — protocol error.
			 */
			char str1[LOG_BUFF_LEN / 2];
			char str2[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf1 = {
						sizeof(str1), str1, str1};
			struct display_buffer dspbuf2 = {
						sizeof(str2), str2, str2};

			display_owner(&dspbuf1, related_owner);
			display_owner(&dspbuf2, owner);

			LogCrit(COMPONENT_NFS_V4_LOCK,
				"Related {%s} doesn't match for {%s}",
				str1, str2);

			PTHREAD_MUTEX_unlock(&owner->so_mutex);

			/* Release the reference to the owner. */
			dec_state_owner_ref(owner);

			return NULL;
		}

		PTHREAD_MUTEX_unlock(&owner->so_mutex);
	}

	if (!isnew && owner != NULL && pisnew != NULL) {
		if (isDebug(COMPONENT_STATE)) {
			char str[LOG_BUFF_LEN];
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_owner(&dspbuf, owner);

			LogDebug(COMPONENT_STATE,
				 "Previously known owner {%s} is being reused",
				 str);
		}
	}

	if (pisnew != NULL)
		*pisnew = isnew;

	return owner;
}
/**
 * Implement the NFSv4 LOCK operation.
 *
 * Handles both the new-lock-owner case (lock owner derived from an open
 * owner/open stateid) and the existing-lock-owner case (lock stateid
 * supplied directly), performs NFSv4.0 seqid/replay processing, grace
 * period checks, and finally pushes the lock into SAL/FSAL via
 * state_lock().
 *
 * @param[in]  op    Operation arguments (LOCK4args)
 * @param[in]  data  Compound request data
 * @param[out] resp  Operation response (LOCK4res)
 *
 * @return NFS status of the operation (also stored in resp).
 */
int nfs4_op_lock(struct nfs_argop4 *op, compound_data_t *data,
		 struct nfs_resop4 *resp)
{
	/* Shorter alias for arguments */
	LOCK4args * const arg_LOCK4 = &op->nfs_argop4_u.oplock;
	/* Shorter alias for response */
	LOCK4res * const res_LOCK4 = &resp->nfs_resop4_u.oplock;
	/* Status code from state calls */
	state_status_t state_status = STATE_SUCCESS;
	/* Data for lock state to be created */
	union state_data candidate_data;
	/* Status code for protocol functions */
	nfsstat4 nfs_status = 0;
	/* Created or found lock state */
	state_t *lock_state = NULL;
	/* Associated open state */
	state_t *state_open = NULL;
	/* The lock owner */
	state_owner_t *lock_owner = NULL;
	/* The open owner */
	state_owner_t *open_owner = NULL;
	/* The owner of a conflicting lock */
	state_owner_t *conflict_owner = NULL;
	/* The owner in which to store the response for NFSv4.0 */
	state_owner_t *resp_owner = NULL;
	/* Sequence ID, for NFSv4.0 */
	seqid4 seqid = 0;
	/* The client performing these operations */
	nfs_client_id_t *clientid = NULL;
	/* Name for the lock owner */
	state_nfs4_owner_name_t owner_name;
	/* Description of requested lock */
	fsal_lock_param_t lock_desc;
	/* Description of conflicting lock */
	fsal_lock_param_t conflict_desc;
	/* Whether to block */
	state_blocking_t blocking = STATE_NON_BLOCKING;
	/* Tracking data for the lock state */
	struct state_refer refer;
	/* Indicate if we let FSAL to handle requests during grace. */
	bool_t fsal_grace = false;
	int rc;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 LOCK handler ----------------------");

	/* Initialize to sane starting values */
	resp->resop = NFS4_OP_LOCK;
	res_LOCK4->status = NFS4_OK;

	/* Record the sequence info */
	if (data->minorversion > 0) {
		memcpy(refer.session,
		       data->session->session_id,
		       sizeof(sessionid4));
		refer.sequence = data->sequence;
		refer.slot = data->slot;
	}

	res_LOCK4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, false);

	if (res_LOCK4->status != NFS4_OK)
		return res_LOCK4->status;

	/* Convert lock parameters to internal types */
	switch (arg_LOCK4->locktype) {
	case READW_LT:
		blocking = STATE_NFSV4_BLOCKING;
		/* Fall through */

	case READ_LT:
		lock_desc.lock_type = FSAL_LOCK_R;
		break;

	case WRITEW_LT:
		blocking = STATE_NFSV4_BLOCKING;
		/* Fall through */

	case WRITE_LT:
		lock_desc.lock_type = FSAL_LOCK_W;
		break;

	default:
		LogDebug(COMPONENT_NFS_V4_LOCK, "Invalid lock type");
		res_LOCK4->status = NFS4ERR_INVAL;
		return res_LOCK4->status;
	}

	lock_desc.lock_start = arg_LOCK4->offset;
	lock_desc.lock_sle_type = FSAL_POSIX_LOCK;
	lock_desc.lock_reclaim = arg_LOCK4->reclaim;

	/* A length of STATE_LOCK_OFFSET_EOF is represented internally as
	 * 0 ("to end of file").
	 */
	if (arg_LOCK4->length != STATE_LOCK_OFFSET_EOF)
		lock_desc.lock_length = arg_LOCK4->length;
	else
		lock_desc.lock_length = 0;

	if (arg_LOCK4->locker.new_lock_owner) {
		/* New lock owner: derive it from the open stateid/owner.
		 * Check stateid correctness and get pointer to state.
		 */
		nfs_status = nfs4_Check_Stateid(
			&arg_LOCK4->locker.locker4_u.open_owner.open_stateid,
			data->current_obj,
			&state_open,
			data,
			STATEID_SPECIAL_FOR_LOCK,
			arg_LOCK4->locker.locker4_u.open_owner.open_seqid,
			data->minorversion == 0,
			lock_tag);

		if (nfs_status != NFS4_OK) {
			if (nfs_status == NFS4ERR_REPLAY) {
				/* Possible NFSv4.0 replay: re-run the seqid
				 * check against the open owner so the saved
				 * response can be returned.
				 */
				open_owner = get_state_owner_ref(state_open);

				LogStateOwner("Open: ", open_owner);

				if (open_owner != NULL) {
					resp_owner = open_owner;
					seqid = arg_LOCK4->locker.locker4_u
						.open_owner.open_seqid;
					goto check_seqid;
				}
			}

			res_LOCK4->status = nfs_status;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid for open owner");
			return res_LOCK4->status;
		}

		open_owner = get_state_owner_ref(state_open);

		LogStateOwner("Open: ", open_owner);

		if (open_owner == NULL) {
			/* State is going stale. */
			res_LOCK4->status = NFS4ERR_STALE;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid, stale open owner");
			goto out2;
		}

		lock_state = NULL;
		lock_owner = NULL;
		resp_owner = open_owner;
		seqid = arg_LOCK4->locker.locker4_u.open_owner.open_seqid;

		LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
			"LOCK New lock owner from open owner",
			data->current_obj, open_owner, &lock_desc);

		/* Check whether the clientid is known or not */
		rc = nfs_client_id_get_confirmed(
			data->minorversion == 0 ? arg_LOCK4->locker.
				  locker4_u.open_owner.lock_owner.clientid
				: data->session->clientid,
			&clientid);

		if (rc != CLIENT_ID_SUCCESS) {
			res_LOCK4->status = clientid_error_to_nfsstat(rc);
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs_client_id_get");
			goto out2;
		}

		if (isDebug(COMPONENT_CLIENTID) && (clientid !=
		    open_owner->so_owner.so_nfs4_owner.so_clientrec)) {
			char str_open[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf_open = {
				sizeof(str_open), str_open, str_open};
			char str_lock[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf_lock = {
				sizeof(str_lock), str_lock, str_lock};

			display_client_id_rec(&dspbuf_open,
					      open_owner->so_owner
					      .so_nfs4_owner.so_clientrec);
			display_client_id_rec(&dspbuf_lock, clientid);

			LogDebug(COMPONENT_CLIENTID,
				 "Unexpected, new lock owner clientid {%s} doesn't match open owner clientid {%s}",
				 str_lock, str_open);
		}

		/* The related stateid is already stored in state_open */

		/* An open state has been found. Check its type */
		if (state_open->state_type != STATE_TYPE_SHARE) {
			res_LOCK4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed open stateid is not a SHARE");
			goto out2;
		}

		/* Is this lock_owner known ? */
		convert_nfs4_lock_owner(&arg_LOCK4->locker.locker4_u.
					open_owner.lock_owner,
					&owner_name);
		LogStateOwner("Lock: ", lock_owner);
	} else {
		/* Existing lock owner Find the lock stateid From
		 * that, get the open_owner
		 *
		 * There was code here before to handle all-0 stateid,
		 * but that really doesn't apply - when we handle
		 * temporary locks for I/O operations (which is where
		 * we will see all-0 or all-1 stateid, those will not
		 * come in through nfs4_op_lock.
		 *
		 * Check stateid correctness and get pointer to state
		 */
		nfs_status = nfs4_Check_Stateid(
			&arg_LOCK4->locker.locker4_u.lock_owner.lock_stateid,
			data->current_obj,
			&lock_state,
			data,
			STATEID_SPECIAL_FOR_LOCK,
			arg_LOCK4->locker.locker4_u.lock_owner.lock_seqid,
			data->minorversion == 0,
			lock_tag);

		if (nfs_status != NFS4_OK) {
			if (nfs_status == NFS4ERR_REPLAY) {
				/* Possible NFSv4.0 replay against the lock
				 * owner's saved response.
				 */
				lock_owner = get_state_owner_ref(lock_state);

				LogStateOwner("Lock: ", lock_owner);

				if (lock_owner != NULL) {
					open_owner = lock_owner->so_owner
						.so_nfs4_owner.so_related_owner;
					inc_state_owner_ref(open_owner);
					resp_owner = lock_owner;
					seqid = arg_LOCK4->locker.locker4_u
						.lock_owner.lock_seqid;
					goto check_seqid;
				}
			}

			res_LOCK4->status = nfs_status;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid for existing lock owner");
			return res_LOCK4->status;
		}

		/* Check if lock state belongs to same export */
		if (!state_same_export(lock_state, op_ctx->ctx_export)) {
			LogEvent(COMPONENT_STATE,
				 "Lock Owner Export Conflict, Lock held for export %"
				 PRIu16" request for export %"PRIu16,
				 state_export_id(lock_state),
				 op_ctx->ctx_export->export_id);
			res_LOCK4->status = NFS4ERR_INVAL;
			goto out2;
		}

		/* A lock state has been found. Check its type */
		if (lock_state->state_type != STATE_TYPE_LOCK) {
			res_LOCK4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed existing lock owner, state type is not LOCK");
			goto out2;
		}

		/* Get the old lockowner. We can do the following
		 * 'cast', in NFSv4 lock_owner4 and open_owner4 are
		 * different types but with the same definition
		 */
		lock_owner = get_state_owner_ref(lock_state);

		LogStateOwner("Lock: ", lock_owner);

		if (lock_owner == NULL) {
			/* State is going stale. */
			res_LOCK4->status = NFS4ERR_STALE;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid, stale open owner");
			goto out2;
		}

		open_owner =
			lock_owner->so_owner.so_nfs4_owner.so_related_owner;
		LogStateOwner("Open: ", open_owner);
		inc_state_owner_ref(open_owner);
		state_open = lock_state->state_data.lock.openstate;
		inc_state_t_ref(state_open);
		resp_owner = lock_owner;
		seqid = arg_LOCK4->locker.locker4_u.lock_owner.lock_seqid;

		LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
			"LOCK Existing lock owner", data->current_obj,
			lock_owner, &lock_desc);

		/* Get the client for this open owner */
		clientid = open_owner->so_owner.so_nfs4_owner.so_clientrec;
		inc_client_id_ref(clientid);
	}

 check_seqid:

	/* Check seqid (lock_seqid or open_seqid) */
	if (data->minorversion == 0) {
		if (!Check_nfs4_seqid(resp_owner,
				      seqid,
				      op,
				      data->current_obj,
				      resp,
				      lock_tag)) {
			/* Response is all setup for us and LogDebug
			 * told what was wrong
			 */
			goto out2;
		}
	}

	/* Lock length should not be 0 */
	if (arg_LOCK4->length == 0LL) {
		res_LOCK4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed length == 0");
		goto out;
	}

	/* Check for range overflow. Comparing beyond 2^64 is not
	 * possible int 64 bits precision, but off+len > 2^64-1 is
	 * equivalent to len > 2^64-1 - off
	 */
	if (lock_desc.lock_length >
	    (STATE_LOCK_OFFSET_EOF - lock_desc.lock_start)) {
		res_LOCK4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed length overflow");
		goto out;
	}

	/* Check if open state has correct access for type of lock.
	 *
	 * Don't need to check for conflicting states since this open
	 * state assures there are no conflicting states.
	 */
	if (((arg_LOCK4->locktype == WRITE_LT ||
	      arg_LOCK4->locktype == WRITEW_LT)
	      &&
	      ((state_open->state_data.share.share_access &
		OPEN4_SHARE_ACCESS_WRITE) == 0))
	    ||
	    ((arg_LOCK4->locktype == READ_LT ||
	      arg_LOCK4->locktype == READW_LT)
	      &&
	      ((state_open->state_data.share.share_access &
		OPEN4_SHARE_ACCESS_READ) == 0))) {
		/* The open state doesn't allow access based on the
		 * type of lock
		 */
		LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
			"LOCK failed, SHARE doesn't allow access",
			data->current_obj, lock_owner, &lock_desc);

		res_LOCK4->status = NFS4ERR_OPENMODE;

		goto out;
	}

	/* Do grace period checking (use resp_owner below since a new
	 * lock request with a new lock owner doesn't have a lock owner
	 * yet, but does have an open owner - resp_owner is always one or
	 * the other and non-NULL at this point - so makes for a better log).
	 */
	if (nfs_in_grace()) {
		if (op_ctx->fsal_export->exp_ops.
			fs_supports(op_ctx->fsal_export, fso_grace_method))
			fsal_grace = true;

		if (!fsal_grace && !arg_LOCK4->reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, non-reclaim while in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_GRACE;
			goto out;
		}

		if (!fsal_grace && arg_LOCK4->reclaim
		    && !clientid->cid_allow_reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, invalid reclaim while in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_NO_GRACE;
			goto out;
		}
	} else {
		if (arg_LOCK4->reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, reclaim while not in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_NO_GRACE;
			goto out;
		}
	}

	/* Test if this request is attempting to create a new lock owner */
	if (arg_LOCK4->locker.new_lock_owner) {
		bool_t isnew;

		/* A lock owner is always associated with a previously
		   made open which has itself a previously made
		   stateid */

		/* This lock owner is not known yet, allocated and set
		   up a new one */
		lock_owner = create_nfs4_owner(&owner_name,
					       clientid,
					       STATE_LOCK_OWNER_NFSV4,
					       open_owner,
					       0,
					       &isnew,
					       CARE_ALWAYS);

		LogStateOwner("Lock: ", lock_owner);

		if (lock_owner == NULL) {
			res_LOCK4->status = NFS4ERR_RESOURCE;

			LogLock(COMPONENT_NFS_V4_LOCK, NIV_EVENT,
				"LOCK failed to create new lock owner",
				data->current_obj, open_owner, &lock_desc);

			goto out2;
		}

		if (!isnew) {
			PTHREAD_MUTEX_lock(&lock_owner->so_mutex);
			/* Check lock_seqid if it has attached locks. */
			if (!glist_empty(&lock_owner->so_lock_list)
			    && (data->minorversion == 0)
			    && !Check_nfs4_seqid(lock_owner,
						 arg_LOCK4->locker.locker4_u.
						     open_owner.lock_seqid,
						 op,
						 data->current_obj,
						 resp,
						 lock_tag)) {
				LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
					"LOCK failed to create new lock owner, re-use",
					data->current_obj,
					open_owner, &lock_desc);
				dump_all_locks(
					"All locks (re-use of lock owner)");

				PTHREAD_MUTEX_unlock(&lock_owner->so_mutex);
				/* Response is all setup for us and
				 * LogDebug told what was wrong
				 */
				goto out2;
			}

			PTHREAD_MUTEX_unlock(&lock_owner->so_mutex);

			/* Lock owner is known, see if we also already have
			 * a stateid. Do this here since it's impossible for
			 * there to be such a state if the lock owner was
			 * previously unknown.
			 */
			lock_state = nfs4_State_Get_Obj(data->current_obj,
							lock_owner);
		}

		if (lock_state == NULL) {
			/* Prepare state management structure */
			memset(&candidate_data, 0, sizeof(candidate_data));
			candidate_data.lock.openstate = state_open;

			/* Add the lock state to the lock table */
			state_status = state_add(data->current_obj,
						 STATE_TYPE_LOCK,
						 &candidate_data,
						 lock_owner,
						 &lock_state,
						 data->minorversion > 0 ?
							&refer : NULL);

			if (state_status != STATE_SUCCESS) {
				res_LOCK4->status = NFS4ERR_RESOURCE;

				LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
					"LOCK failed to add new stateid",
					data->current_obj, lock_owner,
					&lock_desc);

				goto out2;
			}

			glist_init(&lock_state->state_data.lock.state_locklist);

			/* Add lock state to the list of lock states belonging
			   to the open state */
			glist_add_tail(
				&state_open->state_data.share.share_lockstates,
				&lock_state->state_data.lock.state_sharelist);
		}
	}

	if (data->minorversion == 0) {
		op_ctx->clientid =
		    &lock_owner->so_owner.so_nfs4_owner.so_clientid;
	}

	/* Now we have a lock owner and a stateid. Go ahead and push
	 * lock into SAL (and FSAL).
	 */
	state_status = state_lock(data->current_obj,
				  lock_owner,
				  lock_state,
				  blocking,
				  NULL,	/* No block data for now */
				  &lock_desc,
				  &conflict_owner,
				  &conflict_desc);

	if (state_status != STATE_SUCCESS) {
		if (state_status == STATE_LOCK_CONFLICT) {
			/* A conflicting lock from a different
			   lock_owner, returns NFS4ERR_DENIED */
			Process_nfs4_conflict(&res_LOCK4->LOCK4res_u.denied,
					      conflict_owner,
					      &conflict_desc);
		}

		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed with status %s",
			 state_err_str(state_status));

		res_LOCK4->status = nfs4_Errno_state(state_status);

		/* Save the response in the lock or open owner */
		if (res_LOCK4->status != NFS4ERR_RESOURCE
		    && res_LOCK4->status != NFS4ERR_BAD_STATEID
		    && data->minorversion == 0) {
			Copy_nfs4_state_req(resp_owner,
					    seqid,
					    op,
					    data->current_obj,
					    resp,
					    lock_tag);
		}

		if (arg_LOCK4->locker.new_lock_owner) {
			/* Need to destroy new state */
			state_del(lock_state);
		}
		goto out2;
	}

	if (data->minorversion == 0)
		op_ctx->clientid = NULL;

	res_LOCK4->status = NFS4_OK;

	/* Handle stateid/seqid for success */
	update_stateid(lock_state,
		       &res_LOCK4->LOCK4res_u.resok4.lock_stateid,
		       data,
		       lock_tag);

	if (arg_LOCK4->locker.new_lock_owner) {
		/* Also save the response in the lock owner */
		Copy_nfs4_state_req(lock_owner,
				    arg_LOCK4->locker.locker4_u.open_owner.
				    lock_seqid,
				    op,
				    data->current_obj,
				    resp,
				    lock_tag);
	}

	if (isFullDebug(COMPONENT_NFS_V4_LOCK)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_stateid(&dspbuf, lock_state);

		LogFullDebug(COMPONENT_NFS_V4_LOCK, "LOCK stateid %s", str);
	}

	LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG, "LOCK applied",
		data->current_obj, lock_owner, &lock_desc);

 out:

	if (data->minorversion == 0) {
		/* Save the response in the lock or open owner */
		Copy_nfs4_state_req(resp_owner,
				    seqid,
				    op,
				    data->current_obj,
				    resp,
				    lock_tag);
	}

 out2:

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	if (lock_state != NULL)
		dec_state_t_ref(lock_state);

	LogStateOwner("Open: ", open_owner);
	LogStateOwner("Lock: ", lock_owner);

	if (open_owner != NULL)
		dec_state_owner_ref(open_owner);

	if (lock_owner != NULL)
		dec_state_owner_ref(lock_owner);

	if (clientid != NULL)
		dec_client_id_ref(clientid);

	return res_LOCK4->status;
}				/* nfs4_op_lock */
/**
 * Process an NLM4 SHARE request (non-monitored DOS share reservation).
 *
 * @param parg      Request arguments
 * @param pexport   Export on which the request is made
 * @param pcontext  FSAL credentials/operation context
 * @param pworker   Worker thread data (unused here)
 * @param preq      Transport-level request (used for client identification)
 * @param pres      Response to fill in
 *
 * @return NFS_REQ_OK in all cases (errors are reported in pres).
 */
int nlm4_Share(nfs_arg_t * parg,
               exportlist_t * pexport,
               fsal_op_context_t * pcontext,
               nfs_worker_data_t * pworker,
               struct svc_req * preq,
               nfs_res_t * pres)
{
  nlm4_shareargs     * arg = &parg->arg_nlm4_share;
  cache_entry_t      * pentry;
  state_status_t       state_status = STATE_SUCCESS;
  char                 buffer[MAXNETOBJ_SZ * 2];
  state_nsm_client_t * nsm_client;
  state_nlm_client_t * nlm_client;
  state_owner_t      * nlm_owner;
  int                  rc;
  int                  grace = nfs_in_grace();

  pres->res_nlm4share.sequence = 0;

  /* BUGFIX: pass the real buffer size instead of the magic 1024, which
   * disagreed with the declared size of buffer (MAXNETOBJ_SZ * 2).
   */
  netobj_to_string(&arg->cookie, buffer, sizeof(buffer));
  LogDebug(COMPONENT_NLM,
           "REQUEST PROCESSING: Calling nlm4_Share cookie=%s reclaim=%s",
           buffer, arg->reclaim ? "yes" : "no");

  if(!copy_netobj(&pres->res_nlm4share.cookie, &arg->cookie))
    {
      pres->res_nlm4share.stat = NLM4_FAILED;
      LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
               lock_result_str(pres->res_nlm4share.stat));
      return NFS_REQ_OK;
    }

  /* Allow only reclaim share request during recovery and visa versa.
   * Note: NLM_SHARE is indicated to be non-monitored, however, it does
   * have a reclaim flag, so we will honor the reclaim flag if used.
   */
  if((grace && !arg->reclaim) ||
     (!grace && arg->reclaim))
    {
      pres->res_nlm4share.stat = NLM4_DENIED_GRACE_PERIOD;
      LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
               lock_result_str(pres->res_nlm4share.stat));
      return NFS_REQ_OK;
    }

  /* Resolve the file and the NSM/NLM client and owner records.  A
   * non-negative return is an NLM status to hand straight back.
   */
  rc = nlm_process_share_parms(preq,
                               &arg->share,
                               &pentry,
                               pcontext,
                               CARE_NO_MONITOR,
                               &nsm_client,
                               &nlm_client,
                               &nlm_owner);

  if(rc >= 0)
    {
      /* Present the error back to the client */
      pres->res_nlm4share.stat = (nlm4_stats)rc;
      LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
               lock_result_str(pres->res_nlm4share.stat));
      return NFS_REQ_OK;
    }

  if(state_nlm_share(pentry,
                     pcontext,
                     pexport,
                     arg->share.access,
                     arg->share.mode,
                     nlm_owner,
                     &state_status) != STATE_SUCCESS)
    {
      pres->res_nlm4share.stat = nlm_convert_state_error(state_status);
    }
  else
    {
      pres->res_nlm4share.stat = NLM4_GRANTED;
    }

  /* Release the NLM Client and NLM Owner references we have */
  dec_nsm_client_ref(nsm_client);
  dec_nlm_client_ref(nlm_client);
  dec_state_owner_ref(nlm_owner);
  cache_inode_put(pentry);

  LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
           lock_result_str(pres->res_nlm4share.stat));
  return NFS_REQ_OK;
}
/**
 * Implement the NFSv4.0 RELEASE_LOCKOWNER operation: if the named lock
 * owner exists and holds no locks, release its state; otherwise return
 * NFS4ERR_LOCKS_HELD (or NFS4_OK if the owner is simply unknown).
 *
 * @param op    Operation arguments
 * @param data  Compound request data
 * @param resp  Operation response
 *
 * @return NFS status of the operation.
 */
int nfs4_op_release_lockowner(struct nfs_argop4 * op,
                              compound_data_t * data,
                              struct nfs_resop4 * resp)
{
  nfs_client_id_t         * pnfs_client_id;
  state_owner_t           * plock_owner;
  state_nfs4_owner_name_t   owner_name;
  int                       rc;

  LogDebug(COMPONENT_NFS_V4_LOCK,
           "Entering NFS v4 RELEASE_LOCKOWNER handler -----------------------------------------------------");

  resp->resop = NFS4_OP_RELEASE_LOCKOWNER;
  res_RELEASE_LOCKOWNER4.status = NFS4_OK;

  /* Check clientid */
  rc = nfs_client_id_get_confirmed(arg_RELEASE_LOCKOWNER4.lock_owner.clientid,
                                   &pnfs_client_id);

  if(rc != CLIENT_ID_SUCCESS)
    {
      res_RELEASE_LOCKOWNER4.status = clientid_error_to_nfsstat(rc);
      goto out2;
    }

  /* Reserve the lease so it can't expire while we work. */
  P(pnfs_client_id->cid_mutex);

  if(!reserve_lease(pnfs_client_id))
    {
      V(pnfs_client_id->cid_mutex);

      dec_client_id_ref(pnfs_client_id);

      res_RELEASE_LOCKOWNER4.status = NFS4ERR_EXPIRED;
      goto out2;
    }

  V(pnfs_client_id->cid_mutex);

  /* look up the lock owner and see if we can find it */
  convert_nfs4_lock_owner(&arg_RELEASE_LOCKOWNER4.lock_owner, &owner_name);

  /* If this open owner is not known yet, allocated and set up a new one
   *
   * NOTE(review): STATE_OPEN_OWNER_NFSV4 looks wrong here — this operation
   * deals with a LOCK owner (the name was converted with
   * convert_nfs4_lock_owner above), so STATE_LOCK_OWNER_NFSV4 would be
   * expected.  Confirm against the owner hash key semantics before
   * changing; with CARE_NOT this only performs a lookup.
   */
  plock_owner = create_nfs4_owner(&owner_name,
                                  pnfs_client_id,
                                  STATE_OPEN_OWNER_NFSV4,
                                  NULL,
                                  0,
                                  NULL,
                                  CARE_NOT);

  if(plock_owner == NULL)
    {
      /* the owner doesn't exist, we are done */
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "lock owner does not exist");
      res_RELEASE_LOCKOWNER4.status = NFS4_OK;
      goto out1;
    }

  P(plock_owner->so_mutex);

  /* got the owner, does it still have any locks being held */
  if(!glist_empty(&plock_owner->so_lock_list))
    {
      V(plock_owner->so_mutex);

      res_RELEASE_LOCKOWNER4.status = NFS4ERR_LOCKS_HELD;
    }
  else
    {
      V(plock_owner->so_mutex);

      /* found the lock owner and it doesn't have any locks, release it */
      release_lockstate(plock_owner);

      res_RELEASE_LOCKOWNER4.status = NFS4_OK;
    }

  /* Release the reference to the lock owner acquired via create_nfs4_owner */
  dec_state_owner_ref(plock_owner);

 out1:

  /* Update the lease before exit */
  P(pnfs_client_id->cid_mutex);

  update_lease(pnfs_client_id);

  V(pnfs_client_id->cid_mutex);

  dec_client_id_ref(pnfs_client_id);

 out2:

  LogDebug(COMPONENT_NFS_V4_LOCK,
           "Leaving NFS v4 RELEASE_LOCKOWNER handler -----------------------------------------------------");

  return res_RELEASE_LOCKOWNER4.status;
}                               /* nfs4_op_release_lock_owner */
/**
 * Implement the NFSv4 OPEN operation.
 *
 * Validates export permissions, clientid/lease, open owner and claim
 * type, then performs the open via open4_ex() and fills in the
 * change_info4 and result flags.
 *
 * @param[in]  op    Operation arguments (OPEN4args)
 * @param[in]  data  Compound request data
 * @param[out] resp  Operation response (OPEN4res)
 *
 * @return NFS status of the operation (also stored in resp).
 */
int nfs4_op_open(struct nfs_argop4 *op, compound_data_t *data,
		 struct nfs_resop4 *resp)
{
	/* Shorter alias for OPEN4 arguments */
	OPEN4args * const arg_OPEN4 = &(op->nfs_argop4_u.opopen);
	/* Shorter alias for OPEN4 response */
	OPEN4res * const res_OPEN4 = &(resp->nfs_resop4_u.opopen);
	/* The handle from which the change_info4 is to be
	 * generated.  Every mention of change_info4 in RFC5661
	 * speaks of the parent directory of the file being opened.
	 * However, with CLAIM_FH, CLAIM_DELEG_CUR_FH, and
	 * CLAIM_DELEG_PREV_FH, there is no way to derive the parent
	 * directory from the file handle.  It is Unclear what the
	 * correct behavior is.  In our implementation, we take the
	 * change_info4 of whatever filehandle is current when the
	 * OPEN operation is invoked.
	 */
	struct fsal_obj_handle *obj_change = NULL;
	/* The found client record */
	nfs_client_id_t *clientid = NULL;
	/* The found or created state owner for this open */
	state_owner_t *owner = NULL;
	/* The supplied claim type */
	open_claim_type4 claim = arg_OPEN4->claim.claim;
	/* The open state for the file */
	state_t *file_state = NULL;
	/* True if the state was newly created */
	bool new_state = false;
	int retval;

	LogDebug(COMPONENT_STATE,
		 "Entering NFS v4 OPEN handler -----------------------------");

	/* What kind of open is it ? */
	LogFullDebug(COMPONENT_STATE,
		     "OPEN: Claim type = %d, Open Type = %d, Share Deny = %d, Share Access = %d ",
		     arg_OPEN4->claim.claim,
		     arg_OPEN4->openhow.opentype,
		     arg_OPEN4->share_deny,
		     arg_OPEN4->share_access);

	resp->resop = NFS4_OP_OPEN;
	res_OPEN4->status = NFS4_OK;
	res_OPEN4->OPEN4res_u.resok4.rflags = 0;

	/* Check export permissions if OPEN4_CREATE */
	if ((arg_OPEN4->openhow.opentype == OPEN4_CREATE)
	    && ((op_ctx->export_perms->options
		 & EXPORT_OPTION_MD_WRITE_ACCESS) == 0)) {
		res_OPEN4->status = NFS4ERR_ROFS;

		LogDebug(COMPONENT_NFS_V4,
			 "Status of OP_OPEN due to export permissions = %s",
			 nfsstat4_to_str(res_OPEN4->status));
		return res_OPEN4->status;
	}

	/* Check export permissions if OPEN4_SHARE_ACCESS_WRITE */
	if (((arg_OPEN4->share_access & OPEN4_SHARE_ACCESS_WRITE) != 0)
	    && ((op_ctx->export_perms->options
		 & EXPORT_OPTION_WRITE_ACCESS) == 0)) {
		res_OPEN4->status = NFS4ERR_ROFS;

		LogDebug(COMPONENT_NFS_V4,
			 "Status of OP_OPEN due to export permissions = %s",
			 nfsstat4_to_str(res_OPEN4->status));

		return res_OPEN4->status;
	}

	/* Do basic checks on a filehandle */
	res_OPEN4->status = nfs4_sanity_check_FH(data, NO_FILE_TYPE, false);

	if (res_OPEN4->status != NFS4_OK)
		return res_OPEN4->status;

	if (data->current_obj == NULL) {
		/* This should be impossible, as PUTFH fills in the
		 * current entry and previous checks weed out handles
		 * in the PseudoFS and DS handles.
		 */
		res_OPEN4->status = NFS4ERR_SERVERFAULT;
		LogCrit(COMPONENT_NFS_V4,
			"Impossible condition in compound data at %s:%u.",
			__FILE__, __LINE__);
		goto out3;
	}

	/* It this a known client id? */
	LogDebug(COMPONENT_STATE,
		 "OPEN Client id = %" PRIx64,
		 arg_OPEN4->owner.clientid);

	retval = nfs_client_id_get_confirmed(
		data->minorversion == 0 ? arg_OPEN4->owner.clientid
					: data->session->clientid,
		&clientid);

	if (retval != CLIENT_ID_SUCCESS) {
		res_OPEN4->status = clientid_error_to_nfsstat(retval);
		LogDebug(COMPONENT_NFS_V4,
			 "nfs_client_id_get_confirmed failed");
		return res_OPEN4->status;
	}

	/* Check if lease is expired and reserve it */
	PTHREAD_MUTEX_lock(&clientid->cid_mutex);

	if (data->minorversion == 0 && !reserve_lease(clientid)) {
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
		res_OPEN4->status = NFS4ERR_EXPIRED;
		LogDebug(COMPONENT_NFS_V4, "Lease expired");
		goto out3;
	}

	PTHREAD_MUTEX_unlock(&clientid->cid_mutex);

	/* Get the open owner */
	if (!open4_open_owner(op, data, resp, clientid, &owner)) {
		LogDebug(COMPONENT_NFS_V4, "open4_open_owner failed");
		goto out2;
	}

	/* Do the claim check here, so we can save the result in the
	 * owner for NFSv4.0.
	 */
	res_OPEN4->status = open4_validate_claim(data, claim, clientid);

	if (res_OPEN4->status != NFS4_OK) {
		LogDebug(COMPONENT_NFS_V4, "open4_validate_claim failed");
		goto out;
	}

	/* After this point we know we have only CLAIM_NULL,
	 * CLAIM_FH, or CLAIM_PREVIOUS, and that our grace period and
	 * minor version are appropriate for the claim specified.
	 */
	if ((arg_OPEN4->openhow.opentype == OPEN4_CREATE)
	    && (claim != CLAIM_NULL)) {
		res_OPEN4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4, "OPEN4_CREATE but not CLAIM_NULL");
		goto out2;
	}

	/* So we still have a reference even after we replace the
	 * current FH.
	 */
	obj_change = data->current_obj;
	obj_change->obj_ops->get_ref(obj_change);

	/* Update the change info for entry_change. */
	res_OPEN4->OPEN4res_u.resok4.cinfo.before =
		fsal_get_changeid4(obj_change);

	/* Check if share_access does not have any access set, or has
	 * invalid bits that are set.  check that share_deny doesn't
	 * have any invalid bits set.
	 */
	if (!(arg_OPEN4->share_access & OPEN4_SHARE_ACCESS_BOTH)
	    || (data->minorversion == 0
		&& arg_OPEN4->share_access & ~OPEN4_SHARE_ACCESS_BOTH)
	    || (arg_OPEN4->share_access
		& (~OPEN4_SHARE_ACCESS_WANT_DELEG_MASK
		   & ~OPEN4_SHARE_ACCESS_BOTH))
	    || (arg_OPEN4->share_deny & ~OPEN4_SHARE_DENY_BOTH)) {
		res_OPEN4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4,
			 "Invalid SHARE_ACCESS or SHARE_DENY");
		goto out;
	}

	/* Utilize the extended FSAL API functionality to perform the open. */
	open4_ex(arg_OPEN4, data, res_OPEN4, clientid,
		 owner, &file_state, &new_state);

	if (res_OPEN4->status != NFS4_OK)
		goto out;

	memset(&res_OPEN4->OPEN4res_u.resok4.attrset,
	       0,
	       sizeof(struct bitmap4));

	if (arg_OPEN4->openhow.openflag4_u.how.mode == EXCLUSIVE4 ||
	    arg_OPEN4->openhow.openflag4_u.how.mode == EXCLUSIVE4_1) {
		struct bitmap4 *bits = &res_OPEN4->OPEN4res_u.resok4.attrset;

		set_attribute_in_bitmap(bits, FATTR4_TIME_ACCESS);
		set_attribute_in_bitmap(bits, FATTR4_TIME_MODIFY);
	}

	/* If server use OPEN_CONFIRM4, set the correct flag,
	 * but not for 4.1
	 */
	if (owner->so_owner.so_nfs4_owner.so_confirmed == false)
		res_OPEN4->OPEN4res_u.resok4.rflags |= OPEN4_RESULT_CONFIRM;

	res_OPEN4->OPEN4res_u.resok4.rflags |= OPEN4_RESULT_LOCKTYPE_POSIX;

	LogFullDebug(COMPONENT_STATE, "NFS4 OPEN returning NFS4_OK");

	/* regular exit */
	res_OPEN4->status = NFS4_OK;

	/* Update change_info4 */
	res_OPEN4->OPEN4res_u.resok4.cinfo.after =
		fsal_get_changeid4(obj_change);
	res_OPEN4->OPEN4res_u.resok4.cinfo.atomic = FALSE;

	/* Handle open stateid/seqid for success */
	update_stateid(file_state,
		       &res_OPEN4->OPEN4res_u.resok4.stateid,
		       data,
		       open_tag);

 out:

	if (res_OPEN4->status != NFS4_OK) {
		LogDebug(COMPONENT_STATE, "failed with status %s",
			 nfsstat4_to_str(res_OPEN4->status));
	}

	/* Save the response in the open owner.
	 * obj_change is either the parent directory or for a CLAIM_PREV is
	 * the entry itself.  In either case, it's the right entry to use in
	 * saving the request results.
	 */
	if (data->minorversion == 0) {
		Copy_nfs4_state_req(owner,
				    arg_OPEN4->seqid,
				    op,
				    obj_change,
				    resp,
				    open_tag);
	}

 out2:

	/* Update the lease before exit */
	if (data->minorversion == 0) {
		PTHREAD_MUTEX_lock(&clientid->cid_mutex);
		update_lease(clientid);
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
	}

	if (file_state != NULL)
		dec_state_t_ref(file_state);

	/* Clean up if we have an error exit */
	if ((file_state != NULL) && new_state
	    && (res_OPEN4->status != NFS4_OK)) {
		/* Need to destroy open owner and state */
		state_del(file_state);
	}

	if (obj_change)
		obj_change->obj_ops->put_ref(obj_change);

	if (owner != NULL) {
		/* Need to release the open owner for this call */
		dec_state_owner_ref(owner);
	}

 out3:

	/* BUGFIX: the "Impossible condition" path jumps here before a
	 * clientid reference is obtained, so clientid may be NULL; guard
	 * the release to avoid a NULL dereference.
	 */
	if (clientid != NULL)
		dec_client_id_ref(clientid);

	return res_OPEN4->status;
}				/* nfs4_op_open */
/**
 * @brief Common worker for the NFSv4 WRITE operation.
 *
 * Validates the filehandle and stateid, enforces share reservations and
 * export write limits (MaxWrite / MaxOffsetWrite), then performs the
 * actual write through the FSAL and builds the WRITE4 response
 * (count, committed, write verifier).
 *
 * @param[in]     op   Arguments for nfs4_op (WRITE4args)
 * @param[in,out] data Compound request's data
 * @param[out]    resp Results for nfs4_op (WRITE4res)
 * @param[in]     io   FSAL I/O direction selector (also routes pNFS DS
 *                     handles to op_dswrite / op_dswrite_plus)
 * @param[in,out] info Extended I/O info (WRITE_PLUS style); may be NULL
 *
 * @return Value of res_WRITE4->status.
 */
static int nfs4_write(struct nfs_argop4 *op, compound_data_t *data,
		      struct nfs_resop4 *resp, fsal_io_direction_t io,
		      struct io_info *info)
{
	WRITE4args * const arg_WRITE4 = &op->nfs_argop4_u.opwrite;
	WRITE4res * const res_WRITE4 = &resp->nfs_resop4_u.opwrite;
	uint64_t size = 0;
	size_t written_size = 0;
	uint64_t offset;
	bool eof_met;
	bool sync = false;
	void *bufferdata;
	stable_how4 stable_how;
	/* state from the stateid in the request (NULL for all-0/all-1) */
	state_t *state_found = NULL;
	/* open state backing the request; extra ref taken when non-NULL */
	state_t *state_open = NULL;
	fsal_status_t fsal_status = {0, 0};
	struct fsal_obj_handle *obj = NULL;
	/* true once an anonymous (special-stateid) share check succeeded;
	 * the matching state_share_anonymous_io_done() happens at done: */
	bool anonymous_started = false;
	struct gsh_buffdesc verf_desc;
	state_owner_t *owner = NULL;
	/* Snapshot export limits once; they may change concurrently. */
	uint64_t MaxWrite =
	    atomic_fetch_uint64_t(&op_ctx->ctx_export->MaxWrite);
	uint64_t MaxOffsetWrite =
	    atomic_fetch_uint64_t(&op_ctx->ctx_export->MaxOffsetWrite);

	resp->resop = NFS4_OP_WRITE;
	res_WRITE4->status = NFS4_OK;

	/* pNFS data-server handles bypass the normal state machinery
	 * entirely (NFSv4.1+ only). */
	if ((data->minorversion > 0) &&
	    (nfs4_Is_Fh_DSHandle(&data->currentFH))) {
		if (io == FSAL_IO_WRITE)
			return op_dswrite(op, data, resp);
		else
			return op_dswrite_plus(op, data, resp, info);
	}

	/*
	 * Do basic checks on a filehandle
	 * Only files can be written
	 */
	res_WRITE4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, true);

	if (res_WRITE4->status != NFS4_OK)
		return res_WRITE4->status;

	/* if quota support is active, then we should check is the FSAL
	 * allows inode creation or not */
	fsal_status = op_ctx->fsal_export->exp_ops.check_quota(
			op_ctx->fsal_export,
			op_ctx->ctx_export->fullpath,
			FSAL_QUOTA_INODES);

	if (FSAL_IS_ERROR(fsal_status)) {
		res_WRITE4->status = NFS4ERR_DQUOT;
		return res_WRITE4->status;
	}

	/* vnode to manage is the current one */
	obj = data->current_obj;

	/* Check stateid correctness and get pointer to state
	 * (also checks for special stateids)
	 */
	res_WRITE4->status = nfs4_Check_Stateid(&arg_WRITE4->stateid,
						obj,
						&state_found,
						data,
						STATEID_SPECIAL_ANY,
						0,
						false,
						"WRITE");

	if (res_WRITE4->status != NFS4_OK)
		return res_WRITE4->status;

	/* NB: After this points, if state_found == NULL, then
	 * the stateid is all-0 or all-1
	 */
	if (state_found != NULL) {
		struct state_deleg *sdeleg;

		if (info)
			info->io_advise = state_found->state_data.io_advise;

		/* Resolve the stateid to the open state that governs
		 * share access for this write. */
		switch (state_found->state_type) {
		case STATE_TYPE_SHARE:
			state_open = state_found;
			/* Note this causes an extra refcount, but it
			 * simplifies logic below.
			 */
			inc_state_t_ref(state_open);
			/** @todo FSF: need to check against existing locks */
			break;

		case STATE_TYPE_LOCK:
			state_open = state_found->state_data.lock.openstate;
			inc_state_t_ref(state_open);
			/**
			 * @todo FSF: should check that write is in range of an
			 * exclusive lock...
			 */
			break;

		case STATE_TYPE_DELEG:
			/* A write delegation must be GRANTED to permit
			 * writing under it. */
			sdeleg = &state_found->state_data.deleg;
			if (!(sdeleg->sd_type & OPEN_DELEGATE_WRITE) ||
			    (sdeleg->sd_state != DELEG_GRANTED)) {
				/* Invalid delegation for this operation. */
				LogDebug(COMPONENT_STATE,
					 "Delegation type:%d state:%d",
					 sdeleg->sd_type,
					 sdeleg->sd_state);
				res_WRITE4->status = NFS4ERR_BAD_STATEID;
				return res_WRITE4->status;
			}

			state_open = NULL;
			break;

		case STATE_TYPE_LAYOUT:
			state_open = NULL;
			break;

		default:
			res_WRITE4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "WRITE with invalid stateid of type %d",
				 (int)state_found->state_type);
			return res_WRITE4->status;
		}

		/* This is a write operation, this means that the file
		 * MUST have been opened for writing
		 */
		if (state_open != NULL &&
		    (state_open->state_data.share.share_access &
		     OPEN4_SHARE_ACCESS_WRITE) == 0) {
			/* Bad open mode, return NFS4ERR_OPENMODE */
			res_WRITE4->status = NFS4ERR_OPENMODE;

			if (isDebug(COMPONENT_NFS_V4_LOCK)) {
				char str[LOG_BUFF_LEN] = "\0";
				struct display_buffer dspbuf = {
							sizeof(str), str, str};

				display_stateid(&dspbuf, state_found);

				LogDebug(COMPONENT_NFS_V4_LOCK,
					 "WRITE %s doesn't have OPEN4_SHARE_ACCESS_WRITE",
					 str);
			}

			goto out;
		}
	} else {
		/* Special stateid, no open state, check to see if any
		 * share conflicts The stateid is all-0 or all-1
		 */
		state_open = NULL;

		res_WRITE4->status = nfs4_Errno_state(
				state_share_anonymous_io_start(
					obj,
					OPEN4_SHARE_ACCESS_WRITE,
					SHARE_BYPASS_NONE));

		if (res_WRITE4->status != NFS4_OK)
			goto out;

		anonymous_started = true;
	}

	/* Need to permission check the write. */
	/* NOTE(review): this uses obj->obj_ops.test_access (member access)
	 * while nfs4_op_open above uses obj_ops-> (pointer) — only one can
	 * match the fsal_obj_handle definition; confirm against the FSAL
	 * API headers in use. */
	fsal_status = obj->obj_ops.test_access(obj, FSAL_WRITE_ACCESS,
					       NULL, NULL, true);

	if (FSAL_IS_ERROR(fsal_status)) {
		res_WRITE4->status = nfs4_Errno_status(fsal_status);
		goto done;
	}

	/* Get the characteristics of the I/O to be made */
	offset = arg_WRITE4->offset;
	size = arg_WRITE4->data.data_len;
	stable_how = arg_WRITE4->stable;
	LogFullDebug(COMPONENT_NFS_V4,
		     "offset = %" PRIu64 " length = %" PRIu64
		     " stable = %d",
		     offset, size, stable_how);

	/* MaxOffsetWrite == UINT64_MAX means "no limit configured". */
	if (MaxOffsetWrite < UINT64_MAX) {
		LogFullDebug(COMPONENT_NFS_V4,
			     "Write offset=%" PRIu64 " count=%" PRIu64
			     " MaxOffSet=%" PRIu64, offset, size,
			     MaxOffsetWrite);

		if ((offset + size) > MaxOffsetWrite) {
			LogEvent(COMPONENT_NFS_V4,
				 "A client tryed to violate max file size %"
				 PRIu64 " for exportid #%hu",
				 MaxOffsetWrite,
				 op_ctx->ctx_export->export_id);

			res_WRITE4->status = NFS4ERR_FBIG;
			goto done;
		}
	}

	if (size > MaxWrite) {
		/*
		 * The client asked for too much data, we
		 * must restrict him
		 */
		/* Hole writes (WRITE_PLUS) are exempt from the clamp. */
		if (info == NULL ||
		    info->io_content.what != NFS4_CONTENT_HOLE) {
			LogFullDebug(COMPONENT_NFS_V4,
				     "write requested size = %" PRIu64
				     " write allowed size = %" PRIu64,
				     size, MaxWrite);
			size = MaxWrite;
		}
	}

	/* Where are the data ? */
	bufferdata = arg_WRITE4->data.data_val;

	LogFullDebug(COMPONENT_NFS_V4,
		     "offset = %" PRIu64 " length = %" PRIu64,
		     offset, size);

	/* if size == 0 , no I/O) are actually made and everything is
	 * alright: reply FILE_SYNC4 with a fresh verifier and count 0. */
	if (size == 0) {
		res_WRITE4->WRITE4res_u.resok4.count = 0;
		res_WRITE4->WRITE4res_u.resok4.committed = FILE_SYNC4;

		verf_desc.addr = res_WRITE4->WRITE4res_u.resok4.writeverf;
		verf_desc.len = sizeof(verifier4);

		op_ctx->fsal_export->exp_ops.get_write_verifier(
					op_ctx->fsal_export, &verf_desc);

		res_WRITE4->status = NFS4_OK;
		goto done;
	}

	/* Anything other than UNSTABLE4 (DATA_SYNC4/FILE_SYNC4) requests
	 * a synchronous write. */
	if (arg_WRITE4->stable == UNSTABLE4)
		sync = false;
	else
		sync = true;

	if (!anonymous_started && data->minorversion == 0) {
		owner = get_state_owner_ref(state_found);
		if (owner != NULL) {
			/* Expose the v4.0 client to lower layers for the
			 * duration of the I/O (e.g. for grace handling). */
			op_ctx->clientid =
			    &owner->so_owner.so_nfs4_owner.so_clientid;
		}
	}

	if (obj->fsal->m_ops.support_ex(obj)) {
		/* Call the new fsal_write */
		fsal_status = fsal_write2(obj, false, state_found, offset,
					  size, &written_size, bufferdata,
					  &sync, info);
	} else {
		/* Call legacy fsal_rdwr */
		fsal_status = fsal_rdwr(obj, io, offset, size, &written_size,
					bufferdata, &eof_met, &sync, info);
	}

	if (FSAL_IS_ERROR(fsal_status)) {
		LogDebug(COMPONENT_NFS_V4, "write returned %s",
			 fsal_err_txt(fsal_status));
		res_WRITE4->status = nfs4_Errno_status(fsal_status);
		/* NOTE(review): this error path skips the
		 * op_ctx->clientid = NULL reset below — confirm the
		 * clientid pointer is cleared elsewhere before op_ctx is
		 * reused, or it may dangle after the owner ref is dropped. */
		goto done;
	}

	if (!anonymous_started && data->minorversion == 0)
		op_ctx->clientid = NULL;

	/* Set the returned value */
	if (sync)
		res_WRITE4->WRITE4res_u.resok4.committed = FILE_SYNC4;
	else
		res_WRITE4->WRITE4res_u.resok4.committed = UNSTABLE4;

	res_WRITE4->WRITE4res_u.resok4.count = written_size;

	verf_desc.addr = res_WRITE4->WRITE4res_u.resok4.writeverf;
	verf_desc.len = sizeof(verifier4);
	op_ctx->fsal_export->exp_ops.get_write_verifier(op_ctx->fsal_export,
							&verf_desc);

	res_WRITE4->status = NFS4_OK;

 done:
	/* Balance the anonymous share reservation taken above. */
	if (anonymous_started)
		state_share_anonymous_io_done(obj, OPEN4_SHARE_ACCESS_WRITE);

	server_stats_io_done(size, written_size,
			     (res_WRITE4->status == NFS4_OK) ? true : false,
			     true);

 out:
	/* Release every reference taken on the way in. */
	if (owner != NULL)
		dec_state_owner_ref(owner);

	if (state_found != NULL)
		dec_state_t_ref(state_found);

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	return res_WRITE4->status;
}				/* nfs4_op_write */
/**
 * @brief NFS4_OP_OPEN_CONFIRM
 *
 * This function implements the NFS4_OP_OPEN_CONFIRM operation, which
 * confirms an open owner on its first use (NFSv4.0 only; the operation
 * does not exist in NFSv4.1+).
 *
 * @param[in]     op   Arguments for nfs4_op
 * @param[in,out] data Compound request's data
 * @param[out]    resp Results for nfs4_op
 *
 * @retval NFS4_OK or errors for NFSv4.0
 * @retval NFS4ERR_NOTSUPP for NFSv4.1
 *
 */
int nfs4_op_open_confirm(struct nfs_argop4 *op, compound_data_t *data,
			 struct nfs_resop4 *resp)
{
	OPEN_CONFIRM4args * const arg_OPEN_CONFIRM4 =
	    &op->nfs_argop4_u.opopen_confirm;
	OPEN_CONFIRM4res * const res_OPEN_CONFIRM4 =
	    &resp->nfs_resop4_u.opopen_confirm;
	OPEN_CONFIRM4resok *resok =
	    &res_OPEN_CONFIRM4->OPEN_CONFIRM4res_u.resok4;
	int rc = 0;
	state_t *state_found = NULL;
	state_owner_t *open_owner;
	const char *tag = "OPEN_CONFIRM";

	resp->resop = NFS4_OP_OPEN_CONFIRM;
	res_OPEN_CONFIRM4->status = NFS4_OK;

	/* OPEN_CONFIRM was removed from the protocol in NFSv4.1. */
	if (data->minorversion > 0) {
		res_OPEN_CONFIRM4->status = NFS4ERR_NOTSUPP;
		return res_OPEN_CONFIRM4->status;
	}

	/* Do basic checks on a filehandle
	 * Should not operate on non-file objects
	 */
	res_OPEN_CONFIRM4->status =
	    nfs4_sanity_check_FH(data, REGULAR_FILE, false);

	if (res_OPEN_CONFIRM4->status != NFS4_OK)
		return res_OPEN_CONFIRM4->status;

	/* Check stateid correctness and get pointer to state.
	 * NFS4ERR_REPLAY is deliberately allowed through so the
	 * seqid check below can resend the cached response. */
	rc = nfs4_Check_Stateid(&arg_OPEN_CONFIRM4->open_stateid,
				data->current_obj,
				&state_found,
				data,
				STATEID_SPECIAL_FOR_LOCK,
				arg_OPEN_CONFIRM4->seqid,
				data->minorversion == 0,
				tag);

	if (rc != NFS4_OK && rc != NFS4ERR_REPLAY) {
		res_OPEN_CONFIRM4->status = rc;
		return res_OPEN_CONFIRM4->status;
	}

	open_owner = get_state_owner_ref(state_found);

	if (open_owner == NULL) {
		/* State is going stale. */
		res_OPEN_CONFIRM4->status = NFS4ERR_STALE;
		LogDebug(COMPONENT_NFS_V4,
			 "OPEN CONFIRM failed nfs4_Check_Stateid, stale open owner");
		goto out2;
	}

	/* so_mutex protects the seqid replay cache and so_confirmed. */
	PTHREAD_MUTEX_lock(&open_owner->so_mutex);

	/* Check seqid */
	if (!Check_nfs4_seqid(open_owner,
			      arg_OPEN_CONFIRM4->seqid,
			      op,
			      data->current_obj,
			      resp,
			      tag)) {
		/* Response is all setup for us and LogDebug
		 * told what was wrong
		 */
		PTHREAD_MUTEX_unlock(&open_owner->so_mutex);
		goto out;
	}

	/* If opened file is already confirmed, return NFS4ERR_BAD_STATEID */
	if (open_owner->so_owner.so_nfs4_owner.so_confirmed) {
		PTHREAD_MUTEX_unlock(&open_owner->so_mutex);
		res_OPEN_CONFIRM4->status = NFS4ERR_BAD_STATEID;
		goto out;
	}

	/* Set the state as confirmed */
	open_owner->so_owner.so_nfs4_owner.so_confirmed = true;
	PTHREAD_MUTEX_unlock(&open_owner->so_mutex);

	/* Handle stateid/seqid for success */
	update_stateid(state_found, &resok->open_stateid, data, tag);

	/* Save the response in the open owner */
	Copy_nfs4_state_req(open_owner,
			    arg_OPEN_CONFIRM4->seqid,
			    op,
			    data->current_obj,
			    resp,
			    tag);

 out:
	dec_state_owner_ref(open_owner);

 out2:
	dec_state_t_ref(state_found);

	return res_OPEN_CONFIRM4->status;
}				/* nfs4_op_open_confirm */
/**
 * @brief NFS4_OP_LOCK (NFSv4.0, legacy state model)
 *
 * Acquires a byte-range lock. Handles both the "new lock owner" case
 * (lock owner derived from an open owner + open stateid) and the
 * "existing lock owner" case (lock stateid supplied directly), then
 * pushes the lock into the SAL/FSAL via state_lock().
 *
 * Fix: the locktype switch now has a default case. The locktype comes
 * off the wire via xdr_enum, which accepts any integer, so without the
 * default an out-of-range value left lock_desc.sld_type uninitialized.
 *
 * @param[in]     op   Arguments for nfs4_op
 * @param[in,out] data Compound request's data
 * @param[out]    resp Results for nfs4_op
 *
 * @return res_LOCK4.status
 */
int nfs4_op_lock(struct nfs_argop4 *op, compound_data_t * data, struct nfs_resop4 *resp)
{
  char __attribute__ ((__unused__)) funcname[] = "nfs4_op_lock";

#ifndef _WITH_NFSV4_LOCKS
  /* Lock are not supported */
  resp->resop = NFS4_OP_LOCK;
  res_LOCK4.status = NFS4ERR_LOCK_NOTSUPP;

  return res_LOCK4.status;
#else
  state_status_t            state_status;
  state_data_t              candidate_data;
  state_type_t              candidate_type;
  int                       rc = 0;
  seqid4                    seqid;
  state_t                 * plock_state;    /* state for the lock */
  state_t                 * pstate_open;    /* state for the open owner */
  state_owner_t           * plock_owner;
  state_owner_t           * popen_owner;
  state_owner_t           * presp_owner;    /* Owner to store response in */
  state_owner_t           * conflict_owner = NULL;
  state_nfs4_owner_name_t   owner_name;
  nfs_client_id_t           nfs_client_id;
  state_lock_desc_t         lock_desc, conflict_desc;
  state_blocking_t          blocking = STATE_NON_BLOCKING;
  const char              * tag = "LOCK";

  LogDebug(COMPONENT_NFS_V4_LOCK,
           "Entering NFS v4 LOCK handler -----------------------------------------------------");

  /* Initialize to sane starting values */
  resp->resop = NFS4_OP_LOCK;

  /* If there is no FH */
  if(nfs4_Is_Fh_Empty(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_NOFILEHANDLE;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Empty");
      return res_LOCK4.status;
    }

  /* If the filehandle is invalid */
  if(nfs4_Is_Fh_Invalid(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_BADHANDLE;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Invalid");
      return res_LOCK4.status;
    }

  /* Tests if the Filehandle is expired (for volatile filehandle) */
  if(nfs4_Is_Fh_Expired(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_FHEXPIRED;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Expired");
      return res_LOCK4.status;
    }

  /* Lock is done only on a file */
  if(data->current_filetype != REGULAR_FILE)
    {
      /* Type of the entry is not correct */
      switch (data->current_filetype)
        {
        case DIRECTORY:
          res_LOCK4.status = NFS4ERR_ISDIR;
          break;
        default:
          res_LOCK4.status = NFS4ERR_INVAL;
          break;
        }
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed wrong file type");
      return res_LOCK4.status;
    }

  /* Convert lock parameters to internal types */
  switch(arg_LOCK4.locktype)
    {
      case READ_LT:
        lock_desc.sld_type = STATE_LOCK_R;
        blocking           = STATE_NON_BLOCKING;
        break;

      case WRITE_LT:
        lock_desc.sld_type = STATE_LOCK_W;
        blocking           = STATE_NON_BLOCKING;
        break;

      case READW_LT:
        lock_desc.sld_type = STATE_LOCK_R;
        blocking           = STATE_NFSV4_BLOCKING;
        break;

      case WRITEW_LT:
        lock_desc.sld_type = STATE_LOCK_W;
        blocking           = STATE_NFSV4_BLOCKING;
        break;

      default:
        /* locktype is decoded with xdr_enum and may hold any integer a
         * client sent; reject out-of-range values instead of using
         * lock_desc.sld_type uninitialized below. */
        res_LOCK4.status = NFS4ERR_INVAL;
        LogDebug(COMPONENT_NFS_V4_LOCK,
                 "LOCK failed invalid lock type");
        return res_LOCK4.status;
    }

  lock_desc.sld_offset = arg_LOCK4.offset;

  /* An EOF-length request locks to the end of file; internally that is
   * represented by length 0. */
  if(arg_LOCK4.length != STATE_LOCK_OFFSET_EOF)
    lock_desc.sld_length = arg_LOCK4.length;
  else
    lock_desc.sld_length = 0;

  if(arg_LOCK4.locker.new_lock_owner)
    {
      /* New lock owner, Find the open owner */
      tag = "LOCK (new owner)";

      /* Check stateid correctness and get pointer to state */
      if((rc = nfs4_Check_Stateid(&arg_LOCK4.locker.locker4_u.open_owner.open_stateid,
                                  data->current_entry,
                                  0LL,
                                  &pstate_open,
                                  data,
                                  STATEID_SPECIAL_FOR_LOCK,
                                  tag)) != NFS4_OK)
        {
          res_LOCK4.status = rc;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs4_Check_Stateid for open owner");
          return res_LOCK4.status;
        }

      popen_owner = pstate_open->state_powner;
      plock_state = NULL;
      plock_owner = NULL;
      presp_owner = popen_owner;
      seqid       = arg_LOCK4.locker.locker4_u.open_owner.open_seqid;

      LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
              "LOCK New lock owner from open owner",
              data->current_entry,
              data->pcontext,
              popen_owner,
              &lock_desc);

      /* Check is the clientid is known or not */
      if(nfs_client_id_get(arg_LOCK4.locker.locker4_u.open_owner.lock_owner.clientid,
                           &nfs_client_id) == CLIENT_ID_NOT_FOUND)
        {
          res_LOCK4.status = NFS4ERR_STALE_CLIENTID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs_client_id_get");
          return res_LOCK4.status;
        }

      /* The related stateid is already stored in pstate_open */

      /* An open state has been found. Check its type */
      if(pstate_open->state_type != STATE_TYPE_SHARE)
        {
          res_LOCK4.status = NFS4ERR_BAD_STATEID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed open stateid is not a SHARE");
          return res_LOCK4.status;
        }

      /* Lock seqid (seqid wanted for new lock) should be 0 (see newpynfs test LOCK8c)  */
      if(arg_LOCK4.locker.locker4_u.open_owner.lock_seqid != 0)
        {
          res_LOCK4.status = NFS4ERR_BAD_SEQID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed new lock stateid is not 0");
          return res_LOCK4.status;
        }

      /* Is this lock_owner known ? */
      convert_nfs4_lock_owner(&arg_LOCK4.locker.locker4_u.open_owner.lock_owner,
                              &owner_name);
    }
  else
    {
      /* Existing lock owner
       * Find the lock stateid
       * From that, get the open_owner
       */
      tag = "LOCK (existing owner)";

      /* There was code here before to handle all-0 stateid, but that
       * really doesn't apply - when we handle temporary locks for
       * I/O operations (which is where we will see all-0 or all-1
       * stateid, those will not come in through nfs4_op_lock.
       */

      /* Check stateid correctness and get pointer to state */
      if((rc = nfs4_Check_Stateid(&arg_LOCK4.locker.locker4_u.lock_owner.lock_stateid,
                                  data->current_entry,
                                  0LL,
                                  &plock_state,
                                  data,
                                  STATEID_SPECIAL_FOR_LOCK,
                                  tag)) != NFS4_OK)
        {
          res_LOCK4.status = rc;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs4_Check_Stateid for existing lock owner");
          return res_LOCK4.status;
        }

      /* An lock state has been found. Check its type */
      if(plock_state->state_type != STATE_TYPE_LOCK)
        {
          res_LOCK4.status = NFS4ERR_BAD_STATEID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed existing lock owner, state type is not LOCK");
          return res_LOCK4.status;
        }

      /* Get the old lockowner. We can do the following 'cast', in NFSv4
       * lock_owner4 and open_owner4 are different types but with the
       * same definition */
      plock_owner = plock_state->state_powner;
      popen_owner = plock_owner->so_owner.so_nfs4_owner.so_related_owner;
      pstate_open = plock_state->state_data.lock.popenstate;
      presp_owner = plock_owner;
      seqid       = arg_LOCK4.locker.locker4_u.lock_owner.lock_seqid;

      LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
              "LOCK Existing lock owner",
              data->current_entry,
              data->pcontext,
              plock_owner,
              &lock_desc);

#ifdef _CONFORM_TO_TEST_LOCK8c
      /* Check validity of the seqid */
      if(arg_LOCK4.locker.locker4_u.lock_owner.lock_seqid != 0)
        {
          res_LOCK4.status = NFS4ERR_BAD_SEQID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed existing lock owner, lock seqid != 0");
          return res_LOCK4.status;
        }
#endif
    }                           /* if( arg_LOCK4.locker.new_lock_owner ) */

  /* Check seqid (lock_seqid or open_seqid) */
  if(!Check_nfs4_seqid(presp_owner, seqid, op, data, resp, tag))
    {
      /* Response is all setup for us and LogDebug told what was wrong */
      return res_LOCK4.status;
    }

  /* Lock length should not be 0 */
  if(arg_LOCK4.length == 0LL)
    {
      res_LOCK4.status = NFS4ERR_INVAL;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed length == 0");

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  /* Check for range overflow.
   * Comparing beyond 2^64 is not possible int 64 bits precision,
   * but off+len > 2^64-1 is equivalent to len > 2^64-1 - off
   */
  if(lock_desc.sld_length > (STATE_LOCK_OFFSET_EOF - lock_desc.sld_offset))
    {
      res_LOCK4.status = NFS4ERR_INVAL;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed length overflow");

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  /* check if open state has correct access for type of lock.
   * Don't need to check for conflicting states since this open
   * state assures there are no conflicting states.
   */
  if(((arg_LOCK4.locktype == WRITE_LT || arg_LOCK4.locktype == WRITEW_LT) &&
      ((pstate_open->state_data.share.share_access & OPEN4_SHARE_ACCESS_WRITE) == 0)) ||
     ((arg_LOCK4.locktype == READ_LT || arg_LOCK4.locktype == READW_LT) &&
      ((pstate_open->state_data.share.share_access & OPEN4_SHARE_ACCESS_READ) == 0)))
    {
      /* The open state doesn't allow access based on the type of lock */
      LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
              "LOCK failed, SHARE doesn't allow access",
              data->current_entry,
              data->pcontext,
              plock_owner,
              &lock_desc);

      res_LOCK4.status = NFS4ERR_OPENMODE;

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  if(arg_LOCK4.locker.new_lock_owner)
    {
      /* A lock owner is always associated with a previously made open
       * which has itself a previously made stateid
       */

      /* Get reference to open owner */
      inc_state_owner_ref(popen_owner);

      /* NOTE(review): the early returns below (seqid mismatch,
       * create_nfs4_owner failure, state_add failure) do not release
       * this popen_owner reference — confirm whether a matching
       * dec_state_owner_ref is expected on those paths. */

      if(nfs4_owner_Get_Pointer(&owner_name, &plock_owner))
        {
          /* Lock owner already exists, check lock_seqid if it's not 0 */
          if(!Check_nfs4_seqid(plock_owner,
                               arg_LOCK4.locker.locker4_u.open_owner.lock_seqid,
                               op,
                               data,
                               resp,
                               "LOCK (new owner but owner exists)"))
            {
              /* Response is all setup for us and LogDebug told what was wrong */
              return res_LOCK4.status;
            }
        }
      else
        {
          /* This lock owner is not known yet, allocated and set up a new one */
          plock_owner = create_nfs4_owner(data->pclient,
                                          &owner_name,
                                          STATE_LOCK_OWNER_NFSV4,
                                          popen_owner,
                                          0);

          if(plock_owner == NULL)
            {
              res_LOCK4.status = NFS4ERR_RESOURCE;

              LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
                      "LOCK failed to create new lock owner",
                      data->current_entry,
                      data->pcontext,
                      popen_owner,
                      &lock_desc);

              return res_LOCK4.status;
            }
        }

      /* Prepare state management structure */
      memset(&candidate_type, 0, sizeof(candidate_type));
      candidate_type = STATE_TYPE_LOCK;
      candidate_data.lock.popenstate = pstate_open;

      /* Add the lock state to the lock table */
      if(state_add(data->current_entry,
                   candidate_type,
                   &candidate_data,
                   plock_owner,
                   data->pclient,
                   data->pcontext,
                   &plock_state,
                   &state_status) != STATE_SUCCESS)
        {
          res_LOCK4.status = NFS4ERR_RESOURCE;

          LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
                  "LOCK failed to add new stateid",
                  data->current_entry,
                  data->pcontext,
                  plock_owner,
                  &lock_desc);

          dec_state_owner_ref(plock_owner, data->pclient);

          return res_LOCK4.status;
        }

      init_glist(&plock_state->state_data.lock.state_locklist);

      /* Add lock state to the list of lock states belonging to the open state */
      glist_add_tail(&pstate_open->state_data.share.share_lockstates,
                     &plock_state->state_data.lock.state_sharelist);
    }                           /* if( arg_LOCK4.locker.new_lock_owner ) */

  /* Now we have a lock owner and a stateid.
   * Go ahead and push lock into SAL (and FSAL).
   */
  if(state_lock(data->current_entry,
                data->pcontext,
                plock_owner,
                plock_state,
                blocking,
                NULL,     /* No block data for now */
                &lock_desc,
                &conflict_owner,
                &conflict_desc,
                data->pclient,
                &state_status) != STATE_SUCCESS)
    {
      if(state_status == STATE_LOCK_CONFLICT)
        {
          /* A conflicting lock from a different lock_owner, returns NFS4ERR_DENIED */
          Process_nfs4_conflict(&res_LOCK4.LOCK4res_u.denied,
                                conflict_owner,
                                &conflict_desc,
                                data->pclient);
        }

      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed with status %s",
               state_err_str(state_status));

      res_LOCK4.status = nfs4_Errno_state(state_status);

      /* Save the response in the lock or open owner */
      if(res_LOCK4.status != NFS4ERR_RESOURCE &&
         res_LOCK4.status != NFS4ERR_BAD_STATEID)
        Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      if(arg_LOCK4.locker.new_lock_owner)
        {
          /* Need to destroy lock owner and state */
          if(state_del(plock_state,
                       data->pclient,
                       &state_status) != STATE_SUCCESS)
            LogDebug(COMPONENT_NFS_V4_LOCK,
                     "state_del failed with status %s",
                     state_err_str(state_status));
        }

      return res_LOCK4.status;
    }

  res_LOCK4.status = NFS4_OK;

  /* Handle stateid/seqid for success */
  update_stateid(plock_state,
                 &res_LOCK4.LOCK4res_u.resok4.lock_stateid,
                 data,
                 tag);

  LogFullDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK state_seqid = %u, plock_state = %p",
               plock_state->state_seqid,
               plock_state);

  /* Save the response in the lock or open owner */
  Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

  LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
          "LOCK applied",
          data->current_entry,
          data->pcontext,
          plock_owner,
          &lock_desc);

  return res_LOCK4.status;
#endif
}                               /* nfs4_op_lock */
/**
 * @brief NFS4_OP_OPEN_DOWNGRADE
 *
 * Implements the OPEN_DOWNGRADE operation: reduces the share access
 * and/or share deny bits associated with an existing open stateid.
 *
 * @param[in]     op   Arguments for nfs4_op
 * @param[in,out] data Compound request's data
 * @param[out]    resp Results for nfs4_op
 *
 * @return Value of res_OPEN_DOWNGRADE4->status.
 */
int nfs4_op_open_downgrade(struct nfs_argop4 *op, compound_data_t *data,
			   struct nfs_resop4 *resp)
{
	OPEN_DOWNGRADE4args * const args =
	    &op->nfs_argop4_u.opopen_downgrade;
	OPEN_DOWNGRADE4res * const res =
	    &resp->nfs_resop4_u.opopen_downgrade;
	OPEN_DOWNGRADE4resok * const resok =
	    &res->OPEN_DOWNGRADE4res_u.resok4;
	state_t *found_state = NULL;
	state_owner_t *owner;
	int stateid_rc;
	const char *tag = "OPEN_DOWNGRADE";
	char *cause = "";

	resp->resop = NFS4_OP_OPEN_DOWNGRADE;
	res->status = NFS4_OK;

	/* Do basic checks on a filehandle */
	res->status = nfs4_sanity_check_FH(data, NO_FILE_TYPE, false);

	if (res->status != NFS4_OK)
		return res->status;

	/* Open downgrade is done only on a file */
	if (data->current_filetype != REGULAR_FILE) {
		res->status = NFS4ERR_INVAL;
		return res->status;
	}

	/* Check stateid correctness and get pointer to state.
	 * NFS4ERR_REPLAY falls through so the seqid check can
	 * resend the cached response. */
	stateid_rc = nfs4_Check_Stateid(&args->open_stateid,
					data->current_obj,
					&found_state,
					data,
					STATEID_SPECIAL_FOR_LOCK,
					args->seqid,
					data->minorversion == 0,
					tag);

	if (stateid_rc != NFS4_OK && stateid_rc != NFS4ERR_REPLAY) {
		res->status = stateid_rc;
		LogDebug(COMPONENT_STATE,
			 "OPEN_DOWNGRADE failed nfs4_Check_Stateid");
		return res->status;
	}

	owner = get_state_owner_ref(found_state);

	if (owner == NULL) {
		/* Unexpected, but something just went stale. */
		res->status = NFS4ERR_STALE;
		goto out2;
	}

	/* Seqid replay check is NFSv4.0 only; protect the replay
	 * cache with the owner mutex. */
	PTHREAD_MUTEX_lock(&owner->so_mutex);

	if (data->minorversion == 0 &&
	    !Check_nfs4_seqid(owner, args->seqid, op,
			      data->current_obj, resp, tag)) {
		/* Response is all setup for us and LogDebug
		 * told what was wrong */
		PTHREAD_MUTEX_unlock(&owner->so_mutex);
		goto out;
	}

	PTHREAD_MUTEX_unlock(&owner->so_mutex);

	/* What kind of open is it ? */
	LogFullDebug(COMPONENT_STATE,
		     "OPEN_DOWNGRADE: Share Deny = %d Share Access = %d ",
		     args->share_deny,
		     args->share_access);

	res->status = nfs4_do_open_downgrade(op, data, owner,
					     found_state, &cause);

	if (res->status != NFS4_OK) {
		LogEvent(COMPONENT_STATE,
			 "Failed to open downgrade: %s",
			 cause);
		goto out;
	}

	/* Successful exit */
	res->status = NFS4_OK;

	/* Handle stateid/seqid for success */
	update_stateid(found_state, &resok->open_stateid, data, tag);

	/* Save the response in the open owner (NFSv4.0 replay cache) */
	if (data->minorversion == 0) {
		Copy_nfs4_state_req(owner, args->seqid, op,
				    data->current_obj, resp, tag);
	}

 out:
	dec_state_owner_ref(owner);

 out2:
	dec_state_t_ref(found_state);

	return res->status;
}				/* nfs4_op_open_downgrade */
/**
 * @brief Fill in a LOCK4denied structure from a conflicting lock.
 *
 * Builds the NFS4ERR_DENIED payload for a LOCK/LOCKT reply, after
 * first verifying the encoded response will fit in the compound
 * reply buffer.
 *
 * @param[out]    denied   LOCK4denied structure to fill in
 * @param[in]     holder   Owner holding the conflicting lock; its SAL
 *                         reference (if any) is released here. May be NULL.
 * @param[in]     conflict Description of the conflicting lock
 * @param[in,out] data     Compound data; op_resp_size is updated
 *
 * @return NFS4ERR_DENIED normally, or the check_resp_room() error.
 */
nfsstat4 Process_nfs4_conflict(LOCK4denied *denied, state_owner_t *holder,
			       fsal_lock_param_t *conflict,
			       compound_data_t *data)
{
	/* True when the holder carries a usable owner name. */
	const bool have_owner = holder != NULL && holder->so_owner_len != 0;
	const size_t resp_owner_len =
	    have_owner ? holder->so_owner_len : unknown_owner.so_owner_len;
	nfsstat4 room_status;

	/* First check if the response will fit, this is a response to a
	 * LOCK or LOCKT operation. */
	room_status = check_resp_room(data, BASE_RESP_SIZE + resp_owner_len);

	if (room_status != NFS4_OK)
		return room_status;

	/* Now set the op_resp_size. */
	data->op_resp_size = BASE_RESP_SIZE + resp_owner_len;

	/* A conflicting lock from a different lock_owner,
	 * returns NFS4ERR_DENIED */
	denied->offset = conflict->lock_start;
	denied->length = conflict->lock_length;
	denied->locktype =
	    (conflict->lock_type == FSAL_LOCK_R) ? READ_LT : WRITE_LT;

	if (have_owner) {
		/* Deep-copy the holder's owner name into the response. */
		denied->owner.owner.owner_val =
		    gsh_malloc(holder->so_owner_len);
		denied->owner.owner.owner_len = holder->so_owner_len;
		memcpy(denied->owner.owner.owner_val,
		       holder->so_owner_val,
		       holder->so_owner_len);
	} else {
		/* No usable holder; report the static "unknown" owner
		 * (not heap-allocated, must not be freed by callers). */
		denied->owner.owner.owner_len = unknown_owner.so_owner_len;
		denied->owner.owner.owner_val = unknown_owner.so_owner_val;
	}

	LogFullDebug(COMPONENT_STATE, "denied->owner.owner.owner_val = %p",
		     denied->owner.owner.owner_val);

	if (holder != NULL && holder->so_type == STATE_LOCK_OWNER_NFSV4)
		denied->owner.clientid =
		    holder->so_owner.so_nfs4_owner.so_clientid;
	else
		denied->owner.clientid = 0;

	/* Release any lock owner reference passed back from SAL */
	if (holder != NULL)
		dec_state_owner_ref(holder);

	return NFS4ERR_DENIED;
}
/**
 * @brief Common worker for NFS4_OP_READ (and READ_PLUS via io/info).
 *
 * Validates the filehandle and stateid, enforces share reservations and
 * per-export read limits, performs the read through the FSAL, and fills
 * in the READ4 response.
 *
 * @param[in]     op    The nfs4_op argument (READ4args)
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  The nfs4_op result (READ4res)
 * @param[in]     io    FSAL I/O direction (plain READ vs READ_PLUS path)
 * @param[in,out] info  io_info for READ_PLUS, NULL for plain READ
 *
 * @return The NFSv4 status also stored in res_READ4->status.
 */
static int nfs4_read(struct nfs_argop4 *op, compound_data_t *data,
		     struct nfs_resop4 *resp, fsal_io_direction_t io,
		     struct io_info *info)
{
	READ4args * const arg_READ4 = &op->nfs_argop4_u.opread;
	READ4res * const res_READ4 = &resp->nfs_resop4_u.opread;
	uint64_t size = 0;
	size_t read_size = 0;
	uint64_t offset = 0;
	bool eof_met = false;
	void *bufferdata = NULL;
	fsal_status_t fsal_status = {0, 0};
	state_t *state_found = NULL;
	state_t *state_open = NULL;
	struct fsal_obj_handle *obj = NULL;
	bool sync = false;
	bool anonymous_started = false;
	state_owner_t *owner = NULL;
	bool bypass = false;
	/* Snapshot the per-export read limits atomically (they can be
	 * changed at runtime by export update). */
	uint64_t MaxRead = atomic_fetch_uint64_t(&op_ctx->ctx_export->MaxRead);
	uint64_t MaxOffsetRead = atomic_fetch_uint64_t(
					&op_ctx->ctx_export->MaxOffsetRead);

	/* Say we are managing NFS4_OP_READ */
	resp->resop = NFS4_OP_READ;
	res_READ4->status = NFS4_OK;

	/* Do basic checks on a filehandle Only files can be read */

	if ((data->minorversion > 0)
	    && nfs4_Is_Fh_DSHandle(&data->currentFH)) {
		/* pNFS data-server filehandle: hand off to the DS read path */
		if (io == FSAL_IO_READ)
			return op_dsread(op, data, resp);
		else
			return op_dsread_plus(op, data, resp, info);
	}

	res_READ4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, true);

	if (res_READ4->status != NFS4_OK)
		return res_READ4->status;

	obj = data->current_obj;

	/* Check stateid correctness and get pointer to state (also
	   checks for special stateids) */

	res_READ4->status =
	    nfs4_Check_Stateid(&arg_READ4->stateid, obj, &state_found, data,
			       STATEID_SPECIAL_ANY, 0, false, "READ");

	if (res_READ4->status != NFS4_OK)
		return res_READ4->status;

	/* NB: After this point, if state_found == NULL, then
	   the stateid is all-0 or all-1 */

	if (state_found != NULL) {
		struct state_deleg *sdeleg;

		if (info)
			info->io_advise = state_found->state_data.io_advise;

		/* Map the presented stateid to the governing open state
		 * (if any) and validate it permits a READ. */
		switch (state_found->state_type) {
		case STATE_TYPE_SHARE:
			state_open = state_found;
			/* Note this causes an extra refcount, but it
			 * simplifies logic below.
			 */
			inc_state_t_ref(state_open);
			/**
			 * @todo FSF: need to check against existing locks
			 */
			break;

		case STATE_TYPE_LOCK:
			state_open = state_found->state_data.lock.openstate;
			inc_state_t_ref(state_open);
			/**
			 * @todo FSF: should check that write is in
			 * range of an byte range lock...
			 */
			break;

		case STATE_TYPE_DELEG:
			/* Check if the delegation state allows READ */
			sdeleg = &state_found->state_data.deleg;
			if (!(sdeleg->sd_type & OPEN_DELEGATE_READ) ||
				(sdeleg->sd_state != DELEG_GRANTED)) {
				/* Invalid delegation for this operation. */
				LogDebug(COMPONENT_STATE,
					"Delegation type:%d state:%d",
					sdeleg->sd_type,
					sdeleg->sd_state);
				res_READ4->status = NFS4ERR_BAD_STATEID;
				goto out;
			}

			state_open = NULL;
			break;

		default:
			res_READ4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "READ with invalid statid of type %d",
				 state_found->state_type);
			goto out;
		}

		/* This is a read operation, this means that the file
		   MUST have been opened for reading */
		if (state_open != NULL
		    && (state_open->state_data.share.
			share_access & OPEN4_SHARE_ACCESS_READ) == 0) {
			/* Even if file is open for write, the client
			 * may do accidently read operation (caching).
			 * Because of this, READ is allowed if not
			 * explicitly denied.  See page 112 in RFC 7530
			 * for more details.
			 */

			if (state_open->state_data.share.
			    share_deny & OPEN4_SHARE_DENY_READ) {
				/* Bad open mode, return NFS4ERR_OPENMODE */
				res_READ4->status = NFS4ERR_OPENMODE;

				if (isDebug(COMPONENT_NFS_V4_LOCK)) {
					char str[LOG_BUFF_LEN] = "\0";
					struct display_buffer dspbuf = {
							sizeof(str), str, str};
					display_stateid(&dspbuf, state_found);
					LogDebug(COMPONENT_NFS_V4_LOCK,
						 "READ %s doesn't have OPEN4_SHARE_ACCESS_READ",
						 str);
				}
				goto out;
			}
		}

		/**
		 * @todo : this piece of code looks a bit suspicious
		 *  (see Rong's mail)
		 *
		 * @todo: ACE: This works for now.  How do we want to
		 * handle owner confirmation across NFSv4.0/NFSv4.1?
		 * Do we want to mark every NFSv4.1 owner
		 * pre-confirmed, or make the check conditional on
		 * minorversion like we do here?
		 */
		switch (state_found->state_type) {
		case STATE_TYPE_SHARE:
			/* NFSv4.0 open owners must be confirmed before use */
			if (data->minorversion == 0
			    && !state_owner_confirmed(state_found)) {
				res_READ4->status = NFS4ERR_BAD_STATEID;
				goto out;
			}
			break;
		case STATE_TYPE_LOCK:
		case STATE_TYPE_DELEG:
			break;
		default:
			/* Sanity check: all other types are illegal.
			 * we should not got that place (similar check
			 * above), anyway it costs nothing to add this
			 * test */
			res_READ4->status = NFS4ERR_BAD_STATEID;
			goto out;
		}
	} else {
		/* Special stateid, no open state, check to see if any
		   share conflicts */
		state_open = NULL;

		/* Special stateid, no open state, check to see if any share
		 * conflicts The stateid is all-0 or all-1
		 */
		bypass = arg_READ4->stateid.seqid != 0;
		res_READ4->status = nfs4_Errno_state(
				state_share_anonymous_io_start(
					obj,
					OPEN4_SHARE_ACCESS_READ,
					arg_READ4->stateid.seqid != 0
						? SHARE_BYPASS_READ
						: SHARE_BYPASS_NONE));

		if (res_READ4->status != NFS4_OK)
			goto out;

		anonymous_started = true;
	}

	/* Need to permission check the read. */
	fsal_status = obj->obj_ops.test_access(obj, FSAL_READ_ACCESS,
					       NULL, NULL, true);

	if (fsal_status.major == ERR_FSAL_ACCESS) {
		/* Test for execute permission */
		fsal_status = fsal_access(obj,
				  FSAL_MODE_MASK_SET(FSAL_X_OK) |
				  FSAL_ACE4_MASK_SET
				  (FSAL_ACE_PERM_EXECUTE));
	}

	if (FSAL_IS_ERROR(fsal_status)) {
		res_READ4->status = nfs4_Errno_status(fsal_status);
		goto done;
	}

	/* Get the size and offset of the read operation */
	offset = arg_READ4->offset;
	size = arg_READ4->count;

	if (MaxOffsetRead < UINT64_MAX) {
		LogFullDebug(COMPONENT_NFS_V4,
			     "Read offset=%" PRIu64
			     " size=%" PRIu64 " MaxOffSet=%" PRIu64,
			     offset, size,
			     MaxOffsetRead);

		if ((offset + size) > MaxOffsetRead) {
			LogEvent(COMPONENT_NFS_V4,
				 "A client tryed to violate max file size %"
				 PRIu64 " for exportid #%hu",
				 MaxOffsetRead,
				 op_ctx->ctx_export->export_id);
			res_READ4->status = NFS4ERR_FBIG;
			goto done;
		}
	}

	if (size > MaxRead) {
		/* the client asked for too much data, this should normally
		   not happen because client will get FATTR4_MAXREAD value
		   at mount time */

		if (info == NULL ||
		    info->io_content.what != NFS4_CONTENT_HOLE) {
			LogFullDebug(COMPONENT_NFS_V4,
				     "read requested size = %"PRIu64
				     " read allowed size = %" PRIu64,
				     size, MaxRead);
			size = MaxRead;
		}
	}

	/* If size == 0, no I/O is to be made and everything is alright */
	if (size == 0) {
		/* A size = 0 can not lead to EOF */
		res_READ4->READ4res_u.resok4.eof = false;
		res_READ4->READ4res_u.resok4.data.data_len = 0;
		res_READ4->READ4res_u.resok4.data.data_val = NULL;
		res_READ4->status = NFS4_OK;
		goto done;
	}

	/* Some work is to be done */
	bufferdata = gsh_malloc_aligned(4096, size);

	if (!anonymous_started && data->minorversion == 0) {
		/* Publish the owner's clientid in op_ctx for the duration
		 * of the I/O (NFSv4.0 only); cleared again below. */
		owner = get_state_owner_ref(state_found);
		if (owner != NULL) {
			op_ctx->clientid =
			    &owner->so_owner.so_nfs4_owner.so_clientid;
		}
	}

	if (obj->fsal->m_ops.support_ex(obj)) {
		/* Call the new fsal_read2 */
		fsal_status = fsal_read2(obj, bypass, state_found, offset,
					 size, &read_size, bufferdata,
					 &eof_met, info);
	} else {
		/* Call legacy fsal_rdwr */
		fsal_status = fsal_rdwr(obj, io, offset, size, &read_size,
					bufferdata, &eof_met, &sync, info);
	}

	if (FSAL_IS_ERROR(fsal_status)) {
		res_READ4->status = nfs4_Errno_status(fsal_status);
		gsh_free(bufferdata);
		res_READ4->READ4res_u.resok4.data.data_val = NULL;
		goto done;
	}

	if (!eof_met) {
		/** @todo FSF: add a config option for this behavior? */
		/* Need to check against filesize for ESXi clients */
		struct attrlist attrs;

		fsal_prepare_attrs(&attrs, ATTR_SIZE);

		if (!FSAL_IS_ERROR(obj->obj_ops.getattrs(obj, &attrs)))
			eof_met = (offset + read_size) >= attrs.filesize;

		/* Done with the attrs */
		fsal_release_attrs(&attrs);
	}

	if (!anonymous_started && data->minorversion == 0)
		op_ctx->clientid = NULL;

	res_READ4->READ4res_u.resok4.data.data_len = read_size;
	res_READ4->READ4res_u.resok4.data.data_val = bufferdata;

	LogFullDebug(COMPONENT_NFS_V4,
		     "NFS4_OP_READ: offset = %" PRIu64
		     " read length = %zu eof=%u", offset, read_size, eof_met);

	/* Is EOF met or not ? */
	res_READ4->READ4res_u.resok4.eof = eof_met;

	/* Say it is ok */
	res_READ4->status = NFS4_OK;

 done:
	/* Undo the anonymous share reservation and record I/O stats. */
	if (anonymous_started)
		state_share_anonymous_io_done(obj, OPEN4_SHARE_ACCESS_READ);

	server_stats_io_done(size, read_size,
			     (res_READ4->status == NFS4_OK) ? true : false,
			     false);

 out:
	/* Drop every reference taken above. */
	if (owner != NULL)
		dec_state_owner_ref(owner);

	if (state_found != NULL)
		dec_state_t_ref(state_found);

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	return res_READ4->status;
}				/* nfs4_op_read */
/**
 * @brief NLM4 UNLOCK procedure: release a byte-range lock.
 *
 * @param[in]  args  NLM arguments (nlm4_unlockargs)
 * @param[in]  req   SVC request (identifies the calling client)
 * @param[out] res   NLM result (status carried in res_nlm4)
 *
 * @return NFS_REQ_OK in all cases; the protocol status is in @p res.
 */
int nlm4_Unlock(nfs_arg_t *args, struct svc_req *req, nfs_res_t *res)
{
	nlm4_unlockargs *arg = &args->arg_nlm4_unlock;
	struct fsal_obj_handle *obj;
	state_status_t state_status = STATE_SUCCESS;
	char buffer[MAXNETOBJ_SZ * 2] = "\0";
	state_nsm_client_t *nsm_client;
	state_nlm_client_t *nlm_client;
	state_owner_t *nlm_owner;
	fsal_lock_param_t lock;
	int rc;
	state_t *state;

	/* NLM doesn't have a BADHANDLE error, nor can rpc_execute deal with
	 * responding to an NLM_*_MSG call, so we check here if the export is
	 * NULL and if so, handle the response.
	 */
	if (op_ctx->ctx_export == NULL) {
		res->res_nlm4.stat.stat = NLM4_STALE_FH;
		LogInfo(COMPONENT_NLM, "INVALID HANDLE: nlm4_Unlock");
		return NFS_REQ_OK;
	}

	netobj_to_string(&arg->cookie, buffer, sizeof(buffer));

	LogDebug(COMPONENT_NLM,
		 "REQUEST PROCESSING: Calling nlm4_Unlock svid=%d off=%llx len=%llx cookie=%s",
		 (int)arg->alock.svid,
		 (unsigned long long)arg->alock.l_offset,
		 (unsigned long long)arg->alock.l_len, buffer);

	/* Echo the client's cookie back in the reply. */
	copy_netobj(&res->res_nlm4test.cookie, &arg->cookie);

	/* No new lock activity while the server is in grace. */
	if (nfs_in_grace()) {
		res->res_nlm4.stat.stat = NLM4_DENIED_GRACE_PERIOD;
		LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
			 lock_result_str(res->res_nlm4.stat.stat));
		return NFS_REQ_OK;
	}

	/* unlock doesn't care if owner is found */
	rc = nlm_process_parameters(req, false, &arg->alock, &lock, &obj,
				    CARE_NOT, &nsm_client, &nlm_client,
				    &nlm_owner, NULL, 0, &state);

	if (rc >= 0) {
		/* resent the error back to the client */
		res->res_nlm4.stat.stat = (nlm4_stats) rc;
		LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
			 lock_result_str(res->res_nlm4.stat.stat));
		return NFS_REQ_OK;
	}

	/* Only attempt the unlock if a state was found; with no state
	 * there is nothing to release and we report NLM4_GRANTED below. */
	if (state != NULL)
		state_status = state_unlock(obj, state, nlm_owner, false, 0,
					    &lock);

	if (state_status != STATE_SUCCESS) {
		/* Unlock could fail in the FSAL and make a bit of a mess,
		 * especially if we are in out of memory situation. Such an
		 * error is logged by Cache Inode.
		 *
		 * NOTE(review): the failure path writes
		 * res_nlm4test.test_stat.stat while the success path and the
		 * log below use res_nlm4.stat.stat — these appear to alias
		 * through the nfs_res_t union; confirm the layout.
		 */
		res->res_nlm4test.test_stat.stat =
		    nlm_convert_state_error(state_status);
	} else {
		res->res_nlm4.stat.stat = NLM4_GRANTED;
	}

	/* Release the NLM Client and NLM Owner references we have */
	if (state != NULL)
		dec_state_t_ref(state);

	dec_nsm_client_ref(nsm_client);
	dec_nlm_client_ref(nlm_client);
	dec_state_owner_ref(nlm_owner);
	obj->obj_ops.put_ref(obj);

	LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
		 lock_result_str(res->res_nlm4.stat.stat));

	return NFS_REQ_OK;
}
/**
 * @brief NFS4_OP_LOCKT: test for a conflicting byte-range lock.
 *
 * Validates the request, resolves the clientid and lock owner, then asks
 * SAL (state_test) whether the described lock could be granted.  On a
 * conflict the LOCK4denied reply is built via Process_nfs4_conflict().
 *
 * Fix: the call to Process_nfs4_conflict() previously passed only three
 * arguments, but this file's definition takes a fourth (compound data,
 * used for response-size checking) and returns the nfsstat4 to send —
 * the return value was also being discarded.  The call now matches the
 * definition and its status is used for the reply.
 *
 * @param[in]  op    The nfs4_op argument (LOCKT4args)
 * @param[in]  data  Compound request's data
 * @param[out] resp  The nfs4_op result (LOCKT4res)
 *
 * @return The NFSv4 status also stored in res_LOCKT4->status.
 */
int nfs4_op_lockt(struct nfs_argop4 *op, compound_data_t *data,
		  struct nfs_resop4 *resp)
{
	/* Alias for arguments */
	LOCKT4args * const arg_LOCKT4 = &op->nfs_argop4_u.oplockt;
	/* Alias for response */
	LOCKT4res * const res_LOCKT4 = &resp->nfs_resop4_u.oplockt;
	/* Return code from state calls */
	state_status_t state_status = STATE_SUCCESS;
	/* Client id record */
	nfs_client_id_t *clientid = NULL;
	/* Lock owner name */
	state_nfs4_owner_name_t owner_name;
	/* Lock owner record */
	state_owner_t *lock_owner = NULL;
	/* Owner of conflicting lock */
	state_owner_t *conflict_owner = NULL;
	/* Description of lock to test */
	fsal_lock_param_t lock_desc = { FSAL_NO_LOCK, 0, 0 };
	/* Description of conflicting lock */
	fsal_lock_param_t conflict_desc;
	/* return code from id confirm calls */
	int rc;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 LOCKT handler ----------------------------");

	/* Initialize to sane default */
	resp->resop = NFS4_OP_LOCKT;

	res_LOCKT4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, false);

	if (res_LOCKT4->status != NFS4_OK)
		return res_LOCKT4->status;

	/* Lock length should not be 0 */
	if (arg_LOCKT4->length == 0LL) {
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	if (nfs_in_grace()) {
		res_LOCKT4->status = NFS4ERR_GRACE;
		return res_LOCKT4->status;
	}

	/* Convert lock parameters to internal types */
	switch (arg_LOCKT4->locktype) {
	case READ_LT:
	case READW_LT:
		lock_desc.lock_type = FSAL_LOCK_R;
		break;

	case WRITE_LT:
	case WRITEW_LT:
		lock_desc.lock_type = FSAL_LOCK_W;
		break;

	default:
		LogDebug(COMPONENT_NFS_V4_LOCK, "Invalid lock type");
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	lock_desc.lock_start = arg_LOCKT4->offset;

	/* Length of 0 internally means "to EOF" */
	if (arg_LOCKT4->length != STATE_LOCK_OFFSET_EOF)
		lock_desc.lock_length = arg_LOCKT4->length;
	else
		lock_desc.lock_length = 0;

	/* Check for range overflow.  Comparing beyond 2^64 is not
	 * possible in 64 bit precision, but off+len > 2^64-1 is
	 * equivalent to len > 2^64-1 - off
	 */
	if (lock_desc.lock_length >
	    (STATE_LOCK_OFFSET_EOF - lock_desc.lock_start)) {
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	/* Check clientid */
	rc = nfs_client_id_get_confirmed(data->minorversion == 0
					 ? arg_LOCKT4->owner.clientid
					 : data->session->clientid,
					 &clientid);

	if (rc != CLIENT_ID_SUCCESS) {
		res_LOCKT4->status = clientid_error_to_nfsstat(rc);
		return res_LOCKT4->status;
	}

	PTHREAD_MUTEX_lock(&clientid->cid_mutex);

	if (data->minorversion == 0 && !reserve_lease(clientid)) {
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
		dec_client_id_ref(clientid);
		res_LOCKT4->status = NFS4ERR_EXPIRED;
		return res_LOCKT4->status;
	}

	PTHREAD_MUTEX_unlock(&clientid->cid_mutex);

	/* Is this lock_owner known ? */
	convert_nfs4_lock_owner(&arg_LOCKT4->owner, &owner_name);

	/* This lock owner is not known yet, allocated and set up a new one */
	lock_owner = create_nfs4_owner(&owner_name, clientid,
				       STATE_LOCK_OWNER_NFSV4, NULL, 0, NULL,
				       CARE_ALWAYS);

	LogStateOwner("Lock: ", lock_owner);

	if (lock_owner == NULL) {
		LogEvent(COMPONENT_NFS_V4_LOCK,
			 "LOCKT unable to create lock owner");
		res_LOCKT4->status = NFS4ERR_SERVERFAULT;
		goto out;
	}

	LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG, "LOCKT",
		data->current_entry, lock_owner, &lock_desc);

	if (data->minorversion == 0) {
		op_ctx->clientid =
		    &lock_owner->so_owner.so_nfs4_owner.so_clientid;
	}

	/* Now we have a lock owner and a stateid.  Go ahead and test
	 * the lock in SAL (and FSAL).
	 */
	state_status = state_test(data->current_entry, lock_owner, &lock_desc,
				  &conflict_owner, &conflict_desc);

	if (state_status == STATE_LOCK_CONFLICT) {
		/* A conflicting lock from a different lock_owner,
		 * returns NFS4ERR_DENIED.  Process_nfs4_conflict() fills in
		 * the denied reply, consumes the conflict_owner reference,
		 * and returns the status to send (NFS4ERR_DENIED, or a
		 * resource error if the reply would not fit).
		 */
		LogStateOwner("Conflict: ", conflict_owner);

		res_LOCKT4->status =
		    Process_nfs4_conflict(&res_LOCKT4->LOCKT4res_u.denied,
					  conflict_owner, &conflict_desc,
					  data);
	} else {
		/* Return result */
		res_LOCKT4->status = nfs4_Errno_state(state_status);
	}

	if (data->minorversion == 0)
		op_ctx->clientid = NULL;

	/* Release NFS4 Open Owner reference */
	dec_state_owner_ref(lock_owner);

 out:
	/* Update the lease before exit */
	if (data->minorversion == 0) {
		PTHREAD_MUTEX_lock(&clientid->cid_mutex);
		update_lease(clientid);
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
	}

	dec_client_id_ref(clientid);

	return res_LOCKT4->status;
}				/* nfs4_op_lockt */
int nlm4_Lock(nfs_arg_t * parg /* IN */ , exportlist_t * pexport /* IN */ , fsal_op_context_t * pcontext /* IN */ , cache_inode_client_t * pclient /* INOUT */ , hash_table_t * ht /* INOUT */ , struct svc_req * preq /* IN */ , nfs_res_t * pres /* OUT */ ) { nlm4_lockargs * arg = &parg->arg_nlm4_lock; cache_entry_t * pentry; state_status_t state_status = CACHE_INODE_SUCCESS; char buffer[MAXNETOBJ_SZ * 2]; state_nsm_client_t * nsm_client; state_nlm_client_t * nlm_client; state_owner_t * nlm_owner, * holder; state_lock_desc_t lock, conflict; int rc; state_block_data_t * pblock_data; netobj_to_string(&arg->cookie, buffer, 1024); LogDebug(COMPONENT_NLM, "REQUEST PROCESSING: Calling nlm4_Lock svid=%d off=%llx len=%llx cookie=%s", (int) arg->alock.svid, (unsigned long long) arg->alock.l_offset, (unsigned long long) arg->alock.l_len, buffer); if(!copy_netobj(&pres->res_nlm4test.cookie, &arg->cookie)) { pres->res_nlm4.stat.stat = NLM4_FAILED; LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Test %s", lock_result_str(pres->res_nlm4.stat.stat)); return NFS_REQ_OK; } /* allow only reclaim lock request during recovery */ if(in_nlm_grace_period() && !arg->reclaim) { pres->res_nlm4.stat.stat = NLM4_DENIED_GRACE_PERIOD; LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Lock %s", lock_result_str(pres->res_nlm4.stat.stat)); return NFS_REQ_OK; } if(!in_nlm_grace_period() && arg->reclaim) { pres->res_nlm4.stat.stat = NLM4_DENIED_GRACE_PERIOD; LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Lock %s", lock_result_str(pres->res_nlm4.stat.stat)); return NFS_REQ_OK; } rc = nlm_process_parameters(preq, arg->exclusive, &arg->alock, &lock, ht, &pentry, pcontext, pclient, CARE_MONITOR, &nsm_client, &nlm_client, &nlm_owner, &pblock_data); if(rc >= 0) { /* Present the error back to the client */ pres->res_nlm4.stat.stat = (nlm4_stats)rc; LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s", lock_result_str(pres->res_nlm4.stat.stat)); return NFS_REQ_OK; } /* Cast the state number into a state pointer 
to protect * locks from a client that has rebooted from the SM_NOTIFY * that will release old locks */ if(state_lock(pentry, pcontext, nlm_owner, (void *) (ptrdiff_t) arg->state, arg->block, pblock_data, &lock, &holder, &conflict, pclient, &state_status) != STATE_SUCCESS) { pres->res_nlm4test.test_stat.stat = nlm_convert_state_error(state_status); if(state_status == STATE_LOCK_CONFLICT) { nlm_process_conflict(&pres->res_nlm4test.test_stat.nlm4_testrply_u.holder, holder, &conflict, pclient); } /* If we didn't block, release the block data */ if(state_status != STATE_LOCK_BLOCKED && pblock_data != NULL) Mem_Free(pblock_data); } else { pres->res_nlm4.stat.stat = NLM4_GRANTED; } /* Release the NLM Client and NLM Owner references we have */ dec_nsm_client_ref(nsm_client); dec_nlm_client_ref(nlm_client); dec_state_owner_ref(nlm_owner, pclient); LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Lock %s", lock_result_str(pres->res_nlm4.stat.stat)); return NFS_REQ_OK; }
/**
 * @brief NFS4_OP_RELEASE_LOCKOWNER: drop server state for a lock owner.
 *
 * NFSv4.0 only (NFS4ERR_NOTSUPP for any minor version > 0).  Confirms
 * the clientid, reserves its lease, looks up the lock owner without
 * creating it (CARE_NOT), and releases it if found.
 *
 * @param[in]  op    The nfs4_op argument (RELEASE_LOCKOWNER4args)
 * @param[in]  data  Compound request's data
 * @param[out] resp  The nfs4_op result (RELEASE_LOCKOWNER4res)
 *
 * @return The NFSv4 status also stored in the response.
 */
int nfs4_op_release_lockowner(struct nfs_argop4 *op, compound_data_t *data,
			      struct nfs_resop4 *resp)
{
	RELEASE_LOCKOWNER4args * const arg_RELEASE_LOCKOWNER4 =
	    &op->nfs_argop4_u.oprelease_lockowner;
	RELEASE_LOCKOWNER4res * const res_RELEASE_LOCKOWNER4 =
	    &resp->nfs_resop4_u.oprelease_lockowner;
	nfs_client_id_t *clientid;
	state_owner_t *owner;
	state_nfs4_owner_name_t owner_name;
	int ret;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 RELEASE_LOCKOWNER handler ----------------------");

	resp->resop = NFS4_OP_RELEASE_LOCKOWNER;
	res_RELEASE_LOCKOWNER4->status = NFS4_OK;

	/* This operation only exists in NFSv4.0. */
	if (data->minorversion > 0) {
		res_RELEASE_LOCKOWNER4->status = NFS4ERR_NOTSUPP;
		return res_RELEASE_LOCKOWNER4->status;
	}

	/* Check clientid */
	ret = nfs_client_id_get_confirmed(
		arg_RELEASE_LOCKOWNER4->lock_owner.clientid, &clientid);

	if (ret != CLIENT_ID_SUCCESS) {
		res_RELEASE_LOCKOWNER4->status = clientid_error_to_nfsstat(ret);
		goto leave;
	}

	PTHREAD_MUTEX_lock(&clientid->cid_mutex);

	if (!reserve_lease(clientid)) {
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
		dec_client_id_ref(clientid);
		res_RELEASE_LOCKOWNER4->status = NFS4ERR_EXPIRED;
		goto leave;
	}

	PTHREAD_MUTEX_unlock(&clientid->cid_mutex);

	/* Look the lock owner up; CARE_NOT means it is NOT created if it
	 * doesn't already exist. */
	convert_nfs4_lock_owner(&arg_RELEASE_LOCKOWNER4->lock_owner,
				&owner_name);

	owner = create_nfs4_owner(&owner_name, clientid,
				  STATE_LOCK_OWNER_NFSV4, NULL, 0, NULL,
				  CARE_NOT);

	if (owner == NULL) {
		/* the owner doesn't exist, we are done */
		LogDebug(COMPONENT_NFS_V4_LOCK, "lock owner does not exist");
		res_RELEASE_LOCKOWNER4->status = NFS4_OK;
		goto release_lease;
	}

	res_RELEASE_LOCKOWNER4->status = release_lock_owner(owner);

	/* Drop the reference acquired via create_nfs4_owner. */
	dec_state_owner_ref(owner);

 release_lease:
	/* Update the lease before exit */
	PTHREAD_MUTEX_lock(&clientid->cid_mutex);
	update_lease(clientid);
	PTHREAD_MUTEX_unlock(&clientid->cid_mutex);

	dec_client_id_ref(clientid);

 leave:
	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Leaving NFS v4 RELEASE_LOCKOWNER handler -----------------------");

	return res_RELEASE_LOCKOWNER4->status;
}				/* nfs4_op_release_lock_owner */
/**
 * @brief NLM4 UNSHARE procedure: remove a DOS-style share reservation.
 *
 * @param[in]  args  NLM arguments (nlm4_shareargs)
 * @param[in]  req   SVC request (identifies the calling client)
 * @param[out] res   NLM result (status carried in res_nlm4share)
 *
 * @return NFS_REQ_OK in all cases; the protocol status is in @p res.
 */
int nlm4_Unshare(nfs_arg_t *args, struct svc_req *req, nfs_res_t *res)
{
	nlm4_shareargs *arg = &args->arg_nlm4_share;
	struct fsal_obj_handle *obj;
	state_status_t state_status = STATE_SUCCESS;
	char buffer[MAXNETOBJ_SZ * 2] = "\0";
	state_nsm_client_t *nsm_client;
	state_nlm_client_t *nlm_client;
	state_owner_t *nlm_owner;
	state_t *nlm_state;
	int rc;

	/* NLM doesn't have a BADHANDLE error, nor can rpc_execute deal with
	 * responding to an NLM_*_MSG call, so we check here if the export is
	 * NULL and if so, handle the response.
	 */
	if (op_ctx->ctx_export == NULL) {
		res->res_nlm4share.stat = NLM4_STALE_FH;
		LogInfo(COMPONENT_NLM, "INVALID HANDLE: NLM4_UNSHARE");
		return NFS_REQ_OK;
	}

	res->res_nlm4share.sequence = 0;

	netobj_to_string(&arg->cookie, buffer, 1024);

	if (isDebug(COMPONENT_NLM)) {
		/* Only format the handle/owner strings when they will
		 * actually be logged. */
		char str[LEN_FH_STR];
		char oh[MAXNETOBJ_SZ * 2] = "\0";

		sprint_fhandle3(str, (struct nfs_fh3 *)&arg->share.fh);
		netobj_to_string(&arg->share.oh, oh, 1024);

		LogDebug(COMPONENT_NLM,
			 "REQUEST PROCESSING: Calling NLM4_UNSHARE handle: %s, cookie=%s, reclaim=%s, owner=%s, access=%d, deny=%d",
			 str, buffer,
			 arg->reclaim ? "yes" : "no",
			 oh, arg->share.access, arg->share.mode);
	}

	/* Echo the client's cookie back in the reply. */
	copy_netobj(&res->res_nlm4share.cookie, &arg->cookie);

	/* CARE_NOT: unshare doesn't care whether the owner is monitored. */
	rc = nlm_process_share_parms(req, &arg->share, op_ctx->fsal_export,
				     &obj, CARE_NOT, &nsm_client, &nlm_client,
				     &nlm_owner, &nlm_state);

	if (rc >= 0) {
		/* Present the error back to the client */
		res->res_nlm4share.stat = (nlm4_stats) rc;
		LogDebug(COMPONENT_NLM, "REQUEST RESULT: NLM4_UNSHARE %s",
			 lock_result_str(res->res_nlm4share.stat));
		return NFS_REQ_OK;
	}

	/* NOTE(review): the trailing (false, true) flags presumably mean
	 * (reclaim=false, unshare=true) — confirm against the
	 * state_nlm_share() prototype in SAL. */
	state_status = state_nlm_share(obj, arg->share.access, arg->share.mode,
				       nlm_owner, nlm_state, false, true);

	if (state_status != STATE_SUCCESS) {
		res->res_nlm4share.stat =
		    nlm_convert_state_error(state_status);
	} else {
		res->res_nlm4share.stat = NLM4_GRANTED;
	}

	/* Release the NLM Client and NLM Owner references we have */
	dec_nsm_client_ref(nsm_client);
	dec_nlm_client_ref(nlm_client);
	dec_state_owner_ref(nlm_owner);
	obj->obj_ops->put_ref(obj);
	dec_nlm_state_ref(nlm_state);

	LogDebug(COMPONENT_NLM, "REQUEST RESULT: NLM4_UNSHARE %s",
		 lock_result_str(res->res_nlm4share.stat));

	return NFS_REQ_OK;
}
/**
 * nlm4_Unlock: NLM4 UNLOCK procedure (legacy pclient-style variant).
 *
 * @param parg     [IN]    NLM arguments (nlm4_unlockargs)
 * @param pexport  [IN]    export this request applies to
 * @param pcontext [IN]    FSAL operation context
 * @param pclient  [INOUT] cache inode client resources
 * @param ht       [INOUT] cache inode hash table
 * @param preq     [IN]    SVC request
 * @param pres     [OUT]   NLM result
 *
 * @return NFS_REQ_OK; the protocol status is carried in pres.
 */
int nlm4_Unlock(nfs_arg_t * parg /* IN     */ ,
                exportlist_t * pexport /* IN     */ ,
                fsal_op_context_t * pcontext /* IN     */ ,
                cache_inode_client_t * pclient /* INOUT  */ ,
                hash_table_t * ht /* INOUT  */ ,
                struct svc_req *preq /* IN     */ ,
                nfs_res_t * pres /* OUT    */ )
{
  nlm4_unlockargs    * arg = &parg->arg_nlm4_unlock;
  cache_entry_t      * pentry;
  /* NOTE(review): initialized with a cache_inode code rather than a
   * state_status_t value — matches the style of the other legacy
   * handlers here, but confirm the enums are interchangeable. */
  state_status_t       state_status = CACHE_INODE_SUCCESS;
  char                 buffer[MAXNETOBJ_SZ * 2];
  state_nsm_client_t * nsm_client;
  state_nlm_client_t * nlm_client;
  state_owner_t      * nlm_owner;
  state_lock_desc_t    lock;
  int                  rc;

  netobj_to_string(&arg->cookie, buffer, sizeof(buffer));

  LogDebug(COMPONENT_NLM,
           "REQUEST PROCESSING: Calling nlm4_Unlock svid=%d off=%llx len=%llx cookie=%s",
           (int) arg->alock.svid,
           (unsigned long long) arg->alock.l_offset,
           (unsigned long long) arg->alock.l_len,
           buffer);

  /* Echo the client's cookie back in the reply. */
  if(!copy_netobj(&pres->res_nlm4test.cookie, &arg->cookie))
    {
      pres->res_nlm4.stat.stat = NLM4_FAILED;
      LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
               lock_result_str(pres->res_nlm4.stat.stat));
      return NFS_REQ_OK;
    }

  /* No lock activity while in the NLM grace period. */
  if(in_nlm_grace_period())
    {
      pres->res_nlm4.stat.stat = NLM4_DENIED_GRACE_PERIOD;
      LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
               lock_result_str(pres->res_nlm4.stat.stat));
      return NFS_REQ_OK;
    }

  rc = nlm_process_parameters(preq,
                              FALSE, /* exlcusive doesn't matter */
                              &arg->alock,
                              &lock,
                              ht,
                              &pentry,
                              pcontext,
                              pclient,
                              CARE_NOT, /* unlock doesn't care if owner is found */
                              &nsm_client,
                              &nlm_client,
                              &nlm_owner,
                              NULL);

  if(rc >= 0)
    {
      /* Present the error back to the client */
      pres->res_nlm4.stat.stat = (nlm4_stats)rc;
      LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
               lock_result_str(pres->res_nlm4.stat.stat));
      return NFS_REQ_OK;
    }

  if(state_unlock(pentry,
                  pcontext,
                  nlm_owner,
                  NULL,
                  &lock,
                  pclient,
                  &state_status) != STATE_SUCCESS)
    {
      /* Unlock could fail in the FSAL and make a bit of a mess, especially if
       * we are in out of memory situation. Such an error is logged by
       * Cache Inode.
       *
       * NOTE(review): failure writes res_nlm4test.test_stat.stat while the
       * success path and log below use res_nlm4.stat.stat — these appear to
       * alias through the nfs_res_t union; confirm the layout.
       */
      pres->res_nlm4test.test_stat.stat = nlm_convert_state_error(state_status);
    }
  else
    {
      pres->res_nlm4.stat.stat = NLM4_GRANTED;
    }

  /* Release the NLM Client and NLM Owner references we have */
  dec_nsm_client_ref(nsm_client);
  dec_nlm_client_ref(nlm_client);
  dec_state_owner_ref(nlm_owner, pclient);

  LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
           lock_result_str(pres->res_nlm4.stat.stat));

  return NFS_REQ_OK;
}
/**
 * @brief NLM4 SHARE procedure: establish a DOS-style share reservation.
 *
 * NLM_SHARE is nominally non-monitored, but the request carries a reclaim
 * flag, which is honored against the server grace period (reclaims only
 * during grace, unless the FSAL handles grace itself; non-reclaims only
 * outside grace).
 *
 * @param[in]  args  NLM arguments (nlm4_shareargs)
 * @param[in]  req   SVC request (identifies the calling client)
 * @param[out] res   NLM result (status carried in res_nlm4share)
 *
 * @return NFS_REQ_OK in all cases; the protocol status is in @p res.
 */
int nlm4_Share(nfs_arg_t *args, struct svc_req *req, nfs_res_t *res)
{
	nlm4_shareargs *arg = &args->arg_nlm4_share;
	struct fsal_obj_handle *obj;
	state_status_t state_status = STATE_SUCCESS;
	char buffer[MAXNETOBJ_SZ * 2];
	state_nsm_client_t *nsm_client;
	state_nlm_client_t *nlm_client;
	state_owner_t *nlm_owner;
	state_t *nlm_state;
	int rc;
	/* Sampled once; also passed to state_nlm_share below. */
	int grace = nfs_in_grace();
	/* Indicate if we let FSAL to handle requests during grace. */
	bool_t fsal_grace = false;

	/* NLM doesn't have a BADHANDLE error, nor can rpc_execute deal with
	 * responding to an NLM_*_MSG call, so we check here if the export is
	 * NULL and if so, handle the response.
	 */
	if (op_ctx->ctx_export == NULL) {
		res->res_nlm4share.stat = NLM4_STALE_FH;
		LogInfo(COMPONENT_NLM, "INVALID HANDLE: nlm4_Share");
		return NFS_REQ_OK;
	}

	res->res_nlm4share.sequence = 0;

	netobj_to_string(&arg->cookie, buffer, 1024);

	LogDebug(COMPONENT_NLM,
		 "REQUEST PROCESSING: Calling nlm4_Share cookie=%s reclaim=%s",
		 buffer, arg->reclaim ? "yes" : "no");

	/* Echo the client's cookie back in the reply. */
	copy_netobj(&res->res_nlm4share.cookie, &arg->cookie);

	/* Allow only reclaim share request during recovery and visa versa.
	 * Note: NLM_SHARE is indicated to be non-monitored, however, it does
	 * have a reclaim flag, so we will honor the reclaim flag if used.
	 */
	if (grace) {
		/* The FSAL may implement its own grace handling. */
		if (op_ctx->fsal_export->exp_ops.
		    fs_supports(op_ctx->fsal_export, fso_grace_method))
			fsal_grace = true;

		if (!fsal_grace && !arg->reclaim) {
			res->res_nlm4share.stat = NLM4_DENIED_GRACE_PERIOD;
			LogDebug(COMPONENT_NLM,
				 "REQUEST RESULT: nlm4_Share %s",
				 lock_result_str(res->res_nlm4share.stat));
			return NFS_REQ_OK;
		}
	} else if (arg->reclaim) {
		/* Reclaims are only valid during the grace period. */
		res->res_nlm4share.stat = NLM4_DENIED_GRACE_PERIOD;
		LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
			 lock_result_str(res->res_nlm4share.stat));
		return NFS_REQ_OK;
	}

	rc = nlm_process_share_parms(req, &arg->share, op_ctx->fsal_export,
				     &obj, CARE_NO_MONITOR, &nsm_client,
				     &nlm_client, &nlm_owner, &nlm_state);

	if (rc >= 0) {
		/* Present the error back to the client */
		res->res_nlm4share.stat = (nlm4_stats) rc;
		LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
			 lock_result_str(res->res_nlm4share.stat));
		return NFS_REQ_OK;
	}

	state_status = state_nlm_share(obj, arg->share.access, arg->share.mode,
				       nlm_owner, nlm_state, grace);

	if (state_status != STATE_SUCCESS) {
		res->res_nlm4share.stat =
		    nlm_convert_state_error(state_status);
	} else {
		res->res_nlm4share.stat = NLM4_GRANTED;
	}

	/* Release the NLM Client and NLM Owner references we have */
	dec_nsm_client_ref(nsm_client);
	dec_nlm_client_ref(nlm_client);
	dec_state_owner_ref(nlm_owner);
	obj->obj_ops.put_ref(obj);
	dec_nlm_state_ref(nlm_state);

	LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
		 lock_result_str(res->res_nlm4share.stat));

	return NFS_REQ_OK;
}