/**
 * @brief Validate the claim type of an OPEN against grace/reclaim state.
 *
 * Picks off erroneous claims so the main OPEN path doesn't have to deal
 * with them later.
 *
 * @param[in] data     Compound request's data
 * @param[in] claim    Claim type from the OPEN arguments
 * @param[in] clientid Client record of the caller
 *
 * @retval NFS4_OK          Claim is acceptable right now.
 * @retval NFS4ERR_GRACE    Non-reclaim claim during the grace period.
 * @retval NFS4ERR_NO_GRACE Reclaim claim outside the allowed window.
 * @retval NFS4ERR_NOTSUPP  Claim type this server does not support.
 * @retval NFS4ERR_INVAL    Unknown claim type.
 */
static nfsstat4 open4_validate_claim(compound_data_t *data,
				     open_claim_type4 claim,
				     nfs_client_id_t *clientid)
{
	/* Return code */
	nfsstat4 status = NFS4_OK;
	/* Indicate if we let FSAL to handle requests during grace. */
	bool_t fsal_grace = false;

	/* Pick off erroneous claims so we don't have to deal with
	 * them later. */
	switch (claim) {
	case CLAIM_NULL:
		/* Ordinary (non-reclaim) opens are refused while in grace,
		 * or (4.1+) until the client signals reclaim complete. */
		if (nfs_in_grace()
		    || ((data->minorversion > 0)
			&& !clientid->cid_cb.v41.cid_reclaim_complete))
			status = NFS4ERR_GRACE;
		break;

	case CLAIM_FH:
		/* CLAIM_FH only exists in NFSv4.1+.  Bail out immediately
		 * for v4.0 so the grace check below cannot overwrite
		 * NFS4ERR_NOTSUPP with NFS4ERR_GRACE (the original fell
		 * through and could return the wrong error). */
		if (data->minorversion == 0) {
			status = NFS4ERR_NOTSUPP;
			break;
		}

		if (op_ctx->fsal_export->exp_ops.fs_supports(
					op_ctx->fsal_export,
					fso_grace_method))
			fsal_grace = true;

		if (!fsal_grace && nfs_in_grace())
			status = NFS4ERR_GRACE;
		break;

	case CLAIM_DELEGATE_PREV:
		status = NFS4ERR_NOTSUPP;
		break;

	case CLAIM_PREVIOUS:
		/* Reclaims are only valid from clients allowed to reclaim,
		 * during grace, and (4.1+) before reclaim-complete. */
		if (!clientid->cid_allow_reclaim
		    || !nfs_in_grace()
		    || ((data->minorversion > 0)
			&& clientid->cid_cb.v41.cid_reclaim_complete))
			status = NFS4ERR_NO_GRACE;
		break;

	case CLAIM_DELEGATE_CUR:
		break;

	case CLAIM_DELEG_CUR_FH:
	case CLAIM_DELEG_PREV_FH:
		status = NFS4ERR_NOTSUPP;
		break;

	default:
		status = NFS4ERR_INVAL;
	}

	return status;
}
/**
 * @brief The NFS4_OP_REMOVEXATTR operation.
 *
 * This functions handles the NFS4_OP_REMOVEXATTR operation in NFSv4. This
 * function can be called only from nfs4_Compound
 *
 * @param[in]     op    Arguments for nfs4_op
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  Results for nfs4_op
 *
 * @return per RFC5661, p. 373-4
 */
int nfs4_op_removexattr(struct nfs_argop4 *op, compound_data_t *data,
			struct nfs_resop4 *resp)
{
	REMOVEXATTR4args * const arg_REMOVEXATTR4 =
	    &op->nfs_argop4_u.opremovexattr;
	REMOVEXATTR4res * const res_REMOVEXATTR4 =
	    &resp->nfs_resop4_u.opremovexattr;
	fsal_status_t fsal_status;
	struct fsal_obj_handle *obj_handle = data->current_obj;

	resp->resop = NFS4_OP_REMOVEXATTR;
	res_REMOVEXATTR4->status = NFS4_OK;

	/* utf8string_val carries its length separately and is not
	 * guaranteed to be NUL-terminated, so bound the %s with the wire
	 * length (and print the unsigned length with %u, not %d). */
	LogDebug(COMPONENT_NFS_V4, "RemoveXattr len %u name: %.*s",
		 arg_REMOVEXATTR4->ra_name.utf8string_len,
		 (int) arg_REMOVEXATTR4->ra_name.utf8string_len,
		 arg_REMOVEXATTR4->ra_name.utf8string_val);

	/* Do basic checks on a filehandle */
	res_REMOVEXATTR4->status = nfs4_sanity_check_FH(data, NO_FILE_TYPE,
							false);

	if (res_REMOVEXATTR4->status != NFS4_OK)
		return res_REMOVEXATTR4->status;

	/* Don't allow attribute change while we are in grace period.
	 * Required for delegation reclaims and may be needed for other
	 * reclaimable states as well.
	 */
	if (nfs_in_grace()) {
		res_REMOVEXATTR4->status = NFS4ERR_GRACE;
		return res_REMOVEXATTR4->status;
	}

	/* Record the pre-op change attribute for the change_info4
	 * returned in the reply; this update is not atomic. */
	res_REMOVEXATTR4->REMOVEXATTR4res_u.resok4.rr_info.atomic = false;
	res_REMOVEXATTR4->REMOVEXATTR4res_u.resok4.rr_info.before =
	    fsal_get_changeid4(data->current_obj);

	fsal_status = obj_handle->obj_ops->removexattrs(obj_handle,
							&arg_REMOVEXATTR4->
							ra_name);

	if (FSAL_IS_ERROR(fsal_status))
		return res_REMOVEXATTR4->status = nfs4_Errno_state(
			state_error_convert(fsal_status));

	res_REMOVEXATTR4->REMOVEXATTR4res_u.resok4.rr_info.after =
	    fsal_get_changeid4(data->current_obj);

	return res_REMOVEXATTR4->status;
}
/**
 * @brief The NFS4_OP_SETATTR operation.
 *
 * Sets attributes on the current filehandle.  Can only be called from
 * nfs4_Compound.
 *
 * @param[in]     op    Arguments for nfs4_op
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  Results for nfs4_op
 *
 * @return per RFC5661
 */
int nfs4_op_setattr(struct nfs_argop4 *op, compound_data_t *data,
		    struct nfs_resop4 *resp)
{
	SETATTR4args * const arg_SETATTR4 = &op->nfs_argop4_u.opsetattr;
	SETATTR4res * const res_SETATTR4 = &resp->nfs_resop4_u.opsetattr;
	struct attrlist sattr;
	cache_inode_status_t cache_status = CACHE_INODE_SUCCESS;
	const char *tag = "SETATTR";
	state_t *state_found = NULL;
	state_t *state_open = NULL;
	cache_entry_t *entry = NULL;
	bool anonymous_started = false;

	resp->resop = NFS4_OP_SETATTR;
	res_SETATTR4->status = NFS4_OK;

	/* Do basic checks on a filehandle */
	res_SETATTR4->status = nfs4_sanity_check_FH(data, NO_FILE_TYPE, false);

	if (res_SETATTR4->status != NFS4_OK)
		return res_SETATTR4->status;

	/* Don't allow attribute change while we are in grace period.
	 * Required for delegation reclaims and may be needed for other
	 * reclaimable states as well.
	 */
	if (nfs_in_grace()) {
		res_SETATTR4->status = NFS4ERR_GRACE;
		return res_SETATTR4->status;
	}

	/* Get only attributes that are allowed to be read */
	if (!nfs4_Fattr_Check_Access(&arg_SETATTR4->obj_attributes,
				     FATTR4_ATTR_WRITE)) {
		res_SETATTR4->status = NFS4ERR_INVAL;
		return res_SETATTR4->status;
	}

	/* Ask only for supported attributes */
	if (!nfs4_Fattr_Supported(&arg_SETATTR4->obj_attributes)) {
		res_SETATTR4->status = NFS4ERR_ATTRNOTSUPP;
		return res_SETATTR4->status;
	}

	/* Convert the fattr4 in the request to a fsal sattr structure */
	res_SETATTR4->status = nfs4_Fattr_To_FSAL_attr(&sattr,
						       &arg_SETATTR4->
						       obj_attributes,
						       data);

	if (res_SETATTR4->status != NFS4_OK)
		return res_SETATTR4->status;

	/* Trunc may change Xtime so we have to start with trunc and
	 * finish by the mtime and atime
	 */
	if ((FSAL_TEST_MASK(sattr.mask, ATTR_SIZE))
	    || (FSAL_TEST_MASK(sattr.mask, ATTR4_SPACE_RESERVED))) {
		/* Setting the size of a directory is prohibited */
		if (data->current_filetype == DIRECTORY) {
			res_SETATTR4->status = NFS4ERR_ISDIR;
			return res_SETATTR4->status;
		}

		/* Object should be a file */
		if (data->current_entry->type != REGULAR_FILE) {
			res_SETATTR4->status = NFS4ERR_INVAL;
			return res_SETATTR4->status;
		}

		entry = data->current_entry;

		/* Check stateid correctness and get pointer to state */
		res_SETATTR4->status =
		    nfs4_Check_Stateid(&arg_SETATTR4->stateid,
				       data->current_entry,
				       &state_found,
				       data,
				       STATEID_SPECIAL_ANY,
				       0,
				       false,
				       tag);

		if (res_SETATTR4->status != NFS4_OK)
			return res_SETATTR4->status;

		/* NB: After this point, if state_found == NULL, then
		 * the stateid is all-0 or all-1
		 */
		if (state_found != NULL) {
			switch (state_found->state_type) {
			case STATE_TYPE_SHARE:
				state_open = state_found;
				/* Note this causes an extra refcount, but it
				 * simplifies logic below.
				 */
				inc_state_t_ref(state_open);
				break;

			case STATE_TYPE_LOCK:
				state_open =
				    state_found->state_data.lock.openstate;
				inc_state_t_ref(state_open);
				break;

			case STATE_TYPE_DELEG:
				state_open = NULL;
				break;

			default:
				res_SETATTR4->status = NFS4ERR_BAD_STATEID;
				/* Use the common exit path so the reference
				 * nfs4_Check_Stateid took on state_found is
				 * released (a direct return leaked it). */
				goto done;
			}

			/* This is a size operation, this means that
			 * the file MUST have been opened for writing
			 */
			if (state_open != NULL
			    && (state_open->state_data.share.share_access
				& OPEN4_SHARE_ACCESS_WRITE) == 0) {
				/* Bad open mode, return NFS4ERR_OPENMODE.
				 * goto done releases the state_found and
				 * state_open references taken above (a
				 * direct return leaked both). */
				res_SETATTR4->status = NFS4ERR_OPENMODE;
				goto done;
			}
		} else {
			/* Special stateid, no open state, check to
			 * see if any share conflicts
			 */
			state_open = NULL;

			/* Special stateid, no open state, check to see if
			 * any share conflicts The stateid is all-0 or all-1
			 */
			res_SETATTR4->status = nfs4_Errno_state(
				state_share_anonymous_io_start(
					entry,
					OPEN4_SHARE_ACCESS_WRITE,
					SHARE_BYPASS_NONE));

			if (res_SETATTR4->status != NFS4_OK)
				return res_SETATTR4->status;

			anonymous_started = true;
		}
	}

	const time_t S_NSECS = 1000000000UL;

	/* Set the atime and mtime (ctime is not setable) */

	/* A carry into seconds considered invalid */
	if (sattr.atime.tv_nsec >= S_NSECS) {
		res_SETATTR4->status = NFS4ERR_INVAL;
		goto done;
	}

	if (sattr.mtime.tv_nsec >= S_NSECS) {
		res_SETATTR4->status = NFS4ERR_INVAL;
		goto done;
	}

	/* If owner or owner_group are set, and the credential was
	 * squashed, then we must squash the set owner and owner_group.
	 */
	squash_setattr(&sattr);

	/* If a SETATTR comes with an open stateid, and size is being
	 * set, then the open MUST be for write (checked above), so
	 * is_open_write is simple at this stage, it's just a check that
	 * we have an open owner.
	 */
	cache_status = cache_inode_setattr(data->current_entry,
					   &sattr,
					   state_open != NULL);

	if (cache_status != CACHE_INODE_SUCCESS) {
		res_SETATTR4->status = nfs4_Errno(cache_status);
		goto done;
	}

	/* Set the replyed structure */
	res_SETATTR4->attrsset = arg_SETATTR4->obj_attributes.attrmask;

	/* Exit with no error */
	res_SETATTR4->status = NFS4_OK;

 done:

	if (anonymous_started)
		state_share_anonymous_io_done(entry, OPEN4_SHARE_ACCESS_WRITE);

	if (state_found != NULL)
		dec_state_t_ref(state_found);

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	return res_SETATTR4->status;
}				/* nfs4_op_setattr */
/**
 * @brief The NFS4_OP_LOCKT operation.
 *
 * Tests for the existence of a conflicting byte-range lock without
 * acquiring one.  Can only be called from nfs4_Compound.
 *
 * @param[in]     op    Arguments for nfs4_op
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  Results for nfs4_op
 *
 * @return per RFC5661
 */
int nfs4_op_lockt(struct nfs_argop4 *op, compound_data_t *data,
		  struct nfs_resop4 *resp)
{
	/* Alias for arguments */
	LOCKT4args * const arg_LOCKT4 = &op->nfs_argop4_u.oplockt;
	/* Alias for response */
	LOCKT4res * const res_LOCKT4 = &resp->nfs_resop4_u.oplockt;
	/* Return code from state calls */
	state_status_t state_status = STATE_SUCCESS;
	/* Client id record */
	nfs_client_id_t *clientid = NULL;
	/* Lock owner name */
	state_nfs4_owner_name_t owner_name;
	/* Lock owner record */
	state_owner_t *lock_owner = NULL;
	/* Owner of conflicting lock */
	state_owner_t *conflict_owner = NULL;
	/* Description of lock to test */
	fsal_lock_param_t lock_desc = { FSAL_NO_LOCK, 0, 0 };
	/* Description of conflicting lock */
	fsal_lock_param_t conflict_desc;
	/* return code from id confirm calls */
	int rc;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 LOCKT handler ----------------------------");

	/* Initialize to sane default */
	resp->resop = NFS4_OP_LOCKT;

	res_LOCKT4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, false);

	if (res_LOCKT4->status != NFS4_OK)
		return res_LOCKT4->status;

	/* Lock length should not be 0 */
	if (arg_LOCKT4->length == 0LL) {
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	/* No lock testing during the grace period; clients are expected
	 * to be reclaiming state, not probing for conflicts. */
	if (nfs_in_grace()) {
		res_LOCKT4->status = NFS4ERR_GRACE;
		return res_LOCKT4->status;
	}

	/* Convert lock parameters to internal types */
	switch (arg_LOCKT4->locktype) {
	case READ_LT:
	case READW_LT:
		lock_desc.lock_type = FSAL_LOCK_R;
		break;

	case WRITE_LT:
	case WRITEW_LT:
		lock_desc.lock_type = FSAL_LOCK_W;
		break;

	default:
		LogDebug(COMPONENT_NFS_V4_LOCK, "Invalid lock type");
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	lock_desc.lock_start = arg_LOCKT4->offset;

	/* Internally, length 0 encodes a lock to EOF */
	if (arg_LOCKT4->length != STATE_LOCK_OFFSET_EOF)
		lock_desc.lock_length = arg_LOCKT4->length;
	else
		lock_desc.lock_length = 0;

	/* Check for range overflow.  Comparing beyond 2^64 is not
	 * possible in 64 bit precision, but off+len > 2^64-1 is
	 * equivalent to len > 2^64-1 - off
	 */
	if (lock_desc.lock_length >
	    (STATE_LOCK_OFFSET_EOF - lock_desc.lock_start)) {
		res_LOCKT4->status = NFS4ERR_INVAL;
		return res_LOCKT4->status;
	}

	/* Check clientid: v4.0 carries it in the owner argument, v4.1+
	 * derives it from the session. */
	rc = nfs_client_id_get_confirmed(data->minorversion == 0
					 ? arg_LOCKT4->owner.clientid
					 : data->session->clientid,
					 &clientid);

	if (rc != CLIENT_ID_SUCCESS) {
		res_LOCKT4->status = clientid_error_to_nfsstat(rc);
		return res_LOCKT4->status;
	}

	PTHREAD_MUTEX_lock(&clientid->cid_mutex);

	/* v4.0 only: reserve the client's lease for the duration of the
	 * operation; an expired lease fails with NFS4ERR_EXPIRED. */
	if (data->minorversion == 0 && !reserve_lease(clientid)) {
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
		dec_client_id_ref(clientid);
		res_LOCKT4->status = NFS4ERR_EXPIRED;
		return res_LOCKT4->status;
	}

	PTHREAD_MUTEX_unlock(&clientid->cid_mutex);

	/* Is this lock_owner known ? */
	convert_nfs4_lock_owner(&arg_LOCKT4->owner, &owner_name);

	/* This lock owner is not known yet, allocated and set up a new one */
	lock_owner = create_nfs4_owner(&owner_name,
				       clientid,
				       STATE_LOCK_OWNER_NFSV4,
				       NULL,
				       0,
				       NULL,
				       CARE_ALWAYS);

	LogStateOwner("Lock: ", lock_owner);

	if (lock_owner == NULL) {
		LogEvent(COMPONENT_NFS_V4_LOCK,
			 "LOCKT unable to create lock owner");
		res_LOCKT4->status = NFS4ERR_SERVERFAULT;
		goto out;
	}

	LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG, "LOCKT",
		data->current_entry, lock_owner, &lock_desc);

	if (data->minorversion == 0) {
		op_ctx->clientid =
		    &lock_owner->so_owner.so_nfs4_owner.so_clientid;
	}

	/* Now we have a lock owner and a stateid.  Go ahead and test
	 * the lock in SAL (and FSAL).
	 */
	state_status = state_test(data->current_entry,
				  lock_owner,
				  &lock_desc,
				  &conflict_owner,
				  &conflict_desc);

	if (state_status == STATE_LOCK_CONFLICT) {
		/* A conflicting lock from a different lock_owner,
		 * returns NFS4ERR_DENIED */
		LogStateOwner("Conflict: ", conflict_owner);

		Process_nfs4_conflict(&res_LOCKT4->LOCKT4res_u.denied,
				      conflict_owner,
				      &conflict_desc);
	}

	if (data->minorversion == 0)
		op_ctx->clientid = NULL;

	/* Release NFS4 Open Owner reference */
	dec_state_owner_ref(lock_owner);

	/* Return result */
	res_LOCKT4->status = nfs4_Errno_state(state_status);

 out:

	/* Update the lease before exit */
	if (data->minorversion == 0) {
		PTHREAD_MUTEX_lock(&clientid->cid_mutex);
		update_lease(clientid);
		PTHREAD_MUTEX_unlock(&clientid->cid_mutex);
	}

	dec_client_id_ref(clientid);

	return res_LOCKT4->status;
}				/* nfs4_op_lockt */
/**
 * @brief Start NFS service
 *
 * Performs the main startup sequence: applies the core-dump rlimit,
 * sets the umask, derives the NFSv3/NFSv4 write verifiers from the
 * server epoch, initializes all layers, spawns the service threads,
 * and then blocks until the admin thread exits.
 *
 * @param[in] p_start_info Startup parameters
 */
void nfs_start(nfs_start_info_t *p_start_info)
{
	struct rlimit ulimit_data;

	/* store the start info so it is available for all layers */
	nfs_start_info = *p_start_info;

	/* "dump config" mode: print the effective configuration and exit
	 * without starting any services. */
	if (p_start_info->dump_default_config == true) {
		nfs_print_param_config();
		exit(0);
	}

	/* Set the Core dump size if set */
	if (nfs_param.core_param.core_dump_size != -1) {
		LogInfo(COMPONENT_INIT, "core size rlimit set to %ld",
			nfs_param.core_param.core_dump_size);
		ulimit_data.rlim_cur = nfs_param.core_param.core_dump_size;
		ulimit_data.rlim_max = nfs_param.core_param.core_dump_size;

		if (setrlimit(RLIMIT_CORE, &ulimit_data) != 0) {
			LogCrit(COMPONENT_INIT,
				"setrlimit() returned error on RLIMIT_CORE, core dump size: %ld, error %s(%d)",
				nfs_param.core_param.core_dump_size,
				strerror(errno), errno);
		}
	} else {
		/* No configured size: just report the inherited limit. */
		if (getrlimit(RLIMIT_CORE, &ulimit_data) != 0) {
			LogCrit(COMPONENT_INIT,
				"getrlimit() returned error on RLIMIT_CORE, error %s(%d)",
				strerror(errno), errno);
		} else {
			LogInfo(COMPONENT_INIT, "core size rlimit is %ld",
				ulimit_data.rlim_cur);
		}
	}

	/* Make sure Ganesha runs with a 0000 umask. */
	umask(0000);

	{
		/* Set the write verifiers: both the v3 and v4 verifiers are
		 * views of the same 64-bit server epoch, so a restart (new
		 * epoch) changes both and clients detect it. */
		union {
			verifier4 NFS4_write_verifier;
			writeverf3 NFS3_write_verifier;
			uint64_t epoch;
		} build_verifier;

		build_verifier.epoch = (uint64_t) ServerEpoch;

		memcpy(NFS3_write_verifier, build_verifier.NFS3_write_verifier,
		       sizeof(NFS3_write_verifier));
		memcpy(NFS4_write_verifier, build_verifier.NFS4_write_verifier,
		       sizeof(NFS4_write_verifier));
	}

#ifdef USE_CAPS
	lower_my_caps();
#endif

	/* Initialize all layers and service threads */
	nfs_Init(p_start_info);

	/* Spawns service threads */
	nfs_Start_threads();

	if (nfs_param.core_param.enable_NLM) {
		/* NSM Unmonitor all */
		nsm_unmonitor_all();
	}

	/* Optionally preload the IP->name map from a file. */
	if (nfs_param.ip_name_param.mapfile == NULL) {
		LogDebug(COMPONENT_INIT, "No Hosts Map file is used");
	} else {
		LogEvent(COMPONENT_INIT, "Populating IP_NAME with file %s",
			 nfs_param.ip_name_param.mapfile);

		if (nfs_ip_name_populate(nfs_param.ip_name_param.mapfile)
		    != IP_NAME_SUCCESS)
			LogDebug(COMPONENT_INIT, "IP_NAME was NOT populated");
	}

	LogEvent(COMPONENT_INIT,
		 "-------------------------------------------------");
	LogEvent(COMPONENT_INIT, " NFS SERVER INITIALIZED");
	LogEvent(COMPONENT_INIT,
		 "-------------------------------------------------");

	/* Wait for dispatcher to exit */
	LogDebug(COMPONENT_THREAD, "Wait for admin thread to exit");
	pthread_join(admin_thrid, NULL);

	/* Regular exit */
	LogEvent(COMPONENT_MAIN, "NFS EXIT: regular exit");

	/* if not in grace period, clean up the old state directory */
	if (!nfs_in_grace())
		nfs4_clean_old_recov_dir();

	Cleanup();

	/* let main return 0 to exit */
}
/**
 * @brief The NFS4_OP_LOCK operation.
 *
 * Acquires (or reclaims) a byte-range lock, either under a new lock
 * owner derived from an open owner, or under an existing lock owner
 * identified by its lock stateid.  Can only be called from
 * nfs4_Compound.
 *
 * @param[in]     op    Arguments for nfs4_op
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  Results for nfs4_op
 *
 * @return per RFC5661
 */
int nfs4_op_lock(struct nfs_argop4 *op, compound_data_t *data,
		 struct nfs_resop4 *resp)
{
	/* Shorter alias for arguments */
	LOCK4args * const arg_LOCK4 = &op->nfs_argop4_u.oplock;
	/* Shorter alias for response */
	LOCK4res * const res_LOCK4 = &resp->nfs_resop4_u.oplock;
	/* Status code from state calls */
	state_status_t state_status = STATE_SUCCESS;
	/* Data for lock state to be created */
	union state_data candidate_data;
	/* Status code for protocol functions */
	nfsstat4 nfs_status = 0;
	/* Created or found lock state */
	state_t *lock_state = NULL;
	/* Associated open state */
	state_t *state_open = NULL;
	/* The lock owner */
	state_owner_t *lock_owner = NULL;
	/* The open owner */
	state_owner_t *open_owner = NULL;
	/* The owner of a conflicting lock */
	state_owner_t *conflict_owner = NULL;
	/* The owner in which to store the response for NFSv4.0 */
	state_owner_t *resp_owner = NULL;
	/* Sequence ID, for NFSv4.0 */
	seqid4 seqid = 0;
	/* The client performing these operations */
	nfs_client_id_t *clientid = NULL;
	/* Name for the lock owner */
	state_nfs4_owner_name_t owner_name;
	/* Description of requested lock */
	fsal_lock_param_t lock_desc;
	/* Description of conflicting lock */
	fsal_lock_param_t conflict_desc;
	/* Whether to block */
	state_blocking_t blocking = STATE_NON_BLOCKING;
	/* Tracking data for the lock state */
	struct state_refer refer;
	/* Indicate if we let FSAL to handle requests during grace. */
	bool_t fsal_grace = false;
	int rc;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 LOCK handler ----------------------");

	/* Initialize to sane starting values */
	resp->resop = NFS4_OP_LOCK;
	res_LOCK4->status = NFS4_OK;

	/* Record the sequence info (4.1+ only; used to detect replays on
	 * the stateid). */
	if (data->minorversion > 0) {
		memcpy(refer.session,
		       data->session->session_id,
		       sizeof(sessionid4));
		refer.sequence = data->sequence;
		refer.slot = data->slot;
	}

	res_LOCK4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, false);

	if (res_LOCK4->status != NFS4_OK)
		return res_LOCK4->status;

	/* Convert lock parameters to internal types */
	switch (arg_LOCK4->locktype) {
	case READW_LT:
		blocking = STATE_NFSV4_BLOCKING;
		/* Fall through */

	case READ_LT:
		lock_desc.lock_type = FSAL_LOCK_R;
		break;

	case WRITEW_LT:
		blocking = STATE_NFSV4_BLOCKING;
		/* Fall through */

	case WRITE_LT:
		lock_desc.lock_type = FSAL_LOCK_W;
		break;

	default:
		LogDebug(COMPONENT_NFS_V4_LOCK, "Invalid lock type");
		res_LOCK4->status = NFS4ERR_INVAL;
		return res_LOCK4->status;
	}

	lock_desc.lock_start = arg_LOCK4->offset;
	lock_desc.lock_sle_type = FSAL_POSIX_LOCK;
	lock_desc.lock_reclaim = arg_LOCK4->reclaim;

	/* Internally, length 0 encodes a lock to EOF */
	if (arg_LOCK4->length != STATE_LOCK_OFFSET_EOF)
		lock_desc.lock_length = arg_LOCK4->length;
	else
		lock_desc.lock_length = 0;

	if (arg_LOCK4->locker.new_lock_owner) {
		/* New lock owner: the request carries an open stateid plus
		 * the new lock owner's identity.
		 *
		 * Check stateid correctness and get pointer to state */
		nfs_status = nfs4_Check_Stateid(
			&arg_LOCK4->locker.locker4_u.open_owner.open_stateid,
			data->current_obj,
			&state_open,
			data,
			STATEID_SPECIAL_FOR_LOCK,
			arg_LOCK4->locker.locker4_u.open_owner.open_seqid,
			data->minorversion == 0,
			lock_tag);

		if (nfs_status != NFS4_OK) {
			if (nfs_status == NFS4ERR_REPLAY) {
				/* Replayed request: route through the seqid
				 * check so the saved response is returned. */
				open_owner = get_state_owner_ref(state_open);

				LogStateOwner("Open: ", open_owner);

				if (open_owner != NULL) {
					resp_owner = open_owner;
					seqid = arg_LOCK4->locker.locker4_u
						.open_owner.open_seqid;
					goto check_seqid;
				}
			}

			res_LOCK4->status = nfs_status;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid for open owner");
			return res_LOCK4->status;
		}

		open_owner = get_state_owner_ref(state_open);

		LogStateOwner("Open: ", open_owner);

		if (open_owner == NULL) {
			/* State is going stale. */
			res_LOCK4->status = NFS4ERR_STALE;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid, stale open owner");
			goto out2;
		}

		lock_state = NULL;
		lock_owner = NULL;
		resp_owner = open_owner;
		seqid = arg_LOCK4->locker.locker4_u.open_owner.open_seqid;

		LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
			"LOCK New lock owner from open owner",
			data->current_obj, open_owner, &lock_desc);

		/* Check is the clientid is known or not */
		rc = nfs_client_id_get_confirmed(
			data->minorversion == 0
				? arg_LOCK4->locker.
				  locker4_u.open_owner.lock_owner.clientid
				: data->session->clientid,
			&clientid);

		if (rc != CLIENT_ID_SUCCESS) {
			res_LOCK4->status = clientid_error_to_nfsstat(rc);
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs_client_id_get");
			goto out2;
		}

		if (isDebug(COMPONENT_CLIENTID) && (clientid !=
		    open_owner->so_owner.so_nfs4_owner.so_clientrec)) {
			char str_open[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf_open = {
				sizeof(str_open), str_open, str_open};
			char str_lock[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf_lock = {
				sizeof(str_lock), str_lock, str_lock};

			display_client_id_rec(&dspbuf_open,
					      open_owner->so_owner
					      .so_nfs4_owner.so_clientrec);
			display_client_id_rec(&dspbuf_lock, clientid);

			LogDebug(COMPONENT_CLIENTID,
				 "Unexpected, new lock owner clientid {%s} doesn't match open owner clientid {%s}",
				 str_lock, str_open);
		}

		/* The related stateid is already stored in state_open */

		/* An open state has been found. Check its type */
		if (state_open->state_type != STATE_TYPE_SHARE) {
			res_LOCK4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed open stateid is not a SHARE");
			goto out2;
		}

		/* Is this lock_owner known ? */
		convert_nfs4_lock_owner(&arg_LOCK4->locker.locker4_u.open_owner.
					lock_owner, &owner_name);
		LogStateOwner("Lock: ", lock_owner);
	} else {
		/* Existing lock owner Find the lock stateid From
		 * that, get the open_owner
		 *
		 * There was code here before to handle all-0 stateid,
		 * but that really doesn't apply - when we handle
		 * temporary locks for I/O operations (which is where
		 * we will see all-0 or all-1 stateid, those will not
		 * come in through nfs4_op_lock.
		 *
		 * Check stateid correctness and get pointer to state
		 */
		nfs_status = nfs4_Check_Stateid(
			&arg_LOCK4->locker.locker4_u.lock_owner.lock_stateid,
			data->current_obj,
			&lock_state,
			data,
			STATEID_SPECIAL_FOR_LOCK,
			arg_LOCK4->locker.locker4_u.lock_owner.lock_seqid,
			data->minorversion == 0,
			lock_tag);

		if (nfs_status != NFS4_OK) {
			if (nfs_status == NFS4ERR_REPLAY) {
				/* Replayed request: route through the seqid
				 * check so the saved response is returned. */
				lock_owner = get_state_owner_ref(lock_state);

				LogStateOwner("Lock: ", lock_owner);

				if (lock_owner != NULL) {
					open_owner = lock_owner->so_owner
						.so_nfs4_owner.so_related_owner;
					inc_state_owner_ref(open_owner);
					resp_owner = lock_owner;
					seqid = arg_LOCK4->locker.locker4_u
						.lock_owner.lock_seqid;
					goto check_seqid;
				}
			}

			res_LOCK4->status = nfs_status;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid for existing lock owner");
			return res_LOCK4->status;
		}

		/* Check if lock state belongs to same export */
		if (!state_same_export(lock_state, op_ctx->ctx_export)) {
			LogEvent(COMPONENT_STATE,
				 "Lock Owner Export Conflict, Lock held for export %"
				 PRIu16" request for export %"PRIu16,
				 state_export_id(lock_state),
				 op_ctx->ctx_export->export_id);
			res_LOCK4->status = NFS4ERR_INVAL;
			goto out2;
		}

		/* A lock state has been found. Check its type */
		if (lock_state->state_type != STATE_TYPE_LOCK) {
			res_LOCK4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed existing lock owner, state type is not LOCK");
			goto out2;
		}

		/* Get the old lockowner. We can do the following
		 * 'cast', in NFSv4 lock_owner4 and open_owner4 are
		 * different types but with the same definition
		 */
		lock_owner = get_state_owner_ref(lock_state);

		LogStateOwner("Lock: ", lock_owner);

		if (lock_owner == NULL) {
			/* State is going stale. */
			res_LOCK4->status = NFS4ERR_STALE;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid, stale open owner");
			goto out2;
		}

		open_owner = lock_owner->so_owner.so_nfs4_owner.so_related_owner;
		LogStateOwner("Open: ", open_owner);
		inc_state_owner_ref(open_owner);
		state_open = lock_state->state_data.lock.openstate;
		inc_state_t_ref(state_open);
		resp_owner = lock_owner;
		seqid = arg_LOCK4->locker.locker4_u.lock_owner.lock_seqid;

		LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
			"LOCK Existing lock owner",
			data->current_obj, lock_owner, &lock_desc);

		/* Get the client for this open owner */
		clientid = open_owner->so_owner.so_nfs4_owner.so_clientrec;
		inc_client_id_ref(clientid);
	}

 check_seqid:

	/* Check seqid (lock_seqid or open_seqid) */
	if (data->minorversion == 0) {
		if (!Check_nfs4_seqid(resp_owner,
				      seqid,
				      op,
				      data->current_obj,
				      resp,
				      lock_tag)) {
			/* Response is all setup for us and LogDebug
			 * told what was wrong
			 */
			goto out2;
		}
	}

	/* Lock length should not be 0 */
	if (arg_LOCK4->length == 0LL) {
		res_LOCK4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed length == 0");
		goto out;
	}

	/* Check for range overflow.  Comparing beyond 2^64 is not
	 * possible in 64 bit precision, but off+len > 2^64-1 is
	 * equivalent to len > 2^64-1 - off
	 */
	if (lock_desc.lock_length >
	    (STATE_LOCK_OFFSET_EOF - lock_desc.lock_start)) {
		res_LOCK4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed length overflow");
		goto out;
	}

	/* Check if open state has correct access for type of lock.
	 *
	 * Don't need to check for conflicting states since this open
	 * state assures there are no conflicting states.
	 */
	if (((arg_LOCK4->locktype == WRITE_LT
	      || arg_LOCK4->locktype == WRITEW_LT)
	     && ((state_open->state_data.share.share_access
		  & OPEN4_SHARE_ACCESS_WRITE) == 0))
	    || ((arg_LOCK4->locktype == READ_LT
		 || arg_LOCK4->locktype == READW_LT)
		&& ((state_open->state_data.share.share_access
		     & OPEN4_SHARE_ACCESS_READ) == 0))) {
		/* The open state doesn't allow access based on the
		 * type of lock
		 */
		LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
			"LOCK failed, SHARE doesn't allow access",
			data->current_obj, lock_owner, &lock_desc);

		res_LOCK4->status = NFS4ERR_OPENMODE;

		goto out;
	}

	/* Do grace period checking (use resp_owner below since a new
	 * lock request with a new lock owner doesn't have a lock owner
	 * yet, but does have an open owner - resp_owner is always one or
	 * the other and non-NULL at this point - so makes for a better log).
	 */
	if (nfs_in_grace()) {
		if (op_ctx->fsal_export->exp_ops.
			fs_supports(op_ctx->fsal_export, fso_grace_method))
			fsal_grace = true;

		/* Non-reclaim locks are refused during grace (unless the
		 * FSAL handles grace itself). */
		if (!fsal_grace && !arg_LOCK4->reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, non-reclaim while in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_GRACE;
			goto out;
		}

		/* Reclaims are only honored from clients allowed to
		 * reclaim. */
		if (!fsal_grace && arg_LOCK4->reclaim
		    && !clientid->cid_allow_reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, invalid reclaim while in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_NO_GRACE;
			goto out;
		}
	} else {
		/* Reclaims outside the grace period are invalid. */
		if (arg_LOCK4->reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, reclaim while not in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_NO_GRACE;
			goto out;
		}
	}

	/* Test if this request is attempting to create a new lock owner */
	if (arg_LOCK4->locker.new_lock_owner) {
		bool_t isnew;

		/* A lock owner is always associated with a previously
		   made open which has itself a previously made
		   stateid */

		/* This lock owner is not known yet, allocated and set
		   up a new one */
		lock_owner = create_nfs4_owner(&owner_name,
					       clientid,
					       STATE_LOCK_OWNER_NFSV4,
					       open_owner,
					       0,
					       &isnew,
					       CARE_ALWAYS);

		LogStateOwner("Lock: ", lock_owner);

		if (lock_owner == NULL) {
			res_LOCK4->status = NFS4ERR_RESOURCE;

			LogLock(COMPONENT_NFS_V4_LOCK, NIV_EVENT,
				"LOCK failed to create new lock owner",
				data->current_obj, open_owner, &lock_desc);

			goto out2;
		}

		if (!isnew) {
			PTHREAD_MUTEX_lock(&lock_owner->so_mutex);
			/* Check lock_seqid if it has attached locks. */
			if (!glist_empty(&lock_owner->so_lock_list)
			    && (data->minorversion == 0)
			    && !Check_nfs4_seqid(lock_owner,
						 arg_LOCK4->locker.locker4_u.
						 open_owner.lock_seqid,
						 op,
						 data->current_obj,
						 resp,
						 lock_tag)) {
				LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
					"LOCK failed to create new lock owner, re-use",
					data->current_obj,
					open_owner, &lock_desc);
				dump_all_locks(
					"All locks (re-use of lock owner)");

				PTHREAD_MUTEX_unlock(&lock_owner->so_mutex);
				/* Response is all setup for us and
				 * LogDebug told what was wrong
				 */
				goto out2;
			}

			PTHREAD_MUTEX_unlock(&lock_owner->so_mutex);

			/* Lock owner is known, see if we also already have
			 * a stateid. Do this here since it's impossible for
			 * there to be such a state if the lock owner was
			 * previously unknown.
			 */
			lock_state = nfs4_State_Get_Obj(data->current_obj,
							lock_owner);
		}

		if (lock_state == NULL) {
			/* Prepare state management structure */
			memset(&candidate_data, 0, sizeof(candidate_data));
			candidate_data.lock.openstate = state_open;

			/* Add the lock state to the lock table */
			state_status = state_add(data->current_obj,
						 STATE_TYPE_LOCK,
						 &candidate_data,
						 lock_owner,
						 &lock_state,
						 data->minorversion > 0 ?
							&refer : NULL);

			if (state_status != STATE_SUCCESS) {
				res_LOCK4->status = NFS4ERR_RESOURCE;

				LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
					"LOCK failed to add new stateid",
					data->current_obj, lock_owner,
					&lock_desc);

				goto out2;
			}

			glist_init(&lock_state->state_data.lock.state_locklist);

			/* Add lock state to the list of lock states belonging
			   to the open state */
			glist_add_tail(
				&state_open->state_data.share.share_lockstates,
				&lock_state->state_data.lock.state_sharelist);
		}
	}

	if (data->minorversion == 0) {
		op_ctx->clientid =
		    &lock_owner->so_owner.so_nfs4_owner.so_clientid;
	}

	/* Now we have a lock owner and a stateid.  Go ahead and push
	 * lock into SAL (and FSAL).
	 */
	state_status = state_lock(data->current_obj,
				  lock_owner,
				  lock_state,
				  blocking,
				  NULL,	/* No block data for now */
				  &lock_desc,
				  &conflict_owner,
				  &conflict_desc);

	if (state_status != STATE_SUCCESS) {
		if (state_status == STATE_LOCK_CONFLICT) {
			/* A conflicting lock from a different
			   lock_owner, returns NFS4ERR_DENIED */
			Process_nfs4_conflict(&res_LOCK4->LOCK4res_u.denied,
					      conflict_owner,
					      &conflict_desc);
		}

		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed with status %s",
			 state_err_str(state_status));

		res_LOCK4->status = nfs4_Errno_state(state_status);

		/* Save the response in the lock or open owner */
		if (res_LOCK4->status != NFS4ERR_RESOURCE
		    && res_LOCK4->status != NFS4ERR_BAD_STATEID
		    && data->minorversion == 0) {
			Copy_nfs4_state_req(resp_owner,
					    seqid,
					    op,
					    data->current_obj,
					    resp,
					    lock_tag);
		}

		if (arg_LOCK4->locker.new_lock_owner) {
			/* Need to destroy new state */
			state_del(lock_state);
		}
		goto out2;
	}

	if (data->minorversion == 0)
		op_ctx->clientid = NULL;

	res_LOCK4->status = NFS4_OK;

	/* Handle stateid/seqid for success */
	update_stateid(lock_state,
		       &res_LOCK4->LOCK4res_u.resok4.lock_stateid,
		       data,
		       lock_tag);

	if (arg_LOCK4->locker.new_lock_owner) {
		/* Also save the response in the lock owner */
		Copy_nfs4_state_req(lock_owner,
				    arg_LOCK4->locker.locker4_u.open_owner.
				    lock_seqid,
				    op,
				    data->current_obj,
				    resp,
				    lock_tag);
	}

	if (isFullDebug(COMPONENT_NFS_V4_LOCK)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_stateid(&dspbuf, lock_state);

		LogFullDebug(COMPONENT_NFS_V4_LOCK, "LOCK stateid %s", str);
	}

	LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG, "LOCK applied",
		data->current_obj, lock_owner, &lock_desc);

 out:

	if (data->minorversion == 0) {
		/* Save the response in the lock or open owner */
		Copy_nfs4_state_req(resp_owner,
				    seqid,
				    op,
				    data->current_obj,
				    resp,
				    lock_tag);
	}

 out2:

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	if (lock_state != NULL)
		dec_state_t_ref(lock_state);

	LogStateOwner("Open: ", open_owner);
	LogStateOwner("Lock: ", lock_owner);

	if (open_owner != NULL)
		dec_state_owner_ref(open_owner);

	if (lock_owner != NULL)
		dec_state_owner_ref(lock_owner);

	if (clientid != NULL)
		dec_client_id_ref(clientid);

	return res_LOCK4->status;
}				/* nfs4_op_lock */
/**
 * @brief NLM4 SHARE procedure: establish a DOS-style share reservation.
 *
 * Copies the caller's cookie into the reply, enforces the grace-period
 * rules (only reclaim requests during grace, no reclaim outside it),
 * resolves the share parameters to a cache entry and NLM owner, and then
 * pushes the share reservation into the state layer (SAL).
 *
 * @param[in]  parg     NLM argument union (arg_nlm4_share member used)
 * @param[in]  pexport  Export this request applies to
 * @param[in]  pcontext FSAL credential/operation context
 * @param[in]  pworker  Worker thread data (unused here)
 * @param[in]  preq     Raw RPC request (used to identify the NLM client)
 * @param[out] pres     NLM result union (res_nlm4share member filled)
 *
 * @return NFS_REQ_OK always; the protocol-level status is in
 *         pres->res_nlm4share.stat.
 */
int nlm4_Share(nfs_arg_t * parg,
               exportlist_t * pexport,
               fsal_op_context_t * pcontext,
               nfs_worker_data_t * pworker,
               struct svc_req * preq,
               nfs_res_t * pres)
{
  nlm4_shareargs     * arg = &parg->arg_nlm4_share;
  cache_entry_t      * pentry;
  state_status_t       state_status = STATE_SUCCESS;
  char                 buffer[MAXNETOBJ_SZ * 2];
  state_nsm_client_t * nsm_client;
  state_nlm_client_t * nlm_client;
  state_owner_t      * nlm_owner;
  int                  rc;
  int                  grace = nfs_in_grace();

  pres->res_nlm4share.sequence = 0;

  /* BUGFIX: pass the real buffer size instead of a hard-coded 1024.
   * The buffer is MAXNETOBJ_SZ * 2 bytes; a literal silently
   * desynchronizes from the declaration if MAXNETOBJ_SZ changes. */
  netobj_to_string(&arg->cookie, buffer, sizeof(buffer));

  LogDebug(COMPONENT_NLM,
           "REQUEST PROCESSING: Calling nlm4_Share cookie=%s reclaim=%s",
           buffer, arg->reclaim ? "yes" : "no");

  /* The cookie must be echoed back; a copy failure means we cannot
   * build a valid reply, so report NLM4_FAILED. */
  if(!copy_netobj(&pres->res_nlm4share.cookie, &arg->cookie))
    {
      pres->res_nlm4share.stat = NLM4_FAILED;

      LogDebug(COMPONENT_NLM,
               "REQUEST RESULT: nlm4_Share %s",
               lock_result_str(pres->res_nlm4share.stat));
      return NFS_REQ_OK;
    }

  /* Allow only reclaim share request during recovery and vice versa.
   * Note: NLM_SHARE is indicated to be non-monitored, however, it does
   * have a reclaim flag, so we will honor the reclaim flag if used.
   */
  if((grace && !arg->reclaim) ||
     (!grace && arg->reclaim))
    {
      pres->res_nlm4share.stat = NLM4_DENIED_GRACE_PERIOD;

      LogDebug(COMPONENT_NLM,
               "REQUEST RESULT: nlm4_Share %s",
               lock_result_str(pres->res_nlm4share.stat));
      return NFS_REQ_OK;
    }

  /* Resolve file handle and client/owner; a non-negative return is an
   * NLM status to hand straight back to the client. */
  rc = nlm_process_share_parms(preq,
                               &arg->share,
                               &pentry,
                               pcontext,
                               CARE_NO_MONITOR,
                               &nsm_client,
                               &nlm_client,
                               &nlm_owner);

  if(rc >= 0)
    {
      /* Present the error back to the client */
      pres->res_nlm4share.stat = (nlm4_stats)rc;

      LogDebug(COMPONENT_NLM,
               "REQUEST RESULT: nlm4_Share %s",
               lock_result_str(pres->res_nlm4share.stat));
      return NFS_REQ_OK;
    }

  /* Record the share reservation in the state layer. */
  if(state_nlm_share(pentry,
                     pcontext,
                     pexport,
                     arg->share.access,
                     arg->share.mode,
                     nlm_owner,
                     &state_status) != STATE_SUCCESS)
    {
      pres->res_nlm4share.stat = nlm_convert_state_error(state_status);
    }
  else
    {
      pres->res_nlm4share.stat = NLM4_GRANTED;
    }

  /* Release the NLM Client and NLM Owner references we have */
  dec_nsm_client_ref(nsm_client);
  dec_nlm_client_ref(nlm_client);
  dec_state_owner_ref(nlm_owner);
  cache_inode_put(pentry);

  LogDebug(COMPONENT_NLM,
           "REQUEST RESULT: nlm4_Share %s",
           lock_result_str(pres->res_nlm4share.stat));
  return NFS_REQ_OK;
}
*/ int nlm4_Lock(nfs_arg_t *args, exportlist_t *export, struct req_op_context *req_ctx, nfs_worker_data_t *worker, struct svc_req *req, nfs_res_t *res) { nlm4_lockargs *arg = &args->arg_nlm4_lock; cache_entry_t *entry; state_status_t state_status = STATE_SUCCESS; char buffer[MAXNETOBJ_SZ * 2]; state_nsm_client_t *nsm_client; state_nlm_client_t *nlm_client; state_owner_t *nlm_owner, *holder; fsal_lock_param_t lock, conflict; int rc; int grace = nfs_in_grace(); state_block_data_t *pblock_data; const char *proc_name = "nlm4_Lock"; care_t care = CARE_MONITOR; if (req->rq_proc == NLMPROC4_NM_LOCK) { /* If call is a NM lock, indicate that we care about NLM * client but will not monitor. */ proc_name = "nlm4_NM_Lock"; care = CARE_NO_MONITOR; } if (export == NULL) { res->res_nlm4.stat.stat = NLM4_STALE_FH; LogInfo(COMPONENT_NLM, "INVALID HANDLE: %s", proc_name);
/**
 * @brief Start NFS service
 *
 * Performs one-shot server startup: stores the startup parameters,
 * optionally dumps the default configuration and exits, builds the NFSv3
 * and NFSv4 write verifiers from the server boot epoch, drops capabilities
 * (when built with USE_CAPS), initializes all layers, spawns the service
 * threads, and then blocks until the admin thread exits.
 *
 * @param[in] p_start_info Startup parameters
 */
void nfs_start(nfs_start_info_t *p_start_info)
{
	/* store the start info so it is available for all layers */
	nfs_start_info = *p_start_info;

	if (p_start_info->dump_default_config == true) {
		/* "Print config and exit" mode: no service is started. */
		nfs_print_param_config();
		exit(0);
	}

	/* Make sure Ganesha runs with a 0000 umask. */
	umask(0000);

	{
		/* Set the write verifiers.
		 * The union overlays the v3 and v4 verifier layouts on the
		 * same 64-bit boot epoch, so both protocols advertise a
		 * verifier that changes on every server restart (clients use
		 * this to detect reboots and re-send uncommitted writes). */
		union {
			verifier4 NFS4_write_verifier;
			writeverf3 NFS3_write_verifier;
			uint64_t epoch;
		} build_verifier;

		build_verifier.epoch = (uint64_t) ServerEpoch;

		memcpy(NFS3_write_verifier, build_verifier.NFS3_write_verifier,
		       sizeof(NFS3_write_verifier));
		memcpy(NFS4_write_verifier, build_verifier.NFS4_write_verifier,
		       sizeof(NFS4_write_verifier));
	}
#ifdef USE_CAPS
	lower_my_caps();
#endif

	/* Initialize all layers and service threads */
	nfs_Init(p_start_info);

	/* Spawns service threads */
	nfs_Start_threads();

	if (nfs_param.core_param.enable_NLM) {
		/* NSM Unmonitor all: clear stale host-monitoring entries
		 * left over from before the restart. */
		nsm_unmonitor_all();
	}

	LogEvent(COMPONENT_INIT,
		 "-------------------------------------------------");
	LogEvent(COMPONENT_INIT, "             NFS SERVER INITIALIZED");
	LogEvent(COMPONENT_INIT,
		 "-------------------------------------------------");

	/* Wait for dispatcher to exit */
	LogDebug(COMPONENT_THREAD, "Wait for admin thread to exit");
	pthread_join(admin_thrid, NULL);

	/* Regular exit */
	LogEvent(COMPONENT_MAIN, "NFS EXIT: regular exit");

	/* if not in grace period, clean up the old state directory */
	if (!nfs_in_grace())
		nfs4_clean_old_recov_dir(v4_old_dir);

	Cleanup();

	/* let main return 0 to exit */
}
/**
 * @brief The NFS4_OP_RENAME operation.
 *
 * Renames the object named arg_RENAME4.oldname in the directory pointed
 * to by savedFH into an object named arg_RENAME4.newname in the directory
 * pointed to by currentFH.  Validates both filehandles, rejects cross-
 * export renames, checks target-type compatibility, and fills in the
 * change_info4 for both directories.
 *
 * @param[in]     op    Arguments for the nfs4_op
 * @param[in,out] data  Compound request's data (currentFH = destination
 *                      dir, savedFH = source dir)
 * @param[out]    resp  Results for the nfs4_op
 *
 * @return res_RENAME4.status (NFS4_OK on success).
 */
int nfs4_op_rename(struct nfs_argop4 *op, compound_data_t * data,
                   struct nfs_resop4 *resp)
{
  char __attribute__ ((__unused__)) funcname[] = "nfs4_op_rename";

  cache_entry_t * dst_entry = NULL;
  cache_entry_t * src_entry = NULL;
  cache_entry_t * tst_entry_dst = NULL;   /* lookup of newname in dst dir */
  cache_entry_t * tst_entry_src = NULL;   /* lookup of oldname in src dir */
  fsal_attrib_list_t attr_dst;
  fsal_attrib_list_t attr_src;
  fsal_attrib_list_t attr_tst_dst;
  fsal_attrib_list_t attr_tst_src;
  cache_inode_status_t cache_status;
  fsal_status_t fsal_status;
  fsal_handle_t * handlenew = NULL;
  fsal_handle_t * handleold = NULL;
  fsal_name_t oldname;
  fsal_name_t newname;

  resp->resop = NFS4_OP_RENAME;
  res_RENAME4.status = NFS4_OK;

  /* Do basic checks on a filehandle */
  res_RENAME4.status = nfs4_sanity_check_FH(data, 0LL);
  if(res_RENAME4.status != NFS4_OK)
    return res_RENAME4.status;

  /* Do basic checks on saved filehandle */

  /* If there is no FH */
  if(nfs4_Is_Fh_Empty(&(data->savedFH)))
    {
      res_RENAME4.status = NFS4ERR_NOFILEHANDLE;
      return res_RENAME4.status;
    }

  /* If the filehandle is invalid */
  if(nfs4_Is_Fh_Invalid(&(data->savedFH)))
    {
      res_RENAME4.status = NFS4ERR_BADHANDLE;
      return res_RENAME4.status;
    }

  /* Tests if the Filehandle is expired (for volatile filehandle) */
  if(nfs4_Is_Fh_Expired(&(data->savedFH)))
    {
      res_RENAME4.status = NFS4ERR_FHEXPIRED;
      return res_RENAME4.status;
    }

  /* Pseudo Fs is explicitly a Read-Only File system */
  if(nfs4_Is_Fh_Pseudo(&(data->savedFH)))
    {
      res_RENAME4.status = NFS4ERR_ROFS;
      return res_RENAME4.status;
    }

  /* No namespace changes while reclaimable state may still be rebuilt. */
  if (nfs_in_grace())
    {
      res_RENAME4.status = NFS4ERR_GRACE;
      return res_RENAME4.status;
    }

  /* get the names from the RPC input */
  cache_status = utf8_to_name(&arg_RENAME4.oldname, &oldname);

  if(cache_status != CACHE_INODE_SUCCESS)
    {
      res_RENAME4.status = nfs4_Errno(cache_status);
      return res_RENAME4.status;
    }

  cache_status = utf8_to_name(&arg_RENAME4.newname, &newname);

  if(cache_status != CACHE_INODE_SUCCESS)
    {
      res_RENAME4.status = nfs4_Errno(cache_status);
      return res_RENAME4.status;
    }

  /* Sanity check: never rename to '.' or '..' */
  if(!FSAL_namecmp(&newname, (fsal_name_t *) & FSAL_DOT)
     || !FSAL_namecmp(&newname, (fsal_name_t *) & FSAL_DOT_DOT))
    {
      res_RENAME4.status = NFS4ERR_BADNAME;
      return res_RENAME4.status;
    }

  /* Sanity check: never rename from '.' or '..' */
  if(!FSAL_namecmp(&oldname, (fsal_name_t *) & FSAL_DOT)
     || !FSAL_namecmp(&oldname, (fsal_name_t *) & FSAL_DOT_DOT))
    {
      res_RENAME4.status = NFS4ERR_BADNAME;
      return res_RENAME4.status;
    }

  /*
   * This operation renames
   *  - the object in directory pointed by savedFH, named arg_RENAME4.oldname
   * into
   *  - an object in directory pointed by currentFH, named arg_RENAME4.newname
   *
   * Because of this, we will use 2 entry and we have verified both currentFH
   * and savedFH
   */

  /* No Cross Device: both directories must belong to the same export. */
  if(((file_handle_v4_t *) (data->currentFH.nfs_fh4_val))->exportid !=
     ((file_handle_v4_t *) (data->savedFH.nfs_fh4_val))->exportid)
    {
      res_RENAME4.status = NFS4ERR_XDEV;
      return res_RENAME4.status;
    }

  /* destination must be a directory */
  dst_entry = data->current_entry;

  if(data->current_filetype != DIRECTORY)
    {
      res_RENAME4.status = NFS4ERR_NOTDIR;
      return res_RENAME4.status;
    }

  /* Convert saved FH into a vnode */
  src_entry = data->saved_entry;

  /* Source must be a directory */
  if(data->saved_filetype != DIRECTORY)
    {
      res_RENAME4.status = NFS4ERR_NOTDIR;
      return res_RENAME4.status;
    }

  /* Renaming a file to itself is allowed, returns NFS4_OK */
  if(src_entry == dst_entry)
    {
      if(!FSAL_namecmp(&oldname, &newname))
        {
          res_RENAME4.status = NFS4_OK;
          return res_RENAME4.status;
        }
    }

  /* For the change_info4, get the 'change' attributes for both directories */
  if((cache_status = cache_inode_getattr(src_entry,
                                         &attr_src,
                                         data->pcontext,
                                         &cache_status)) != CACHE_INODE_SUCCESS)
    {
      res_RENAME4.status = nfs4_Errno(cache_status);
      return res_RENAME4.status;
    }

#ifdef BUGAZOMEU
  /* Should not happen when the exportids differ.
   * Both objects must reside on the same filesystem, return NFS4ERR_XDEV
   * if not.  NOTE(review): attr_dst is never fetched before this compare,
   * which is presumably why the block stays compiled out. */
  if(attr_src.va_rdev != attr_dst.va_rdev)
    {
      res_RENAME4.status = NFS4ERR_XDEV;
      return res_RENAME4.status;
    }
#endif

  /* Lookup oldfile to see if it exists (refcount +1) */
  if((tst_entry_src = cache_inode_lookup(src_entry,
                                         &oldname,
                                         &attr_tst_src,
                                         data->pcontext,
                                         &cache_status)) == NULL)
    {
      res_RENAME4.status = nfs4_Errno(cache_status);
      goto release;
    }

  /* Lookup file with new name to see if it already exists (refcount +1),
   * I expect to get NO_ERROR or ENOENT, anything else means an error */
  tst_entry_dst = cache_inode_lookup(dst_entry,
                                     &newname,
                                     &attr_tst_dst,
                                     data->pcontext,
                                     &cache_status);

  if((cache_status != CACHE_INODE_SUCCESS) &&
     (cache_status != CACHE_INODE_NOT_FOUND))
    {
      /* Unexpected status at this step, exit with an error */
      res_RENAME4.status = nfs4_Errno(cache_status);
      goto release;
    }

  if(cache_status == CACHE_INODE_NOT_FOUND)
    tst_entry_dst = NULL;       /* Just to make sure */

  /* Renaming a file to one of its own hardlinks is allowed, return NFS4_OK */
  if(tst_entry_src == tst_entry_dst)
    {
      res_RENAME4.status = NFS4_OK;
      goto release;
    }

  /* Renaming dir into existing file should return NFS4ERR_EXIST */
  if ((tst_entry_src->type == DIRECTORY) &&
      ((tst_entry_dst != NULL) &&
       (tst_entry_dst->type == REGULAR_FILE)))
    {
      res_RENAME4.status = NFS4ERR_EXIST;
      goto release;
    }

  /* Renaming file into existing dir should return NFS4ERR_EXIST */
  if(tst_entry_src->type == REGULAR_FILE)
    {
      if(tst_entry_dst != NULL)
        {
          if(tst_entry_dst->type == DIRECTORY)
            {
              res_RENAME4.status = NFS4ERR_EXIST;
              goto release;
            }
        }
    }

  /* Renaming dir1 into existing, nonempty dir2 should return NFS4ERR_EXIST
   * Renaming file into existing, nonempty dir should return NFS4ERR_EXIST */
  if(tst_entry_dst != NULL)
    {
      if((tst_entry_dst->type == DIRECTORY)
         && ((tst_entry_src->type == DIRECTORY)
             || (tst_entry_src->type == REGULAR_FILE)))
        {
          if(cache_inode_is_dir_empty_WithLock(tst_entry_dst) ==
             CACHE_INODE_DIR_NOT_EMPTY)
            {
              res_RENAME4.status = NFS4ERR_EXIST;
              goto release;
            }
        }
    }

  res_RENAME4.RENAME4res_u.resok4.source_cinfo.before
       = cache_inode_get_changeid4(src_entry);
  res_RENAME4.RENAME4res_u.resok4.target_cinfo.before
       = cache_inode_get_changeid4(dst_entry);

  if(cache_status == CACHE_INODE_SUCCESS)
    {
      /* New entry already exists, its attributes are in attr_tst_*,
       * check for old entry to see if types are compatible */
      handlenew = &tst_entry_dst->handle;
      handleold = &tst_entry_src->handle;

      if(!FSAL_handlecmp(handlenew, handleold, &fsal_status))
        {
          /* Source and destination are the same object: nothing to do,
           * report success with a non-atomic change_info4. */
          /* For the change_info4, get the 'change' attributes for both
           * directories */
          res_RENAME4.RENAME4res_u.resok4.source_cinfo.before
               = cache_inode_get_changeid4(src_entry);
          res_RENAME4.RENAME4res_u.resok4.target_cinfo.before
               = cache_inode_get_changeid4(dst_entry);
          res_RENAME4.RENAME4res_u.resok4.target_cinfo.atomic = FALSE;
          res_RENAME4.RENAME4res_u.resok4.source_cinfo.atomic = FALSE;
          res_RENAME4.status = NFS4_OK;
          goto release;
        }
      else
        {
          /* Destination exists and is something different from source */
          if(( tst_entry_src->type == REGULAR_FILE &&
              tst_entry_dst->type == REGULAR_FILE ) ||
              ( tst_entry_src->type == DIRECTORY &&
              tst_entry_dst->type == DIRECTORY ))
            {
              /* Compatible types: the rename overwrites the target. */
              if(cache_inode_rename(src_entry,
                                    &oldname,
                                    dst_entry,
                                    &newname,
                                    &attr_src,
                                    &attr_dst,
                                    data->pcontext,
                                    &cache_status) != CACHE_INODE_SUCCESS)
                {

                  res_RENAME4.status = nfs4_Errno(cache_status);
                  goto release;
                }
            }
          else
            { 
              res_RENAME4.status = NFS4ERR_EXIST;
              goto release;
            }
        }
    }
  else
    {
      /* New entry does not already exist, call cache_entry_rename */
      if(cache_inode_rename(src_entry,
                            &oldname,
                            dst_entry,
                            &newname,
                            &attr_src,
                            &attr_dst,
                            data->pcontext,
                            &cache_status) != CACHE_INODE_SUCCESS)
        {
          res_RENAME4.status = nfs4_Errno(cache_status);
          goto release;
        }
    }

  /* If you reach this point, then everything was alright */

  /* For the change_info4, get the 'change' attributes for both directories */
  res_RENAME4.RENAME4res_u.resok4.source_cinfo.after =
       cache_inode_get_changeid4(src_entry);
  res_RENAME4.RENAME4res_u.resok4.target_cinfo.after =
       cache_inode_get_changeid4(dst_entry);
  res_RENAME4.RENAME4res_u.resok4.target_cinfo.atomic = FALSE;
  res_RENAME4.RENAME4res_u.resok4.source_cinfo.atomic = FALSE;
  res_RENAME4.status = nfs4_Errno(cache_status);

release:
  /* Drop the lookup references taken above (free(NULL)-style: guarded). */
  if (tst_entry_src)
    (void) cache_inode_put(tst_entry_src);
  if (tst_entry_dst)
    (void) cache_inode_put(tst_entry_dst);
  return (res_RENAME4.status);
}                               /* nfs4_op_rename */
/**
 * @brief The NFS4_OP_REMOVE operation (legacy cache-inode version).
 *
 * Deletes arg_REMOVE4.target in the directory pointed to by currentFH.
 * REMOVE replaces both NFS3_RMDIR and NFS3_REMOVE, so the target may be a
 * file or a directory.  Fills in the change_info4 of the parent directory.
 *
 * @param[in]     op    Arguments for the nfs4_op
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  Results for the nfs4_op
 *
 * @return res_REMOVE4.status (NFS4_OK on success).
 */
int nfs4_op_remove(struct nfs_argop4 *op, compound_data_t * data,
                   struct nfs_resop4 *resp)
{
  char __attribute__ ((__unused__)) funcname[] = "nfs4_op_remove";

  cache_entry_t * parent_entry = NULL;

  fsal_attrib_list_t attr_parent;
  fsal_name_t name;

  cache_inode_status_t cache_status;

  resp->resop = NFS4_OP_REMOVE;
  res_REMOVE4.status = NFS4_OK;

  /*
   * Do basic checks on a filehandle
   * Delete arg_REMOVE4.target in directory pointed by currentFH
   * Make sure the currentFH is pointed a directory
   */
  res_REMOVE4.status = nfs4_sanity_check_FH(data, DIRECTORY);
  if(res_REMOVE4.status != NFS4_OK)
    return res_REMOVE4.status;

  /* Pseudo Fs is explicitly a Read-Only File system */
  if(nfs4_Is_Fh_Pseudo(&(data->currentFH)))
    {
      res_REMOVE4.status = NFS4ERR_ROFS;
      return res_REMOVE4.status;
    }

  /* No namespace changes during the grace period. */
  if (nfs_in_grace())
    {
      res_REMOVE4.status = NFS4ERR_GRACE;
      return res_REMOVE4.status;
    }

  /* If Filehandle points to a xattr object, manage it via the xattrs
   * specific functions */
  if(nfs4_Is_Fh_Xattr(&(data->currentFH)))
    return nfs4_op_remove_xattr(op, data, resp);

  /* Get the parent entry (aka the current one in the compound data) */
  parent_entry = data->current_entry;

  /* We have to keep track of the 'change' file attribute for reply
   * structure */
  memset(&(res_REMOVE4.REMOVE4res_u.resok4.cinfo.before), 0,
         sizeof(changeid4));
  res_REMOVE4.REMOVE4res_u.resok4.cinfo.before =
      cache_inode_get_changeid4(parent_entry);

  /* Check for name length */
  if(arg_REMOVE4.target.utf8string_len > FSAL_MAX_NAME_LEN)
    {
      res_REMOVE4.status = NFS4ERR_NAMETOOLONG;
      return res_REMOVE4.status;
    }

  /* get the filename from the argument, it should not be empty */
  if(arg_REMOVE4.target.utf8string_len == 0)
    {
      res_REMOVE4.status = NFS4ERR_INVAL;
      return res_REMOVE4.status;
    }

  /* NFS4_OP_REMOVE can delete files as well as directory, it replaces
   * NFS3_RMDIR and NFS3_REMOVE because of this, we have to know if
   * object is a directory or not */
  if((cache_status =
      cache_inode_error_convert(FSAL_buffdesc2name
                                ((fsal_buffdesc_t *) & arg_REMOVE4.target,
                                 &name))) != CACHE_INODE_SUCCESS)
    {
      res_REMOVE4.status = nfs4_Errno(cache_status);
      return res_REMOVE4.status;
    }

  /* Test RM7: removing '.' should return NFS4ERR_BADNAME */
  if(!FSAL_namecmp(&name, (fsal_name_t *) & FSAL_DOT)
     || !FSAL_namecmp(&name, (fsal_name_t *) & FSAL_DOT_DOT))
    {
      res_REMOVE4.status = NFS4ERR_BADNAME;
      return res_REMOVE4.status;
    }

  if((cache_status = cache_inode_remove(parent_entry,
                                        &name,
                                        &attr_parent,
                                        data->pcontext,
                                        &cache_status)) != CACHE_INODE_SUCCESS)
    {
      res_REMOVE4.status = nfs4_Errno(cache_status);
      return res_REMOVE4.status;
    }

  res_REMOVE4.REMOVE4res_u.resok4.cinfo.after =
      cache_inode_get_changeid4(parent_entry);

  /* Operation was not atomic .... */
  res_REMOVE4.REMOVE4res_u.resok4.cinfo.atomic = FALSE;

  /* If you reach this point, everything was ok */

  res_REMOVE4.status = NFS4_OK;

  return NFS4_OK;
}                               /* nfs4_op_remove */
/**
 * @brief NLM4 UNLOCK procedure: release a byte-range lock.
 *
 * Validates the export, refuses the request during the grace period,
 * resolves the lock parameters (owner is not required to exist for an
 * unlock), and releases the lock through the state layer.
 *
 * @param[in]  args NLM argument union (arg_nlm4_unlock member used)
 * @param[in]  req  Raw RPC request
 * @param[out] res  NLM result union (res_nlm4 member filled)
 *
 * @return NFS_REQ_OK always; the protocol-level status is in
 *         res->res_nlm4.stat.stat.
 */
int nlm4_Unlock(nfs_arg_t *args, struct svc_req *req, nfs_res_t *res)
{
	nlm4_unlockargs *arg = &args->arg_nlm4_unlock;
	struct fsal_obj_handle *obj;
	state_status_t state_status = STATE_SUCCESS;
	char buffer[MAXNETOBJ_SZ * 2] = "\0";
	state_nsm_client_t *nsm_client;
	state_nlm_client_t *nlm_client;
	state_owner_t *nlm_owner;
	fsal_lock_param_t lock;
	int rc;
	state_t *state;

	/* NLM doesn't have a BADHANDLE error, nor can rpc_execute deal with
	 * responding to an NLM_*_MSG call, so we check here if the export is
	 * NULL and if so, handle the response.
	 */
	if (op_ctx->ctx_export == NULL) {
		res->res_nlm4.stat.stat = NLM4_STALE_FH;
		LogInfo(COMPONENT_NLM, "INVALID HANDLE: nlm4_Unlock");
		return NFS_REQ_OK;
	}

	netobj_to_string(&arg->cookie, buffer, sizeof(buffer));

	LogDebug(COMPONENT_NLM,
		 "REQUEST PROCESSING: Calling nlm4_Unlock svid=%d off=%llx len=%llx cookie=%s",
		 (int)arg->alock.svid,
		 (unsigned long long)arg->alock.l_offset,
		 (unsigned long long)arg->alock.l_len, buffer);

	/* BUGFIX: build the whole reply through res_nlm4.  The original mixed
	 * res_nlm4test (cookie, failure status) with res_nlm4 (everything
	 * else), which only worked because both union members happen to lay
	 * out as {netobj cookie; nlm4_stats}. */
	copy_netobj(&res->res_nlm4.cookie, &arg->cookie);

	if (nfs_in_grace()) {
		res->res_nlm4.stat.stat = NLM4_DENIED_GRACE_PERIOD;
		LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
			 lock_result_str(res->res_nlm4.stat.stat));
		return NFS_REQ_OK;
	}

	/* unlock doesn't care if owner is found */
	rc = nlm_process_parameters(req, false, &arg->alock, &lock, &obj,
				    CARE_NOT, &nsm_client, &nlm_client,
				    &nlm_owner, NULL, 0, &state);

	if (rc >= 0) {
		/* Present the error back to the client */
		res->res_nlm4.stat.stat = (nlm4_stats) rc;
		LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
			 lock_result_str(res->res_nlm4.stat.stat));
		return NFS_REQ_OK;
	}

	/* If no state was found there is nothing to release; report GRANTED
	 * (state_status stays STATE_SUCCESS). */
	if (state != NULL)
		state_status = state_unlock(obj, state, nlm_owner, false, 0,
					    &lock);

	if (state_status != STATE_SUCCESS) {
		/* Unlock could fail in the FSAL and make a bit of a mess,
		 * especially if we are in out of memory situation. Such an
		 * error is logged by Cache Inode.
		 */
		res->res_nlm4.stat.stat =
		    nlm_convert_state_error(state_status);
	} else {
		res->res_nlm4.stat.stat = NLM4_GRANTED;
	}

	/* Release the NLM Client and NLM Owner references we have */
	if (state != NULL)
		dec_state_t_ref(state);

	dec_nsm_client_ref(nsm_client);
	dec_nlm_client_ref(nlm_client);
	dec_state_owner_ref(nlm_owner);
	obj->obj_ops.put_ref(obj);

	LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Unlock %s",
		 lock_result_str(res->res_nlm4.stat.stat));

	return NFS_REQ_OK;
}
/**
 * @brief The NFS4_OP_REMOVE operation.
 *
 * Deletes arg_REMOVE4.target in the directory pointed to by currentFH
 * and reports the parent directory's change_info4 (non-atomic).
 *
 * @param[in]     op    Arguments for the nfs4_op
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  Results for the nfs4_op
 *
 * @return res_REMOVE4->status (NFS4_OK on success).
 */
int nfs4_op_remove(struct nfs_argop4 *op, compound_data_t *data,
		   struct nfs_resop4 *resp)
{
	REMOVE4args * const arg_REMOVE4 = &op->nfs_argop4_u.opremove;
	REMOVE4res * const res_REMOVE4 = &resp->nfs_resop4_u.opremove;
	cache_entry_t *parent_entry = NULL;
	char *name = NULL;
	cache_inode_status_t cache_status = CACHE_INODE_SUCCESS;

	resp->resop = NFS4_OP_REMOVE;
	res_REMOVE4->status = NFS4_OK;

	/* Do basic checks on a filehandle
	 * Delete arg_REMOVE4.target in directory pointed by currentFH
	 * Make sure the currentFH is pointed a directory
	 */
	res_REMOVE4->status = nfs4_sanity_check_FH(data, DIRECTORY, false);

	if (res_REMOVE4->status != NFS4_OK)
		goto out;

	/* Validate and convert the UTF8 target to a regular string
	 * (allocates; freed at out:) */
	res_REMOVE4->status = nfs4_utf8string2dynamic(&arg_REMOVE4->target,
						      UTF8_SCAN_ALL,
						      &name);

	if (res_REMOVE4->status != NFS4_OK)
		goto out;

	/* No namespace changes while reclaimable state may still be
	 * rebuilt during the grace period. */
	if (nfs_in_grace()) {
		res_REMOVE4->status = NFS4ERR_GRACE;
		goto out;
	}

	/* Get the parent entry (aka the current one in the compound data) */
	parent_entry = data->current_entry;

	/* We have to keep track of the 'change' file attribute
	 * for reply structure.
	 * (BUGFIX: dropped a dead memset of cinfo.before that was
	 * immediately overwritten by the assignment below.) */
	res_REMOVE4->REMOVE4res_u.resok4.cinfo.before =
	    cache_inode_get_changeid4(parent_entry, data->req_ctx);

	cache_status = cache_inode_remove(parent_entry, name, data->req_ctx);

	if (cache_status != CACHE_INODE_SUCCESS) {
		res_REMOVE4->status = nfs4_Errno(cache_status);
		goto out;
	}

	res_REMOVE4->REMOVE4res_u.resok4.cinfo.after =
	    cache_inode_get_changeid4(parent_entry, data->req_ctx);

	/* Operation was not atomic .... */
	res_REMOVE4->REMOVE4res_u.resok4.cinfo.atomic = FALSE;

	/* If you reach this point, everything was ok */
	res_REMOVE4->status = NFS4_OK;

 out:
	if (name)
		gsh_free(name);

	return res_REMOVE4->status;
}				/* nfs4_op_remove */
/**
 * @brief the NFS4_OP_SEQUENCE operation
 *
 * Validates the session and slot, detects DRC replays, reserves the
 * client's lease, and records session/slot/sequence in the COMPOUND data
 * for the rest of the request.  Must be the first operation of a v4.1+
 * compound.
 *
 * @param[in]     op    nfs4_op arguments
 * @param[in,out] data  Compound request's data
 * @param[out]    resp  nfs4_op results
 *
 * @return per RFC5661, p. 374
 *
 * @see nfs4_Compound
 *
 */
int nfs4_op_sequence(struct nfs_argop4 *op, compound_data_t *data,
		     struct nfs_resop4 *resp)
{
	SEQUENCE4args * const arg_SEQUENCE4 = &op->nfs_argop4_u.opsequence;
	SEQUENCE4res * const res_SEQUENCE4 = &resp->nfs_resop4_u.opsequence;

	nfs41_session_t *session;

	resp->resop = NFS4_OP_SEQUENCE;
	res_SEQUENCE4->sr_status = NFS4_OK;

	/* SEQUENCE does not exist in NFSv4.0. */
	if (data->minorversion == 0) {
		res_SEQUENCE4->sr_status = NFS4ERR_INVAL;
		return res_SEQUENCE4->sr_status;
	}

	/* OP_SEQUENCE is always the first operation of the request */
	if (data->oppos != 0) {
		res_SEQUENCE4->sr_status = NFS4ERR_SEQUENCE_POS;
		return res_SEQUENCE4->sr_status;
	}

	if (!nfs41_Session_Get_Pointer(arg_SEQUENCE4->sa_sessionid, &session)) {
		/* Unknown session: during grace the session may simply not
		 * have been reclaimed yet, so echo the request back with
		 * RESTART_RECLAIM_NEEDED instead of failing. */
		if (nfs_in_grace()) {
			memcpy(res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.
			       sr_sessionid, arg_SEQUENCE4->sa_sessionid,
			       NFS4_SESSIONID_SIZE);
			res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_sequenceid =
			    arg_SEQUENCE4->sa_sequenceid;
			res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_slotid =
			    arg_SEQUENCE4->sa_slotid;
			res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.
			    sr_highest_slotid = NFS41_NB_SLOTS - 1;
			res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.
			    sr_target_highest_slotid = arg_SEQUENCE4->sa_slotid;
			res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.
			    sr_status_flags =
			    SEQ4_STATUS_RESTART_RECLAIM_NEEDED;
			LogDebugAlt(COMPONENT_SESSIONS, COMPONENT_CLIENTID,
				    "SEQUENCE returning status %s flags 0x%X",
				    nfsstat4_to_str(res_SEQUENCE4->sr_status),
				    res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.
				    sr_status_flags);
		} else {
			res_SEQUENCE4->sr_status = NFS4ERR_BADSESSION;
			LogDebugAlt(COMPONENT_SESSIONS, COMPONENT_CLIENTID,
				    "SEQUENCE returning status %s",
				    nfsstat4_to_str(res_SEQUENCE4->sr_status));
		}

		return res_SEQUENCE4->sr_status;
	}

	/* session->refcount +1 */

	LogDebug(COMPONENT_SESSIONS, "SEQUENCE session=%p", session);

	/* Check if lease is expired and reserve it */
	pthread_mutex_lock(&session->clientid_record->cid_mutex);

	if (!reserve_lease(session->clientid_record)) {
		pthread_mutex_unlock(&session->clientid_record->cid_mutex);

		dec_session_ref(session);

		res_SEQUENCE4->sr_status = NFS4ERR_EXPIRED;
		LogDebugAlt(COMPONENT_SESSIONS, COMPONENT_CLIENTID,
			    "SEQUENCE returning status %s",
			    nfsstat4_to_str(res_SEQUENCE4->sr_status));
		return res_SEQUENCE4->sr_status;
	}

	data->preserved_clientid = session->clientid_record;

	pthread_mutex_unlock(&session->clientid_record->cid_mutex);

	/* Check is slot is compliant with ca_maxrequests */
	if (arg_SEQUENCE4->sa_slotid >=
	    session->fore_channel_attrs.ca_maxrequests) {
		dec_session_ref(session);

		res_SEQUENCE4->sr_status = NFS4ERR_BADSLOT;
		LogDebugAlt(COMPONENT_SESSIONS, COMPONENT_CLIENTID,
			    "SEQUENCE returning status %s",
			    nfsstat4_to_str(res_SEQUENCE4->sr_status));
		return res_SEQUENCE4->sr_status;
	}

	/* By default, no DRC replay */
	data->use_drc = false;

	pthread_mutex_lock(&session->slots[arg_SEQUENCE4->sa_slotid].lock);

	/* A sequenceid that is not exactly last+1 is either a replay of the
	 * last request (serve from the DRC) or misordered. */
	if (session->slots[arg_SEQUENCE4->sa_slotid].sequence + 1 !=
	    arg_SEQUENCE4->sa_sequenceid) {
		if (session->slots[arg_SEQUENCE4->sa_slotid].sequence ==
		    arg_SEQUENCE4->sa_sequenceid) {
#if IMPLEMENT_CACHETHIS
			/** @todo
			 *
			 * Ganesha always caches result anyway so ignore
			 * cachethis
			 */
			if (session->slots[arg_SEQUENCE4->sa_slotid]
			    .cache_used) {
#endif
				/* Replay operation through the DRC */
				data->use_drc = true;
				data->cached_res =
				    &session->slots[arg_SEQUENCE4->sa_slotid].
				    cached_result;

				LogFullDebugAlt(COMPONENT_SESSIONS,
						COMPONENT_CLIENTID,
						"Use sesson slot %" PRIu32
						"=%p for DRC",
						arg_SEQUENCE4->sa_slotid,
						data->cached_res);

				pthread_mutex_unlock(&session->
						     slots[arg_SEQUENCE4->
							   sa_slotid].lock);
				dec_session_ref(session);
				res_SEQUENCE4->sr_status = NFS4_OK;
				return res_SEQUENCE4->sr_status;
#if IMPLEMENT_CACHETHIS
			} else {
				/* Illegal replay */
				pthread_mutex_unlock(&session->
						     slots[arg_SEQUENCE4->
							   sa_slotid].lock);
				dec_session_ref(session);
				res_SEQUENCE4->sr_status =
				    NFS4ERR_RETRY_UNCACHED_REP;
				LogDebugAlt(COMPONENT_SESSIONS,
					    COMPONENT_CLIENTID,
					    "SEQUENCE returning status %s",
					    nfsstat4_to_str(res_SEQUENCE4->
							    sr_status));
				return res_SEQUENCE4->sr_status;
			}
#endif
		}

		pthread_mutex_unlock(&session->
				     slots[arg_SEQUENCE4->sa_slotid].lock);
		dec_session_ref(session);
		res_SEQUENCE4->sr_status = NFS4ERR_SEQ_MISORDERED;
		LogDebugAlt(COMPONENT_SESSIONS, COMPONENT_CLIENTID,
			    "SEQUENCE returning status %s",
			    nfsstat4_to_str(res_SEQUENCE4->sr_status));
		return res_SEQUENCE4->sr_status;
	}

	/* Keep memory of the session in the COMPOUND's data */
	data->session = session;

	/* Record the sequenceid and slotid in the COMPOUND's data */
	data->sequence = arg_SEQUENCE4->sa_sequenceid;
	data->slot = arg_SEQUENCE4->sa_slotid;

	/* Update the sequence id within the slot */
	session->slots[arg_SEQUENCE4->sa_slotid].sequence += 1;

	memcpy(res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_sessionid,
	       arg_SEQUENCE4->sa_sessionid, NFS4_SESSIONID_SIZE);
	res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_sequenceid =
	    session->slots[arg_SEQUENCE4->sa_slotid].sequence;
	res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_slotid =
	    arg_SEQUENCE4->sa_slotid;
	res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_highest_slotid =
	    NFS41_NB_SLOTS - 1;
	res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_target_highest_slotid =
	    arg_SEQUENCE4->sa_slotid;	/* Maybe not the best choice */

	res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_status_flags = 0;

	/* No back channel to the client: advertise CB_PATH_DOWN so it can
	 * re-establish the callback path. */
	if (nfs_rpc_get_chan(session->clientid_record, 0) == NULL) {
		res_SEQUENCE4->SEQUENCE4res_u.sr_resok4.sr_status_flags |=
		    SEQ4_STATUS_CB_PATH_DOWN;
	}

#if IMPLEMENT_CACHETHIS
	/* Ganesha always caches result anyway so ignore cachethis */
	if (arg_SEQUENCE4->sa_cachethis) {
#endif
		data->cached_res =
		    &session->slots[arg_SEQUENCE4->sa_slotid].cached_result;
		session->slots[arg_SEQUENCE4->sa_slotid].cache_used = true;

		LogFullDebugAlt(COMPONENT_SESSIONS, COMPONENT_CLIENTID,
				"Use sesson slot %" PRIu32 "=%p for DRC",
				arg_SEQUENCE4->sa_slotid, data->cached_res);
#if IMPLEMENT_CACHETHIS
	} else {
		data->cached_res = NULL;
		session->slots[arg_SEQUENCE4->sa_slotid].cache_used = false;

		LogFullDebugAlt(COMPONENT_SESSIONS, COMPONENT_CLIENTID,
				"Don't use sesson slot %" PRIu32
				"=NULL for DRC", arg_SEQUENCE4->sa_slotid);
	}
#endif

	pthread_mutex_unlock(&session->slots[arg_SEQUENCE4->sa_slotid].lock);

	/* If we were successful, stash the clientid in the request
	 * context.
	 */
	data->req_ctx->clientid = &data->session->clientid;

	res_SEQUENCE4->sr_status = NFS4_OK;
	return res_SEQUENCE4->sr_status;
}				/* nfs41_op_sequence */
/**
 * @brief NLM4 SHARE procedure: establish a DOS-style share reservation.
 *
 * Validates the export, enforces the grace-period rules (reclaim only
 * during grace unless the FSAL handles grace itself, no reclaim outside
 * it), resolves the share parameters to an object handle and NLM owner,
 * and records the share reservation in the state layer.
 *
 * @param[in]  args NLM argument union (arg_nlm4_share member used)
 * @param[in]  req  Raw RPC request (used to identify the NLM client)
 * @param[out] res  NLM result union (res_nlm4share member filled)
 *
 * @return NFS_REQ_OK always; the protocol-level status is in
 *         res->res_nlm4share.stat.
 */
int nlm4_Share(nfs_arg_t *args, struct svc_req *req, nfs_res_t *res)
{
	nlm4_shareargs *arg = &args->arg_nlm4_share;
	struct fsal_obj_handle *obj;
	state_status_t state_status = STATE_SUCCESS;
	char buffer[MAXNETOBJ_SZ * 2];
	state_nsm_client_t *nsm_client;
	state_nlm_client_t *nlm_client;
	state_owner_t *nlm_owner;
	state_t *nlm_state;
	int rc;
	int grace = nfs_in_grace();
	/* Indicate if we let FSAL to handle requests during grace. */
	bool_t fsal_grace = false;

	/* NLM doesn't have a BADHANDLE error, nor can rpc_execute deal with
	 * responding to an NLM_*_MSG call, so we check here if the export is
	 * NULL and if so, handle the response.
	 */
	if (op_ctx->ctx_export == NULL) {
		res->res_nlm4share.stat = NLM4_STALE_FH;
		LogInfo(COMPONENT_NLM, "INVALID HANDLE: nlm4_Share");
		return NFS_REQ_OK;
	}

	res->res_nlm4share.sequence = 0;

	/* BUGFIX: pass the real buffer size instead of a hard-coded 1024.
	 * The buffer is MAXNETOBJ_SZ * 2 bytes; a literal silently
	 * desynchronizes from the declaration if MAXNETOBJ_SZ changes. */
	netobj_to_string(&arg->cookie, buffer, sizeof(buffer));

	LogDebug(COMPONENT_NLM,
		 "REQUEST PROCESSING: Calling nlm4_Share cookie=%s reclaim=%s",
		 buffer, arg->reclaim ? "yes" : "no");

	copy_netobj(&res->res_nlm4share.cookie, &arg->cookie);

	/* Allow only reclaim share request during recovery and visa versa.
	 * Note: NLM_SHARE is indicated to be non-monitored, however, it does
	 * have a reclaim flag, so we will honor the reclaim flag if used.
	 */
	if (grace) {
		/* Some FSALs implement their own grace handling; let those
		 * accept non-reclaim requests during grace. */
		if (op_ctx->fsal_export->exp_ops.
		    fs_supports(op_ctx->fsal_export, fso_grace_method))
			fsal_grace = true;

		if (!fsal_grace && !arg->reclaim) {
			res->res_nlm4share.stat = NLM4_DENIED_GRACE_PERIOD;
			LogDebug(COMPONENT_NLM,
				 "REQUEST RESULT: nlm4_Share %s",
				 lock_result_str(res->res_nlm4share.stat));
			return NFS_REQ_OK;
		}
	} else if (arg->reclaim) {
		res->res_nlm4share.stat = NLM4_DENIED_GRACE_PERIOD;
		LogDebug(COMPONENT_NLM,
			 "REQUEST RESULT: nlm4_Share %s",
			 lock_result_str(res->res_nlm4share.stat));
		return NFS_REQ_OK;
	}

	/* Resolve file handle, client, owner and state; a non-negative
	 * return is an NLM status to hand straight back to the client. */
	rc = nlm_process_share_parms(req, &arg->share, op_ctx->fsal_export,
				     &obj, CARE_NO_MONITOR, &nsm_client,
				     &nlm_client, &nlm_owner, &nlm_state);

	if (rc >= 0) {
		/* Present the error back to the client */
		res->res_nlm4share.stat = (nlm4_stats) rc;
		LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
			 lock_result_str(res->res_nlm4share.stat));
		return NFS_REQ_OK;
	}

	state_status = state_nlm_share(obj, arg->share.access,
				       arg->share.mode, nlm_owner, nlm_state,
				       grace);

	if (state_status != STATE_SUCCESS) {
		res->res_nlm4share.stat =
		    nlm_convert_state_error(state_status);
	} else {
		res->res_nlm4share.stat = NLM4_GRANTED;
	}

	/* Release the NLM Client and NLM Owner references we have */
	dec_nsm_client_ref(nsm_client);
	dec_nlm_client_ref(nlm_client);
	dec_state_owner_ref(nlm_owner);
	obj->obj_ops.put_ref(obj);
	dec_nlm_state_ref(nlm_state);

	LogDebug(COMPONENT_NLM, "REQUEST RESULT: nlm4_Share %s",
		 lock_result_str(res->res_nlm4share.stat));

	return NFS_REQ_OK;
}