/**
 * @brief Callback for 9P write done
 *
 * Invoked when the FSAL completes a 9P write; records the final status
 * in the caller's _9p_write_data and accounts the I/O in server stats.
 *
 * @param[in] obj Object being acted on
 * @param[in] ret Return status of call
 * @param[in] write_data Data for write call (struct fsal_io_arg)
 * @param[in] caller_data Data for caller (struct _9p_write_data)
 */
static void _9p_write_cb(struct fsal_obj_handle *obj, fsal_status_t ret,
			 void *write_data, void *caller_data)
{
	struct _9p_write_data *data = caller_data;
	struct fsal_io_arg *write_arg = write_data;

	/* Fixup ERR_FSAL_SHARE_DENIED status */
	if (ret.major == ERR_FSAL_SHARE_DENIED)
		ret = fsalstat(ERR_FSAL_LOCKED, 0);

	/* Hand the (possibly remapped) status back to the request path */
	data->ret = ret;

	if (data->client) {
		op_ctx->client = data->client;

		/* NOTE(review): the NFSv4 paths in this file pass
		 * "status == OK" as the third argument while this passes
		 * FSAL_IS_ERROR(ret) — one of the two polarities must be
		 * wrong; confirm what server_stats_io_done() expects.
		 * Also, the final argument (is_write?) is false here even
		 * though this is the write callback — verify.
		 */
		server_stats_io_done(write_arg->iov[0].iov_len,
				     write_arg->io_amount,
				     FSAL_IS_ERROR(ret), false);
	}
}
/**
 * @brief Handle a 9P TREAD request and build the RREAD reply.
 *
 * Parses tag/fid/offset/count from the request, validates the fid and the
 * requested size against the negotiated msize, then either serves the read
 * from the xattr value cached by a previous xattrwalk or calls into the
 * FSAL (fsal_read2 when extended support is available, fsal_rdwr otherwise).
 * Data is read directly into the reply buffer to avoid a copy.
 *
 * @param[in]  req9p   Incoming 9P request (message buffer + connection)
 * @param[out] plenout Length of the constructed reply
 * @param[out] preply  Reply buffer to fill
 *
 * @return 1 when a reply (RREAD or RERROR) has been built.
 */
int _9p_read(struct _9p_request_data *req9p, u32 *plenout, char *preply)
{
	char *cursor = req9p->_9pmsg + _9P_HDR_SIZE + _9P_TYPE_SIZE;
	char *databuffer;

	u16 *msgtag = NULL;
	u32 *fid = NULL;
	u64 *offset = NULL;
	u32 *count = NULL;

	u32 outcount = 0;

	struct _9p_fid *pfid = NULL;

	size_t read_size = 0;
	bool eof_met;
	fsal_status_t fsal_status;
	/* uint64_t stable_flag = CACHE_INODE_SAFE_WRITE_TO_FS; */
	bool sync = false;

	/* Get data */
	_9p_getptr(cursor, msgtag, u16);
	_9p_getptr(cursor, fid, u32);
	_9p_getptr(cursor, offset, u64);
	_9p_getptr(cursor, count, u32);

	LogDebug(COMPONENT_9P, "TREAD: tag=%u fid=%u offset=%llu count=%u",
		 (u32) *msgtag, *fid, (unsigned long long)*offset, *count);

	if (*fid >= _9P_FID_PER_CONN)
		return _9p_rerror(req9p, msgtag, ERANGE, plenout, preply);

	pfid = req9p->pconn->fids[*fid];

	/* Make sure the requested amount of data respects negotiated msize.
	 * Do the addition in 64 bits: *count is client-controlled, and a
	 * value near UINT32_MAX would wrap a 32-bit sum and bypass this
	 * bound check.
	 */
	if ((u64) *count + _9P_ROOM_RREAD > req9p->pconn->msize)
		return _9p_rerror(req9p, msgtag, ERANGE, plenout, preply);

	/* Check that it is a valid fid */
	if (pfid == NULL || pfid->pentry == NULL) {
		LogDebug(COMPONENT_9P, "request on invalid fid=%u", *fid);
		return _9p_rerror(req9p, msgtag, EIO, plenout, preply);
	}

	_9p_init_opctx(pfid, req9p);

	/* Start building the reply already
	 * So we don't need to use an intermediate data buffer
	 */
	_9p_setinitptr(cursor, preply, _9P_RREAD);
	_9p_setptr(cursor, msgtag, u16);
	databuffer = _9p_getbuffertofill(cursor);

	/* Do the job */
	if (pfid->xattr != NULL) {
		/* Copy the value cached during xattrwalk */
		if (*offset > pfid->xattr->xattr_size)
			return _9p_rerror(req9p, msgtag, EINVAL, plenout,
					  preply);
		if (pfid->xattr->xattr_write != _9P_XATTR_READ_ONLY)
			return _9p_rerror(req9p, msgtag, EINVAL, plenout,
					  preply);

		read_size = MIN(*count,
				pfid->xattr->xattr_size - *offset);
		memcpy(databuffer, pfid->xattr->xattr_content + *offset,
		       read_size);

		outcount = read_size;
	} else {
		if (pfid->pentry->fsal->m_ops.support_ex(pfid->pentry)) {
			/* Call the new fsal_read */
			fsal_status = fsal_read2(pfid->pentry, false,
						 pfid->state, *offset,
						 *count, &read_size,
						 databuffer, &eof_met, NULL);
		} else {
			/* Call legacy fsal_rdwr */
			fsal_status = fsal_rdwr(pfid->pentry, FSAL_IO_READ,
						*offset, *count, &read_size,
						databuffer, &eof_met, &sync,
						NULL);
		}

		/* Get the handle, for stats */
		struct gsh_client *client = req9p->pconn->client;

		if (client == NULL) {
			LogDebug(COMPONENT_9P,
				 "Cannot get client block for 9P request");
		} else {
			op_ctx->client = client;

			/* NOTE(review): the NFSv4 callers pass
			 * "status == OK" here while this passes
			 * FSAL_IS_ERROR(); confirm which polarity
			 * server_stats_io_done() expects.
			 */
			server_stats_io_done(*count, read_size,
					     FSAL_IS_ERROR(fsal_status),
					     false);
		}

		if (FSAL_IS_ERROR(fsal_status))
			return _9p_rerror(req9p, msgtag,
					  _9p_tools_errno(fsal_status),
					  plenout, preply);

		outcount = (u32) read_size;
	}

	_9p_setfilledbuffer(cursor, outcount);

	_9p_setendptr(cursor, preply);
	_9p_checkbound(cursor, preply, plenout);

	LogDebug(COMPONENT_9P, "RREAD: tag=%u fid=%u offset=%llu count=%u",
		 (u32) *msgtag, *fid, (unsigned long long)*offset, *count);

	/**
	 * @todo read statistics accounting goes here
	 * modeled on nfs I/O statistics
	 */
	return 1;
}
/**
 * @brief Handle a 9P TWRITE request and build the RWRITE reply.
 *
 * Parses tag/fid/offset/count from the request (the payload immediately
 * follows the header fields), validates the fid, the negotiated msize
 * bound and the export's write permission, then either appends into the
 * cached xattr buffer (xattr write sequence) or writes through
 * cache_inode_rdwr().
 *
 * @param[in]  req9p   Incoming 9P request (message buffer + connection)
 * @param[out] plenout Length of the constructed reply
 * @param[out] preply  Reply buffer to fill
 *
 * @return 1 when a reply (RWRITE or RERROR) has been built.
 */
int _9p_write(struct _9p_request_data *req9p, u32 *plenout, char *preply)
{
	char *cursor = req9p->_9pmsg + _9P_HDR_SIZE + _9P_TYPE_SIZE;
	u16 *msgtag = NULL;
	u32 *fid = NULL;
	u64 *offset = NULL;
	u32 *count = NULL;
	u32 outcount = 0;
	struct _9p_fid *pfid = NULL;
	size_t size;
	size_t written_size = 0;
	bool eof_met;
	cache_inode_status_t cache_status = CACHE_INODE_SUCCESS;
	/* bool sync = true; */
	bool sync = false;
	char *databuffer = NULL;

	/* fsal_status_t fsal_status; */

	/* Get data */
	_9p_getptr(cursor, msgtag, u16);
	_9p_getptr(cursor, fid, u32);
	_9p_getptr(cursor, offset, u64);
	_9p_getptr(cursor, count, u32);

	/* The write payload starts right after the count field */
	databuffer = cursor;

	LogDebug(COMPONENT_9P, "TWRITE: tag=%u fid=%u offset=%llu count=%u",
		 (u32) *msgtag, *fid, (unsigned long long)*offset, *count);

	if (*fid >= _9P_FID_PER_CONN)
		return _9p_rerror(req9p, msgtag, ERANGE, plenout, preply);

	pfid = req9p->pconn->fids[*fid];

	/* Make sure the requested amount of data respects negotiated msize.
	 * Do the addition in 64 bits: *count is client-controlled, and a
	 * value near UINT32_MAX would wrap a 32-bit sum and bypass this
	 * bound check.
	 */
	if ((u64) *count + _9P_ROOM_TWRITE > req9p->pconn->msize)
		return _9p_rerror(req9p, msgtag, ERANGE, plenout, preply);

	/* Check that it is a valid fid */
	if (pfid == NULL || pfid->pentry == NULL) {
		LogDebug(COMPONENT_9P, "request on invalid fid=%u", *fid);
		return _9p_rerror(req9p, msgtag, EIO, plenout, preply);
	}

	_9p_init_opctx(pfid, req9p);

	/* Refuse writes on read-only exports */
	if ((op_ctx->export_perms->options & EXPORT_OPTION_WRITE_ACCESS) == 0)
		return _9p_rerror(req9p, msgtag, EROFS, plenout, preply);

	/* Do the job */
	size = *count;

	if (pfid->specdata.xattr.xattr_content != NULL) {
		/* Xattr write in progress: accumulate into cached buffer */
		memcpy(pfid->specdata.xattr.xattr_content + (*offset),
		       databuffer, size);
		pfid->specdata.xattr.xattr_offset += size;
		pfid->specdata.xattr.xattr_write = true;

		/* ADD CODE TO DETECT GAP */
#if 0
		fsal_status =
		    pfid->pentry->obj_handle->ops->setextattr_value_by_id(
			pfid->pentry->obj_handle,
			&pfid->op_context,
			pfid->specdata.xattr.xattr_id,
			xattrval, size + 1);

		if (FSAL_IS_ERROR(fsal_status))
			return _9p_rerror(req9p, msgtag,
					  _9p_tools_errno
					  (cache_inode_error_convert
					   (fsal_status)), plenout,
					  preply);
#endif

		outcount = *count;
	} else {
		cache_status = cache_inode_rdwr(pfid->pentry,
						CACHE_INODE_WRITE,
						*offset, size,
						&written_size, databuffer,
						&eof_met, &sync, NULL);

		/* Get the handle, for stats */
		struct gsh_client *client = req9p->pconn->client;

		if (client == NULL) {
			LogDebug(COMPONENT_9P,
				 "Cannot get client block for 9P request");
		} else {
			op_ctx->client = client;

			/* NOTE(review): _9p_read() passes FSAL_IS_ERROR()
			 * as the third argument while this passes
			 * "status == SUCCESS"; confirm which polarity
			 * server_stats_io_done() expects.
			 */
			server_stats_io_done(size, written_size,
					     (cache_status ==
					      CACHE_INODE_SUCCESS) ?
					     true : false,
					     true);
		}

		if (cache_status != CACHE_INODE_SUCCESS)
			return _9p_rerror(req9p, msgtag,
					  _9p_tools_errno(cache_status),
					  plenout, preply);

		outcount = (u32) written_size;
	}

	/* Build the reply */
	_9p_setinitptr(cursor, preply, _9P_RWRITE);
	_9p_setptr(cursor, msgtag, u16);

	_9p_setvalue(cursor, outcount, u32);

	_9p_setendptr(cursor, preply);
	_9p_checkbound(cursor, preply, plenout);

	LogDebug(COMPONENT_9P,
		 "RWRITE: tag=%u fid=%u offset=%llu input count=%u output count=%u",
		 (u32) *msgtag, *fid, (unsigned long long)*offset, *count,
		 outcount);

	/**
	 * @todo write statistics accounting goes here
	 * modeled on nfs I/O stats
	 */
	return 1;
}
/**
 * @brief Common worker for NFS4_OP_READ (and READ_PLUS via io/info).
 *
 * Validates the filehandle and stateid, enforces share/open-mode and
 * delegation semantics, permission-checks the read (falling back to an
 * execute-permission check per POSIX semantics), clamps size against the
 * export's MaxRead/MaxOffsetRead, then performs the read via fsal_read2()
 * (extended-support FSALs) or fsal_rdwr() (legacy).
 *
 * Cleanup discipline: "done" ends anonymous share I/O and records stats;
 * "out" drops the owner and state references. Both labels must stay
 * reachable from every error path that acquired the corresponding
 * resource.
 *
 * @param[in]     op    Operation arguments (READ4args)
 * @param[in,out] data  Compound request context
 * @param[out]    resp  Operation results (READ4res)
 * @param[in]     io    FSAL_IO_READ or the READ_PLUS direction
 * @param[in]     info  io_info for READ_PLUS, NULL for plain READ
 *
 * @return res_READ4->status (NFS4_OK or an NFS4ERR_* code).
 */
static int nfs4_read(struct nfs_argop4 *op, compound_data_t *data,
		     struct nfs_resop4 *resp, fsal_io_direction_t io,
		     struct io_info *info)
{
	READ4args * const arg_READ4 = &op->nfs_argop4_u.opread;
	READ4res * const res_READ4 = &resp->nfs_resop4_u.opread;
	uint64_t size = 0;
	size_t read_size = 0;
	uint64_t offset = 0;
	bool eof_met = false;
	void *bufferdata = NULL;
	fsal_status_t fsal_status = {0, 0};
	state_t *state_found = NULL;
	state_t *state_open = NULL;
	struct fsal_obj_handle *obj = NULL;
	bool sync = false;
	bool anonymous_started = false;
	state_owner_t *owner = NULL;
	bool bypass = false;
	uint64_t MaxRead = atomic_fetch_uint64_t(&op_ctx->ctx_export->MaxRead);
	uint64_t MaxOffsetRead = atomic_fetch_uint64_t(
					&op_ctx->ctx_export->MaxOffsetRead);

	/* Say we are managing NFS4_OP_READ */
	resp->resop = NFS4_OP_READ;
	res_READ4->status = NFS4_OK;

	/* Do basic checks on a filehandle Only files can be read */

	/* pNFS data-server handles (minor version > 0) take the DS
	 * fast path and bypass all the state checking below.
	 */
	if ((data->minorversion > 0) &&
	    nfs4_Is_Fh_DSHandle(&data->currentFH)) {
		if (io == FSAL_IO_READ)
			return op_dsread(op, data, resp);
		else
			return op_dsread_plus(op, data, resp, info);
	}

	res_READ4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, true);

	if (res_READ4->status != NFS4_OK)
		return res_READ4->status;

	obj = data->current_obj;

	/* Check stateid correctness and get pointer to state (also
	 * checks for special stateids)
	 */
	res_READ4->status =
	    nfs4_Check_Stateid(&arg_READ4->stateid, obj, &state_found, data,
			       STATEID_SPECIAL_ANY, 0, false, "READ");

	if (res_READ4->status != NFS4_OK)
		return res_READ4->status;

	/* NB: After this point, if state_found == NULL, then the
	 * stateid is all-0 or all-1
	 */
	if (state_found != NULL) {
		struct state_deleg *sdeleg;

		if (info)
			info->io_advise = state_found->state_data.io_advise;
		switch (state_found->state_type) {
		case STATE_TYPE_SHARE:
			state_open = state_found;
			/* Note this causes an extra refcount, but it
			 * simplifies logic below.
			 */
			inc_state_t_ref(state_open);
			/**
			 * @todo FSF: need to check against existing locks
			 */
			break;

		case STATE_TYPE_LOCK:
			state_open = state_found->state_data.lock.openstate;
			inc_state_t_ref(state_open);
			/**
			 * @todo FSF: should check that write is in
			 * range of an byte range lock...
			 */
			break;

		case STATE_TYPE_DELEG:
			/* Check if the delegation state allows READ */
			sdeleg = &state_found->state_data.deleg;
			if (!(sdeleg->sd_type & OPEN_DELEGATE_READ) ||
			    (sdeleg->sd_state != DELEG_GRANTED)) {
				/* Invalid delegation for this operation. */
				LogDebug(COMPONENT_STATE,
					 "Delegation type:%d state:%d",
					 sdeleg->sd_type, sdeleg->sd_state);
				res_READ4->status = NFS4ERR_BAD_STATEID;
				goto out;
			}

			state_open = NULL;
			break;

		default:
			res_READ4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "READ with invalid statid of type %d",
				 state_found->state_type);
			goto out;
		}

		/* This is a read operation, this means that the file
		 * MUST have been opened for reading
		 */
		if (state_open != NULL &&
		    (state_open->state_data.share.
		     share_access & OPEN4_SHARE_ACCESS_READ) == 0) {
			/* Even if file is open for write, the client
			 * may do accidently read operation (caching).
			 * Because of this, READ is allowed if not
			 * explicitly denied. See page 112 in RFC 7530
			 * for more details.
			 */
			if (state_open->state_data.share.
			    share_deny & OPEN4_SHARE_DENY_READ) {
				/* Bad open mode, return NFS4ERR_OPENMODE */
				res_READ4->status = NFS4ERR_OPENMODE;

				if (isDebug(COMPONENT_NFS_V4_LOCK)) {
					char str[LOG_BUFF_LEN] = "\0";
					struct display_buffer dspbuf = {
							sizeof(str), str, str};

					display_stateid(&dspbuf, state_found);

					LogDebug(COMPONENT_NFS_V4_LOCK,
						 "READ %s doesn't have OPEN4_SHARE_ACCESS_READ",
						 str);
				}
				goto out;
			}
		}

		/**
		 * @todo : this piece of code looks a bit suspicious
		 *  (see Rong's mail)
		 *
		 * @todo: ACE: This works for now.  How do we want to
		 * handle owner confirmation across NFSv4.0/NFSv4.1?
		 * Do we want to mark every NFSv4.1 owner
		 * pre-confirmed, or make the check conditional on
		 * minorversion like we do here?
		 */
		switch (state_found->state_type) {
		case STATE_TYPE_SHARE:
			if (data->minorversion == 0 &&
			    !state_owner_confirmed(state_found)) {
				res_READ4->status = NFS4ERR_BAD_STATEID;
				goto out;
			}
			break;
		case STATE_TYPE_LOCK:
		case STATE_TYPE_DELEG:
			break;
		default:
			/* Sanity check: all other types are illegal.
			 * we should not got that place (similar check
			 * above), anyway it costs nothing to add this
			 * test
			 */
			res_READ4->status = NFS4ERR_BAD_STATEID;
			goto out;
		}
	} else {
		/* Special stateid, no open state, check to see if any
		 * share conflicts
		 */
		state_open = NULL;

		/* Special stateid, no open state, check to see if any share
		 * conflicts The stateid is all-0 or all-1
		 */
		bypass = arg_READ4->stateid.seqid != 0;
		res_READ4->status = nfs4_Errno_state(
				state_share_anonymous_io_start(
					obj,
					OPEN4_SHARE_ACCESS_READ,
					arg_READ4->stateid.seqid != 0
						? SHARE_BYPASS_READ
						: SHARE_BYPASS_NONE));

		if (res_READ4->status != NFS4_OK)
			goto out;

		anonymous_started = true;
	}

	/* Need to permission check the read. */
	fsal_status = obj->obj_ops.test_access(obj, FSAL_READ_ACCESS,
					       NULL, NULL, true);

	if (fsal_status.major == ERR_FSAL_ACCESS) {
		/* Test for execute permission */
		fsal_status = fsal_access(obj,
				  FSAL_MODE_MASK_SET(FSAL_X_OK) |
				  FSAL_ACE4_MASK_SET
				  (FSAL_ACE_PERM_EXECUTE));
	}

	if (FSAL_IS_ERROR(fsal_status)) {
		res_READ4->status = nfs4_Errno_status(fsal_status);
		goto done;
	}

	/* Get the size and offset of the read operation */
	offset = arg_READ4->offset;
	size = arg_READ4->count;

	if (MaxOffsetRead < UINT64_MAX) {
		LogFullDebug(COMPONENT_NFS_V4,
			     "Read offset=%" PRIu64 " size=%" PRIu64
			     " MaxOffSet=%" PRIu64, offset, size,
			     MaxOffsetRead);

		/* NOTE(review): offset + size can wrap uint64_t for a
		 * hostile offset near UINT64_MAX, bypassing this bound
		 * check — verify upstream validation or rewrite as
		 * "offset > MaxOffsetRead - size".
		 */
		if ((offset + size) > MaxOffsetRead) {
			LogEvent(COMPONENT_NFS_V4,
				 "A client tryed to violate max file size %"
				 PRIu64 " for exportid #%hu",
				 MaxOffsetRead,
				 op_ctx->ctx_export->export_id);

			res_READ4->status = NFS4ERR_FBIG;
			goto done;
		}
	}

	if (size > MaxRead) {
		/* the client asked for too much data, this should normally
		 * not happen because client will get FATTR4_MAXREAD value
		 * at mount time
		 */

		if (info == NULL ||
		    info->io_content.what != NFS4_CONTENT_HOLE) {
			LogFullDebug(COMPONENT_NFS_V4,
				     "read requested size = %"PRIu64
				     " read allowed size = %" PRIu64,
				     size, MaxRead);
			size = MaxRead;
		}
	}

	/* If size == 0, no I/O is to be made and everything is alright */
	if (size == 0) {
		/* A size = 0 can not lead to EOF */
		res_READ4->READ4res_u.resok4.eof = false;
		res_READ4->READ4res_u.resok4.data.data_len = 0;
		res_READ4->READ4res_u.resok4.data.data_val = NULL;
		res_READ4->status = NFS4_OK;
		goto done;
	}

	/* Some work is to be done */
	bufferdata = gsh_malloc_aligned(4096, size);

	if (!anonymous_started && data->minorversion == 0) {
		owner = get_state_owner_ref(state_found);
		if (owner != NULL) {
			/* Record the clientid so lease renewal can be
			 * credited to this client during the I/O.
			 */
			op_ctx->clientid =
			    &owner->so_owner.so_nfs4_owner.so_clientid;
		}
	}

	if (obj->fsal->m_ops.support_ex(obj)) {
		/* Call the new fsal_read2 */
		fsal_status = fsal_read2(obj, bypass, state_found, offset,
					 size, &read_size, bufferdata,
					 &eof_met, info);
	} else {
		/* Call legacy fsal_rdwr */
		fsal_status = fsal_rdwr(obj, io, offset, size, &read_size,
					bufferdata, &eof_met, &sync, info);
	}

	if (FSAL_IS_ERROR(fsal_status)) {
		res_READ4->status = nfs4_Errno_status(fsal_status);
		gsh_free(bufferdata);
		res_READ4->READ4res_u.resok4.data.data_val = NULL;
		goto done;
	}

	if (!eof_met) {
		/** @todo FSF: add a config option for this behavior? */
		/* Need to check against filesize for ESXi clients */
		struct attrlist attrs;

		fsal_prepare_attrs(&attrs, ATTR_SIZE);

		if (!FSAL_IS_ERROR(obj->obj_ops.getattrs(obj, &attrs)))
			eof_met = (offset + read_size) >= attrs.filesize;

		/* Done with the attrs */
		fsal_release_attrs(&attrs);
	}

	if (!anonymous_started && data->minorversion == 0)
		op_ctx->clientid = NULL;

	res_READ4->READ4res_u.resok4.data.data_len = read_size;
	res_READ4->READ4res_u.resok4.data.data_val = bufferdata;

	LogFullDebug(COMPONENT_NFS_V4,
		     "NFS4_OP_READ: offset = %" PRIu64
		     " read length = %zu eof=%u", offset, read_size, eof_met);

	/* Is EOF met or not ?
	 */
	res_READ4->READ4res_u.resok4.eof = eof_met;

	/* Say it is ok */
	res_READ4->status = NFS4_OK;

 done:
	if (anonymous_started)
		state_share_anonymous_io_done(obj, OPEN4_SHARE_ACCESS_READ);

	/* NOTE(review): third argument polarity differs from _9p_read()
	 * (which passes FSAL_IS_ERROR) — confirm which one
	 * server_stats_io_done() expects.
	 */
	server_stats_io_done(size, read_size,
			     (res_READ4->status == NFS4_OK) ? true : false,
			     false);

 out:
	if (owner != NULL)
		dec_state_owner_ref(owner);

	if (state_found != NULL)
		dec_state_t_ref(state_found);

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	return res_READ4->status;
}				/* nfs4_op_read */
/**
 * @brief Common worker for NFS4_OP_WRITE (and WRITE_PLUS via io/info).
 *
 * Validates the filehandle, quota and stateid, enforces share/open-mode
 * and delegation semantics, permission-checks the write, clamps size
 * against the export's MaxWrite/MaxOffsetWrite, then performs the write
 * via fsal_write2() (extended-support FSALs) or fsal_rdwr() (legacy) and
 * fills in the commit level and write verifier.
 *
 * Cleanup discipline: "done" ends anonymous share I/O and records stats;
 * "out" drops the owner and state references. Note that several early
 * stateid-error paths return directly (before any resource is acquired)
 * rather than jumping to a label.
 *
 * @param[in]     op    Operation arguments (WRITE4args)
 * @param[in,out] data  Compound request context
 * @param[out]    resp  Operation results (WRITE4res)
 * @param[in]     io    FSAL_IO_WRITE or the WRITE_PLUS direction
 * @param[in]     info  io_info for WRITE_PLUS, NULL for plain WRITE
 *
 * @return res_WRITE4->status (NFS4_OK or an NFS4ERR_* code).
 */
static int nfs4_write(struct nfs_argop4 *op, compound_data_t *data,
		      struct nfs_resop4 *resp, fsal_io_direction_t io,
		      struct io_info *info)
{
	WRITE4args * const arg_WRITE4 = &op->nfs_argop4_u.opwrite;
	WRITE4res * const res_WRITE4 = &resp->nfs_resop4_u.opwrite;
	uint64_t size = 0;
	size_t written_size = 0;
	uint64_t offset;
	bool eof_met;
	bool sync = false;
	void *bufferdata;
	stable_how4 stable_how;
	state_t *state_found = NULL;
	state_t *state_open = NULL;
	fsal_status_t fsal_status = {0, 0};
	struct fsal_obj_handle *obj = NULL;
	bool anonymous_started = false;
	struct gsh_buffdesc verf_desc;
	state_owner_t *owner = NULL;
	uint64_t MaxWrite =
	    atomic_fetch_uint64_t(&op_ctx->ctx_export->MaxWrite);
	uint64_t MaxOffsetWrite =
	    atomic_fetch_uint64_t(&op_ctx->ctx_export->MaxOffsetWrite);

	/* Lock are not supported */
	resp->resop = NFS4_OP_WRITE;
	res_WRITE4->status = NFS4_OK;

	/* pNFS data-server handles (minor version > 0) take the DS
	 * fast path and bypass all the state checking below.
	 */
	if ((data->minorversion > 0) &&
	    (nfs4_Is_Fh_DSHandle(&data->currentFH))) {
		if (io == FSAL_IO_WRITE)
			return op_dswrite(op, data, resp);
		else
			return op_dswrite_plus(op, data, resp, info);
	}

	/*
	 * Do basic checks on a filehandle
	 * Only files can be written
	 */
	res_WRITE4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, true);

	if (res_WRITE4->status != NFS4_OK)
		return res_WRITE4->status;

	/* if quota support is active, then we should check is the
	 * FSAL allows inode creation or not
	 */
	fsal_status = op_ctx->fsal_export->exp_ops.check_quota(
						op_ctx->fsal_export,
						op_ctx->ctx_export->fullpath,
						FSAL_QUOTA_INODES);

	if (FSAL_IS_ERROR(fsal_status)) {
		res_WRITE4->status = NFS4ERR_DQUOT;
		return res_WRITE4->status;
	}

	/* vnode to manage is the current one */
	obj = data->current_obj;

	/* Check stateid correctness and get pointer to state
	 * (also checks for special stateids)
	 */
	res_WRITE4->status =
	    nfs4_Check_Stateid(&arg_WRITE4->stateid, obj, &state_found, data,
			       STATEID_SPECIAL_ANY, 0, false, "WRITE");

	if (res_WRITE4->status != NFS4_OK)
		return res_WRITE4->status;

	/* NB: After this points, if state_found == NULL, then
	 * the stateid is all-0 or all-1
	 */
	if (state_found != NULL) {
		struct state_deleg *sdeleg;

		if (info)
			info->io_advise = state_found->state_data.io_advise;
		switch (state_found->state_type) {
		case STATE_TYPE_SHARE:
			state_open = state_found;
			/* Note this causes an extra refcount, but it
			 * simplifies logic below.
			 */
			inc_state_t_ref(state_open);
			/** @todo FSF: need to check against existing locks */
			break;

		case STATE_TYPE_LOCK:
			state_open = state_found->state_data.lock.openstate;
			inc_state_t_ref(state_open);
			/**
			 * @todo FSF: should check that write is in range of an
			 * exclusive lock...
			 */
			break;

		case STATE_TYPE_DELEG:
			/* Check if the delegation state allows READ */
			sdeleg = &state_found->state_data.deleg;
			if (!(sdeleg->sd_type & OPEN_DELEGATE_WRITE) ||
			    (sdeleg->sd_state != DELEG_GRANTED)) {
				/* Invalid delegation for this operation. */
				LogDebug(COMPONENT_STATE,
					 "Delegation type:%d state:%d",
					 sdeleg->sd_type, sdeleg->sd_state);
				res_WRITE4->status = NFS4ERR_BAD_STATEID;
				return res_WRITE4->status;
			}

			state_open = NULL;
			break;

		case STATE_TYPE_LAYOUT:
			/* pNFS layout stateid: no open state to check */
			state_open = NULL;
			break;

		default:
			res_WRITE4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "WRITE with invalid stateid of type %d",
				 (int)state_found->state_type);
			return res_WRITE4->status;
		}

		/* This is a write operation, this means that the file
		 * MUST have been opened for writing
		 */
		if (state_open != NULL &&
		    (state_open->state_data.share.share_access &
		     OPEN4_SHARE_ACCESS_WRITE) == 0) {
			/* Bad open mode, return NFS4ERR_OPENMODE */
			res_WRITE4->status = NFS4ERR_OPENMODE;

			if (isDebug(COMPONENT_NFS_V4_LOCK)) {
				char str[LOG_BUFF_LEN] = "\0";
				struct display_buffer dspbuf = {
						sizeof(str), str, str};

				display_stateid(&dspbuf, state_found);

				LogDebug(COMPONENT_NFS_V4_LOCK,
					 "WRITE %s doesn't have OPEN4_SHARE_ACCESS_WRITE",
					 str);
			}
			goto out;
		}
	} else {
		/* Special stateid, no open state, check to see if any
		 * share conflicts
		 */
		state_open = NULL;

		/* Special stateid, no open state, check to see if any share
		 * conflicts The stateid is all-0 or all-1
		 */
		res_WRITE4->status = nfs4_Errno_state(
				state_share_anonymous_io_start(
					obj,
					OPEN4_SHARE_ACCESS_WRITE,
					SHARE_BYPASS_NONE));

		if (res_WRITE4->status != NFS4_OK)
			goto out;

		anonymous_started = true;
	}

	/* Need to permission check the write. */
	fsal_status = obj->obj_ops.test_access(obj, FSAL_WRITE_ACCESS,
					       NULL, NULL, true);

	if (FSAL_IS_ERROR(fsal_status)) {
		res_WRITE4->status = nfs4_Errno_status(fsal_status);
		goto done;
	}

	/* Get the characteristics of the I/O to be made */
	offset = arg_WRITE4->offset;
	size = arg_WRITE4->data.data_len;
	stable_how = arg_WRITE4->stable;
	LogFullDebug(COMPONENT_NFS_V4,
		     "offset = %" PRIu64 "  length = %" PRIu64
		     "  stable = %d", offset, size, stable_how);

	if (MaxOffsetWrite < UINT64_MAX) {
		LogFullDebug(COMPONENT_NFS_V4,
			     "Write offset=%" PRIu64 " count=%" PRIu64
			     " MaxOffSet=%" PRIu64, offset, size,
			     MaxOffsetWrite);

		/* NOTE(review): offset + size can wrap uint64_t for a
		 * hostile offset near UINT64_MAX, bypassing this bound
		 * check — verify upstream validation or rewrite as
		 * "offset > MaxOffsetWrite - size".
		 */
		if ((offset + size) > MaxOffsetWrite) {
			LogEvent(COMPONENT_NFS_V4,
				 "A client tryed to violate max file size %"
				 PRIu64 " for exportid #%hu",
				 MaxOffsetWrite,
				 op_ctx->ctx_export->export_id);

			res_WRITE4->status = NFS4ERR_FBIG;
			goto done;
		}
	}

	if (size > MaxWrite) {
		/*
		 * The client asked for too much data, we
		 * must restrict him
		 */

		if (info == NULL ||
		    info->io_content.what != NFS4_CONTENT_HOLE) {
			LogFullDebug(COMPONENT_NFS_V4,
				     "write requested size = %" PRIu64
				     " write allowed size = %" PRIu64,
				     size, MaxWrite);
			size = MaxWrite;
		}
	}

	/* Where are the data ?
	 */
	bufferdata = arg_WRITE4->data.data_val;

	LogFullDebug(COMPONENT_NFS_V4,
		     "offset = %" PRIu64 " length = %" PRIu64,
		     offset, size);

	/* if size == 0 , no I/O) are actually made and everything is
	 * alright
	 */
	if (size == 0) {
		res_WRITE4->WRITE4res_u.resok4.count = 0;
		res_WRITE4->WRITE4res_u.resok4.committed = FILE_SYNC4;

		verf_desc.addr = res_WRITE4->WRITE4res_u.resok4.writeverf;
		verf_desc.len = sizeof(verifier4);
		op_ctx->fsal_export->exp_ops.get_write_verifier(
					op_ctx->fsal_export, &verf_desc);

		res_WRITE4->status = NFS4_OK;
		goto done;
	}

	/* DATA_SYNC4 and FILE_SYNC4 both request a synchronous write */
	if (arg_WRITE4->stable == UNSTABLE4)
		sync = false;
	else
		sync = true;

	if (!anonymous_started && data->minorversion == 0) {
		owner = get_state_owner_ref(state_found);
		if (owner != NULL) {
			/* Record the clientid so lease renewal can be
			 * credited to this client during the I/O.
			 */
			op_ctx->clientid =
			    &owner->so_owner.so_nfs4_owner.so_clientid;
		}
	}

	if (obj->fsal->m_ops.support_ex(obj)) {
		/* Call the new fsal_write */
		fsal_status = fsal_write2(obj, false, state_found, offset,
					  size, &written_size, bufferdata,
					  &sync, info);
	} else {
		/* Call legacy fsal_rdwr */
		fsal_status = fsal_rdwr(obj, io, offset, size, &written_size,
					bufferdata, &eof_met, &sync, info);
	}

	if (FSAL_IS_ERROR(fsal_status)) {
		LogDebug(COMPONENT_NFS_V4, "write returned %s",
			 fsal_err_txt(fsal_status));
		res_WRITE4->status = nfs4_Errno_status(fsal_status);
		goto done;
	}

	if (!anonymous_started && data->minorversion == 0)
		op_ctx->clientid = NULL;

	/* Set the returned value: sync may have been upgraded by the
	 * FSAL, so report the level actually achieved.
	 */
	if (sync)
		res_WRITE4->WRITE4res_u.resok4.committed = FILE_SYNC4;
	else
		res_WRITE4->WRITE4res_u.resok4.committed = UNSTABLE4;

	res_WRITE4->WRITE4res_u.resok4.count = written_size;

	verf_desc.addr = res_WRITE4->WRITE4res_u.resok4.writeverf;
	verf_desc.len = sizeof(verifier4);
	op_ctx->fsal_export->exp_ops.get_write_verifier(op_ctx->fsal_export,
							&verf_desc);

	res_WRITE4->status = NFS4_OK;

 done:
	if (anonymous_started)
		state_share_anonymous_io_done(obj, OPEN4_SHARE_ACCESS_WRITE);

	/* NOTE(review): third argument polarity differs from _9p_read()
	 * (which passes FSAL_IS_ERROR) — confirm which one
	 * server_stats_io_done() expects.
	 */
	server_stats_io_done(size, written_size,
			     (res_WRITE4->status == NFS4_OK) ? true : false,
			     true);

 out:
	if (owner != NULL)
		dec_state_owner_ref(owner);

	if (state_found != NULL)
		dec_state_t_ref(state_found);

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	return res_WRITE4->status;
}				/* nfs4_op_write */