/**
 * @brief Remove a named extended attribute from an object.
 *
 * Translates the NFSv4.2 REMOVEXATTR request into the GPFS
 * OPENHANDLE_REMOVEXATTRS ioctl.
 *
 * @param[in] obj_hdl Object whose attribute is removed
 * @param[in] xa_name UTF-8 name of the attribute to remove
 *
 * @return ERR_FSAL_NO_ERROR on success, mapped POSIX error otherwise.
 */
static fsal_status_t removexattrs(struct fsal_obj_handle *obj_hdl,
				  xattrname4 *xa_name)
{
	struct gpfs_fsal_obj_handle *gpfs_hdl;
	struct gpfs_filesystem *gpfs_fs = obj_hdl->fs->private_data;
	struct removexattr_arg rxarg;
	int rc;

	gpfs_hdl = container_of(obj_hdl, struct gpfs_fsal_obj_handle,
				obj_handle);

	rxarg.mountdirfd = gpfs_fs->root_fd;
	rxarg.handle = gpfs_hdl->handle;
	rxarg.name_len = xa_name->utf8string_len;
	rxarg.name = xa_name->utf8string_val;

	rc = gpfs_ganesha(OPENHANDLE_REMOVEXATTRS, &rxarg);
	if (rc < 0) {
		int errsv = errno;

		LogDebug(COMPONENT_FSAL,
			 "REMOVEXATTRS returned rc %d errsv %d", rc, errsv);
		return fsalstat(posix2fsal_error(errsv), errsv);
	}

	return fsalstat(ERR_FSAL_NO_ERROR, 0);
}
/**
 * GPFSFSAL_share_op: apply an NFSv4 share reservation on an open fd.
 *
 * @param mntfd         (IN) fd of the mount root, passed to GPFS
 * @param fd            (IN) open fd the reservation applies to
 * @param p_owner       (IN) opaque owner of the reservation
 * @param request_share (IN) requested share_access / share_deny bits
 *
 * @return ERR_FSAL_NO_ERROR on success, mapped POSIX error otherwise.
 */
fsal_status_t GPFSFSAL_share_op(int mntfd,	/* IN */
				int fd,		/* IN */
				void *p_owner,	/* IN */
				fsal_share_param_t request_share) /* IN */
{
	int rc = 0;
	int errsv;
	struct share_reserve_arg share_arg;

	LogFullDebug(COMPONENT_FSAL,
		     "Share reservation: access:%u deny:%u owner:%p",
		     request_share.share_access, request_share.share_deny,
		     p_owner);

	share_arg.mountdirfd = mntfd;
	share_arg.openfd = fd;
	share_arg.share_access = request_share.share_access;
	share_arg.share_deny = request_share.share_deny;

	rc = gpfs_ganesha(OPENHANDLE_SHARE_RESERVE, &share_arg);
	/* Save errno immediately: the LogDebug below may itself make
	 * library calls that clobber errno (the old code read errno
	 * after logging). */
	errsv = errno;

	if (rc < 0) {
		LogDebug(COMPONENT_FSAL,
			 "gpfs_ganesha: OPENHANDLE_SHARE_RESERVE returned error, rc=%d, errno=%d",
			 rc, errsv);
		return fsalstat(posix2fsal_error(errsv), errsv);
	}

	return fsalstat(ERR_FSAL_NO_ERROR, 0);
}
/**
 * @brief Fetch stat attributes for a file by GPFS handle.
 *
 * Issues OPENHANDLE_GET_XSTAT with XATTR_STAT to fill the stat buffer
 * inside @p p_buffxstat.
 *
 * @param[in]  p_context   Operation context (supplies the mount root fd)
 * @param[in]  p_handle    GPFS file handle of the object
 * @param[out] p_buffxstat Receives the stat data (zeroed first)
 *
 * @return 0 on success, EFAULT on bad arguments, errno from the ioctl
 *         otherwise.
 */
int fsal_get_xstat_by_handle(fsal_op_context_t *p_context,
			     fsal_handle_t *p_handle,
			     gpfsfsal_xstat_t *p_buffxstat)
{
	int rc;
	int errsv;
	int dirfd = 0;
	struct xstat_arg xstatarg;

	if (!p_handle || !p_context || !p_context->export_context ||
	    !p_buffxstat)
		return EFAULT;

	memset(p_buffxstat, 0, sizeof(gpfsfsal_xstat_t));

	dirfd = ((gpfsfsal_op_context_t *)p_context)->export_context->
		mount_root_fd;

	xstatarg.attr_valid = XATTR_STAT;	/* only stat data requested */
	xstatarg.mountdirfd = dirfd;
	xstatarg.handle = (struct gpfs_file_handle *)
		&((gpfsfsal_handle_t *)p_handle)->data.handle;
	xstatarg.acl = NULL;
	xstatarg.attr_changed = 0;
	xstatarg.buf = &p_buffxstat->buffstat;

	/* Use the FSAL logging facility rather than raw printf(), matching
	 * the rest of this module; also capture errno before logging. */
	LogFullDebug(COMPONENT_FSAL, "Getattr by handle ...");
	rc = gpfs_ganesha(OPENHANDLE_GET_XSTAT, &xstatarg);
	errsv = errno;
	LogFullDebug(COMPONENT_FSAL,
		     "gpfs_ganesha: GET_XSTAT returned, rc = %d", rc);

	if (rc < 0) {
		LogDebug(COMPONENT_FSAL,
			 "fsal_get_xstat_by_handle returned errno:%d -- %s",
			 errsv, strerror(errsv));
		return errsv;
	}

	return 0;
}
fsal_status_t gpfs_io_advise(struct fsal_obj_handle *obj_hdl, struct io_hints *hints) { struct fadvise_arg arg; struct gpfs_fsal_obj_handle *myself; fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR; int retval = 0; myself = container_of(obj_hdl, struct gpfs_fsal_obj_handle, obj_handle); assert(myself->u.file.fd >= 0 && myself->u.file.openflags != FSAL_O_CLOSED); arg.mountdirfd = myself->u.file.fd; arg.openfd = myself->u.file.fd; arg.offset = hints->offset; arg.length = hints->count; arg.hints = &hints->hints; retval = gpfs_ganesha(OPENHANDLE_FADVISE_BY_FD, &arg); if (retval == -1) { retval = errno; if (retval == EUNATCH) LogFatal(COMPONENT_FSAL, "GPFS Returned EUNATCH"); fsal_error = posix2fsal_error(retval); hints->hints = 0; } return fsalstat(fsal_error, 0); }
/**
 * @brief Convert a name, relative to an open directory fd, to a GPFS handle.
 *
 * @param[in]  dfd        Directory fd the name is relative to
 * @param[in]  p_fsalname Name to look up
 * @param[out] p_handle   Receives the GPFS file handle (zeroed first)
 *
 * @return 0 on success, EFAULT on bad arguments, errno from the ioctl
 *         otherwise.
 */
int fsal_internal_get_handle_at(int dfd, fsal_name_t *p_fsalname,
				fsal_handle_t *p_handle)
{
	int rc;
	struct name_handle_arg harg;

	if (!p_handle || !p_fsalname)
		return EFAULT;

	memset(p_handle, 0, sizeof(*p_handle));

	harg.handle = (struct gpfs_file_handle *)
		&((gpfsfsal_handle_t *)p_handle)->data.handle;
	harg.handle->handle_size = OPENHANDLE_HANDLE_LEN;
	harg.handle->handle_key_size = OPENHANDLE_KEY_LEN;
	harg.name = p_fsalname->name;
	harg.dfd = dfd;
	harg.flag = 0;

	/* Use the FSAL logging facility instead of raw printf(). */
	LogFullDebug(COMPONENT_FSAL, "Lookup handle at for %s",
		     p_fsalname->name);

	rc = gpfs_ganesha(OPENHANDLE_NAME_TO_HANDLE, &harg);
	if (rc < 0)
		return errno;

	return 0;
}
/** * @brief Get layout types supported by export * * We just return a pointer to the single type and set the count to 1. * * @param[in] export_pub Public export handle * @param[out] count Number of layout types in array * @param[out] types Static array of layout types that must not be * freed or modified and must not be dereferenced * after export reference is relinquished */ static void fs_layouttypes(struct fsal_export *export_hdl, int32_t *count, const layouttype4 **types) { int rc; struct open_arg arg; static const layouttype4 supported_layout_type = LAYOUT4_NFSV4_1_FILES; struct gpfs_filesystem *gpfs_fs; struct gpfs_fsal_export *myself; int errsv = 0; /** @todo FSF: needs real getdeviceinfo that gets to the correct * filesystem, this will not work for sub-mounted filesystems. */ myself = container_of(export_hdl, struct gpfs_fsal_export, export); gpfs_fs = myself->root_fs->private_data; arg.mountdirfd = gpfs_fs->root_fd; rc = gpfs_ganesha(OPENHANDLE_LAYOUT_TYPE, &arg); errsv = errno; if (rc < 0 || (rc != LAYOUT4_NFSV4_1_FILES)) { LogDebug(COMPONENT_PNFS, "fs_layouttypes rc %d", rc); if (errsv == EUNATCH) LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH"); *count = 0; return; } *types = &supported_layout_type; *count = 1; }
/**
 * @brief Flush a byte range of an open file to stable storage (COMMIT).
 *
 * The write verifier filled in by GPFS is recorded via
 * set_gpfs_verifier() whether or not the fsync succeeded.
 *
 * @param[in] obj_hdl Object with an open fd
 * @param[in] offset  Start of the range to commit
 * @param[in] len     Length of the range to commit
 *
 * @return ERR_FSAL_NO_ERROR on success, mapped POSIX error otherwise.
 */
fsal_status_t gpfs_commit(struct fsal_obj_handle *obj_hdl, /* sync */
			  off_t offset, size_t len)
{
	struct gpfs_fsal_obj_handle *gpfs_hdl;
	struct fsync_arg arg;
	verifier4 writeverf;
	fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR;
	int retval = 0;

	gpfs_hdl = container_of(obj_hdl, struct gpfs_fsal_obj_handle,
				obj_handle);
	assert(gpfs_hdl->u.file.fd >= 0 &&
	       gpfs_hdl->u.file.openflags != FSAL_O_CLOSED);

	arg.mountdirfd = gpfs_hdl->u.file.fd;
	arg.handle = gpfs_hdl->handle;
	arg.offset = offset;
	arg.length = len;
	arg.verifier4 = (int32_t *)&writeverf;

	retval = gpfs_ganesha(OPENHANDLE_FSYNC, &arg);
	if (retval == -1) {
		retval = errno;
		if (retval == EUNATCH)
			LogFatal(COMPONENT_FSAL, "GPFS Returned EUNATCH");
		fsal_error = posix2fsal_error(retval);
	}

	/* Remember the verifier GPFS handed back. */
	set_gpfs_verifier(&writeverf);

	return fsalstat(fsal_error, retval);
}
/**
 * @brief Commit a segment of a layout
 *
 * Update the size and time for a file accessed through a layout.
 *
 * @param[in]     obj_hdl  Public object handle
 * @param[in]     req_ctx  Request context
 * @param[in]     lou_body An XDR stream containing the layout
 *                         type-specific portion of the LAYOUTCOMMIT
 *                         arguments.
 * @param[in]     arg      Input arguments of the function
 * @param[in,out] res      In/out and output arguments of the function
 *
 * @return Valid error codes in RFC 5661, p. 366.
 */
static nfsstat4 layoutcommit(struct fsal_obj_handle *obj_hdl,
			     struct req_op_context *req_ctx, XDR *lou_body,
			     const struct fsal_layoutcommit_arg *arg,
			     struct fsal_layoutcommit_res *res)
{
	/* The private 'full' object handle */
	struct gpfs_fsal_obj_handle *myself;
	struct gpfs_file_handle *gpfs_handle;
	int rc = 0;
	struct layoutcommit_arg targ;
	int errsv = 0;
	struct gpfs_filesystem *gpfs_fs = obj_hdl->fs->private_data;

	/* Sanity check on type: only file layouts are supported */
	if (arg->type != LAYOUT4_NFSV4_1_FILES) {
		LogCrit(COMPONENT_PNFS, "Unsupported layout type: %x",
			arg->type);
		return NFS4ERR_UNKNOWN_LAYOUTTYPE;
	}

	myself = container_of(obj_hdl, struct gpfs_fsal_obj_handle,
			      obj_handle);
	gpfs_handle = myself->handle;

	/* Build the kernel-side LAYOUTCOMMIT request */
	targ.mountdirfd = gpfs_fs->root_fd;
	targ.handle = gpfs_handle;
	targ.xdr = NULL;	/* no layout type-specific body passed down */
	targ.offset = arg->segment.offset;
	targ.length = arg->segment.length;

	targ.reclaim = arg->reclaim; /* True if this is a reclaim commit */
	targ.new_offset = arg->new_offset; /* True if the client has
					      suggested a new offset */
	if (arg->new_offset)
		targ.last_write = arg->last_write; /* The offset of the last
						      byte written */
	targ.time_changed = arg->time_changed; /*True if provided a new mtime*/
	if (arg->time_changed) {
		targ.new_time.t_sec = arg->new_time.seconds;
		targ.new_time.t_nsec = arg->new_time.nseconds;
	}

	rc = gpfs_ganesha(OPENHANDLE_LAYOUT_COMMIT, &targ);
	errsv = errno;	/* save immediately; logging may clobber errno */
	if (rc != 0) {
		LogDebug(COMPONENT_PNFS, "GPFSFSAL_layoutcommit rc %d", rc);
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH");
		/* NOTE(review): rc is negated before the NFSv4 mapping,
		 * which assumes the ioctl returns a negative errno-style
		 * code rather than -1/errno — confirm against the GPFS
		 * kernel interface. */
		return posix2nfs4_error(-rc);
	}

	res->size_supplied = false;
	res->commit_done = true;
	return NFS4_OK;
}
/**
 * @brief Describe a GPFS striping pattern
 *
 * At present, we support a files based layout only.  The CRUSH
 * striping pattern is a-periodic
 *
 * @param[in]  fsal_hdl     FSAL module handle
 * @param[out] da_addr_body Stream we write the result to
 * @param[in]  type         Type of layout that gave the device
 * @param[in]  deviceid     The device to look up
 *
 * @return Valid error codes in RFC 5661, p. 365.
 */
nfsstat4 getdeviceinfo(struct fsal_module *fsal_hdl, XDR *da_addr_body,
		       const layouttype4 type,
		       const struct pnfs_deviceid *deviceid)
{
	struct deviceinfo_arg darg;
	/* The position before any bytes are sent to the stream */
	size_t da_beginning;
	size_t ds_buffer;
	/* The total length of the XDR-encoded da_addr_body */
	size_t da_length;
	int rc;
	int errsv;

	darg.mountdirfd = deviceid->device_id4;
	darg.type = LAYOUT4_NFSV4_1_FILES;
	darg.devid.devid = deviceid->devid;
	darg.devid.device_id1 = deviceid->device_id1;
	darg.devid.device_id2 = deviceid->device_id2;
	darg.devid.device_id4 = deviceid->device_id4;
	/* NOTE(review): devid is assigned twice (also above) — the second
	 * assignment is redundant but harmless. */
	darg.devid.devid = deviceid->devid;

	da_beginning = xdr_getpos(da_addr_body);
	/* Hand GPFS a window directly into the XDR buffer so the kernel
	 * can encode the device info in place. */
	darg.xdr.p = xdr_inline(da_addr_body, 0);
	ds_buffer = da_addr_body->x_handy; /* xdr_size_inline(da_addr_body); */
	darg.xdr.end = (int *)(darg.xdr.p +
			       ((ds_buffer - da_beginning) /
				BYTES_PER_XDR_UNIT));

	LogDebug(COMPONENT_PNFS,
		 "getdeviceinfo p %p end %p da_length %zu ds_buffer %zu seq %d fd %d fsid 0x%" PRIx64,
		 darg.xdr.p, darg.xdr.end, da_beginning, ds_buffer,
		 deviceid->device_id2, deviceid->device_id4, deviceid->devid);

	rc = gpfs_ganesha(OPENHANDLE_GET_DEVICEINFO, &darg);
	errsv = errno;	/* save before any library call can clobber it */
	if (rc < 0) {
		LogDebug(COMPONENT_PNFS, "getdeviceinfo rc %d", rc);
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH");
		return NFS4ERR_RESOURCE;
	}

	/* On success rc is the number of bytes the kernel encoded;
	 * advance the XDR position past them. */
	(void)xdr_inline(da_addr_body, rc);

	da_length = xdr_getpos(da_addr_body) - da_beginning;
	LogDebug(COMPONENT_PNFS, "getdeviceinfo rc %d da_length %zd",
		 rc, da_length);

	return NFS4_OK;
}
/**
 * @brief Potentially return one layout segment
 *
 * Since we don't make any reservations, in this version, or get any
 * pins to release, always succeed unless GPFS rejects the return.
 *
 * @param[in] obj_hdl  Public object handle
 * @param[in] req_ctx  Request context
 * @param[in] lrf_body Nothing for us
 * @param[in] arg      Input arguments of the function
 *
 * @return Valid error codes in RFC 5661, p. 367.
 */
static nfsstat4 layoutreturn(struct fsal_obj_handle *obj_hdl,
			     struct req_op_context *req_ctx, XDR *lrf_body,
			     const struct fsal_layoutreturn_arg *arg)
{
	struct gpfs_fsal_obj_handle *gpfs_hdl;
	struct gpfs_filesystem *gpfs_fs = obj_hdl->fs->private_data;
	struct layoutreturn_arg larg;
	int rc = 0;
	int errsv = 0;

	/* Sanity check on type: only file layouts are supported. */
	if (arg->lo_type != LAYOUT4_NFSV4_1_FILES) {
		LogCrit(COMPONENT_PNFS, "Unsupported layout type: %x",
			arg->lo_type);
		return NFS4ERR_UNKNOWN_LAYOUTTYPE;
	}

	gpfs_hdl = container_of(obj_hdl, struct gpfs_fsal_obj_handle,
				obj_handle);

	/* Nothing to do unless the segment is actually being disposed. */
	if (!arg->dispose)
		return NFS4_OK;

	larg.mountdirfd = gpfs_fs->root_fd;
	larg.handle = gpfs_hdl->handle;
	larg.args.lr_return_type = arg->lo_type;
	larg.args.lr_reclaim = (arg->circumstance == circumstance_reclaim);
	larg.args.lr_seg.clientid = 0;
	larg.args.lr_seg.layout_type = arg->lo_type;
	larg.args.lr_seg.iomode = arg->spec_segment.io_mode;
	larg.args.lr_seg.offset = arg->spec_segment.offset;
	larg.args.lr_seg.length = arg->spec_segment.length;

	rc = gpfs_ganesha(OPENHANDLE_LAYOUT_RETURN, &larg);
	errsv = errno;
	if (rc != 0) {
		LogDebug(COMPONENT_PNFS, "GPFSFSAL_layoutreturn rc %d", rc);
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH");
		return NFS4ERR_NOMATCHING_LAYOUT;
	}

	return NFS4_OK;
}
/**
 * @brief Read from a data-server handle.
 *
 * NFSv4.1 data server handles are disjoint from normal
 * filehandles (in Ganesha, there is a ds_flag in the filehandle_v4_t
 * structure) and do not get loaded into cache_inode or processed the
 * normal way.
 *
 * @param[in]  ds_pub           FSAL DS handle
 * @param[in]  req_ctx          Credentials
 * @param[in]  stateid          The stateid supplied with the READ operation,
 *                              for validation
 * @param[in]  offset           The offset at which to read
 * @param[in]  requested_length Length of read requested (and size of buffer)
 * @param[out] buffer           The buffer to which to store read data
 * @param[out] supplied_length  Length of data read
 * @param[out] end_of_file      True on end of file
 *
 * @return An NFSv4.1 status code.
 */
static nfsstat4 ds_read(struct fsal_ds_handle *const ds_pub,
			struct req_op_context *const req_ctx,
			const stateid4 *stateid, const offset4 offset,
			const count4 requested_length, void *const buffer,
			count4 * const supplied_length,
			bool * const end_of_file)
{
	/* The private 'full' DS handle */
	struct gpfs_ds *ds = container_of(ds_pub, struct gpfs_ds, ds);
	struct gpfs_file_handle *gpfs_handle = &ds->wire;
	/* The amount actually read */
	int amount_read = 0;
	struct dsread_arg rarg;
	unsigned int *fh;
	int errsv = 0;

	/* View the opaque wire handle as words for the debug dump below. */
	fh = (int *)&(gpfs_handle->f_handle);

	rarg.mountdirfd = ds->gpfs_fs->root_fd;
	rarg.handle = gpfs_handle;
	rarg.bufP = buffer;
	rarg.offset = offset;
	rarg.length = requested_length;
	rarg.options = 0;

	LogDebug(COMPONENT_PNFS,
		 "fh len %d type %d key %d: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 gpfs_handle->handle_size, gpfs_handle->handle_type,
		 gpfs_handle->handle_key_size, fh[0], fh[1], fh[2], fh[3],
		 fh[4], fh[5], fh[6], fh[7], fh[8], fh[9]);

	amount_read = gpfs_ganesha(OPENHANDLE_DS_READ, &rarg);
	errsv = errno;	/* save before any library call can clobber it */
	if (amount_read < 0) {
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH");
		return posix2nfs4_error(errsv);
	}

	*supplied_length = amount_read;

	/* A short (or empty) read from the DS means end of file. */
	if (amount_read == 0 || amount_read < requested_length)
		*end_of_file = true;

	return NFS4_OK;
}
/** @fn fsal_status_t
 *	GPFSFSAL_read(int fd, uint64_t offset, size_t buf_size, caddr_t buf,
 *		      size_t *read_amount, bool *end_of_file, int expfd)
 *  @brief Perform a read operation on an opened file.
 *
 *  @param fd          The file descriptor returned by FSAL_open.
 *  @param offset      Offset to read from.
 *  @param buf_size    Amount (in bytes) of data to be read.
 *  @param buf         Address where the read data is to be stored in memory.
 *  @param read_amount Pointer to the amount of data (in bytes) that have
 *                     been read during this call.
 *  @param end_of_file Pointer to a boolean; set true when the end of file
 *                     is reached during this call (left untouched otherwise).
 *  @param expfd       Export (mount root) fd for the ioctl.
 *
 *  @return ERR_FSAL_NO_ERROR on success, error otherwise.
 */
fsal_status_t GPFSFSAL_read(int fd, uint64_t offset, size_t buf_size,
			    caddr_t buf, size_t *read_amount,
			    bool *end_of_file, int expfd)
{
	struct read_arg rarg = {0};
	ssize_t bytes;
	int errsv;

	/* sanity checks. */
	if (buf == NULL || read_amount == NULL || end_of_file == NULL)
		return fsalstat(ERR_FSAL_FAULT, 0);

	rarg.mountdirfd = expfd;
	rarg.fd = fd;
	rarg.bufP = buf;
	rarg.offset = offset;
	rarg.length = buf_size;
	rarg.options = 0;

	/* Perform the ioctl with the caller's credentials. */
	fsal_set_credentials(op_ctx->creds);
	bytes = gpfs_ganesha(OPENHANDLE_READ_BY_FD, &rarg);
	errsv = errno;
	fsal_restore_ganesha_credentials();

	/* negative values mean error */
	if (bytes < 0) {
		/* if bytes is not -1, the split rc/errno didn't work */
		if (bytes != -1) {
			errsv = labs(bytes);
			LogWarn(COMPONENT_FSAL,
				"Received negative value (%d) from ioctl().",
				(int)bytes);
		}
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_FSAL, "GPFS Returned EUNATCH");
		return fsalstat(posix2fsal_error(errsv), errsv);
	}

	/* Short or empty read means we hit end of file. */
	if (bytes == 0 || bytes < buf_size)
		*end_of_file = true;

	*read_amount = bytes;
	return fsalstat(ERR_FSAL_NO_ERROR, 0);
}
/**
 * @brief Convert an absolute path to a GPFS file handle.
 *
 * Mirrors fsal_internal_get_handle_at() but resolves the path relative
 * to the current working directory (AT_FDCWD).
 *
 * @param[in]  p_context  Operation context (validated only)
 * @param[in]  p_fsalpath Path to look up
 * @param[out] p_handle   Receives the GPFS file handle
 *
 * @return 0 on success, -1 on bad arguments, errno from the ioctl
 *         otherwise (consistent with fsal_internal_get_handle_at).
 */
int fsal_internal_get_handle(fsal_op_context_t *p_context,
			     fsal_path_t *p_fsalpath,
			     fsal_handle_t *p_handle)
{
	int rc;
	struct name_handle_arg harg;

	if (!p_context || !p_handle || !p_fsalpath)
		return -1;

	harg.handle = (struct gpfs_file_handle *)
		&((gpfsfsal_handle_t *)p_handle)->data.handle;
	harg.handle->handle_size = OPENHANDLE_HANDLE_LEN;
	/* Initialize the key size too, as fsal_internal_get_handle_at()
	 * does for the same ioctl. */
	harg.handle->handle_key_size = OPENHANDLE_KEY_LEN;
	harg.name = p_fsalpath->path;
	harg.dfd = AT_FDCWD;
	harg.flag = 0;

	/* Use the FSAL logging facility instead of raw printf(). */
	LogFullDebug(COMPONENT_FSAL, "Lookup handle for %s ...",
		     p_fsalpath->path);

	rc = gpfs_ganesha(OPENHANDLE_NAME_TO_HANDLE, &harg);
	/* Return errno rather than the raw -1, matching the sibling
	 * fsal_internal_get_handle_at(); callers test for non-zero. */
	if (rc < 0)
		return errno;

	return 0;
}
/** @fn fsal_status_t
 *	GPFSFSAL_write(int fd, uint64_t offset, size_t buf_size, caddr_t buf,
 *		       size_t *write_amount, bool *fsal_stable,
 *		       const struct req_op_context *op_ctx, int expfd)
 *  @brief Perform a write operation on an opened file.
 *
 *  @param fd           The file descriptor returned by FSAL_open.
 *  @param offset       Offset to write at.
 *  @param buf_size     Amount (in bytes) of data to be written.
 *  @param buf          Address where the data is in memory.
 *  @param write_amount Pointer to the amount of data (in bytes) that have
 *                      been written during this call.
 *  @param fsal_stable  In: stability wanted; out: stability obtained.
 *  @param op_ctx       Request context supplying the caller's credentials.
 *  @param expfd        Export (mount root) fd for the ioctl.
 *
 *  @return ERR_FSAL_NO_ERROR on success, error otherwise
 */
fsal_status_t GPFSFSAL_write(int fd, uint64_t offset, size_t buf_size,
			     caddr_t buf, size_t *write_amount,
			     bool *fsal_stable,
			     const struct req_op_context *op_ctx, int expfd)
{
	struct write_arg warg = {0};
	uint32_t stability_got = 0;
	ssize_t nb_write;
	int errsv;

	/* sanity checks. */
	if (!buf || !write_amount || !fsal_stable)
		return fsalstat(ERR_FSAL_FAULT, 0);

	warg.mountdirfd = expfd;
	warg.fd = fd;
	warg.bufP = buf;
	warg.offset = offset;
	warg.length = buf_size;
	warg.stability_wanted = *fsal_stable;
	warg.stability_got = &stability_got;
	warg.options = 0;

	fsal_set_credentials(op_ctx->creds);
	nb_write = gpfs_ganesha(OPENHANDLE_WRITE_BY_FD, &warg);
	errsv = errno;
	fsal_restore_ganesha_credentials();

	/* Treat ANY negative value as an error, mirroring GPFSFSAL_read():
	 * the old code only checked -1, so a -errno smuggled in the return
	 * value was silently stored as a huge *write_amount. */
	if (nb_write < 0) {
		/* if nb_write is not -1, the split rc/errno didn't work */
		if (nb_write != -1) {
			errsv = labs(nb_write);
			LogWarn(COMPONENT_FSAL,
				"Received negative value (%d) from ioctl().",
				(int)nb_write);
		}
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_FSAL, "GPFS Returned EUNATCH");
		return fsalstat(posix2fsal_error(errsv), errsv);
	}

	*write_amount = nb_write;
	*fsal_stable = (stability_got) ? true : false;

	return fsalstat(ERR_FSAL_NO_ERROR, 0);
}
/**
 * @brief Fetch the value of one extended attribute (NFSv4.2 GETXATTR).
 *
 * The caller supplies the value buffer in @p xa_value; on success its
 * length field is updated to the actual value length.
 *
 * @param[in]     obj_hdl  Object the attribute belongs to
 * @param[in]     xa_name  Attribute name
 * @param[in,out] xa_value Buffer for the value; length updated on success
 *
 * @return ERR_FSAL_TOOSMALL if the buffer is too small, ERR_FSAL_NOENT if
 *         the attribute does not exist, mapped POSIX error otherwise.
 */
static fsal_status_t getxattrs(struct fsal_obj_handle *obj_hdl,
			       xattrname4 *xa_name, xattrvalue4 *xa_value)
{
	struct gpfs_fsal_obj_handle *gpfs_hdl;
	struct gpfs_filesystem *gpfs_fs = obj_hdl->fs->private_data;
	struct getxattr_arg gxarg;
	int rc;

	gpfs_hdl = container_of(obj_hdl, struct gpfs_fsal_obj_handle,
				obj_handle);

	gxarg.mountdirfd = gpfs_fs->root_fd;
	gxarg.handle = gpfs_hdl->handle;
	gxarg.name_len = xa_name->utf8string_len;
	gxarg.name = xa_name->utf8string_val;
	gxarg.value_len = xa_value->utf8string_len;
	gxarg.value = xa_value->utf8string_val;

	rc = gpfs_ganesha(OPENHANDLE_GETXATTRS, &gxarg);
	if (rc < 0) {
		int errsv = errno;

		LogDebug(COMPONENT_FSAL,
			 "GETXATTRS returned rc %d errsv %d", rc, errsv);
		switch (errsv) {
		case ERANGE:	/* caller's value buffer too small */
			return fsalstat(ERR_FSAL_TOOSMALL, 0);
		case ENODATA:	/* attribute does not exist */
			return fsalstat(ERR_FSAL_NOENT, 0);
		default:
			return fsalstat(posix2fsal_error(errsv), errsv);
		}
	}

	LogDebug(COMPONENT_FSAL, "GETXATTRS returned value %.*s len %d rc %d",
		 gxarg.value_len, (char *)gxarg.value, gxarg.value_len, rc);

	xa_value->utf8string_len = gxarg.value_len;
	return fsalstat(ERR_FSAL_NO_ERROR, 0);
}
fsal_status_t gpfs_seek(struct fsal_obj_handle *obj_hdl, struct io_info *info) { struct fseek_arg arg; struct gpfs_fsal_obj_handle *myself; fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR; struct gpfs_io_info io_info; int retval = 0; myself = container_of(obj_hdl, struct gpfs_fsal_obj_handle, obj_handle); assert(myself->u.file.fd >= 0 && myself->u.file.openflags != FSAL_O_CLOSED); arg.mountdirfd = myself->u.file.fd; arg.openfd = myself->u.file.fd; arg.info = &io_info; io_info.io_offset = info->io_content.hole.di_offset; if (info->io_content.what == NFS4_CONTENT_DATA) io_info.io_what = SEEK_DATA; else if (info->io_content.what == NFS4_CONTENT_HOLE) io_info.io_what = SEEK_HOLE; else return fsalstat(ERR_FSAL_UNION_NOTSUPP, 0); retval = gpfs_ganesha(OPENHANDLE_SEEK_BY_FD, &arg); if (retval == -1) { retval = errno; if (retval == EUNATCH) LogFatal(COMPONENT_FSAL, "GPFS Returned EUNATCH"); fsal_error = posix2fsal_error(retval); } else { info->io_eof = io_info.io_eof; info->io_content.hole.di_offset = io_info.io_offset; info->io_content.hole.di_length = io_info.io_len; } return fsalstat(fsal_error, 0); }
/** @fn fsal_status_t * GPFSFSAL_alloc(int fd, uint64_t offset, uint64_t length, bool allocate) * @brief Perform a de/allocc operation on an opened file. * @param fd The file descriptor returned by FSAL_open. * @param offset Offset * @param length Length * @param allocate Allocate * * @return ERR_FSAL_NO_ERROR on success, error otherwise */ fsal_status_t GPFSFSAL_alloc(int fd, uint64_t offset, uint64_t length, bool allocate) { struct alloc_arg aarg = {0}; int errsv; int rc; aarg.fd = fd; aarg.offset = offset; aarg.length = length; aarg.options = (allocate) ? IO_ALLOCATE : IO_DEALLOCATE; fsal_set_credentials(op_ctx->creds); rc = gpfs_ganesha(OPENHANDLE_ALLOCATE_BY_FD, &aarg); errsv = errno; fsal_restore_ganesha_credentials(); if (rc == -1) { if (errsv == EUNATCH) LogFatal(COMPONENT_FSAL, "GPFS Returned EUNATCH"); return fsalstat(posix2fsal_error(errsv), errsv); } return fsalstat(ERR_FSAL_NO_ERROR, 0); }
static fsal_status_t listxattrs(struct fsal_obj_handle *obj_hdl, count4 la_maxcount, nfs_cookie4 *la_cookie, verifier4 *la_cookieverf, bool_t *lr_eof, xattrlist4 *lr_names) { int rc; int errsv; char *name, *next, *end, *val, *valstart; int entryCount = 0; char *buf = NULL; struct listxattr_arg lxarg; struct gpfs_fsal_obj_handle *myself; struct gpfs_filesystem *gpfs_fs = obj_hdl->fs->private_data; component4 *entry = lr_names->entries; val = (char *)entry + la_maxcount; valstart = val; myself = container_of(obj_hdl, struct gpfs_fsal_obj_handle, obj_handle); #define MAXCOUNT (1024*64) buf = gsh_malloc(MAXCOUNT); lxarg.mountdirfd = gpfs_fs->root_fd; lxarg.handle = myself->handle; lxarg.cookie = 0; /* For now gpfs doesn't support cookie */ lxarg.verifier = *((uint64_t *)la_cookieverf); lxarg.eof = false; lxarg.name_len = MAXCOUNT; lxarg.names = buf; LogFullDebug(COMPONENT_FSAL, "in cookie %llu len %d cookieverf %llx", (unsigned long long)lxarg.cookie, la_maxcount, (unsigned long long)lxarg.verifier); rc = gpfs_ganesha(OPENHANDLE_LISTXATTRS, &lxarg); if (rc < 0) { errsv = errno; LogDebug(COMPONENT_FSAL, "LISTXATTRS returned rc %d errsv %d", rc, errsv); gsh_free(buf); if (errsv == ERANGE) return fsalstat(ERR_FSAL_TOOSMALL, 0); return fsalstat(posix2fsal_error(errsv), errsv); } if (!lxarg.eof) { errsv = ERR_FSAL_SERVERFAULT; LogCrit(COMPONENT_FSAL, "Unable to get xattr."); return fsalstat(posix2fsal_error(errsv), errsv); } /* Only return names that the caller can read via getxattr */ name = buf; end = buf + rc; entry->utf8string_len = 0; entry->utf8string_val = NULL; while (name < end) { next = strchr(name, '\0'); next += 1; LogDebug(COMPONENT_FSAL, "nameP %s at offset %ld", name, (next - name)); if (entryCount >= *la_cookie) { if ((((char *)entry - (char *)lr_names->entries) + sizeof(component4) > la_maxcount) || ((val - valstart)+(next - name) > la_maxcount)) { gsh_free(buf); *lr_eof = false; lr_names->entryCount = entryCount - *la_cookie; *la_cookie += entryCount; 
LogFullDebug(COMPONENT_FSAL, "out1 cookie %llu off %ld eof %d cookieverf %llx", (unsigned long long)*la_cookie, (next - name), *lr_eof, (unsigned long long)* ((uint64_t *)la_cookieverf)); if (lr_names->entryCount == 0) return fsalstat(ERR_FSAL_TOOSMALL, 0); return fsalstat(ERR_FSAL_NO_ERROR, 0); } entry->utf8string_len = next - name; entry->utf8string_val = val; memcpy(entry->utf8string_val, name, entry->utf8string_len); LogFullDebug(COMPONENT_FSAL, "entry %d val %p at %p len %d at %p name %s", entryCount, val, entry, entry->utf8string_len, entry->utf8string_val, entry->utf8string_val); val += entry->utf8string_len; entry += 1; } /* Advance to next name in original buffer */ name = next; entryCount += 1; } lr_names->entryCount = entryCount - *la_cookie; *la_cookie = 0; *lr_eof = true; gsh_free(buf); LogFullDebug(COMPONENT_FSAL, "out2 cookie %llu eof %d cookieverf %llx", (unsigned long long)*la_cookie, *lr_eof, (unsigned long long)*((uint64_t *)la_cookieverf)); return fsalstat(ERR_FSAL_NO_ERROR, 0); }
/**
 * @brief NFSv4.2 READ_PLUS: read data, reporting holes separately.
 *
 * Issues a read with IO_SKIP_HOLE; when GPFS reports ENODATA the range
 * is a hole and is described in @p info instead of being copied.
 *
 * @param[in]  obj_hdl     Object with an open fd
 * @param[in]  offset      Offset to read from
 * @param[in]  buffer_size Bytes requested
 * @param[out] buffer      Destination for data reads
 * @param[out] read_amount Bytes read (or hole length clamped to file size)
 * @param[out] end_of_file Set according to the amount read vs. file size
 * @param[out] info        Describes the result as DATA or HOLE content
 */
fsal_status_t gpfs_read_plus(struct fsal_obj_handle *obj_hdl, uint64_t offset,
			     size_t buffer_size, void *buffer,
			     size_t *read_amount, bool *end_of_file,
			     struct io_info *info)
{
	struct gpfs_fsal_obj_handle *myself;
	fsal_status_t status = { ERR_FSAL_NO_ERROR, 0 };
	struct read_arg rarg;
	ssize_t nb_read;
	int errsv = 0;

	if (!buffer || !read_amount || !end_of_file || !info)
		return fsalstat(ERR_FSAL_FAULT, 0);

	myself = container_of(obj_hdl, struct gpfs_fsal_obj_handle,
			      obj_handle);
	assert(myself->u.file.fd >= 0 &&
	       myself->u.file.openflags != FSAL_O_CLOSED);

	rarg.mountdirfd = myself->u.file.fd;
	rarg.fd = myself->u.file.fd;
	rarg.bufP = buffer;
	rarg.offset = offset;
	rarg.length = buffer_size;
	/* Ask GPFS to fail with ENODATA instead of reading through a hole. */
	rarg.options = IO_SKIP_HOLE;

	nb_read = gpfs_ganesha(OPENHANDLE_READ_BY_FD, &rarg);
	errsv = errno;	/* save before any library call can clobber it */

	if (nb_read < 0) {
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_FSAL, "GPFS Returned EUNATCH");
		if (errsv != ENODATA)
			return fsalstat(posix2fsal_error(errsv), errsv);

		/* errsv == ENODATA: the range is a hole.  Report it and
		 * clamp its length to the known file size. */
		info->io_content.what = NFS4_CONTENT_HOLE;
		info->io_content.hole.di_offset = offset;     /*offset of hole*/
		info->io_content.hole.di_length = buffer_size;/*length of hole*/
		*read_amount = buffer_size;
		if ((buffer_size + offset) > myself->attributes.filesize) {
			if (offset > myself->attributes.filesize)
				*read_amount = 0;
			else
				*read_amount =
					myself->attributes.filesize - offset;
			info->io_content.hole.di_length = *read_amount;
		}
	} else {
		/* Plain data read. */
		info->io_content.what = NFS4_CONTENT_DATA;
		info->io_content.data.d_offset = offset + nb_read;
		info->io_content.data.d_data.data_len = nb_read;
		info->io_content.data.d_data.data_val = buffer;
		*read_amount = nb_read;
	}

	/* EOF when the read came up short or reached the file size
	 * (the hole/error path with nb_read == -1 never sets EOF true). */
	if (nb_read != -1 &&
	    (nb_read == 0 || nb_read < buffer_size ||
	     ((offset + nb_read) >= myself->attributes.filesize)))
		*end_of_file = true;
	else
		*end_of_file = false;

	return status;
}
/** * GPFSFSAL_lock_op: * Lock/unlock/test an owner independent (anonymous) lock for a region in a file. * * \param p_file_descriptor (input): * File descriptor of the file to lock. * \param p_filehandle (input): * File handle of the file to lock. * \param p_context (input): * Context * \param p_owner (input): * Owner for the requested lock; Opaque to FSAL. * \param lock_op (input): * Can be either FSAL_OP_LOCKT, FSAL_OP_LOCK, FSAL_OP_UNLOCK. * The operations are test if a file region is locked, lock a * file region, unlock a file region. * \param lock_type (input): * Can be either FSAL_LOCK_R, FSAL_LOCK_W. * Either a read lock or write lock. * \param lock_start (input): * Start of lock region measured as offset of bytes from start of file. * \param lock_length (input): * Number of bytes to lock. * * \return Major error codes: * - ERR_FSAL_NO_ERROR: no error. * - ERR_FSAL_FAULT: One of the in put parameters is NULL. * - ERR_FSAL_PERM: lock_op was FSAL_OP_LOCKT and the result was that the operation would not be possible. 
*/ fsal_status_t GPFSFSAL_lock_op( fsal_file_t * p_file_descriptor, /* IN */ fsal_handle_t * p_filehandle, /* IN */ fsal_op_context_t * p_context, /* IN */ void * p_owner, /* IN */ fsal_lock_op_t lock_op, /* IN */ fsal_lock_param_t request_lock, /* IN */ fsal_lock_param_t * conflicting_lock) /* OUT */ { int retval; struct glock glock_args; struct set_get_lock_arg gpfs_sg_arg; glock_args.lfd = ((gpfsfsal_file_t *)p_file_descriptor)->fd; gpfsfsal_op_context_t *gpfs_op_cxt = (gpfsfsal_op_context_t *)p_context; gpfsfsal_file_t * pfd = (gpfsfsal_file_t *) p_file_descriptor; if(p_file_descriptor == NULL) { LogDebug(COMPONENT_FSAL, "p_file_descriptor arg is NULL."); Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_lock_op); } if(p_filehandle == NULL) { LogDebug(COMPONENT_FSAL, "p_filehandle arg is NULL."); Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_lock_op); } if(p_context == NULL) { LogDebug(COMPONENT_FSAL, "p_context arg is NULL."); Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_lock_op); } if(p_owner == NULL) { LogDebug(COMPONENT_FSAL, "p_owner arg is NULL."); Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_lock_op); } if(conflicting_lock == NULL && lock_op == FSAL_OP_LOCKT) { LogDebug(COMPONENT_FSAL, "Conflicting_lock argument can't be NULL with lock_op = LOCKT"); Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_lock_op); } LogFullDebug(COMPONENT_FSAL, "Locking: op:%d type:%d start:%llu length:%llu owner:%p", lock_op, request_lock.lock_type, request_lock.lock_start, request_lock.lock_length, p_owner); if(lock_op == FSAL_OP_LOCKT) glock_args.cmd = F_GETLK; else if(lock_op == FSAL_OP_LOCK || lock_op == FSAL_OP_UNLOCK) glock_args.cmd = F_SETLK; else if(lock_op == FSAL_OP_LOCKB) glock_args.cmd = F_SETLKW; /*TODO: Handle FSAL_OP_CANCEL */ else { LogDebug(COMPONENT_FSAL, "ERROR: Lock operation requested was not TEST, GET, or SET."); Return(ERR_FSAL_NOTSUPP, 0, INDEX_FSAL_lock_op); } if(request_lock.lock_type == FSAL_LOCK_R) glock_args.flock.l_type = F_RDLCK; else if(request_lock.lock_type == FSAL_LOCK_W) 
glock_args.flock.l_type = F_WRLCK; else { LogDebug(COMPONENT_FSAL, "ERROR: The requested lock type was not read or write."); Return(ERR_FSAL_NOTSUPP, 0, INDEX_FSAL_lock_op); } if(lock_op == FSAL_OP_UNLOCK) glock_args.flock.l_type = F_UNLCK; glock_args.flock.l_len = request_lock.lock_length; glock_args.flock.l_start = request_lock.lock_start; glock_args.flock.l_whence = SEEK_SET; glock_args.lfd = pfd->fd; glock_args.lock_owner = p_owner; gpfs_sg_arg.mountdirfd = gpfs_op_cxt->export_context->mount_root_fd; gpfs_sg_arg.lock = &glock_args; errno = 0; retval = gpfs_ganesha(lock_op == FSAL_OP_LOCKT ? OPENHANDLE_GET_LOCK : OPENHANDLE_SET_LOCK, &gpfs_sg_arg); if(retval && lock_op == FSAL_OP_LOCK) { if(conflicting_lock != NULL) { glock_args.cmd = F_GETLK; retval = gpfs_ganesha(OPENHANDLE_GET_LOCK, &gpfs_sg_arg); if(retval) { LogCrit(COMPONENT_FSAL, "After failing a set lock request, An attempt to get the current owner details also failed."); Return(posix2fsal_error(errno), errno, INDEX_FSAL_lock_op); } conflicting_lock->lock_owner = glock_args.flock.l_pid; conflicting_lock->lock_length = glock_args.flock.l_len; conflicting_lock->lock_start = glock_args.flock.l_start; conflicting_lock->lock_type = glock_args.flock.l_type; } Return(posix2fsal_error(errno), errno, INDEX_FSAL_lock_op); } /* F_UNLCK is returned then the tested operation would be possible. */ if(conflicting_lock != NULL) { if(lock_op == FSAL_OP_LOCKT && glock_args.flock.l_type != F_UNLCK) { conflicting_lock->lock_owner = glock_args.flock.l_pid; conflicting_lock->lock_length = glock_args.flock.l_len; conflicting_lock->lock_start = glock_args.flock.l_start; conflicting_lock->lock_type = glock_args.flock.l_type; } else { conflicting_lock->lock_owner = 0; conflicting_lock->lock_length = 0; conflicting_lock->lock_start = 0; conflicting_lock->lock_type = FSAL_NO_LOCK; } } Return(ERR_FSAL_NO_ERROR, 0, INDEX_FSAL_lock_op); }
/**
 * @brief Grant a layout segment.
 *
 * Grant a layout on a subset of a file requested.  As a special case,
 * lie and grant a whole-file layout if requested, because Linux will
 * ignore it otherwise.
 *
 * @param[in]     obj_hdl  Public object handle
 * @param[in]     req_ctx  Request context
 * @param[out]    loc_body An XDR stream to which the FSAL must encode
 *                         the layout specific portion of the granted
 *                         layout segment.
 * @param[in]     arg      Input arguments of the function
 * @param[in,out] res      In/out and output arguments of the function
 *
 * @return Valid error codes in RFC 5661, pp. 366-7.
 */
static nfsstat4 layoutget(struct fsal_obj_handle *obj_hdl,
			  struct req_op_context *req_ctx, XDR *loc_body,
			  const struct fsal_layoutget_arg *arg,
			  struct fsal_layoutget_res *res)
{
	struct gpfs_fsal_obj_handle *myself;
	struct gpfs_file_handle gpfs_ds_handle;
	struct layoutget_arg larg;
	struct layoutreturn_arg lrarg;
	unsigned int rc, *fh;
	/* Structure containing the storage parameters of the file within
	   the GPFS cluster. */
	struct pnfs_filelayout_layout file_layout;
	/* Width of each stripe on the file */
	uint32_t stripe_width = 0;
	/* Utility parameter */
	nfl_util4 util = 0;
	/* The last byte that can be accessed through pNFS */
	/* uint64_t last_possible_byte = 0; strict. set but unused */
	/* The deviceid for this layout */
	struct pnfs_deviceid deviceid = DEVICE_ID_INIT_ZERO(FSAL_ID_GPFS);
	/* NFS Status */
	nfsstat4 nfs_status = 0;
	/* Descriptor for DS handle */
	struct gsh_buffdesc ds_desc;
	int errsv = 0;
	struct gpfs_filesystem *gpfs_fs = obj_hdl->fs->private_data;

	myself = container_of(obj_hdl, struct gpfs_fsal_obj_handle,
			      obj_handle);

	/* We support only LAYOUT4_NFSV4_1_FILES layouts */
	if (arg->type != LAYOUT4_NFSV4_1_FILES) {
		LogCrit(COMPONENT_PNFS, "Unsupported layout type: %x",
			arg->type);
		return NFS4ERR_UNKNOWN_LAYOUTTYPE;
	}

	/* Get basic information on the file and calculate the dimensions
	   of the layout we can support. */
	memset(&file_layout, 0, sizeof(struct pnfs_filelayout_layout));
	/* The DS handle starts as a copy of the MDS handle; GPFS rewrites
	 * it in the LAYOUT_GET call below. */
	memcpy(&gpfs_ds_handle, myself->handle,
	       sizeof(struct gpfs_file_handle));

	larg.fd = gpfs_fs->root_fd;
	larg.args.lg_minlength = arg->minlength;
	larg.args.lg_sbid = arg->export_id;
	larg.args.lg_fh = &gpfs_ds_handle;
	larg.args.lg_iomode = res->segment.io_mode;
	larg.handle = &gpfs_ds_handle;
	larg.file_layout = &file_layout;
	larg.xdr = NULL;

	/* Dump the handle words before the call, for debugging. */
	fh = (int *)&(gpfs_ds_handle.f_handle);
	LogDebug(COMPONENT_PNFS,
		 "fh in len %d type %d key %d: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x",
		 gpfs_ds_handle.handle_size, gpfs_ds_handle.handle_type,
		 gpfs_ds_handle.handle_key_size, fh[0], fh[1], fh[2], fh[3],
		 fh[4], fh[5], fh[6], fh[7], fh[8], fh[9]);

	rc = gpfs_ganesha(OPENHANDLE_LAYOUT_GET, &larg);
	errsv = errno;	/* save immediately; logging may clobber errno */
	if (rc != 0) {
		LogDebug(COMPONENT_PNFS, "GPFSFSAL_layoutget rc %d", rc);
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH");
		return NFS4ERR_UNKNOWN_LAYOUTTYPE;
	}

	/* Dump the (possibly rewritten) handle words after the call. */
	fh = (int *)&(gpfs_ds_handle.f_handle);
	LogDebug(COMPONENT_PNFS,
		 "fh out len %d type %d key %d: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x",
		 gpfs_ds_handle.handle_size, gpfs_ds_handle.handle_type,
		 gpfs_ds_handle.handle_key_size, fh[0], fh[1], fh[2], fh[3],
		 fh[4], fh[5], fh[6], fh[7], fh[8], fh[9]);

	/* We grant only one segment, and we want it back when file is
	   closed.*/
	res->return_on_close = true;
	res->last_segment = true;
	res->segment.offset = 0;
	res->segment.length = NFS4_UINT64_MAX;

	stripe_width = file_layout.lg_stripe_unit;
	util |= stripe_width | NFL4_UFLG_COMMIT_THRU_MDS;

	deviceid.fsal_id = file_layout.device_id.fsal_id;
	deviceid.device_id2 = file_layout.device_id.device_id2;
	deviceid.device_id4 = file_layout.device_id.device_id4;
	deviceid.devid = file_layout.device_id.devid;
	/* last_possible_byte = NFS4_UINT64_MAX; strict. set but unused */

	LogDebug(COMPONENT_PNFS,
		 "fsal_id %d seq %d fd %d fsid 0x%" PRIx64 " index %d",
		 deviceid.fsal_id, deviceid.device_id2, deviceid.device_id4,
		 deviceid.devid, file_layout.lg_first_stripe_index);

	ds_desc.addr = &gpfs_ds_handle;
	ds_desc.len = sizeof(struct gpfs_file_handle);

	nfs_status = FSAL_encode_file_layout(loc_body, &deviceid, util,
					     file_layout.lg_first_stripe_index,
					     0,
					     &req_ctx->ctx_export->export_id,
					     1, &ds_desc);
	if (nfs_status) {
		/* Distinguish "reply buffer too small" from a genuine
		 * encoding failure. */
		if (arg->maxcount <=
		    op_ctx->fsal_export->
		    exp_ops.fs_loc_body_size(op_ctx->fsal_export)) {
			nfs_status = NFS4ERR_TOOSMALL;
			LogDebug(COMPONENT_PNFS,
				 "Failed to encode nfsv4_1_file_layout.");
		} else
			LogCrit(COMPONENT_PNFS,
				"Failed to encode nfsv4_1_file_layout.");
		goto relinquish;
	}

	return NFS4_OK;

relinquish:
	/* If we failed in encoding the lo_content, relinquish what we
	   reserved for it. */
	lrarg.mountdirfd = gpfs_fs->root_fd;
	lrarg.handle = &gpfs_ds_handle;
	lrarg.args.lr_return_type = arg->type;
	lrarg.args.lr_reclaim = false;
	lrarg.args.lr_seg.clientid = 0;
	lrarg.args.lr_seg.layout_type = arg->type;
	lrarg.args.lr_seg.iomode = res->segment.io_mode;
	lrarg.args.lr_seg.offset = 0;
	lrarg.args.lr_seg.length = NFS4_UINT64_MAX;

	rc = gpfs_ganesha(OPENHANDLE_LAYOUT_RETURN, &lrarg);
	errsv = errno;
	LogDebug(COMPONENT_PNFS, "GPFSFSAL_layoutreturn rc %d", rc);
	if (rc != 0) {
		LogDebug(COMPONENT_PNFS, "GPFSFSAL_layoutget rc %d", rc);
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH");
	}

	return nfs_status;
}
struct fs_loc_arg fs_loc; fs_loc.fs_path_len = fs_locs->fs_root.pathname4_val->utf8string_len; fs_loc.fs_path = fs_locs->fs_root.pathname4_val->utf8string_val; fs_loc.fs_server_len = fs_locs->locations.locations_val-> server.server_val->utf8string_len; fs_loc.fs_server = fs_locs->locations.locations_val-> server.server_val->utf8string_val; fs_loc.fs_root_len = fs_locs->locations.locations_val-> rootpath.pathname4_val->utf8string_len; fs_loc.fs_root = fs_locs->locations.locations_val-> rootpath.pathname4_val->utf8string_val; fs_loc.mountdirfd = gpfs_fs->root_fd; fs_loc.handle = p_filehandle; rc = gpfs_ganesha(OPENHANDLE_FS_LOCATIONS, &fs_loc); errsv = errno; LogDebug(COMPONENT_FSAL, "gpfs_ganesha: FS_LOCATIONS returned, rc %d errsv %d", rc, errsv); if (rc) return fsalstat(ERR_FSAL_ATTRNOTSUPP, 0); fs_locs->fs_root.pathname4_val->utf8string_len = fs_loc.fs_path_len; fs_locs->locations.locations_val->server.server_val->utf8string_len = fs_loc.fs_server_len; fs_locs->locations.locations_val->rootpath.pathname4_val-> utf8string_len = fs_loc.fs_root_len; LogDebug(COMPONENT_FSAL,
/**
 * @brief GPFS up-call listener thread.
 *
 * Loops forever issuing the OPENHANDLE_INODE_UPDATE ioctl against the
 * filesystem's root fd; each successful call returns one event (reason,
 * flags, handle, stat buffer, lock or device-id payload) which is then
 * dispatched to the FSAL up-call vector.  Exits only on THREAD_STOP,
 * an interface-version mismatch, or failure to set up the up vector.
 *
 * @param Arg  struct gpfs_filesystem * of the filesystem to watch
 *
 * @return NULL when the thread terminates.
 */
void *GPFSFSAL_UP_Thread(void *Arg)
{
	struct gpfs_filesystem *gpfs_fs = Arg;
	struct fsal_up_vector *event_func;
	char thr_name[16];
	int rc = 0;
	struct pnfs_deviceid devid;
	struct stat buf;
	struct glock fl;
	struct callback_arg callback;
	struct gpfs_file_handle handle;
	int reason = 0;
	int flags = 0;
	unsigned int *fhP;
	int retry = 0;		/* consecutive EUNATCH failures seen */
	struct gsh_buffdesc key;
	uint32_t expire_time_attr = 0;
	uint32_t upflags;
	int errsv = 0;
	fsal_status_t fsal_status = {0,};

#ifdef _VALGRIND_MEMCHECK
	/* Quiet valgrind: the kernel fills these via the ioctl */
	memset(&handle, 0, sizeof(handle));
	memset(&buf, 0, sizeof(buf));
	memset(&fl, 0, sizeof(fl));
	memset(&devid, 0, sizeof(devid));
#endif

	snprintf(thr_name, sizeof(thr_name), "fsal_up_%"PRIu64".%"PRIu64,
		 gpfs_fs->fs->dev.major, gpfs_fs->fs->dev.minor);
	SetNameFunction(thr_name);

	LogFullDebug(COMPONENT_FSAL_UP,
		     "Initializing FSAL Callback context for %d.",
		     gpfs_fs->root_fd);

	/* wait for nfs init completion to get general_fridge
	 * initialized which is needed for processing some upcall events */
	nfs_init_wait();

	/* Start querying for events and processing. */
	while (1) {
		LogFullDebug(COMPONENT_FSAL_UP,
			     "Requesting event from FSAL Callback interface for %d.",
			     gpfs_fs->root_fd);

		/* Re-arm the handle buffer for each request; the ioctl
		 * writes the event's file handle into it. */
		handle.handle_size = GPFS_MAX_FH_SIZE;
		handle.handle_key_size = OPENHANDLE_KEY_LEN;
		handle.handle_version = OPENHANDLE_VERSION;

		callback.interface_version =
		    GPFS_INTERFACE_VERSION + GPFS_INTERFACE_SUB_VER;

		callback.mountdirfd = gpfs_fs->root_fd;
		callback.handle = &handle;
		callback.reason = &reason;
		callback.flags = &flags;
		callback.buf = &buf;
		callback.fl = &fl;
		callback.dev_id = &devid;
		callback.expire_attr = &expire_time_attr;

		/* Blocks in the kernel until an event is available */
		rc = gpfs_ganesha(OPENHANDLE_INODE_UPDATE, &callback);
		errsv = errno;

		if (rc != 0) {
			rc = -(rc);
			/* A large negated rc encodes the GPFS-side
			 * interface version on mismatch. */
			if (rc > GPFS_INTERFACE_VERSION) {
				LogFatal(COMPONENT_FSAL_UP,
					 "Ganesha version %d mismatch GPFS version %d.",
					 callback.interface_version, rc);
				return NULL;
			}

			if (errsv == EINTR)
				continue;

			LogCrit(COMPONENT_FSAL_UP,
				"OPENHANDLE_INODE_UPDATE failed for %d. rc %d, errno %d (%s) reason %d",
				gpfs_fs->root_fd, rc, errsv,
				strerror(errsv), reason);

			/* @todo 1000 retry logic will go away once the
			 * OPENHANDLE_INODE_UPDATE ioctl separates EINTR
			 * and EUNATCH.
			 */
			if (errsv == EUNATCH && ++retry > 1000)
				LogFatal(COMPONENT_FSAL_UP,
					 "GPFS file system %d has gone away.",
					 gpfs_fs->root_fd);

			continue;
		}

		retry = 0;

		/* flags is int, but only the least significant 2 bytes
		 * are valid. We are getting random bits into the upper
		 * 2 bytes! Workaround this until the kernel module
		 * gets fixed. */
		flags = flags & 0xffff;

		LogDebug(COMPONENT_FSAL_UP,
			 "inode update: rc %d reason %d update ino %"
			 PRId64 " flags:%x", rc, reason,
			 callback.buf->st_ino, flags);

		LogFullDebug(COMPONENT_FSAL_UP,
			     "inode update: flags:%x callback.handle:%p handle size = %u handle_type:%d handle_version:%d key_size = %u handle_fsid=%X.%X f_handle:%p expire: %d",
			     *callback.flags, callback.handle,
			     callback.handle->handle_size,
			     callback.handle->handle_type,
			     callback.handle->handle_version,
			     callback.handle->handle_key_size,
			     callback.handle->handle_fsid[0],
			     callback.handle->handle_fsid[1],
			     callback.handle->f_handle, expire_time_attr);

		/* Normalize the version the kernel returned */
		callback.handle->handle_version = OPENHANDLE_VERSION;

		fhP = (int *)&(callback.handle->f_handle[0]);
		LogFullDebug(COMPONENT_FSAL_UP,
			     " inode update: handle %08x %08x %08x %08x %08x %08x %08x",
			     fhP[0], fhP[1], fhP[2], fhP[3],
			     fhP[4], fhP[5], fhP[6]);

		/* Here is where we decide what type of event this is
		 * ... open,close,read,...,invalidate? */
		key.addr = &handle;
		key.len = handle.handle_key_size;

		LogDebug(COMPONENT_FSAL_UP, "Received event to process for %d",
			 gpfs_fs->root_fd);

		/* We need valid up_vector while processing some of the
		 * events below. Setup up vector and hold the mutex while
		 * processing the event for the entire duration. */
		PTHREAD_MUTEX_lock(&gpfs_fs->upvector_mutex);
		if (!setup_up_vector(gpfs_fs)) {
			PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);
			return NULL;
		}
		event_func = gpfs_fs->up_vector;

		switch (reason) {
		case INODE_LOCK_GRANTED:	/* Lock Event */
		case INODE_LOCK_AGAIN:	/* Lock Event */
		{
			LogMidDebug(COMPONENT_FSAL_UP,
				    "%s: owner %p pid %d type %d start %lld len %lld",
				    reason == INODE_LOCK_GRANTED ?
				    "inode lock granted" : "inode lock again",
				    fl.lock_owner, fl.flock.l_pid,
				    fl.flock.l_type,
				    (long long)fl.flock.l_start,
				    (long long)fl.flock.l_len);

			fsal_lock_param_t lockdesc = {
				.lock_sle_type = FSAL_POSIX_LOCK,
				.lock_type = fl.flock.l_type,
				.lock_start = fl.flock.l_start,
				.lock_length = fl.flock.l_len
			};
			if (reason == INODE_LOCK_AGAIN)
				fsal_status = up_async_lock_avail(
						general_fridge, event_func,
						&key, fl.lock_owner,
						&lockdesc, NULL, NULL);
			else
				fsal_status = up_async_lock_grant(
						general_fridge, event_func,
						&key, fl.lock_owner,
						&lockdesc, NULL, NULL);
		}
		break;

		case BREAK_DELEGATION:	/* Delegation Event */
			LogDebug(COMPONENT_FSAL_UP,
				 "delegation recall: flags:%x ino %" PRId64,
				 flags, callback.buf->st_ino);
			fsal_status = up_async_delegrecall(general_fridge,
							   event_func, &key,
							   NULL, NULL);
			break;

		case LAYOUT_FILE_RECALL:	/* Layout file recall Event */
		{
			/* Recall every layout segment on this file */
			struct pnfs_segment segment = {
				.offset = 0,
				.length = UINT64_MAX,
				.io_mode = LAYOUTIOMODE4_ANY
			};
			LogDebug(COMPONENT_FSAL_UP,
				 "layout file recall: flags:%x ino %" PRId64,
				 flags, callback.buf->st_ino);

			fsal_status = up_async_layoutrecall(
					general_fridge, event_func, &key,
					LAYOUT4_NFSV4_1_FILES, false,
					&segment, NULL, NULL, NULL, NULL);
		}
		break;

		case LAYOUT_RECALL_ANY:	/* Recall all layouts Event */
			LogDebug(COMPONENT_FSAL_UP,
				 "layout recall any: flags:%x ino %" PRId64,
				 flags, callback.buf->st_ino);
			/**
			 * @todo This functionality needs to be implemented as a
			 * bulk FSID CB_LAYOUTRECALL. RECALL_ANY isn't suitable
			 * since it can't be restricted to just one FSAL. Also
			 * an FSID LAYOUTRECALL lets you have multiple
			 * filesystems exported from one FSAL and not yank
			 * layouts on all of them when you only need to recall
			 * them for one.
			 */
			break;

		case LAYOUT_NOTIFY_DEVICEID:	/* Device update Event */
			LogDebug(COMPONENT_FSAL_UP,
				 "layout dev update: flags:%x ino %" PRId64
				 " seq %d fd %d fsid 0x%" PRIx64,
				 flags, callback.buf->st_ino,
				 devid.device_id2, devid.device_id4,
				 devid.devid);

			/* Delete notification applies to all devices of
			 * this FSAL, so send a zeroed devid. */
			memset(&devid, 0, sizeof(devid));
			devid.fsal_id = FSAL_ID_GPFS;

			fsal_status = up_async_notify_device(general_fridge,
						event_func,
						NOTIFY_DEVICEID4_DELETE_MASK,
						LAYOUT4_NFSV4_1_FILES,
						&devid, true, NULL, NULL);
			break;

		case INODE_UPDATE:	/* Update Event */
		{
			struct attrlist attr;

			LogMidDebug(COMPONENT_FSAL_UP,
				    "inode update: flags:%x update ino %"
				    PRId64 " n_link:%d",
				    flags, callback.buf->st_ino,
				    (int)callback.buf->st_nlink);

			/** @todo: This notification is completely
			 * asynchronous.  If we happen to change some
			 * of the attributes later, we end up over
			 * writing those with these possibly stale
			 * values as we don't know when we get to
			 * update with these up call values. We should
			 * probably use time stamp or let the up call
			 * always provide UP_TIMES flag in which case
			 * we can compare the current ctime vs up call
			 * provided ctime before updating the
			 * attributes.
			 *
			 * For now, we think size attribute is more
			 * important than others, so invalidate the
			 * attributes and let ganesha fetch attributes
			 * as needed if this update includes a size
			 * change. We are careless for other attribute
			 * changes, and we may end up with stale values
			 * until this gets fixed!
			 */
			if (flags & (UP_SIZE | UP_SIZE_BIG)) {
				fsal_status = event_func->invalidate(
					event_func, &key,
					FSAL_UP_INVALIDATE_CACHE);
				break;
			}

			/* Check for accepted flags, any other changes
			   just invalidate. */
			if (flags &
			    ~(UP_SIZE | UP_NLINK | UP_MODE | UP_OWN |
			      UP_TIMES | UP_ATIME | UP_SIZE_BIG)) {
				fsal_status = event_func->invalidate(
					event_func, &key,
					FSAL_UP_INVALIDATE_CACHE);
			} else {
				/* buf may not have all attributes set.
				 * Set the mask to what is changed */
				attr.valid_mask = 0;
				attr.acl = NULL;
				upflags = 0;
				if (flags & UP_SIZE)
					attr.valid_mask |=
					    ATTR_CHGTIME | ATTR_CHANGE |
					    ATTR_SIZE | ATTR_SPACEUSED;
				if (flags & UP_SIZE_BIG) {
					attr.valid_mask |=
					    ATTR_CHGTIME | ATTR_CHANGE |
					    ATTR_SIZE | ATTR_SPACEUSED;
					/* size may only grow; tell the
					 * cache it is monotonic */
					upflags |=
					    fsal_up_update_filesize_inc |
					    fsal_up_update_spaceused_inc;
				}
				if (flags & UP_MODE)
					attr.valid_mask |=
					    ATTR_CHGTIME | ATTR_CHANGE |
					    ATTR_MODE;
				if (flags & UP_OWN)
					attr.valid_mask |=
					    ATTR_CHGTIME | ATTR_CHANGE |
					    ATTR_OWNER | ATTR_GROUP |
					    ATTR_MODE;
				if (flags & UP_TIMES)
					attr.valid_mask |=
					    ATTR_CHGTIME | ATTR_CHANGE |
					    ATTR_ATIME | ATTR_CTIME |
					    ATTR_MTIME;
				if (flags & UP_ATIME)
					attr.valid_mask |=
					    ATTR_CHGTIME | ATTR_CHANGE |
					    ATTR_ATIME;
				if (flags & UP_NLINK)
					attr.valid_mask |= ATTR_NUMLINKS;

				attr.request_mask = attr.valid_mask;

				attr.expire_time_attr = expire_time_attr;

				posix2fsal_attributes(&buf, &attr);

				fsal_status = event_func->update(
					event_func, &key, &attr, upflags);

				/* Link count hit zero: queue an async
				 * nlink update so the object can be
				 * cleaned up. */
				if ((flags & UP_NLINK) &&
				    (attr.numlinks == 0)) {
					upflags = fsal_up_nlink;
					attr.valid_mask = 0;
					attr.request_mask = 0;
					fsal_status = up_async_update
					    (general_fridge, event_func,
					     &key, &attr, upflags,
					     NULL, NULL);
				}
			}
		}
		break;

		case THREAD_STOP:  /* We wanted to terminate this thread */
			LogDebug(COMPONENT_FSAL_UP,
				 "Terminating the GPFS up call thread for %d",
				 gpfs_fs->root_fd);
			PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);
			return NULL;

		case INODE_INVALIDATE:
			LogMidDebug(COMPONENT_FSAL_UP,
				    "inode invalidate: flags:%x update ino %"
				    PRId64, flags, callback.buf->st_ino);

			upflags = FSAL_UP_INVALIDATE_CACHE;
			fsal_status = event_func->invalidate_close(
						event_func, &key, upflags);
			break;

		case THREAD_PAUSE:
			/* File system image is probably going away, but
			 * we don't need to do anything here as we
			 * eventually get other errors that stop this
			 * thread. */
			PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);
			continue;	/* get next event */

		default:
			PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);
			LogWarn(COMPONENT_FSAL_UP, "Unknown event: %d",
				reason);
			continue;
		}
		PTHREAD_MUTEX_unlock(&gpfs_fs->upvector_mutex);

		if (FSAL_IS_ERROR(fsal_status) &&
		    fsal_status.major != ERR_FSAL_NOENT) {
			LogWarn(COMPONENT_FSAL_UP,
				"Event %d could not be processed for fd %d rc %s",
				reason, gpfs_fs->root_fd,
				fsal_err_txt(fsal_status));
		}
	}

	return NULL;
}				/* GPFSFSAL_UP_Thread */
/**
 * GPFSFSAL_lock_op:
 * Lock/unlock/test an owner independent (anonymous) lock for a region in
 * a file.
 *
 * \param obj_hdl (input):
 *        File handle of the file to lock.
 * \param p_owner (input):
 *        Owner for the requested lock; Opaque to FSAL.
 * \param lock_op (input):
 *        Can be either FSAL_OP_LOCKT, FSAL_OP_LOCK, FSAL_OP_UNLOCK.
 *        The operations are test if a file region is locked, lock a
 *        file region, unlock a file region.
 * \param request_lock (input):
 *        Lock information, type, byte range....
 * \param conflicting_lock (output):
 *        Conflicting lock information, type, byte range....
 *
 * \return Major error codes:
 *      - ERR_FSAL_NO_ERROR: no error.
 *      - ERR_FSAL_FAULT: One of the input parameters is NULL.
 *      - ERR_FSAL_NOTSUPP: unsupported lock operation or lock type.
 *      - ERR_FSAL_BLOCKED: GPFS queued a blocking lock request.
 *      - ERR_FSAL_PERM: lock_op was FSAL_OP_LOCKT and the result was
 *        that the operation would not be possible.
 */
fsal_status_t GPFSFSAL_lock_op(struct fsal_obj_handle *obj_hdl,	/* IN */
			       void *p_owner,	/* IN */
			       fsal_lock_op_t lock_op,	/* IN */
			       fsal_lock_param_t request_lock,	/* IN */
			       fsal_lock_param_t *conflicting_lock) /* OUT */
{
	int retval;
	struct glock glock_args;
	struct set_get_lock_arg gpfs_sg_arg;
	struct gpfs_fsal_obj_handle *myself;

	if (obj_hdl == NULL) {
		LogDebug(COMPONENT_FSAL, "obj_hdl arg is NULL.");
		return fsalstat(ERR_FSAL_FAULT, 0);
	}
	if (p_owner == NULL) {
		LogDebug(COMPONENT_FSAL, "p_owner arg is NULL.");
		return fsalstat(ERR_FSAL_FAULT, 0);
	}

	/* A lock test is useless without somewhere to return the conflict */
	if (conflicting_lock == NULL && lock_op == FSAL_OP_LOCKT) {
		LogDebug(COMPONENT_FSAL,
			 "Conflicting_lock argument can't be NULL with lock_op = LOCKT");
		return fsalstat(ERR_FSAL_FAULT, 0);
	}

	myself = container_of(obj_hdl, struct gpfs_fsal_obj_handle,
			      obj_handle);

	LogFullDebug(COMPONENT_FSAL,
		     "Locking: op:%d sle_type:%d type:%d start:%llu length:%llu owner:%p",
		     lock_op, request_lock.lock_sle_type,
		     request_lock.lock_type,
		     (unsigned long long)request_lock.lock_start,
		     (unsigned long long)request_lock.lock_length, p_owner);

	/* Map the FSAL lock operation onto the fcntl-style command GPFS
	 * expects. */
	if (lock_op == FSAL_OP_LOCKT)
		glock_args.cmd = F_GETLK;
	else if (lock_op == FSAL_OP_LOCK || lock_op == FSAL_OP_UNLOCK)
		glock_args.cmd = F_SETLK;
	else if (lock_op == FSAL_OP_LOCKB)
		glock_args.cmd = F_SETLKW;
	else if (lock_op == FSAL_OP_CANCEL)
		glock_args.cmd = GPFS_F_CANCELLK;
	else {
		LogDebug(COMPONENT_FSAL,
			 "ERROR: Lock operation requested was not TEST, GET, or SET.");
		return fsalstat(ERR_FSAL_NOTSUPP, 0);
	}

	if (request_lock.lock_type == FSAL_LOCK_R)
		glock_args.flock.l_type = F_RDLCK;
	else if (request_lock.lock_type == FSAL_LOCK_W)
		glock_args.flock.l_type = F_WRLCK;
	else {
		LogDebug(COMPONENT_FSAL,
			 "ERROR: The requested lock type was not read or write.");
		return fsalstat(ERR_FSAL_NOTSUPP, 0);
	}

	/* An unlock is expressed as F_UNLCK regardless of lock type */
	if (lock_op == FSAL_OP_UNLOCK)
		glock_args.flock.l_type = F_UNLCK;

	glock_args.flock.l_len = request_lock.lock_length;
	glock_args.flock.l_start = request_lock.lock_start;
	glock_args.flock.l_whence = SEEK_SET;

	/* Note: the original code assigned lfd twice; one assignment
	 * suffices. */
	glock_args.lfd = myself->u.file.fd;
	glock_args.lock_owner = p_owner;

	gpfs_sg_arg.mountdirfd = gpfs_get_root_fd(obj_hdl->export);
	gpfs_sg_arg.lock = &glock_args;

	errno = 0;

	if (request_lock.lock_sle_type == FSAL_LEASE_LOCK)
		retval = gpfs_ganesha(OPENHANDLE_SET_DELEGATION,
				      &gpfs_sg_arg);
	else
		retval = gpfs_ganesha(lock_op == FSAL_OP_LOCKT ?
				      OPENHANDLE_GET_LOCK :
				      OPENHANDLE_SET_LOCK, &gpfs_sg_arg);

	if (retval) {
		/* Save errno before the follow-up GET_LOCK ioctl can
		 * clobber it. */
		int errsv = errno;

		if ((conflicting_lock != NULL) &&
		    (lock_op == FSAL_OP_LOCK || lock_op == FSAL_OP_LOCKB)) {
			int retval2;

			/* Fetch the current holder's details so the
			 * caller can report the conflict. */
			glock_args.cmd = F_GETLK;
			retval2 = gpfs_ganesha(OPENHANDLE_GET_LOCK,
					       &gpfs_sg_arg);
			if (retval2) {
				LogCrit(COMPONENT_FSAL,
					"After failing a set lock request, An attempt to get the current owner details also failed.");
			} else {
				conflicting_lock->lock_length =
				    glock_args.flock.l_len;
				conflicting_lock->lock_start =
				    glock_args.flock.l_start;
				conflicting_lock->lock_type =
				    glock_args.flock.l_type;
			}
		}

		/* GPFS reports a queued blocking lock with retval == 1 */
		if (retval == 1) {
			LogFullDebug(COMPONENT_FSAL,
				     "GPFS queued blocked lock");
			return fsalstat(ERR_FSAL_BLOCKED, 0);
		}

		LogFullDebug(COMPONENT_FSAL,
			     "GPFS lock operation failed error %d %d (%s)",
			     retval, errsv, strerror(errsv));
		return fsalstat(posix2fsal_error(errsv), errsv);
	}

	/* F_UNLCK is returned then the tested operation would be possible. */
	if (conflicting_lock != NULL) {
		if (lock_op == FSAL_OP_LOCKT &&
		    glock_args.flock.l_type != F_UNLCK) {
			conflicting_lock->lock_length =
			    glock_args.flock.l_len;
			conflicting_lock->lock_start =
			    glock_args.flock.l_start;
			conflicting_lock->lock_type =
			    glock_args.flock.l_type;
		} else {
			conflicting_lock->lock_length = 0;
			conflicting_lock->lock_start = 0;
			conflicting_lock->lock_type = FSAL_NO_LOCK;
		}
	}

	return fsalstat(ERR_FSAL_NO_ERROR, 0);
}
/**
 * @brief Read plus from a data-server handle.
 *
 * NFSv4.2 data server handles are disjoint from normal
 * filehandles (in Ganesha, there is a ds_flag in the filehandle_v4_t
 * structure) and do not get loaded into cache_inode or processed the
 * normal way.
 *
 * @param[in] ds_pub FSAL DS handle
 * @param[in] req_ctx Credentials
 * @param[in] stateid The stateid supplied with the READ operation,
 *                    for validation
 * @param[in] offset The offset at which to read
 * @param[in] requested_length Length of read requested (and size of buffer)
 * @param[out] buffer The buffer to which to store read data
 * @param[out] supplied_length Length of data read
 * @param[out] eof True on end of file
 * @param[out] info IO info
 *
 * @return An NFSv4.2 status code.
 */
static nfsstat4 ds_read_plus(struct fsal_ds_handle *const ds_pub,
			     struct req_op_context *const req_ctx,
			     const stateid4 *stateid, const offset4 offset,
			     const count4 requested_length,
			     void *const buffer,
			     const count4 supplied_length,
			     bool * const end_of_file,
			     struct io_info *info)
{
	/* The private 'full' DS handle */
	struct gpfs_ds *ds = container_of(ds_pub, struct gpfs_ds, ds);
	struct gpfs_file_handle *gpfs_handle = &ds->wire;
	/* The amount actually read */
	int amount_read = 0;
	struct dsread_arg rarg;
	unsigned int *fh;
	/* Out-param for the ioctl; presumably the kernel reports the file
	 * size here — TODO confirm it is always written on success and on
	 * the ENODATA (hole) path. */
	uint64_t filesize;
	int errsv = 0;

	fh = (int *)&(gpfs_handle->f_handle);

	rarg.mountdirfd = ds->gpfs_fs->root_fd;
	rarg.handle = gpfs_handle;
	rarg.bufP = buffer;
	rarg.offset = offset;
	rarg.length = requested_length;
	rarg.filesize = &filesize;
	/* IO_SKIP_HOLE asks GPFS to fail the read with ENODATA instead of
	 * returning zero-filled data when the range is a hole. */
	rarg.options = IO_SKIP_HOLE;

	LogDebug(COMPONENT_PNFS,
		 "fh len %d type %d key %d: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 gpfs_handle->handle_size, gpfs_handle->handle_type,
		 gpfs_handle->handle_key_size, fh[0], fh[1], fh[2], fh[3],
		 fh[4], fh[5], fh[6], fh[7], fh[8], fh[9]);

	amount_read = gpfs_ganesha(OPENHANDLE_DS_READ, &rarg);
	errsv = errno;
	if (amount_read < 0) {
		if (errsv == EUNATCH)
			LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH");
		if (errsv != ENODATA)
			return posix2nfs4_error(errsv);

		/* errsv == ENODATA: the requested range is a hole */
		info->io_content.what = NFS4_CONTENT_HOLE;
		info->io_content.hole.di_offset = offset;   /*offset of hole*/
		info->io_content.hole.di_length = requested_length;/*hole len*/

		/* Clamp the hole to the end of file and flag EOF */
		if ((requested_length + offset) > filesize) {
			/* NOTE(review): filesize - offset is uint64_t
			 * arithmetic truncated into an int; if offset >
			 * filesize the wrapped value may not be negative as
			 * the check below assumes — verify. */
			amount_read = filesize - offset;
			if (amount_read < 0) {
				amount_read = 0;
				*end_of_file = true;
			} else if (amount_read < requested_length)
				*end_of_file = true;
			info->io_content.hole.di_length = amount_read;
		}
	} else {
		/* Normal data read; a short or zero-length read means EOF */
		info->io_content.what = NFS4_CONTENT_DATA;
		info->io_content.data.d_offset = offset + amount_read;
		info->io_content.data.d_data.data_len = amount_read;
		info->io_content.data.d_data.data_val = buffer;
		if (amount_read == 0 || amount_read < requested_length)
			*end_of_file = true;
	}

	return NFS4_OK;
}
/** * * @brief Write plus to a data-server handle. * * This performs a DS write not going through the data server unless * FILE_SYNC4 is specified, in which case it connects the filehandle * and performs an MDS write. * * @param[in] ds_pub FSAL DS handle * @param[in] req_ctx Credentials * @param[in] stateid The stateid supplied with the READ operation, * for validation * @param[in] offset The offset at which to read * @param[in] write_length Length of write requested (and size of buffer) * @param[out] buffer The buffer to which to store read data * @param[in] stability wanted Stability of write * @param[out] written_length Length of data written * @param[out] writeverf Write verifier * @param[out] stability_got Stability used for write (must be as * or more stable than request) * @param[in/out] info IO info * * @return An NFSv4.2 status code. */ static nfsstat4 ds_write_plus(struct fsal_ds_handle *const ds_pub, struct req_op_context *const req_ctx, const stateid4 *stateid, const offset4 offset, const count4 write_length, const void *buffer, const stable_how4 stability_wanted, count4 * const written_length, verifier4 * const writeverf, stable_how4 * const stability_got, struct io_info *info) { /* The private 'full' DS handle */ struct gpfs_ds *ds = container_of(ds_pub, struct gpfs_ds, ds); struct gpfs_file_handle *gpfs_handle = &ds->wire; /* The amount actually read */ int32_t amount_written = 0; struct dswrite_arg warg; unsigned int *fh; struct gsh_buffdesc key; int errsv = 0; fh = (int *)&(gpfs_handle->f_handle); memset(writeverf, 0, NFS4_VERIFIER_SIZE); warg.mountdirfd = ds->gpfs_fs->root_fd; warg.handle = gpfs_handle; warg.bufP = (char *)buffer; warg.offset = offset; warg.length = write_length; warg.stability_wanted = stability_wanted; warg.stability_got = stability_got; warg.verifier4 = (int32_t *) writeverf; warg.options = 0; if (info->io_content.what == NFS4_CONTENT_HOLE) warg.options = IO_SKIP_HOLE; LogDebug(COMPONENT_PNFS, "fh len %d type %d key %d: %08x 
%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", gpfs_handle->handle_size, gpfs_handle->handle_type, gpfs_handle->handle_key_size, fh[0], fh[1], fh[2], fh[3], fh[4], fh[5], fh[6], fh[7], fh[8], fh[9]); amount_written = gpfs_ganesha(OPENHANDLE_DS_WRITE, &warg); errsv = errno; if (amount_written < 0) { if (errsv == EUNATCH) LogFatal(COMPONENT_PNFS, "GPFS Returned EUNATCH"); return posix2nfs4_error(errsv); } LogDebug(COMPONENT_PNFS, "write verifier %d-%d\n", warg.verifier4[0], warg.verifier4[1]); key.addr = gpfs_handle; key.len = gpfs_handle->handle_key_size; req_ctx->fsal_export->up_ops->invalidate( req_ctx->fsal_export, &key, FSAL_UP_INVALIDATE_CACHE); set_gpfs_verifier(writeverf); *written_length = amount_written; return NFS4_OK; }