/*
 * FUSE lowlevel getattr handler for zfs-fuse.
 *
 * Resolves the znode for `ino`, stats the backing vnode with the caller's
 * credentials, and replies with the attributes.  The attr timeout passed to
 * fuse_reply_attr is 0.0, i.e. the kernel is told not to cache the result.
 * `fi` is unused.
 *
 * Returns 0 on success or an errno value; the caller is expected to turn a
 * non-zero return into fuse_reply_err.
 */
static int zfsfuse_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
	vfs_t *vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs_t *zfsvfs = vfs->vfs_data;

	ZFS_ENTER(zfsvfs);

	znode_t *znode;

	int error = zfs_zget(zfsvfs, ino, &znode, B_TRUE);
	if(error) {
		ZFS_EXIT(zfsvfs);
		/* If the inode we are trying to get was recently deleted
		   dnode_hold_impl will return EEXIST instead of ENOENT */
		return error == EEXIST ? ENOENT : error;
	}

	ASSERT(znode != NULL);
	vnode_t *vp = ZTOV(znode);
	ASSERT(vp != NULL);

	cred_t cred;
	zfsfuse_getcred(req, &cred);

	struct stat stbuf;
	error = zfsfuse_stat(vp, &stbuf, &cred);

	/* drop the vnode hold taken by zfs_zget before leaving the zfsvfs */
	VN_RELE(vp);
	ZFS_EXIT(zfsvfs);

	if(!error)
		fuse_reply_attr(req, &stbuf, 0.0);

	return error;
}
/*
 * fgetattr for an open NFS-backed file: stat through the open libnfs
 * filehandle and reply with the result.  libnfs access is serialized with
 * the export mutex; a negative libnfs result is converted to an errno.
 */
static void workspace_nfs_fgetattr(fuse_req_t req, struct workspace_fh_struct *fh)
{
    struct resource_struct *resource = fh->object->resource;
    struct net_nfs_export_struct *nfs_export = (struct net_nfs_export_struct *) resource->data;
    struct nfs_context *ctx = (struct nfs_context *) nfs_export->data;
    struct nfsfh *handle = (struct nfsfh *) fh->handle.data;
    struct stat st;
    int err = 0;

    logoutput("workspace_nfs_fgetattr");

    memset(&st, 0, sizeof(struct stat));

    pthread_mutex_lock(&nfs_export->mutex);
    err = nfs_fstat(ctx, handle, &st);
    pthread_mutex_unlock(&nfs_export->mutex);

    if (err == 0) {
	fuse_reply_attr(req, &st, fs_options.attr_timeout);
    } else {
	fuse_reply_err(req, -err);
    }
}
/*
 * FUSE SETATTR handler.
 *
 * Looks up the node and filesystem path for hdr->nodeid, then applies the
 * requested changes.  Only size (via truncate) and atime/mtime (via
 * utimensat) are honored; mode/uid/gid are deliberately not implemented.
 * Returns the attribute reply on success or a negative errno.
 */
static int handle_setattr(struct fuse* fuse, struct fuse_handler* handler,
        const struct fuse_in_header *hdr, const struct fuse_setattr_in *req)
{
    struct node* node;
    char path[PATH_MAX];
    struct timespec times[2];

    pthread_mutex_lock(&fuse->lock);
    node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid,
            path, sizeof(path));
    TRACE("[%d] SETATTR fh=%llx valid=%x @ %llx (%s)\n", handler->token,
            req->fh, req->valid, hdr->nodeid, node ? node->name : "?");
    pthread_mutex_unlock(&fuse->lock);

    if (!node) {
        return -ENOENT;
    }

    /* XXX: incomplete implementation on purpose.
     * chmod/chown should NEVER be implemented.*/

    if ((req->valid & FATTR_SIZE) && truncate(path, req->size) < 0) {
        return -errno;
    }

    /* Handle changing atime and mtime. If FATTR_ATIME and FATTR_ATIME_NOW
     * are both set, then set it to the current time. Else, set it to the
     * time specified in the request. Same goes for mtime. Use utimensat(2)
     * as it allows ATIME and MTIME to be changed independently, and has
     * nanosecond resolution which fuse also has.
     */
    if (req->valid & (FATTR_ATIME | FATTR_MTIME)) {
        /* UTIME_OMIT leaves a timestamp untouched unless explicitly set below */
        times[0].tv_nsec = UTIME_OMIT;
        times[1].tv_nsec = UTIME_OMIT;
        if (req->valid & FATTR_ATIME) {
            if (req->valid & FATTR_ATIME_NOW) {
              /* tv_sec is ignored by utimensat when tv_nsec is UTIME_NOW */
              times[0].tv_nsec = UTIME_NOW;
            } else {
              times[0].tv_sec = req->atime;
              times[0].tv_nsec = req->atimensec;
            }
        }
        if (req->valid & FATTR_MTIME) {
            if (req->valid & FATTR_MTIME_NOW) {
                times[1].tv_nsec = UTIME_NOW;
            } else {
                times[1].tv_sec = req->mtime;
                times[1].tv_nsec = req->mtimensec;
            }
        }
        TRACE("[%d] Calling utimensat on %s with atime %ld, mtime=%ld\n",
                handler->token, path, times[0].tv_sec, times[1].tv_sec);
        /* dirfd -1 is ignored by utimensat for absolute paths —
         * NOTE(review): assumes lookup_node_and_path_by_id_locked always
         * produces an absolute path; verify */
        if (utimensat(-1, path, times, 0) < 0) {
            return -errno;
        }
    }
    return fuse_reply_attr(fuse, hdr->unique, hdr->nodeid, path);
}
// setattr for blobs: metadata is effectively immutable.  Requests against a
// directory entry (id or version 0) or any attempt to change the size are
// refused with EPERM; anything else is a no-op that echoes the attributes
// back with a 1-second timeout.
void blob_ll_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
                     int to_set, struct fuse_file_info * /*fi*/)
{
    DBG("attr mask = " << to_set);

    const bool is_dir_entry = ino_id(ino) == 0 || ino_version(ino) == 0;
    const bool wants_resize = (to_set & FUSE_SET_ATTR_SIZE) != 0;

    if (is_dir_entry || wants_resize)
        fuse_reply_err(req, EPERM);
    else
        fuse_reply_attr(req, attr, 1.0);
}
/*
 * getattr handler for the MooseFS meta filesystem.
 *
 * Answers directly for the MASTERINFO inode and the fixed meta inode range,
 * and fetches detached-file attributes from the master for trash/reserved
 * inodes.  Anything else is ENOENT.
 */
void mfs_meta_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) {
	struct stat o_stbuf;
	fuse_ino_t itype;
	(void)fi;

	if (ino==MASTERINFO_INODE) {
		memset(&o_stbuf, 0, sizeof(struct stat));
		mfs_attr_to_stat(ino,masterinfoattr,&o_stbuf);
		/* masterinfo attributes are static: long (1h) timeout */
		fuse_reply_attr(req, &o_stbuf, 3600.0);
		return;
	}

	if (ino>=META_INODE_MIN && ino<=META_INODE_MAX) {
		memset(&o_stbuf, 0, sizeof(struct stat));
		mfs_meta_stat(ino,&o_stbuf);
		fuse_reply_attr(req, &o_stbuf, attr_cache_timeout);
		return;
	}

	itype = ino & INODE_TYPE_MASK;
	if (itype == INODE_TYPE_TRASH || itype == INODE_TYPE_RESERVED) {
		/* trash and reserved inodes share the detached-attr path */
		uint8_t attr[35];
		int status = mfs_errorconv(fs_getdetachedattr(ino & INODE_VALUE_MASK,attr));

		if (status!=0) {
			fuse_reply_err(req, status);
		} else {
			memset(&o_stbuf, 0, sizeof(struct stat));
			mfs_attr_to_stat(ino,attr,&o_stbuf);
			fuse_reply_attr(req, &o_stbuf, attr_cache_timeout);
		}
		return;
	}

	fuse_reply_err(req, ENOENT);
}
/* getattr: stat the inode and reply with a 1s attr timeout, or ENOENT. */
static void lfs_ll_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
	struct stat attr;

	(void) fi;
	memset(&attr, 0, sizeof(attr));

	if (lfs_stat(ino, &attr) != -1) {
		fuse_reply_attr(req, &attr, 1.0);
	} else {
		fuse_reply_err(req, ENOENT);
	}
}
// getattr: look the blob up and reply with its attributes (1s timeout),
// or ENOENT when no such blob exists.
void blob_ll_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info * /*fi*/)
{
    struct stat st;
    memset(&st, 0, sizeof(st));

    if (blob_stat(ino, &st) == 0)
        fuse_reply_attr(req, &st, 1.0);
    else
        fuse_reply_err(req, ENOENT);
}
/*
 * Passthrough getattr: stat the node through its cached fd using
 * AT_EMPTY_PATH (fstatat on the fd itself), without following symlinks.
 */
static void lo_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
	struct stat attr;

	(void) fi;

	if (fstatat(lo_fd(req, ino), "", &attr,
		    AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW) == -1) {
		fuse_reply_err(req, errno);
		return;
	}

	fuse_reply_attr(req, &attr, 1.0);
}
/* getattr: stat the blockstore inode; negative result maps to ENOENT. */
static void blockstore_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
	struct stat st;

	(void) fi;
	memset(&st, 0, sizeof(st));

	if (blockstore_stat(ino, &st) >= 0)
		fuse_reply_attr(req, &st, 1.0);
	else
		fuse_reply_err(req, ENOENT);
}
/*
 * getattr for squashfuse: resolve the inode (sqfs_ll_iget replies with an
 * error itself on failure), stat it, and answer with the FUSE inode number
 * patched into st_ino.
 */
static void sqfs_ll_op_getattr(fuse_req_t req, fuse_ino_t ino,
		struct fuse_file_info *fi)
{
	sqfs_ll_i lli;
	struct stat st;

	if (sqfs_ll_iget(req, &lli, ino))
		return; /* iget already sent the error reply */

	if (sqfs_stat(&lli.ll->fs, &lli.inode, &st) != 0) {
		fuse_reply_err(req, ENOENT);
		return;
	}

	st.st_ino = ino;
	fuse_reply_attr(req, &st, SQFS_TIMEOUT);
}
/*
 * getattr: fill a stat buffer via fill_stat and reply with either the
 * attributes (STDTIMEOUT) or the mapped error.  The reply return code is
 * checked with fuse_reply_assert, as elsewhere in this server.
 */
static void serve_getattr(fuse_req_t req, fuse_ino_t fuse_ino,
                          struct fuse_file_info * fi)
{
	struct stat stbuf;
	int r;
	(void) fi;

	Dprintf("%s(ino = %lu)\n", __FUNCTION__, fuse_ino);

	memset(&stbuf, 0, sizeof(stbuf));
	r = fill_stat(reqmount(req), fusecfsino(req, fuse_ino), fuse_ino, &stbuf);
	r = (r < 0) ? fuse_reply_err(req, -r)
	            : fuse_reply_attr(req, &stbuf, STDTIMEOUT);
	fuse_reply_assert(!r);
}
static int handle_getattr(struct fuse* fuse, struct fuse_handler* handler, const struct fuse_in_header *hdr, const struct fuse_getattr_in *req) { struct node* node; char path[PATH_MAX]; pthread_mutex_lock(&fuse->lock); node = lookup_node_and_path_by_id_locked(fuse, hdr->nodeid, path, sizeof(path)); TRACE("[%d] GETATTR flags=%x fh=%llx @ %llx (%s)\n", handler->token, req->getattr_flags, req->fh, hdr->nodeid, node ? node->name : "?"); pthread_mutex_unlock(&fuse->lock); if (!node) { return -ENOENT; } return fuse_reply_attr(fuse, hdr->unique, hdr->nodeid, path); }
/*
 * FUSE setattr handler for the compiz option filesystem.
 *
 * Only truncation to zero length is permitted: FUSE_SET_ATTR_SIZE must be
 * among the requested changes and the requested size must be 0; every other
 * request is refused with EACCES.  A successful truncate only marks the
 * inode with FUSE_INODE_FLAG_TRUNC (the content reset is handled elsewhere)
 * and replies with the inode's current attributes.
 */
static void compiz_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr,
			   int to_set, struct fuse_file_info *fi)
{
    CompDisplay *d = (CompDisplay *)fuse_req_userdata(req);
    FuseInode *inode;

    /* lookup restricted to writable inodes */
    inode = fuseFindInode(inodes, ino, WRITE_MASK);
    if (inode) {
	struct stat stbuf;

	/* the size flag must be present; note that other flags set
	   alongside SIZE are silently ignored, not rejected */
	if ((to_set & FUSE_SET_ATTR_SIZE) != FUSE_SET_ATTR_SIZE) {
	    fuse_reply_err(req, EACCES);
	    return;
	}

	/* only truncate-to-zero is supported */
	if (attr->st_size != 0) {
	    fuse_reply_err(req, EACCES);
	    return;
	}

	inode->flags |= FUSE_INODE_FLAG_TRUNC;

	memset(&stbuf, 0, sizeof (stbuf));
	fuseInodeStat(d, inode, &stbuf);

	fuse_reply_attr(req, &stbuf, 1.0);
    } else {
	fuse_reply_err(req, ENOENT);
    }
}
/*
 * FUSE getattr handler for the compiz option filesystem: find the inode
 * (any access mask) and reply with its stat data, or ENOENT.
 */
static void compiz_getattr(fuse_req_t req, fuse_ino_t ino,
			   struct fuse_file_info *fi)
{
    CompDisplay *d = (CompDisplay *)fuse_req_userdata(req);
    FuseInode   *inode;
    struct stat stbuf;

    inode = fuseFindInode(inodes, ino, ~0);
    if (!inode) {
	fuse_reply_err(req, ENOENT);
	return;
    }

    memset(&stbuf, 0, sizeof (stbuf));
    fuseInodeStat(d, inode, &stbuf);
    fuse_reply_attr(req, &stbuf, 1.0);
}
/*
 * FUSE setattr handler.
 *
 * First rejects any request containing attributes the underlying filesystem
 * does not advertise (ENOSYS).  Size changes go through open/truncate/close;
 * uid/gid/mode/mtime/atime changes are batched into one set_metadata2 call.
 * On success, replies with the refreshed attributes.
 *
 * Fixes vs previous revision:
 *  - atime support was probed with FSTITCH_FEATURE_MTIME, so ATIME requests
 *    were accepted whenever mtime was supported even though the metadata
 *    write below uses FSTITCH_FEATURE_ATIME.
 *  - a successful close() no longer clobbers a truncate() failure code.
 */
static void serve_setattr(fuse_req_t req, fuse_ino_t fuse_ino, struct stat * attr,
                          int to_set, struct fuse_file_info * fi)
{
	inode_t cfs_ino = fusecfsino(req, fuse_ino);
	int supported = FUSE_SET_ATTR_SIZE;
	bool uid_supported = feature_supported(reqcfs(req), FSTITCH_FEATURE_UID);
	bool gid_supported = feature_supported(reqcfs(req), FSTITCH_FEATURE_GID);
	bool perms_supported = feature_supported(reqcfs(req), FSTITCH_FEATURE_UNIX_PERM);
	bool mtime_supported = feature_supported(reqcfs(req), FSTITCH_FEATURE_MTIME);
	/* bug fix: was FSTITCH_FEATURE_MTIME */
	bool atime_supported = feature_supported(reqcfs(req), FSTITCH_FEATURE_ATIME);
	struct stat stbuf;
	int r;
	Dprintf("%s(ino = %lu, to_set = %d)\n", __FUNCTION__, fuse_ino, to_set);

	if (uid_supported)
		supported |= FUSE_SET_ATTR_UID;
	if (gid_supported)
		supported |= FUSE_SET_ATTR_GID;
	if (perms_supported)
		supported |= FUSE_SET_ATTR_MODE;
	if (mtime_supported)
		supported |= FUSE_SET_ATTR_MTIME;
	if (atime_supported)
		supported |= FUSE_SET_ATTR_ATIME;

	/* refuse the whole request if it contains any unsupported attribute */
	if (to_set != (to_set & supported))
	{
		r = fuse_reply_err(req, ENOSYS);
		fuse_reply_assert(!r);
		return;
	}

	if (to_set & FUSE_SET_ATTR_SIZE)
	{
		fdesc_t * fdesc;
		uint32_t size;

		size = (uint32_t) attr->st_size;
		assert(size == attr->st_size);
		Dprintf("\tsize = %u\n", size);

		if (fi)
			fdesc = fi_get_fdesc(fi);
		else
		{
			/* no open file handle supplied: open transiently */
			r = CALL(reqcfs(req), open, cfs_ino, 0, &fdesc);
			if (r < 0)
			{
				r = fuse_reply_err(req, -r);
				fuse_reply_assert(!r);
				return;
			}
			fdesc->common->parent = (inode_t) hash_map_find_val(reqmount(req)->parents, (void *) cfs_ino);
			assert(fdesc->common->parent != INODE_NONE);
		}

		r = CALL(reqcfs(req), truncate, fdesc, size);
		if (!fi)
		{
			/* bug fix: keep the truncate error if both calls fail;
			 * only report the close error when truncate succeeded */
			int close_r = CALL(reqcfs(req), close, fdesc);
			if (r >= 0)
				r = close_r;
		}
		if (r < 0)
		{
			r = fuse_reply_err(req, -r);
			fuse_reply_assert(!r);
			return;
		}
	}

	/* batch the remaining attribute changes into one metadata update */
	fsmetadata_t fsm[5];
	uint32_t nfsm = 0;
	if (to_set & FUSE_SET_ATTR_UID)
	{
		fsm[nfsm].fsm_feature = FSTITCH_FEATURE_UID;
		fsm[nfsm].fsm_value.u = attr->st_uid;
		nfsm++;
	}
	if (to_set & FUSE_SET_ATTR_GID)
	{
		fsm[nfsm].fsm_feature = FSTITCH_FEATURE_GID;
		fsm[nfsm].fsm_value.u = attr->st_gid;
		nfsm++;
	}
	if (to_set & FUSE_SET_ATTR_MODE)
	{
		fsm[nfsm].fsm_feature = FSTITCH_FEATURE_UNIX_PERM;
		fsm[nfsm].fsm_value.u = attr->st_mode;
		nfsm++;
	}
	if (to_set & FUSE_SET_ATTR_MTIME)
	{
		fsm[nfsm].fsm_feature = FSTITCH_FEATURE_MTIME;
		fsm[nfsm].fsm_value.u = attr->st_mtime;
		nfsm++;
	}
	if (to_set & FUSE_SET_ATTR_ATIME)
	{
		fsm[nfsm].fsm_feature = FSTITCH_FEATURE_ATIME;
		fsm[nfsm].fsm_value.u = attr->st_atime;
		nfsm++;
	}

	if (nfsm > 0)
	{
		r = CALL(reqcfs(req), set_metadata2, cfs_ino, fsm, nfsm);
		if (r < 0)
		{
			r = fuse_reply_err(req, -r);
			fuse_reply_assert(!r);
			return;
		}
	}

	memset(&stbuf, 0, sizeof(stbuf));
	r = fill_stat(reqmount(req), cfs_ino, fuse_ino, &stbuf);
	if (r < 0)
		r = fuse_reply_err(req, -r);
	else
		r = fuse_reply_attr(req, &stbuf, STDTIMEOUT);
	fuse_reply_assert(!r);
}
/*
 * getattr for the overlay filesystem: stat the backing local file.
 *
 * The real path is the local root (localfile->pathinfo.path) with the part
 * of the request path beyond call_info->relpath appended.  On success the
 * cached inode fields are refreshed from the lstat() result, the reply gets
 * the virtual inode number in st_ino and st_dev forced to 0, and directory
 * sizes are reported as 0.  The request pathinfo is always freed.
 */
static void overlay_getattr(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct localfile_struct *localfile=(struct localfile_struct *) resource->data;
    struct pathinfo_struct *pathinfo=&call_info->pathinfo;
    unsigned int len0=pathinfo->len - call_info->relpath, len1=localfile->pathinfo.len;
    char path[len0 + len1 + 1]; /* VLA: local root + relative part + NUL */
    struct stat st;

    /* local root first ... */
    memcpy(path, localfile->pathinfo.path, len1);

    /* ... then the request path relative to the overlay root, if any */
    if (len0>0) {
	memcpy(path+len1, pathinfo->path + call_info->relpath, len0);
	len1+=len0;
    }

    path[len1]='\0';

    memset(&st, 0, sizeof(struct stat));

    logoutput("overlayfs_getattr, path %s", path);

    if (lstat(path, &st)==-1) {
	fuse_reply_err(req, ENOENT);
    } else {
	struct inode_struct *inode=entry->inode;

	/* refresh the cached inode attributes from the backend */
	inode->mode=st.st_mode;
	inode->nlink=st.st_nlink;
	inode->uid=st.st_uid;
	inode->gid=st.st_gid;
	inode->rdev=st.st_rdev;

	if (S_ISDIR(st.st_mode)) {
	    /* directories are presented with size 0 */
	    st.st_size=0;
	} else {
	    inode->size=st.st_size;
	}

	inode->mtim.tv_sec=st.st_mtim.tv_sec;
	inode->mtim.tv_nsec=st.st_mtim.tv_nsec;
	inode->ctim.tv_sec=st.st_ctim.tv_sec;
	inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	/* expose the virtual inode number, not the backend's */
	st.st_ino=inode->ino;
	st.st_dev=0;

	fuse_reply_attr(req, &st, fs_options.attr_timeout);
    }

    free_path_pathinfo(&call_info->pathinfo);
}
/**
 * Non-blocking FUSE getattr for rozofs.
 *
 * Saves the fuse request into an allocated context, then either answers
 * immediately from the cached ientry (block mode, regular files only) or
 * starts an EP_GETATTR transaction towards the export gateway; the answer
 * is then delivered asynchronously by rozofs_ll_getattr_cbk, which owns the
 * saved context.  On any local error the request is answered with errno and
 * the context (if allocated) is released here.
 */
void rozofs_ll_getattr_nb(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
    (void) fi;
    ientry_t *ie = 0;
    epgw_mfile_arg_t arg;
    int ret;
    struct stat stbuf;

    DEBUG("getattr for inode: %lu\n", (unsigned long int) ino);

    void *buffer_p = NULL;
    /*
    ** allocate a context for saving the fuse parameters
    */
    buffer_p = rozofs_fuse_alloc_saved_context();
    if (buffer_p == NULL)
    {
      severe("out of fuse saved context");
      errno = ENOMEM;
      /* NOTE(review): this jumps to `error` and then falls through to
         STOP_PROFILING_NB with buffer_p == NULL — confirm the macro
         tolerates a NULL context */
      goto error;
    }
    SAVE_FUSE_PARAM(buffer_p,req);
    SAVE_FUSE_PARAM(buffer_p,ino);
    SAVE_FUSE_STRUCT(buffer_p,fi,sizeof( struct fuse_file_info));

    START_PROFILING_NB(buffer_p,rozofs_ll_getattr);

    if (!(ie = get_ientry_by_inode(ino))) {
        errno = ENOENT;
        goto error;
    }
    /*
    ** In block mode the attributes of regular files are directly retrieved
    ** from the ie entry. For directories and links one ask to the exportd
    **
    */
    if ((rozofs_mode == 1)&&(S_ISREG(ie->attrs.mode)))
    {
      mattr_to_stat(&ie->attrs, &stbuf);
      stbuf.st_ino = ino;
      fuse_reply_attr(req, &stbuf, rozofs_tmr_get(TMR_FUSE_ATTR_CACHE));
      goto out;
    }
    /*
    ** fill up the structure that will be used for creating the xdr message
    */
    arg.arg_gw.eid = exportclt.eid;
    memcpy(arg.arg_gw.fid, ie->fid, sizeof (uuid_t));
    /*
    ** now initiates the transaction towards the remote end
    */
#if 1
    ret = rozofs_expgateway_send_routing_common(arg.arg_gw.eid,ie->fid,EXPORT_PROGRAM, EXPORT_VERSION,
                              EP_GETATTR,(xdrproc_t) xdr_epgw_mfile_arg_t,(void *)&arg,
                              rozofs_ll_getattr_cbk,buffer_p);
#else
    ret = rozofs_export_send_common(&exportclt,EXPORT_PROGRAM, EXPORT_VERSION,
                              EP_GETATTR,(xdrproc_t) xdr_epgw_mfile_arg_t,(void *)&arg,
                              rozofs_ll_getattr_cbk,buffer_p);
#endif
    if (ret < 0) goto error;
    /*
    ** no error just waiting for the answer
    ** (the callback releases the saved context)
    */
    return;
error:
    fuse_reply_err(req, errno);
    /*
    ** release the buffer if has been allocated
    */
out:
    STOP_PROFILING_NB(buffer_p,rozofs_ll_getattr);
    if (buffer_p != NULL) rozofs_fuse_release_saved_context(buffer_p);
    return;
}
/*
 * FUSE lowlevel setattr handler for zfs-fuse.
 *
 * Resolves the target vnode either by inode lookup (fi == NULL; vnode must
 * be released afterwards) or from the open file handle in fi->fh.  For
 * ftruncate on an open file the size change goes through VOP_SPACE instead
 * of VOP_SETATTR (see comment below); all remaining attributes are packed
 * into a vattr_t and applied with one VOP_SETATTR call.  On success the new
 * attributes are stat'ed and returned with a 0.0 attr timeout.
 *
 * Returns 0 on success or an errno value.
 */
static int zfsfuse_setattr(fuse_req_t req, fuse_ino_t ino, struct stat *attr, int to_set, struct fuse_file_info *fi)
{
	vfs_t *vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs_t *zfsvfs = vfs->vfs_data;

	ZFS_ENTER(zfsvfs);

	vnode_t *vp;
	boolean_t release;

	int error;

	cred_t cred;
	zfsfuse_getcred(req, &cred);

	if(fi == NULL) {
		/* no open handle: look the znode up and take a hold */
		znode_t *znode;

		error = zfs_zget(zfsvfs, ino, &znode, B_TRUE);
		if(error) {
			ZFS_EXIT(zfsvfs);
			/* If the inode we are trying to get was recently deleted
			   dnode_hold_impl will return EEXIST instead of ENOENT */
			return error == EEXIST ? ENOENT : error;
		}
		ASSERT(znode != NULL);
		vp = ZTOV(znode);
		release = B_TRUE;
	} else {
		/* use the vnode of the already-open file; no extra hold taken */
		file_info_t *info = (file_info_t *)(uintptr_t) fi->fh;
		vp = info->vp;
		release = B_FALSE;

		/*
		 * Special treatment for ftruncate().
		 * This is needed because otherwise ftruncate() would
		 * fail with permission denied on read-only files.
		 * (Solaris calls VOP_SPACE instead of VOP_SETATTR on
		 * ftruncate).
		 */
		if(to_set & FUSE_SET_ATTR_SIZE) {
			/* Check if file is opened for writing */
			if((info->flags & FWRITE) == 0) {
				error = EBADF;
				goto out;
			}
			/* Sanity check */
			if(vp->v_type != VREG) {
				error = EINVAL;
				goto out;
			}

			flock64_t bf;

			bf.l_whence = 0; /* beginning of file */
			bf.l_start = attr->st_size;
			bf.l_type = F_WRLCK;
			bf.l_len = (off_t) 0;

			/* FIXME: check locks */
			error = VOP_SPACE(vp, F_FREESP, &bf, info->flags, 0, &cred, NULL);
			if(error)
				goto out;

			/* size handled above; skip VOP_SETATTR if that was all */
			to_set &= ~FUSE_SET_ATTR_SIZE;
			if(to_set == 0)
				goto out;
		}
	}

	ASSERT(vp != NULL);

	/* translate the remaining FUSE_SET_ATTR_* bits into a vattr mask */
	vattr_t vattr = { 0 };

	if(to_set & FUSE_SET_ATTR_MODE) {
		vattr.va_mask |= AT_MODE;
		vattr.va_mode = attr->st_mode;
	}
	if(to_set & FUSE_SET_ATTR_UID) {
		vattr.va_mask |= AT_UID;
		vattr.va_uid = attr->st_uid;
	}
	if(to_set & FUSE_SET_ATTR_GID) {
		vattr.va_mask |= AT_GID;
		vattr.va_gid = attr->st_gid;
	}
	if(to_set & FUSE_SET_ATTR_SIZE) {
		vattr.va_mask |= AT_SIZE;
		vattr.va_size = attr->st_size;
	}
	if(to_set & FUSE_SET_ATTR_ATIME) {
		vattr.va_mask |= AT_ATIME;
		TIME_TO_TIMESTRUC(attr->st_atime, &vattr.va_atime);
	}
	if(to_set & FUSE_SET_ATTR_MTIME) {
		vattr.va_mask |= AT_MTIME;
		TIME_TO_TIMESTRUC(attr->st_mtime, &vattr.va_mtime);
	}

	int flags = (to_set & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) ? ATTR_UTIME : 0;
	error = VOP_SETATTR(vp, &vattr, flags, &cred, NULL);

out: ;
	struct stat stat_reply;

	if(!error)
		error = zfsfuse_stat(vp, &stat_reply, &cred);

	/* Do not release if vp was an opened inode */
	if(release)
		VN_RELE(vp);

	ZFS_EXIT(zfsvfs);

	if(!error)
		fuse_reply_attr(req, &stat_reply, 0.0);

	return error;
}
/*
 * getattr for an NFS-backed workspace entry (by path).
 *
 * Stats the path through libnfs (serialized with the export mutex), and on
 * success refreshes the cached inode fields, computes a synthetic block
 * count from _DEFAULT_BLOCKSIZE, patches st_ino/st_dev to the virtual
 * values and replies with the attributes.  A negative libnfs result is
 * converted to a positive errno for the error reply.  The request pathinfo
 * is always freed.
 */
static void workspace_nfs_getattr(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    struct stat st;
    int result=0;

    /* an empty relative path means the export root itself */
    if (strlen(path)==0) path=(char *) rootpath;

    memset(&st, 0, sizeof(struct stat));

    logoutput("workspace_nfs_getattr, path %s", path);

    pthread_mutex_lock(&nfs_export->mutex);
    result=nfs_stat(nfs_ctx, path, &st);
    pthread_mutex_unlock(&nfs_export->mutex);

    if (result<0) {
	fuse_reply_err(req, abs(result));
    } else {
	struct inode_struct *inode=entry->inode;

	/* refresh the cached inode from the backend stat */
	inode->mode=st.st_mode;
	inode->nlink=st.st_nlink;
	inode->uid=st.st_uid;
	inode->gid=st.st_gid;
	inode->rdev=st.st_rdev;
	inode->size=st.st_size;

	/* synthesize block accounting from the configured blocksize */
	st.st_blksize=_DEFAULT_BLOCKSIZE;

	if (inode->size % st.st_blksize == 0) {
	    st.st_blocks=inode->size / st.st_blksize;
	} else {
	    st.st_blocks=1 + inode->size / st.st_blksize;
	}

	inode->mtim.tv_sec=st.st_mtim.tv_sec;
	inode->mtim.tv_nsec=st.st_mtim.tv_nsec;
	inode->ctim.tv_sec=st.st_ctim.tv_sec;
	inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	/* expose the virtual inode number, not the backend's */
	st.st_ino=inode->ino;
	st.st_dev=0;

	fuse_reply_attr(req, &st, fs_options.attr_timeout);
    }

    free_path_pathinfo(&call_info->pathinfo);
}
/*
 * setattr for an NFS-backed workspace entry (by path).
 *
 * Applies, in order and only when selected in fuse_set: chmod, chown,
 * truncate, and utimes.  Every libnfs call is serialized with
 * nfs_export->mutex.  On any failure the negative libnfs result is turned
 * into an errno reply and the function returns; on success the cached inode
 * is updated and the final reply is rebuilt from the cached values.  The
 * request pathinfo is always freed before returning.
 *
 * Fixes vs previous revision:
 *  - nfs_chown() is now protected by nfs_export->mutex like every other
 *    libnfs call here (it was the only unlocked call).
 *  - removed the unused `out:` label (-Wunused-label).
 */
static void workspace_nfs_setattr(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info, struct stat *st, int fuse_set)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    int result=0;
    struct inode_struct *inode=entry->inode;

    /* an empty relative path means the export root itself */
    if (strlen(path)==0) path=(char *) rootpath;

    logoutput("workspace_nfs_setattr, path %s", path);

    if (fuse_set & FUSE_SET_ATTR_MODE) {

	pthread_mutex_lock(&nfs_export->mutex);
	result=nfs_chmod(nfs_ctx, path, st->st_mode);
	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {
	    fuse_reply_err(req, -result);
	    free_path_pathinfo(&call_info->pathinfo);
	    return;
	} else {
	    inode->mode=st->st_mode;
	}

    }

    if (fuse_set & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID)) {
	uid_t uid=inode->uid;
	gid_t gid=inode->gid;

	if (fuse_set & FUSE_SET_ATTR_UID) uid=st->st_uid;
	if (fuse_set & FUSE_SET_ATTR_GID) gid=st->st_gid;

	/* serialize with the export mutex like every other libnfs call
	   (this call used to run unlocked) */
	pthread_mutex_lock(&nfs_export->mutex);
	result=nfs_chown(nfs_ctx, path, uid, gid);
	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {
	    fuse_reply_err(req, -result);
	    free_path_pathinfo(&call_info->pathinfo);
	    return;
	} else {

	    if (fuse_set & FUSE_SET_ATTR_UID) {
		inode->uid=st->st_uid;
	    } else {
		st->st_uid=inode->uid;
	    }

	    if (fuse_set & FUSE_SET_ATTR_GID) {
		inode->gid=st->st_gid;
	    } else {
		st->st_gid=inode->gid;
	    }

	}

    }

    if (fuse_set & FUSE_SET_ATTR_SIZE) {

	pthread_mutex_lock(&nfs_export->mutex);
	result=nfs_truncate(nfs_ctx, path, st->st_size);
	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {
	    fuse_reply_err(req, -result);
	    free_path_pathinfo(&call_info->pathinfo);
	    return;
	} else {
	    inode->size=st->st_size;
	}

    }

    if (fuse_set & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {
	struct timespec rightnow;
	struct timeval newtimes[2];

	/* defaults: atime zeroed, mtime kept from the cached inode */
	newtimes[0].tv_sec=0;
	newtimes[0].tv_usec=0;
	newtimes[1].tv_sec=inode->mtim.tv_sec;
	newtimes[1].tv_usec=inode->mtim.tv_nsec / 1000;

	if (fuse_set & (FUSE_SET_ATTR_ATIME_NOW | FUSE_SET_ATTR_MTIME_NOW)) get_current_time(&rightnow);

	if (fuse_set & FUSE_SET_ATTR_ATIME) {

	    if (fuse_set & FUSE_SET_ATTR_ATIME_NOW) {
		st->st_atim.tv_sec = rightnow.tv_sec;
		st->st_atim.tv_nsec = rightnow.tv_nsec;
	    }

	    newtimes[0].tv_sec=st->st_atim.tv_sec;
	    newtimes[0].tv_usec = st->st_atim.tv_nsec / 1000;

	}

	if (fuse_set & FUSE_SET_ATTR_MTIME) {

	    if (fuse_set & FUSE_SET_ATTR_MTIME_NOW) {
		st->st_mtim.tv_sec = rightnow.tv_sec;
		st->st_mtim.tv_nsec = rightnow.tv_nsec;
	    }

	    newtimes[1].tv_sec = st->st_mtim.tv_sec;
	    newtimes[1].tv_usec = st->st_mtim.tv_nsec / 1000;

	}

	pthread_mutex_lock(&nfs_export->mutex);
	result=nfs_utimes(nfs_ctx, path, newtimes);
	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {
	    fuse_reply_err(req, -result);
	    free_path_pathinfo(&call_info->pathinfo);
	    return;
	} else {

	    if (fuse_set & FUSE_SET_ATTR_MTIME) {
		inode->mtim.tv_sec=newtimes[1].tv_sec;
		inode->mtim.tv_nsec=newtimes[1].tv_usec * 1000;
	    }

	}

    }

    /* build the reply from the cached inode values */
    st->st_dev=0;
    st->st_ino=inode->ino;
    st->st_mode=inode->mode;
    st->st_nlink=inode->nlink;
    st->st_uid=inode->uid;
    st->st_gid=inode->gid;
    st->st_rdev=inode->rdev;
    st->st_size=inode->size;
    st->st_blksize=_DEFAULT_BLOCKSIZE;

    if (inode->size % st->st_blksize == 0) {
	st->st_blocks = inode->size / st->st_blksize;
    } else {
	st->st_blocks = 1 + inode->size / st->st_blksize;
    }

    memcpy(&st->st_mtim, &inode->mtim, sizeof(struct timespec));
    memcpy(&st->st_ctim, &inode->ctim, sizeof(struct timespec));

    st->st_atim.tv_sec=0;
    st->st_atim.tv_nsec=0;

    fuse_reply_attr(req, st, fs_options.attr_timeout);

    free_path_pathinfo(&call_info->pathinfo);
}
/*
 * fsetattr for an open NFS-backed file (through the open libnfs handle).
 *
 * Applies, in order and only when selected in toset: fchmod, fchown and
 * ftruncate; time changes through a filehandle are not supported yet and
 * only logged.  Every libnfs call is serialized with nfs_export->mutex.
 * On failure the negative libnfs result is converted to an errno reply;
 * on success the cached inode is updated and the final reply is rebuilt
 * from the cached values.
 *
 * Fixes vs previous revision:
 *  - removed the unused `out:` label (-Wunused-label).
 *  - corrected the log message, which named the function
 *    "workspace_mfs_fsetattr".
 */
static void workspace_nfs_fsetattr(fuse_req_t req, struct workspace_fh_struct *fh, struct stat *st, int toset)
{
    struct resource_struct *resource=fh->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    struct nfsfh *nfsfh=(struct nfsfh *) fh->handle.data;
    int result=0;
    struct inode_struct *inode=fh->entry->inode;

    logoutput("workspace_nfs_fsetattr");

    if (toset & FUSE_SET_ATTR_MODE) {

	pthread_mutex_lock(&nfs_export->mutex);
	result=nfs_fchmod(nfs_ctx, nfsfh, st->st_mode);
	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {
	    fuse_reply_err(req, -result);
	    return;
	} else {
	    inode->mode=st->st_mode;
	}

    }

    if (toset & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID)) {
	uid_t uid=inode->uid;
	gid_t gid=inode->gid;

	if (toset & FUSE_SET_ATTR_UID) uid=st->st_uid;
	if (toset & FUSE_SET_ATTR_GID) gid=st->st_gid;

	pthread_mutex_lock(&nfs_export->mutex);
	result=nfs_fchown(nfs_ctx, nfsfh, uid, gid);
	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {
	    fuse_reply_err(req, -result);
	    return;
	} else {

	    if (toset & FUSE_SET_ATTR_UID) {
		inode->uid=st->st_uid;
	    } else {
		st->st_uid=inode->uid;
	    }

	    if (toset & FUSE_SET_ATTR_GID) {
		inode->gid=st->st_gid;
	    } else {
		st->st_gid=inode->gid;
	    }

	}

    }

    if (toset & FUSE_SET_ATTR_SIZE) {

	pthread_mutex_lock(&nfs_export->mutex);
	result=nfs_ftruncate(nfs_ctx, nfsfh, st->st_size);
	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {
	    fuse_reply_err(req, -result);
	    return;
	} else {
	    inode->size=st->st_size;
	}

    }

    if (toset & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {

	logoutput("workspace_nfs_fsetattr: setting times through filehandle not supported yet");

    }

    /* build the reply from the cached inode values */
    st->st_dev=0;
    st->st_ino=inode->ino;
    st->st_mode=inode->mode;
    st->st_nlink=inode->nlink;
    st->st_uid=inode->uid;
    st->st_gid=inode->gid;
    st->st_rdev=inode->rdev;
    st->st_size=inode->size;
    st->st_blksize=_DEFAULT_BLOCKSIZE;

    if (inode->size % st->st_blksize == 0) {
	st->st_blocks = inode->size / st->st_blksize;
    } else {
	st->st_blocks = 1 + inode->size / st->st_blksize;
    }

    memcpy(&st->st_mtim, &inode->mtim, sizeof(struct timespec));
    memcpy(&st->st_ctim, &inode->ctim, sizeof(struct timespec));

    st->st_atim.tv_sec=0;
    st->st_atim.tv_nsec=0;

    fuse_reply_attr(req, st, fs_options.attr_timeout);
}