/* This, and hence fuse_reply, are called on a different thread to the one the * request came in on. This doesn't seem to matter. */ static void chunk_done (void *read_ctxt, int rc, size_t size) { read_context_t *ctxt = (read_context_t *)read_ctxt; if (size != ctxt->size) { read_trace("bytes read != request size => EOF / error\n"); } /* As a result of this read() operation we can say certain things * about the direntry. These professions can be made with confidence * because we've just performed an actual network transaction, so * our information is "live": * - If we actually read data from the file then it definitely still * exists, now. * - If we tried to read from it and found it didn't exist, then it * definitely doesn't exist any more. * - Any other error doesn't give enough info. */ if (!rc) { direntry_still_exists(ctxt->de); } else if (rc == ENOENT) { direntry_no_longer_exists(ctxt->de); } if (!rc) { assert(!fuse_reply_buf(ctxt->req, ctxt->buf, size)); } else { assert(!fuse_reply_err(ctxt->req, rc)); } direntry_delete(CALLER_INFO ctxt->de); free(ctxt->buf); free(read_ctxt); }
/*
 * Send a lookup/entry reply back to the kernel.
 *
 * Protocol minors below 9 expect the shorter compat fuse_entry_out;
 * below 4 a zero inode could not express a negative entry, so it is
 * mapped to an ENOENT error reply instead.
 */
int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
{
    struct fuse_entry_out arg;
    size_t size;

    if (req->f->conn.proto_minor < 9)
        size = FUSE_COMPAT_ENTRY_OUT_SIZE;
    else
        size = sizeof(arg);

    /* before ABI 7.4 e->ino == 0 was invalid, only ENOENT meant
     * negative entry */
    if (!e->ino && req->f->conn.proto_minor < 4)
        return fuse_reply_err(req, ENOENT);

    memset(&arg, 0, sizeof(arg));
    fill_entry(&arg, e);
    return send_reply_ok(req, &arg, size);
}
/* LISTXATTR: return the xattr name list, or just its required size
 * when the caller passed size == 0 (the usual two-phase protocol). */
static void sqfs_ll_op_listxattr(fuse_req_t req, fuse_ino_t ino,
		size_t size) {
	sqfs_ll_i lli;
	char *names = NULL;
	int ferr;

	if (sqfs_ll_iget(req, &lli, ino))
		return;

	/* Only allocate when the kernel actually wants the data. */
	if (size) {
		names = malloc(size);
		if (!names) {
			fuse_reply_err(req, ENOMEM);
			return;
		}
	}

	ferr = sqfs_listxattr(&lli.ll->fs, &lli.inode, names, &size);
	if (ferr)
		fuse_reply_err(req, ferr);
	else if (names)
		fuse_reply_buf(req, names, size);
	else
		fuse_reply_xattr(req, size);
	free(names);
}
/* READ: copy up to size bytes starting at off from the opened inode
 * (stashed in fi->fh by open) into a temporary buffer and reply. */
static void sqfs_ll_op_read(fuse_req_t req, fuse_ino_t ino,
		size_t size, off_t off, struct fuse_file_info *fi) {
	sqfs_ll *ll = fuse_req_userdata(req);
	sqfs_inode *inode = (sqfs_inode*)(intptr_t)fi->fh;
	sqfs_err err = SQFS_OK;
	off_t osize;
	char *buf;

	/* BUG FIX: malloc(0) may legitimately return NULL, which the old
	 * code mistook for out-of-memory.  A zero-byte read is simply an
	 * empty reply. */
	if (size == 0) {
		fuse_reply_buf(req, NULL, 0);
		return;
	}

	buf = malloc(size);
	if (!buf) {
		fuse_reply_err(req, ENOMEM);
		return;
	}

	osize = size;
	err = sqfs_read_range(&ll->fs, inode, off, &osize, buf);
	if (err) {
		fuse_reply_err(req, EIO);
	} else if (osize == 0) { /* EOF */
		fuse_reply_buf(req, NULL, 0);
	} else {
		fuse_reply_buf(req, buf, osize);
	}
	free(buf);
}
/* Dispatch FUSE_MKNOD.  Protocol >= 7.12 carries a umask field (and a
 * larger header); older kernels use the compact layout where the name
 * starts FUSE_COMPAT_MKNOD_IN_SIZE bytes into the message. */
static void do_mknod(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_mknod_in *arg = (struct fuse_mknod_in *) inarg;
    char *name;

    if (req->f->conn.proto_minor >= 12) {
        name = PARAM(arg);
        req->ctx.umask = arg->umask;
    } else {
        name = (char *) inarg + FUSE_COMPAT_MKNOD_IN_SIZE;
    }

    if (!req->f->op.mknod) {
        fuse_reply_err(req, ENOSYS);
        return;
    }
    req->f->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
}
/* READDIR for the single-directory hello filesystem: only the root
 * (inode 1) is a directory; it lists ".", ".." and the one file. */
static void lfs_ll_readdir(fuse_req_t req, fuse_ino_t ino, size_t size,
                           off_t off, struct fuse_file_info *fi)
{
    struct dirbuf b;

    (void) fi;

    if (ino != 1) {
        fuse_reply_err(req, ENOTDIR);
        return;
    }

    memset(&b, 0, sizeof(b));
    dirbuf_add(req, &b, ".", 1);
    dirbuf_add(req, &b, "..", 1);
    dirbuf_add(req, &b, hello_name, 2);
    reply_buf_limited(req, b.p, b.size, off, size);
    free(b.p);
}
/* LOOKUP: resolve `name` inside the directory inode `parent` and reply
 * with its entry parameters, or ENOENT when either the directory or
 * the child cannot be found. */
static void compiz_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
{
    CompDisplay *d = (CompDisplay *) fuse_req_userdata(req);
    FuseInode *dir, *child;
    struct fuse_entry_param entry;

    dir = fuseFindInode(inodes, parent, DIR_MASK);
    if (!dir) {
        fuse_reply_err(req, ENOENT);
        return;
    }

    /* Refresh directories that are not constant, or never populated. */
    if (!dir->child || !(dir->type & CONST_DIR_MASK))
        fuseUpdateInode(d, dir);

    child = fuseLookupChild(dir, name);
    if (!child) {
        fuse_reply_err(req, ENOENT);
        return;
    }

    memset(&entry, 0, sizeof(entry));
    entry.ino = child->ino;
    entry.attr_timeout = 1.0;
    entry.entry_timeout = 1.0;
    fuseInodeStat(d, child, &entry.attr);
    fuse_reply_entry(req, &entry);
}
/* Dispatch FUSE_WRITE to the filesystem's write handler; the payload
 * follows the header (PARAM), and bit 0 of write_flags marks a
 * writepage request. */
static void do_write(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_write_in *arg = (struct fuse_write_in *) inarg;
    struct fuse_file_info fi;

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.fh_old = fi.fh;
    fi.writepage = arg->write_flags & 1;

    if (!req->f->op.write) {
        fuse_reply_err(req, ENOSYS);
        return;
    }
    req->f->op.write(req, nodeid, PARAM(arg), arg->size, arg->offset, &fi);
}
/* Dispatch FUSE_RELEASEDIR.  releasedir is optional; without a handler
 * the release is simply acknowledged as success. */
static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
    struct fuse_file_info fi;

    memset(&fi, 0, sizeof(fi));
    fi.flags = arg->flags;
    fi.fh = arg->fh;
    fi.fh_old = fi.fh;

    if (!req->f->op.releasedir) {
        fuse_reply_err(req, 0);
        return;
    }
    req->f->op.releasedir(req, nodeid, &fi);
}
/* IOCTL: only COMMIT and CLONE_AND_COMMIT are supported; the mirror
 * helper's boolean result is mapped to ioctl status 0 (ok) / -1
 * (failed).  All other commands get EINVAL. */
void blob_ll_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd, void *arg,
                   struct fuse_file_info *fi, unsigned flagsp,
                   const void *in_buf, size_t in_bufsz, size_t out_bufszp)
{
    blob_mirror_t *lm = (blob_mirror_t *) fi->fh;

    switch (cmd) {
    case COMMIT: {
        int status = lm->commit() ? 0 : -1;
        fuse_reply_ioctl(req, status, NULL, 0);
        break;
    }
    case CLONE_AND_COMMIT: {
        int status = lm->clone_and_commit() ? 0 : -1;
        fuse_reply_ioctl(req, status, NULL, 0);
        break;
    }
    default:
        fuse_reply_err(req, EINVAL);
    }
}
/* Dispatch FUSE_GETLK: convert the wire-format lock description into a
 * struct flock and hand it to the filesystem's getlk handler. */
static void do_getlk(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_lk_in *arg = (struct fuse_lk_in *) inarg;
    struct fuse_file_info fi;
    struct flock flock;

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.lock_owner = arg->owner;

    convert_fuse_file_lock(&arg->lk, &flock);

    if (!req->f->op.getlk) {
        fuse_reply_err(req, ENOSYS);
        return;
    }
    req->f->op.getlk(req, nodeid, &fi, &flock);
}
/* LOOKUP for the hello filesystem: the only valid entry is hello_name
 * directly under the root (parent inode 1 -> file inode 2). */
static void hello_ll_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
{
    struct fuse_entry_param e;

    if (parent != 1 || strcmp(name, hello_name) != 0) {
        fuse_reply_err(req, ENOENT);
        return;
    }

    memset(&e, 0, sizeof(e));
    e.ino = 2;
    e.attr_timeout = 1.0;
    e.entry_timeout = 1.0;
    hello_stat(e.ino, &e.attr);
    fuse_reply_entry(req, &e);
}
/* GETATTR: fill a stat buffer for the inode and reply with it, or with
 * the (positive) errno when fill_stat fails. */
static void serve_getattr(fuse_req_t req, fuse_ino_t fuse_ino,
                          struct fuse_file_info * fi)
{
    struct stat stbuf;
    int r;

    Dprintf("%s(ino = %lu)\n", __FUNCTION__, fuse_ino);
    (void) fi;

    memset(&stbuf, 0, sizeof(stbuf));
    r = fill_stat(reqmount(req), fusecfsino(req, fuse_ino), fuse_ino, &stbuf);
    r = (r < 0)
        ? fuse_reply_err(req, -r)
        : fuse_reply_attr(req, &stbuf, STDTIMEOUT);
    fuse_reply_assert(!r);
}
/* OPEN: allocate a per-handle buffer of FI_FH_LEN bytes and stash its
 * address in fi->fh; reply ENOMEM when the allocation fails.
 * NOTE(review): if fuse_reply_open() itself fails the allocation is not
 * released here — confirm the release handler still runs in that case,
 * otherwise this leaks. */
void hsx_fuse_open (fuse_req_t req, fuse_ino_t ino _U_,
                    struct fuse_file_info *fi)
{
    uint64_t fh = (uint64_t) malloc (FI_FH_LEN);
    int err = 0;

    DEBUG_IN ("ino : (%lu) fi->flags:%d",ino, fi->flags);

    if (fh) {
        fi->fh = fh;
        fuse_reply_open(req, fi);
    } else {
        err = ENOMEM;
        ERR ("malloc failed:%d\n",err);
        fuse_reply_err(req, err);
    }

    DEBUG_OUT(" fh:%lu",fh);
}
/*
 * Reply with a vector of buffers.  One spare iovec slot is prepended
 * for the reply header, which send_reply_iov() fills in.
 * Returns the result of the send, or of the error reply on OOM.
 */
int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
{
    int res;
    struct iovec *padded_iov;

    padded_iov = malloc((count + 1) * sizeof(struct iovec));
    if (padded_iov == NULL) {
        /* BUG FIX: fuse_reply_err() takes a *positive* errno (see every
         * other call site in this file); the old code passed -ENOMEM,
         * which flipped the sign of the error sent to the kernel. */
        return fuse_reply_err(req, ENOMEM);
    }

    /* Slot 0 is reserved for the out-header added by send_reply_iov. */
    memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
    count++;

    res = send_reply_iov(req, 0, padded_iov, count);
    free(padded_iov);
    return res;
}
/*
 * When the device is created in QEMU it gets initialised here and added
 * to the device linked list.  On success the new device index is kept
 * in fi->fh; on failure the open is rejected with EPERM.
 */
static void vhost_net_open(fuse_req_t req, struct fuse_file_info *fi)
{
    struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
    int err = ops->new_device(ctx);

    if (err == -1) {
        fuse_reply_err(req, EPERM);
        return;
    }

    fi->fh = err;
    RTE_LOG(INFO, CONFIG, "(%"PRIu64") Device configuration started\n",
            fi->fh);
    fuse_reply_open(req, fi);
}
/* Dispatch FUSE_FLUSH.  Protocol >= 7.7 also carries the lock owner
 * used for POSIX-lock release on close. */
static void do_flush(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_flush_in *arg = (struct fuse_flush_in *) inarg;
    struct fuse_file_info fi;

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.fh_old = fi.fh;
    fi.flush = 1;
    if (req->f->conn.proto_minor >= 7)
        fi.lock_owner = arg->lock_owner;

    if (!req->f->op.flush) {
        fuse_reply_err(req, ENOSYS);
        return;
    }
    req->f->op.flush(req, nodeid, &fi);
}
/* RELEASEDIR: tear down the per-handle readdir state, acknowledge the
 * kernel, then sync the directory administration with what this
 * readdir pass observed.
 * (Cleanup: removed the unused locals `synctime` and `error`.) */
static void overlay_releasedir(fuse_req_t req, struct workspace_dh_struct *dh)
{
    struct overlay_readdir_struct *overlay_readdir =
        (struct overlay_readdir_struct *) dh->handle.data;
    struct directory_struct *directory = NULL;
    unsigned int mode = 0;

    logoutput("RELEASEDIR");

    directory = dh->directory;

    if (overlay_readdir) {
        mode = overlay_readdir->mode;

        /* NOTE(review): overlay_readdir->fd is read *after*
         * close_readdir(); confirm close_readdir() neither frees nor
         * resets the handle, or this is a use-after-free. */
        close_readdir(overlay_readdir);

        if (overlay_readdir->fd > 0) {
            close(overlay_readdir->fd);
            overlay_readdir->fd = 0;
        }

        free(overlay_readdir);
        overlay_readdir = NULL;
    }

    fuse_reply_err(req, 0);

    if (directory) {
        /* when synced with backend and there were entries at start test
         * these are not synced */
        if ((dh->mode & _WORKSPACE_READDIR_MODE_NONEMPTY) &&
            (mode & (_FW_READDIR_MODE_SIMPLE | _FW_READDIR_MODE_FULL)))
            remove_old_entries(dh->object, directory, &dh->synctime);

        memcpy(&directory->synctime, &dh->synctime,
               sizeof(struct timespec));
    }

    clean_pathcache();
}
/*
 * Send a lookup/entry reply.  Before ABI 7.4 a zero inode could not
 * express a negative entry, so it is turned into an ENOENT error.
 * In POSIXACLS builds the reply size also depends on the protocol:
 * pre-7.12 kernels expect the compat (shorter) fuse_entry_out.
 */
int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
{
    struct fuse_entry_out arg;
    size_t size;

    if (!e->ino && req->f->conn.proto_minor < 4)
        return fuse_reply_err(req, ENOENT);

    memset(&arg, 0, sizeof(arg));
    fill_entry(&arg, e);

#ifdef POSIXACLS
    size = (req->f->conn.proto_minor >= 12)
        ? sizeof(arg) : FUSE_COMPAT_ENTRY_OUT_SIZE;
#else
    size = sizeof(arg);
#endif
    return send_reply_ok(req, &arg, size);
}
/* Dispatch FUSE_READ.  Protocol >= 7.9 additionally passes the lock
 * owner and open flags through the file info. */
static void do_read(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_read_in *arg = (struct fuse_read_in *) inarg;
    struct fuse_file_info fi;

    if (!req->f->op.read) {
        fuse_reply_err(req, ENOSYS);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.fh_old = fi.fh;
    if (req->f->conn.proto_minor >= 9) {
        fi.lock_owner = arg->lock_owner;
        fi.flags = arg->flags;
    }
    req->f->op.read(req, nodeid, arg->size, arg->offset, &fi);
}
/* Dispatch FUSE_IOCTL; in_buf is NULL when the request carried no
 * input payload. */
static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_ioctl_in *arg = (struct fuse_ioctl_in *) inarg;
    unsigned int flags = arg->flags;
    void *in_buf = arg->in_size ? PARAM(arg) : NULL;
    struct fuse_file_info fi;

    memset(&fi, 0, sizeof(fi));
    fi.fh = arg->fh;
    fi.fh_old = fi.fh;

    if (!req->f->op.ioctl) {
        fuse_reply_err(req, ENOSYS);
        return;
    }
    req->f->op.ioctl(req, nodeid, arg->cmd, (void *)(uintptr_t)arg->arg,
                     &fi, flags, in_buf, arg->in_size, arg->out_size);
}
/* SYMLINK: create a symbolic link named local_name under parent,
 * pointing at link.  Replies with the new entry on success, or with
 * the positive errno on failure. */
static void serve_symlink(fuse_req_t req, const char * link,
                          fuse_ino_t parent, const char * local_name)
{
    Dprintf("%s(parent = %lu, local_name = \"%s\", link = \"%s\")\n",
            __FUNCTION__, parent, local_name, link);
    CFS_t * cfs = reqcfs(req);
    inode_t cfs_parent = fusecfsino(req, parent);
    /* symlinks are created with full rwx permission bits */
    int mode = S_IFLNK | (S_IRWXU | S_IRWXG | S_IRWXO);
    /* metadata passed down to the create call: credentials, mode,
     * and the link target */
    fuse_metadata_t fusemd = { .ctx = fuse_req_ctx(req), .mode = mode,
        .type = TYPE_SYMLINK,
        .type_info.symlink = { .link = link, .link_len = strlen(link) } };
    metadata_set_t initialmd = { .get = fuse_get_metadata, .arg = &fusemd };
    inode_t cfs_ino;
    fdesc_t * fdesc;
    int r;
    struct fuse_entry_param e;

    if (!feature_supported(cfs, FSTITCH_FEATURE_SYMLINK))
    {
        r = -ENOSYS;
        goto error;
    }

    /* create the entry; fdesc is only needed transiently to set the
     * parent, then closed */
    r = CALL(cfs, create, cfs_parent, local_name, 0, &initialmd,
             &fdesc, &cfs_ino);
    if (r < 0)
        goto error;
    assert(cfs_ino != INODE_NONE);

    fdesc->common->parent = cfs_parent;
    r = CALL(cfs, close, fdesc);
    assert(r >= 0);
    fdesc = NULL;

    r = init_fuse_entry(reqmount(req), cfs_parent, cfs_ino,
                        cfsfuseino(req, cfs_ino), &e);
    if (r < 0)
    {
        /* roll back the just-created entry before reporting failure.
         * NOTE(review): this passes the raw fuse ino `parent`, while
         * the create above used the translated `cfs_parent` — confirm
         * unlink really expects the untranslated inode here. */
        (void) CALL(reqmount(req)->cfs, unlink, parent, local_name);
        goto error;
    }

    r = fuse_reply_entry(req, &e);
    fuse_reply_assert(!r);
    return;

  error:
    /* r holds a negative errno at this point; FUSE wants it positive */
    r = fuse_reply_err(req, -r);
    fuse_reply_assert(!r);
}
/* READDIR: stream directory entries starting at offset off into a
 * buffer of at most size bytes, then reply with the filled portion.
 * The open directory state was stashed in fi->fh by opendir. */
static void sqfs_ll_op_readdir(fuse_req_t req, fuse_ino_t ino,
		size_t size, off_t off, struct fuse_file_info *fi) {
	sqfs_err sqerr;
	sqfs_dir dir;
	sqfs_name namebuf;
	sqfs_dir_entry entry;
	size_t esize;
	struct stat st;

	char *buf = NULL, *bufpos = NULL;
	sqfs_ll_i *lli = (sqfs_ll_i*)(intptr_t)fi->fh;
	int err = 0;

	if (sqfs_dir_open(&lli->ll->fs, &lli->inode, &dir, off))
		err = EINVAL;
	if (!err && !(bufpos = buf = malloc(size)))
		err = ENOMEM;

	if (!err) {
		memset(&st, 0, sizeof(st));
		sqfs_dentry_init(&entry, namebuf);
		/* sqfs_dir_next sets sqerr on every call, so it is
		 * well-defined when checked after the loop */
		while (sqfs_dir_next(&lli->ll->fs, &dir, &entry, &sqerr)) {
			st.st_ino = lli->ll->ino_fuse_num(lli->ll, &entry);
			st.st_mode = sqfs_dentry_mode(&entry);
			/* esize is the size this entry *would* occupy; when
			 * it no longer fits, stop without consuming it */
			esize = sqfs_ll_add_direntry(req, bufpos, size,
				sqfs_dentry_name(&entry), &st,
				sqfs_dentry_next_offset(&entry));
			if (esize > size)
				break;
			bufpos += esize;
			size -= esize;
		}
		if (sqerr)
			err = EIO;
	}

	if (err)
		fuse_reply_err(req, err);
	else
		/* bufpos - buf == number of bytes actually filled */
		fuse_reply_buf(req, buf, bufpos - buf);
	free(buf);
}
/* FSYNC on an NFS-backed file handle.
 * NOTE(review): this calls nfs_close() rather than a flush/sync
 * primitive, and ignores the datasync argument — confirm this is the
 * intended mapping for this backend (a second fsync or a later release
 * on the same handle would then operate on a closed nfsfh). */
static void workspace_nfs_fsync(fuse_req_t req, int datasync,
                                struct workspace_fh_struct *fh)
{
    struct resource_struct *resource=fh->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    struct nfsfh *nfsfh=(struct nfsfh *) fh->handle.data;
    int result=0;

    logoutput("workspace_nfs_fsync");

    /* the NFS context is shared across handles; serialize access */
    pthread_mutex_lock(&nfs_export->mutex);
    result=nfs_close(nfs_ctx, nfsfh);
    pthread_mutex_unlock(&nfs_export->mutex);

    /* abs() maps a negative backend result to the positive errno that
     * fuse_reply_err expects (0 on success) */
    fuse_reply_err(req, abs(result));
}
/* OPENDIR for the meta filesystem: only the fixed meta inodes are
 * directories.  A per-handle dirbuf is allocated and stored in fi->fh;
 * it is torn down here if the kernel aborted the request (reply
 * returned -ENOENT), otherwise it lives until releasedir. */
void mfs_meta_opendir(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi) {
	dirbuf *dirinfo;
	if (ino==META_ROOT_INODE || ino==META_TRASH_INODE || ino==META_UNDEL_INODE || ino==META_RESERVED_INODE) {
		dirinfo = malloc(sizeof(dirbuf));
		/* BUG FIX: the allocation was previously used unchecked,
		 * dereferencing NULL on OOM */
		if (dirinfo==NULL) {
			fuse_reply_err(req, ENOMEM);
			return;
		}
		pthread_mutex_init(&(dirinfo->lock),NULL);
		dirinfo->p = NULL;
		dirinfo->size = 0;
		dirinfo->wasread = 0;
		fi->fh = (unsigned long)dirinfo;
		if (fuse_reply_open(req,fi) == -ENOENT) {
			/* request was interrupted: releasedir will never run,
			 * so clean up the handle now */
			fi->fh = 0;
			pthread_mutex_destroy(&(dirinfo->lock));
			free(dirinfo->p);
			free(dirinfo);
		}
	} else {
		fuse_reply_err(req, ENOTDIR);
	}
}
/* Dispatch FUSE_RELEASE.  Protocol >= 7.8 may request an implicit
 * flush and carries the lock owner.  release is optional; without a
 * handler the request just succeeds. */
static void do_release(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_release_in *arg = (struct fuse_release_in *) inarg;
    struct fuse_file_info fi;

    memset(&fi, 0, sizeof(fi));
    fi.flags = arg->flags;
    fi.fh = arg->fh;
    fi.fh_old = fi.fh;
    if (req->f->conn.proto_minor >= 8) {
        fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
        fi.lock_owner = arg->lock_owner;
    }

    if (req->f->op.release)
        req->f->op.release(req, nodeid, &fi);
    else
        fuse_reply_err(req, 0);
}
/* STATFS: assemble a statvfs from the CFS metadata features and reply
 * with it; on failure replies with the positive errno. */
static void serve_statfs(fuse_req_t req)
{
    Dprintf("%s()\n", __FUNCTION__);
    struct statvfs st; // For more info, see: man 2 statvfs
    int r;

    /* fragment size doubles as the block size for this filesystem */
    r = CALL(reqcfs(req), get_metadata, 0, FSTITCH_FEATURE_BLOCKSIZE,
             sizeof(st.f_frsize), &st.f_frsize);
    if (r < 0)
        goto serve_statfs_err;
    else if (sizeof(st.f_frsize) != r)
    {
        /* BUG FIX: the short-read check used to compare r against
         * sizeof(st.f_bsize) — a different field from the one that was
         * read — and then reported the meaningless errno 1 (via
         * r = -1).  Check the field actually read and report EIO. */
        r = -EIO;
        goto serve_statfs_err;
    }
    st.f_bsize = st.f_frsize;

    /* device size and free space are best-effort: zero on failure */
    r = CALL(reqcfs(req), get_metadata, 0, FSTITCH_FEATURE_DEVSIZE,
             sizeof(st.f_blocks), &st.f_blocks);
    if (sizeof(st.f_blocks) != r)
        st.f_blocks = st.f_bfree = st.f_bavail = 0;
    else
    {
        r = CALL(reqcfs(req), get_metadata, 0, FSTITCH_FEATURE_FREESPACE,
                 sizeof(st.f_bavail), &st.f_bavail);
        if (sizeof(st.f_bavail) != r)
            st.f_bfree = st.f_bavail = 0;
        else
            st.f_bfree = st.f_bavail;
    }

    // TODO - add lfs features for these guys
    st.f_files = 0;
    st.f_ffree = st.f_favail = 0;
    st.f_flag = 0;
    st.f_namemax = NAME_MAX;

    r = fuse_reply_statfs(req, &st);
    fuse_reply_assert(!r);
    return;

  serve_statfs_err:
    r = fuse_reply_err(req, -r);
    fuse_reply_assert(!r);
    return;
}
/* OPENDIR on an NFS-backed directory: open the backend directory and
 * stash the handle in dh->handle.data, or reply with the error.  The
 * path info is released on every exit path. */
static void workspace_nfs_opendir(fuse_req_t req, struct workspace_dh_struct *dh)
{
    struct resource_struct *resource = dh->object->resource;
    struct net_nfs_export_struct *nfs_export =
        (struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx = (struct nfs_context *) nfs_export->data;
    char *path = dh->pathinfo.path + dh->relpath;
    struct nfsdir *dir = NULL;
    int result = 0;

    /* an empty relative path means the export root */
    if (strlen(path) == 0)
        path = (char *) rootpath;

    logoutput("workspace_nfs_opendir: path %s", path);

    /* the NFS context is shared; serialize access */
    pthread_mutex_lock(&nfs_export->mutex);
    result = nfs_opendir(nfs_ctx, path, &dir);
    pthread_mutex_unlock(&nfs_export->mutex);

    if (result == 0) {
        dh->handle.data = (void *) dir;
        fuse_reply_open(req, dh->fi);
    } else {
        unsigned int error = abs(result);

        logoutput("workspace_opendir, error %i", error);
        fuse_reply_err(req, error);
    }

    free_path_pathinfo(&dh->pathinfo);
}
/* LOOKUP in the blob hierarchy.  The root (inode 1) contains
 * "blob-<id>" entries; a blob inode (version 0) contains
 * "version-<ver>" entries; version inodes are leaves, so any lookup
 * under them fails. */
void blob_ll_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
{
    struct fuse_entry_param e;
    boost::uint32_t id = ino_id(parent), ver = 0;

    DBG("lookup name = " << name);

    /* BUG FIX: id and ver are unsigned 32-bit, so the sscanf
     * conversions must be %u — scanning into an unsigned object with
     * %d is a format/type mismatch (undefined behavior). */
    if ((parent == 1 && sscanf(name, "blob-%u", &id) != 1) ||
        (ino_id(parent) != 0 && ino_version(parent) == 0 &&
         sscanf(name, "version-%u", &ver) != 1) ||
        (ino_id(parent) != 0 && ino_version(parent) != 0)) {
        fuse_reply_err(req, ENOENT);
    } else {
        memset(&e, 0, sizeof(e));
        e.ino = build_ino(id, ver);
        e.attr_timeout = 1.0;
        e.entry_timeout = 1.0;
        blob_stat(e.ino, &e.attr);
        fuse_reply_entry(req, &e);
    }
}
/* Dispatch FUSE_CREATE.  Protocol >= 7.12 carries a umask; older
 * kernels use the compact layout where the name directly follows a
 * plain fuse_open_in header. */
static void do_create(fuse_req_t req, fuse_ino_t nodeid, const void *inarg)
{
    struct fuse_create_in *arg = (struct fuse_create_in *) inarg;
    struct fuse_file_info fi;
    char *name;

    if (!req->f->op.create) {
        fuse_reply_err(req, ENOSYS);
        return;
    }

    memset(&fi, 0, sizeof(fi));
    fi.flags = arg->flags;

    if (req->f->conn.proto_minor >= 12) {
        name = PARAM(arg);
        req->ctx.umask = arg->umask;
    } else {
        name = (char *) inarg + sizeof(struct fuse_open_in);
    }

    req->f->op.create(req, nodeid, name, arg->mode, &fi);
}