static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
    struct fuse_conn *fc;
    struct inode *root;
    struct fuse_mount_data d;
    struct file *file;
    int err;

    if (!parse_fuse_opt((char *) data, &d))
        return -EINVAL;

    sb->s_blocksize = PAGE_CACHE_SIZE;
    sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
    sb->s_magic = FUSE_SUPER_MAGIC;
    sb->s_op = &fuse_super_operations;
    sb->s_maxbytes = MAX_LFS_FILESIZE;
#ifndef FUSE_MAINLINE
#ifdef KERNEL_2_6
    sb->s_export_op = &fuse_export_operations;
#endif
#endif

    file = fget(d.fd);
    if (!file)
        return -EINVAL;

    fc = get_conn(file, sb);
    fput(file);
    if (IS_ERR(fc))
        return PTR_ERR(fc);

    fc->flags = d.flags;
    fc->user_id = d.user_id;
    fc->group_id = d.group_id;
    fc->max_read = d.max_read;
#ifdef KERNEL_2_6
    if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages)
        fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE;
#endif

    err = -ENOMEM;
    root = get_root_inode(sb, d.rootmode);
    if (root == NULL)
        goto err;

    sb->s_root = d_alloc_root(root);
    if (!sb->s_root) {
        iput(root);
        goto err;
    }
    fuse_send_init(fc);
    return 0;

 err:
    spin_lock(&fuse_lock);
    fuse_release_conn(fc);
    spin_unlock(&fuse_lock);
    return err;
}
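/*
 * Illustrative sketch (not part of the original sources): how a userspace
 * daemon would produce the mount data that parse_fuse_opt() consumes in
 * fuse_fill_super() above.  The option names (fd, rootmode, user_id,
 * group_id) are the standard FUSE mount options; fuse_mount_sketch() and
 * the "myfs" source name are hypothetical, and error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mount.h>
#include <unistd.h>

static int fuse_mount_sketch(const char *mountpoint)
{
    char opts[128];
    int fd = open("/dev/fuse", O_RDWR);      /* channel to the kernel module */
    if (fd < 0)
        return -1;

    /* 040755 = S_IFDIR | 0755; rootmode is parsed as octal by the kernel */
    snprintf(opts, sizeof(opts), "fd=%d,rootmode=%o,user_id=%u,group_id=%u",
             fd, 040755, (unsigned)getuid(), (unsigned)getgid());

    /* The kernel side ends up in fuse_fill_super() with this option string. */
    if (mount("myfs", mountpoint, "fuse", 0, opts) == -1) {
        close(fd);
        return -1;
    }
    return fd;    /* the daemon serves requests by reading/writing this fd */
}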
static errno_t fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp,
                                user_addr_t udata, vfs_context_t context)
{
    int err = 0;
    int mntopts = 0;
    bool mounted = false;
    uint32_t max_read = ~0;
    size_t len;
    fuse_device_t fdev = NULL;
    struct fuse_data *data = NULL;
    fuse_mount_args fusefs_args;
    struct vfsstatfs *vfsstatfsp = vfs_statfs(mp);
#if M_FUSE4X_ENABLE_BIGLOCK
    lck_mtx_t *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

    err = ENOTSUP;

#if M_FUSE4X_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_FUSE4X_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (*fusefs_args.fstypename) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if (typenamelen > FUSE_FSTYPENAME_MAXLEN) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 FUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if (!*fusefs_args.fsname)
        return EINVAL;

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if ((fusefs_args.init_timeout > FUSE_MAX_INIT_TIMEOUT) ||
        (fusefs_args.init_timeout < FUSE_MIN_INIT_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) != 0) || !is_member) {
            log("fuse4x: caller is not a member of fuse4x admin group. "
                "Either add user (id=%d) to group (id=%d), "
                "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
            return EPERM;
        }
        mntopts |= FSESS_ALLOW_ROOT;
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                log("fuse4x: caller is not a member of fuse4x admin group. "
                    "Either add user (id=%d) to group (id=%d), "
                    "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                    kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;
    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {
        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            log("fuse4x: cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'\n");
            return EINVAL;
        }
        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }
    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        log("fuse4x: invalid device file (number=%d)\n", fusefs_args.rdev);
        return EINVAL;
    }

    fuse_lck_mtx_lock(fdev->mtx);

    data = fdev->data;
    if (!data) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return ENXIO;
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->dataflags & FSESS_MOUNTED) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_lck_mtx_unlock(fdev->mtx);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENXIO;
        goto out;
    }

    data->dataflags |= FSESS_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("fuse4x: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) !=
        kauth_cred_getuid(data->daemoncred)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = EPERM;
        log("fuse4x: fuse daemon running by user_id=%d does not have privileges "
            "to mount on directory %s owned by user_id=%d\n",
            kauth_cred_getuid(data->daemoncred), vfsstatfsp->f_mntonname,
            kauth_cred_getuid(vfs_context_ucred(context)));
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec = fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = NULL;
    }

    data->init_timeout.tv_sec = fusefs_args.init_timeout;
    data->init_timeout.tv_nsec = 0;

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname, MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_lck_mtx_unlock(fdev->mtx);

    /* Send a handshake message to the daemon. */
    fuse_send_init(data, context);

    struct vfs_attr vfs_attr;
    VFSATTR_INIT(&vfs_attr);
    // Our vfs_getattr() doesn't look at most *_IS_ACTIVE()'s
    err = fuse_vfsop_getattr(mp, &vfs_attr, context);
    if (!err) {
        vfsstatfsp->f_bsize  = vfs_attr.f_bsize;
        vfsstatfsp->f_iosize = vfs_attr.f_iosize;
        vfsstatfsp->f_blocks = vfs_attr.f_blocks;
        vfsstatfsp->f_bfree  = vfs_attr.f_bfree;
        vfsstatfsp->f_bavail = vfs_attr.f_bavail;
        vfsstatfsp->f_bused  = vfs_attr.f_bused;
        vfsstatfsp->f_files  = vfs_attr.f_files;
        vfsstatfsp->f_ffree  = vfs_attr.f_ffree;
        // vfsstatfsp->f_fsid already handled above
        vfsstatfsp->f_owner = kauth_cred_getuid(data->daemoncred);
        vfsstatfsp->f_flags = vfs_flags(mp);
        // vfsstatfsp->f_fstypename already handled above
        // vfsstatfsp->f_mntonname handled elsewhere
        // vfsstatfsp->f_mntfromname already handled above
        vfsstatfsp->f_fssubtype = data->fssubtype;
    }

    if (fusefs_args.altflags & FUSE_MOPT_BLOCKSIZE) {
        vfsstatfsp->f_bsize = data->blocksize;
    } else {
        //data->blocksize = vfsstatfsp->f_bsize;
    }

    if (fusefs_args.altflags & FUSE_MOPT_IOSIZE) {
        vfsstatfsp->f_iosize = data->iosize;
    } else {
        //data->iosize = (uint32_t)vfsstatfsp->f_iosize;
        vfsstatfsp->f_iosize = data->iosize;
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_lck_mtx_lock(fdev->mtx);
        data = fdev->data; /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->dataflags &= ~FSESS_MOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_FUSE4X_ENABLE_BIGLOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_lck_mtx_unlock(fdev->mtx);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
        (void)vnode_put(fuse_rootvp);
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;
            vfs_ioattr(mp, &ioattr);
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_lck_mtx_lock(fdev->mtx);
    data = fdev->data; /* ...and again */
    if (data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_lck_mtx_unlock(fdev->mtx);
#endif

    return err;
}
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
    struct fuse_conn *fc;
    struct inode *root;
    struct fuse_mount_data d;
    struct file *file;
    struct dentry *root_dentry;
    struct fuse_req *init_req;
    int err;
    int is_bdev = sb->s_bdev != NULL;

    if (sb->s_flags & MS_MANDLOCK)
        return -EINVAL;

    if (!parse_fuse_opt((char *) data, &d, is_bdev))
        return -EINVAL;

    if (is_bdev) {
#ifdef CONFIG_BLOCK
        if (!sb_set_blocksize(sb, d.blksize))
            return -EINVAL;
#endif
    } else {
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
    }
    sb->s_magic = FUSE_SUPER_MAGIC;
    sb->s_op = &fuse_super_operations;
    sb->s_maxbytes = MAX_LFS_FILESIZE;
    sb->s_export_op = &fuse_export_operations;

    file = fget(d.fd);
    if (!file)
        return -EINVAL;

    if (file->f_op != &fuse_dev_operations)
        return -EINVAL;

    fc = new_conn(sb);
    if (!fc)
        return -ENOMEM;

    fc->flags = d.flags;
    fc->user_id = d.user_id;
    fc->group_id = d.group_id;
    fc->max_read = max_t(unsigned, 4096, d.max_read);

    /* Used by get_root_inode() */
    sb->s_fs_info = fc;

    err = -ENOMEM;
    root = get_root_inode(sb, d.rootmode);
    if (!root)
        goto err;

    root_dentry = d_alloc_root(root);
    if (!root_dentry) {
        iput(root);
        goto err;
    }

    init_req = fuse_request_alloc();
    if (!init_req)
        goto err_put_root;

    if (is_bdev) {
        fc->destroy_req = fuse_request_alloc();
        if (!fc->destroy_req)
            goto err_free_init_req;
    }

    mutex_lock(&fuse_mutex);
    err = -EINVAL;
    if (file->private_data)
        goto err_unlock;

    err = fuse_ctl_add_conn(fc);
    if (err)
        goto err_unlock;

    list_add_tail(&fc->entry, &fuse_conn_list);
    sb->s_root = root_dentry;
    fc->connected = 1;
    file->private_data = fuse_conn_get(fc);
    mutex_unlock(&fuse_mutex);
    /*
     * atomic_dec_and_test() in fput() provides the necessary
     * memory barrier for file->private_data to be visible on all
     * CPUs after this
     */
    fput(file);

    fuse_send_init(fc, init_req);

    return 0;

 err_unlock:
    mutex_unlock(&fuse_mutex);
 err_free_init_req:
    fuse_request_free(init_req);
 err_put_root:
    dput(root_dentry);
 err:
    fput(file);
    fuse_conn_put(fc);
    return err;
}