int
fuse_vncache_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp)
{
#if M_OSXFUSE_ENABLE_BIG_LOCK
    /*
     * Make sure that biglock is actually held by the thread calling us
     * before trying to unlock it. fuse_vncache_lookup is called by
     * notification handlers that do not hold biglock. Trying to unlock it
     * in this case would result in a kernel panic.
     */
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(dvp));
    bool biglock_locked = fuse_biglock_have_lock(data->biglock);

    if (biglock_locked) {
        fuse_biglock_unlock(data->biglock);
    }
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */

    int ret = cache_lookup(dvp, vpp, cnp);

#if M_OSXFUSE_ENABLE_BIG_LOCK
    if (biglock_locked) {
        fuse_biglock_lock(data->biglock);
    }
#endif

#if FUSE_TRACE_VNCACHE
    IOLog("osxfuse: cache lookup ret=%d, dvp=%p, *vpp=%p, %s\n",
          ret, dvp, *vpp, cnp->cn_nameptr);
#endif

    return ret;
}
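/*
 * The conditional drop/reacquire dance above recurs throughout these
 * excerpts. As a minimal sketch (not part of the original sources; the
 * two helper names below are hypothetical), the pattern could be
 * centralized like this:
 */
static inline bool
fuse_biglock_drop_if_held(struct fuse_data *data)
{
    /* Only drop the lock if this thread actually holds it. */
    bool held = fuse_biglock_have_lock(data->biglock);
    if (held) {
        fuse_biglock_unlock(data->biglock);
    }
    return held;
}

static inline void
fuse_biglock_reacquire(struct fuse_data *data, bool was_held)
{
    /* Restore the lock state saved by fuse_biglock_drop_if_held(). */
    if (was_held) {
        fuse_biglock_lock(data->biglock);
    }
}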
static int
fuse_sync_callback(vnode_t vp, void *cargs)
{
    int type;
    struct fuse_sync_cargs *args;
    struct fuse_vnode_data *fvdat;
    struct fuse_filehandle *fufh;
    struct fuse_data *data;
    mount_t mp;

    if (!vnode_hasdirtyblks(vp)) {
        return VNODE_RETURNED;
    }

    mp = vnode_mount(vp);

    if (fuse_isdeadfs_mp(mp)) {
        return VNODE_RETURNED_DONE;
    }

    data = fuse_get_mpdata(mp);

    if (!fuse_implemented(data, (vnode_isdir(vp)) ?
                          FSESS_NOIMPLBIT(FSYNCDIR) :
                          FSESS_NOIMPLBIT(FSYNC))) {
        return VNODE_RETURNED;
    }

    args = (struct fuse_sync_cargs *)cargs;
    fvdat = VTOFUD(vp);

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_unlock(data->biglock);
#endif
    cluster_push(vp, 0);
#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_lock(data->biglock);
#endif

    for (type = 0; type < FUFH_MAXTYPE; type++) {
        fufh = &(fvdat->fufh[type]);
        if (FUFH_IS_VALID(fufh)) {
            (void)fuse_internal_fsync_fh(vp, args->context, fufh,
                                         FUSE_OP_FOREGROUNDED);
        }
    }

    /*
     * In general:
     *
     * - can use vnode_isinuse() if need be
     * - vnode and UBC are in lock-step
     * - note that umount will call ubc_sync_range()
     */

    return VNODE_RETURNED;
}
static errno_t
fuse_vfsop_sync(mount_t mp, int waitfor, vfs_context_t context)
{
    uint64_t mntflags;
    struct fuse_sync_cargs args;
    int allerror = 0;

    fuse_trace_printf_vfsop();

    mntflags = vfs_flags(mp);

    if (fuse_isdeadfs_mp(mp)) {
        return 0;
    }

    if (vfs_isupdate(mp)) {
        return 0;
    }

    if (vfs_isrdonly(mp)) {
        return EROFS; // should panic!?
    }

    /*
     * Write back each (modified) fuse node.
     */
    args.context = context;
    args.waitfor = waitfor;
    args.error = 0;

#if M_FUSE4X_ENABLE_BIGLOCK
    struct fuse_data *data = fuse_get_mpdata(mp);
    fuse_biglock_unlock(data->biglock);
#endif
    vnode_iterate(mp, 0, fuse_sync_callback, (void *)&args);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif

    if (args.error) {
        allerror = args.error;
    }

    /*
     * For other types of stale file system information, such as:
     *
     * - fs control info
     * - quota information
     * - modified superblock
     */

    return allerror;
}
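/*
 * A minimal sketch (not in the original sources) of the vnode_iterate()
 * callback contract that fuse_sync_callback follows: returning
 * VNODE_RETURNED continues the walk over the mount's vnodes, while
 * VNODE_RETURNED_DONE stops it early. The counter argument is purely
 * illustrative.
 */
static int
example_count_callback(vnode_t vp, void *arg)
{
    int *count = (int *)arg;

    (void)vp;       /* per-vnode work would go here */
    (*count)++;

    return VNODE_RETURNED;  /* keep iterating */
}

/* Usage: int n = 0; vnode_iterate(mp, 0, example_count_callback, &n); */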
__inline__
int
fuse_vncache_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp)
{
#if M_OSXFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_OSXFUSE_ENABLE_HUGE_LOCK
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(dvp));
    fuse_biglock_unlock(data->biglock);
#endif

    int ret = cache_lookup(dvp, vpp, cnp);

#if M_OSXFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_OSXFUSE_ENABLE_HUGE_LOCK
    fuse_biglock_lock(data->biglock);
#endif

#if FUSE_TRACE_VNCACHE
    IOLog("OSXFUSE: cache lookup ret=%d, dvp=%p, *vpp=%p, %s\n",
          ret, dvp, *vpp, cnp->cn_nameptr);
#endif

    return ret;
}
void
fuse_vncache_purge(vnode_t vp)
{
#if FUSE_TRACE_VNCACHE
    IOLog("osxfuse: cache purge vp=%p\n", vp);
#endif

#if M_OSXFUSE_ENABLE_BIG_LOCK
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
    bool biglock_locked = fuse_biglock_have_lock(data->biglock);

    if (biglock_locked) {
        fuse_biglock_unlock(data->biglock);
    }
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */

    cache_purge(vp);

#if M_OSXFUSE_ENABLE_BIG_LOCK
    if (biglock_locked) {
        fuse_biglock_lock(data->biglock);
    }
#endif
}
void
fuse_vncache_enter(vnode_t dvp, vnode_t vp, struct componentname *cnp)
{
#if FUSE_TRACE_VNCACHE
    IOLog("osxfuse: cache enter dvp=%p, vp=%p, %s\n", dvp, vp,
          cnp->cn_nameptr);
#endif

#if M_OSXFUSE_ENABLE_BIG_LOCK
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(dvp));
    bool biglock_locked = fuse_biglock_have_lock(data->biglock);

    if (biglock_locked) {
        fuse_biglock_unlock(data->biglock);
    }
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */

    cache_enter(dvp, vp, cnp);

#if M_OSXFUSE_ENABLE_BIG_LOCK
    if (biglock_locked) {
        fuse_biglock_lock(data->biglock);
    }
#endif
}
static errno_t
fuse_vfsop_unmount(mount_t mp, int mntflags, vfs_context_t context)
{
    int err = 0;
    int flags = 0;

    fuse_device_t fdev;
    struct fuse_data *data;
    struct fuse_dispatcher fdi;
    vnode_t fuse_rootvp = NULLVP;

    fuse_trace_printf_vfsop();

    if (mntflags & MNT_FORCE) {
        flags |= FORCECLOSE;
    }

    data = fuse_get_mpdata(mp);
    if (!data) {
        panic("fuse4x: no mount private data in vfs_unmount");
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif

    fdev = data->fdev;

    if (fdata_dead_get(data)) {
        /*
         * If the file system daemon is dead, it's pointless to try to do
         * any unmount-time operations that go out to user space. Therefore,
         * we pretend that this is a force unmount. However, this isn't of
         * much use. That's because if any non-root vnode is in use, the
         * vflush() that the kernel does before calling our VFS_UNMOUNT will
         * fail if the original unmount wasn't forcible already. That earlier
         * vflush is called with SKIPROOT though, so it wouldn't bail out
         * on the root vnode being in use.
         *
         * If we want, we could set FORCECLOSE here so that a non-forced
         * unmount will be "upgraded" to a forced unmount if the root vnode
         * is busy (you are cd'd to the mount point, for example). It's not
         * quite pure to do that though.
         *
         * flags |= FORCECLOSE;
         * log("fuse4x: forcing unmount on a dead file system\n");
         */
    } else if (!(data->dataflags & FSESS_INITED)) {
        flags |= FORCECLOSE;
        log("fuse4x: forcing unmount on not-yet-alive file system\n");
        fdata_set_dead(data);
    }

    fuse_rootvp = data->rootvp;

    fuse_trace_printf("%s: Calling vflush(mp, fuse_rootvp, flags=0x%X);\n",
                      __FUNCTION__, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    err = vflush(mp, fuse_rootvp, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s: Done.\n", __FUNCTION__);
    if (err) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return err;
    }

    if (vnode_isinuse(fuse_rootvp, 1) && !(flags & FORCECLOSE)) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return EBUSY;
    }

    if (fdata_dead_get(data)) {
        goto alreadydead;
    }

    fdisp_init(&fdi, 0 /* no data to send along */);
    fdisp_make(&fdi, FUSE_DESTROY, mp, FUSE_ROOT_ID, context);

    fuse_trace_printf("%s: Waiting for reply from FUSE_DESTROY.\n",
                      __FUNCTION__);
    err = fdisp_wait_answ(&fdi);
    fuse_trace_printf("%s: Reply received.\n", __FUNCTION__);

    if (!err) {
        fuse_ticket_drop(fdi.tick);
    }

    /*
     * Note that dounmount() signals a VQ_UNMOUNT VFS event.
     */

    fdata_set_dead(data);

alreadydead:

    fuse_trace_printf("%s: Calling vnode_rele(fuse_rootvp);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    vnode_rele(fuse_rootvp); /* We got this reference in fuse_vfsop_mount(). */
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s: Done.\n", __FUNCTION__);

    data->rootvp = NULLVP;

    fuse_trace_printf("%s: Calling vflush(mp, NULLVP, FORCECLOSE);\n",
                      __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    (void)vflush(mp, NULLVP, FORCECLOSE);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s: Done.\n", __FUNCTION__);

    fuse_lck_mtx_lock(fdev->mtx);

    vfs_setfsprivate(mp, NULL);
    data->dataflags &= ~FSESS_MOUNTED;
    OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif

    if (!(data->dataflags & FSESS_OPENED)) {
        /* fdev->data was left for us to clean up */
        fuse_device_close_final(fdev);
        /* fdev->data is gone now */
    }

    fuse_lck_mtx_unlock(fdev->mtx);

    return 0;
}
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err = 0;
    int mntopts = 0;
    bool mounted = false;
    uint32_t max_read = ~0;
    size_t len;
    fuse_device_t fdev = NULL;
    struct fuse_data *data = NULL;
    fuse_mount_args fusefs_args;
    struct vfsstatfs *vfsstatfsp = vfs_statfs(mp);

#if M_FUSE4X_ENABLE_BIGLOCK
    lck_mtx_t *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

    err = ENOTSUP;

#if M_FUSE4X_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_FUSE4X_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (*fusefs_args.fstypename) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if (typenamelen > FUSE_FSTYPENAME_MAXLEN) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 FUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if (!*fusefs_args.fsname) {
        return EINVAL;
    }

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if ((fusefs_args.init_timeout > FUSE_MAX_INIT_TIMEOUT) ||
        (fusefs_args.init_timeout < FUSE_MIN_INIT_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) != 0) || !is_member) {
            log("fuse4x: caller is not a member of fuse4x admin group. "
                "Either add user (id=%d) to group (id=%d), "
                "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
            return EPERM;
        }
        mntopts |= FSESS_ALLOW_ROOT;
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                log("fuse4x: caller is not a member of fuse4x admin group. "
                    "Either add user (id=%d) to group (id=%d), "
                    "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                    kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t fsid;
        mount_t other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;
    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            log("fuse4x: cannot mix 'nosyncwrites' with 'noubc' or "
                "'noreadahead'\n");
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        log("fuse4x: invalid device file (number=%d)\n", fusefs_args.rdev);
        return EINVAL;
    }

    fuse_lck_mtx_lock(fdev->mtx);

    data = fdev->data;

    if (!data) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return ENXIO;
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->dataflags & FSESS_MOUNTED) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_lck_mtx_unlock(fdev->mtx);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENXIO;
        goto out;
    }

    data->dataflags |= FSESS_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("fuse4x: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) !=
        kauth_cred_getuid(data->daemoncred)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = EPERM;
        log("fuse4x: fuse daemon running by user_id=%d does not have "
            "privileges to mount on directory %s owned by user_id=%d\n",
            kauth_cred_getuid(data->daemoncred), vfsstatfsp->f_mntonname,
            kauth_cred_getuid(vfs_context_ucred(context)));
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec = fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = NULL;
    }

    data->init_timeout.tv_sec = fusefs_args.init_timeout;
    data->init_timeout.tv_nsec = 0;

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname, MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_lck_mtx_unlock(fdev->mtx);

    /* Send a handshake message to the daemon. */
    fuse_send_init(data, context);

    struct vfs_attr vfs_attr;
    VFSATTR_INIT(&vfs_attr);
    // Our vfs_getattr() doesn't look at most *_IS_ACTIVE()'s
    err = fuse_vfsop_getattr(mp, &vfs_attr, context);
    if (!err) {
        vfsstatfsp->f_bsize = vfs_attr.f_bsize;
        vfsstatfsp->f_iosize = vfs_attr.f_iosize;
        vfsstatfsp->f_blocks = vfs_attr.f_blocks;
        vfsstatfsp->f_bfree = vfs_attr.f_bfree;
        vfsstatfsp->f_bavail = vfs_attr.f_bavail;
        vfsstatfsp->f_bused = vfs_attr.f_bused;
        vfsstatfsp->f_files = vfs_attr.f_files;
        vfsstatfsp->f_ffree = vfs_attr.f_ffree;

        // vfsstatfsp->f_fsid already handled above
        vfsstatfsp->f_owner = kauth_cred_getuid(data->daemoncred);
        vfsstatfsp->f_flags = vfs_flags(mp);
        // vfsstatfsp->f_fstypename already handled above
        // vfsstatfsp->f_mntonname handled elsewhere
        // vfsstatfsp->f_mnfromname already handled above
        vfsstatfsp->f_fssubtype = data->fssubtype;
    }

    if (fusefs_args.altflags & FUSE_MOPT_BLOCKSIZE) {
        vfsstatfsp->f_bsize = data->blocksize;
    } else {
        //data->blocksize = vfsstatfsp->f_bsize;
    }

    if (fusefs_args.altflags & FUSE_MOPT_IOSIZE) {
        vfsstatfsp->f_iosize = data->iosize;
    } else {
        //data->iosize = (uint32_t)vfsstatfsp->f_iosize;
        vfsstatfsp->f_iosize = data->iosize;
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_lck_mtx_lock(fdev->mtx);
        data = fdev->data; /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->dataflags &= ~FSESS_MOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_FUSE4X_ENABLE_BIGLOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_lck_mtx_unlock(fdev->mtx);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
        (void)vnode_put(fuse_rootvp);
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_lck_mtx_lock(fdev->mtx);
    data = fdev->data; /* ...and again */
    if (data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_lck_mtx_unlock(fdev->mtx);
#endif

    return err;
}
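/*
 * fuse_round_size() is referenced above but not shown in these excerpts.
 * The following is only a sketch of the assumed clamping behavior -- the
 * real implementation may additionally round to a device-friendly
 * multiple. The name example_round_size is hypothetical:
 */
static uint32_t
example_round_size(uint32_t size, uint32_t b_min, uint32_t b_max)
{
    /* Clamp the daemon-supplied size into the supported range. */
    if (size < b_min) {
        return b_min;
    }
    if (size > b_max) {
        return b_max;
    }
    return size;
}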
errno_t
FSNodeGetOrCreateFileVNodeByID(vnode_t *vnPtr, uint32_t flags,
                               struct fuse_abi_data *feo, mount_t mp,
                               vnode_t dvp, vfs_context_t context,
                               uint32_t *oflags)
{
    int err;

    vnode_t vn = NULLVP;
    HNodeRef hn = NULL;

    struct fuse_vnode_data *fvdat = NULL;
    struct fuse_data *mntdata = NULL;
    fuse_device_t dummy_device;

    struct fuse_abi_data fa;

    enum vtype vtyp;

    fuse_abi_data_init(&fa, feo->fad_version, fuse_entry_out_get_attr(feo));

    vtyp = IFTOVT(fuse_attr_get_mode(&fa));
    if ((vtyp >= VBAD) || (vtyp == VNON)) {
        return EINVAL;
    }

    int markroot = (flags & FN_IS_ROOT) ? 1 : 0;
    uint64_t size = (flags & FN_IS_ROOT) ? 0 : fuse_attr_get_size(&fa);
    uint32_t rdev = (flags & FN_IS_ROOT) ? 0 : fuse_attr_get_rdev(&fa);
    uint64_t generation = fuse_entry_out_get_generation(feo);

    mntdata = fuse_get_mpdata(mp);
    dummy_device = mntdata->fdev;

    err = HNodeLookupCreatingIfNecessary(dummy_device,
                                         fuse_entry_out_get_nodeid(feo),
                                         0 /* fork index */, &hn, &vn);
    if ((err == 0) && (vn == NULL)) {

        struct vnode_fsparam params;

        fvdat = (struct fuse_vnode_data *)FSNodeGenericFromHNode(hn);

        if (!fvdat->fInitialised) {

            fvdat->fInitialised = true;

            /* self */
            fvdat->vp         = NULLVP; /* hold on */
            fvdat->nodeid     = fuse_entry_out_get_nodeid(feo);
            fvdat->generation = generation;

            /* parent */
            fvdat->parentvp = dvp;
            if (dvp) {
                fvdat->parent_nodeid = VTOI(dvp);
            } else {
                fvdat->parent_nodeid = 0;
            }

            /* I/O */
            {
                int k;
                for (k = 0; k < FUFH_MAXTYPE; k++) {
                    FUFH_USE_RESET(&(fvdat->fufh[k]));
                }
            }

            /* flags */
            fvdat->flag   = flags;
            fvdat->c_flag = 0;

            /* meta */

            /* XXX: truncation */
            fvdat->entry_valid.tv_sec =
                (time_t)fuse_entry_out_get_entry_valid(feo);
            fvdat->entry_valid.tv_nsec =
                fuse_entry_out_get_entry_valid_nsec(feo);

            /* XXX: truncation */
            fvdat->attr_valid.tv_sec = 0;
            fvdat->attr_valid.tv_nsec = 0;

            /* XXX: truncation */
            fvdat->modify_time.tv_sec = (time_t)fuse_attr_get_mtime(&fa);
            fvdat->modify_time.tv_nsec = fuse_attr_get_mtimensec(&fa);

            fvdat->filesize = size;
            fvdat->nlookup = 0;
            fvdat->vtype = vtyp;

            /* locking */
            fvdat->createlock = lck_mtx_alloc_init(fuse_lock_group,
                                                   fuse_lock_attr);
            fvdat->creator = current_thread();
#if M_OSXFUSE_ENABLE_TSLOCKING
            fvdat->nodelock = lck_rw_alloc_init(fuse_lock_group,
                                                fuse_lock_attr);
            fvdat->nodelockowner = NULL;
            fvdat->truncatelock = lck_rw_alloc_init(fuse_lock_group,
                                                    fuse_lock_attr);
#endif
        }

        if (err == 0) {
            params.vnfs_mp     = mp;
            params.vnfs_vtype  = vtyp;
            params.vnfs_str    = NULL;
            params.vnfs_dvp    = dvp; /* NULLVP for the root vnode */
            params.vnfs_fsnode = hn;

#if M_OSXFUSE_ENABLE_SPECFS
            if ((vtyp == VBLK) || (vtyp == VCHR)) {
                params.vnfs_vops = fuse_spec_operations;
                params.vnfs_rdev = (dev_t)rdev;
#else
            if (0) {
#endif
#if M_OSXFUSE_ENABLE_FIFOFS
            } else if (vtyp == VFIFO) {
                params.vnfs_vops = fuse_fifo_operations;
                params.vnfs_rdev = 0;
                (void)rdev;
#else
            } else if (0) {
#endif
            } else {
                params.vnfs_vops = fuse_vnode_operations;
                params.vnfs_rdev = 0;
                (void)rdev;
            }

            params.vnfs_marksystem = 0;
            params.vnfs_cnp        = NULL;
            params.vnfs_flags      = VNFS_NOCACHE | VNFS_CANTCACHE;
            params.vnfs_filesize   = size;
            params.vnfs_markroot   = markroot;

#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            err = vnode_create(VNCREATE_FLAVOR, (uint32_t)sizeof(params),
                               &params, &vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
        }

        if (err == 0) {
            if (markroot) {
                fvdat->parentvp = vn;
            } else {
                fvdat->parentvp = dvp;
            }
            if (oflags) {
                *oflags |= MAKEENTRY;
            }

            /* Need VT_OSXFUSE from xnu */
            vnode_settag(vn, VT_OTHER);

            cache_attrs(vn, fuse_entry_out, feo);

            HNodeAttachVNodeSucceeded(hn, 0 /* forkIndex */, vn);
            FUSE_OSAddAtomic(1, (SInt32 *)&fuse_vnodes_current);
        } else {
            if (HNodeAttachVNodeFailed(hn, 0 /* forkIndex */)) {
                FSNodeScrub(fvdat);
                HNodeScrubDone(hn);
            }
        }
    }

    if (err == 0) {
        if (vnode_vtype(vn) != vtyp) {
            IOLog("osxfuse: vnode changed type behind us (old=%d, new=%d)\n",
                  vnode_vtype(vn), vtyp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            fuse_internal_vnode_disappear(vn, context, REVOKE_SOFT);
            vnode_put(vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
            err = EIO;
        } else if (VTOFUD(vn)->generation != generation) {
            IOLog("osxfuse: vnode changed generation\n");
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(mntdata->biglock);
#endif
            fuse_internal_vnode_disappear(vn, context, REVOKE_SOFT);
            vnode_put(vn);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(mntdata->biglock);
#endif
            err = ESTALE;
        }
    }

    if (err == 0) {
        *vnPtr = vn;
    }

    /* assert((err == 0) == (*vnPtr != NULL)); */

    return err;
}

int
fuse_vget_i(vnode_t *vpp, uint32_t flags, struct fuse_abi_data *feo,
            struct componentname *cnp, vnode_t dvp, mount_t mp,
            vfs_context_t context)
{
    int err = 0;

    if (!feo) {
        return EINVAL;
    }

    err = FSNodeGetOrCreateFileVNodeByID(vpp, flags, feo, mp, dvp,
                                         context, NULL);
    if (err) {
        return err;
    }

    if (!fuse_isnovncache_mp(mp) && (cnp->cn_flags & MAKEENTRY)) {
        fuse_vncache_enter(dvp, *vpp, cnp);
    }

    /* found: */
    VTOFUD(*vpp)->nlookup++;

    return 0;
}
/*
 * Because of the vagaries of how a filehandle can be used, we try not to
 * be too smart in here (we try to be smart elsewhere). It is required that
 * you come in here only if you really do not have the said filehandle--else
 * we panic.
 */
int
fuse_filehandle_get(vnode_t vp, vfs_context_t context,
                    fufh_type_t fufh_type, int mode)
{
    struct fuse_dispatcher fdi;
    struct fuse_abi_data foi;
    struct fuse_abi_data foo;
    struct fuse_filehandle *fufh;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
    int err = 0;
    int oflags = 0;
    int op = FUSE_OPEN;

    fuse_trace_printf("fuse_filehandle_get(vp=%p, fufh_type=%d, mode=%x)\n",
                      vp, fufh_type, mode);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("osxfuse: filehandle_get called despite valid fufh (type=%d)",
              fufh_type);
        /* NOTREACHED */
    }

    /*
     * Note that this means we are effectively FILTERING OUT open() flags.
     */
    (void)mode;
    oflags = fuse_filehandle_xlate_to_oflags(fufh_type);

    if (vnode_isdir(vp)) {
        op = FUSE_OPENDIR;
        if (fufh_type != FUFH_RDONLY) {
            IOLog("osxfuse: non-rdonly fufh requested for directory\n");
            fufh_type = FUFH_RDONLY;
        }
    }

    if (vnode_islnk(vp) && (mode & O_SYMLINK)) {
        oflags |= O_SYMLINK;
    }

    if ((mode & O_TRUNC) && (data->dataflags & FSESS_ATOMIC_O_TRUNC)) {
        oflags |= O_TRUNC;
    }

    fdisp_init_abi(&fdi, fuse_open_in, data);
    fdisp_make_vp(&fdi, op, vp, context);

    fuse_abi_data_init(&foi, DATOI(data), fdi.indata);

    fuse_open_in_set_flags(&foi, oflags);

    FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_upcall_count);
    err = fdisp_wait_answ(&fdi);
    if (err) {
#if M_OSXFUSE_ENABLE_UNSUPPORTED
        const char *vname = vnode_getname(vp);
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */
        if (err == ENOENT) {
            /*
             * See comment in fuse_vnop_reclaim().
             */
            cache_purge(vp);
        }
#if M_OSXFUSE_ENABLE_UNSUPPORTED
        IOLog("osxfuse: filehandle_get: failed for %s "
              "(type=%d, err=%d, caller=%p)\n",
              (vname) ? vname : "?", fufh_type, err,
              __builtin_return_address(0));
        if (vname) {
            vnode_putname(vname);
        }
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */
        if (err == ENOENT) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_unlock(data->biglock);
#endif
            fuse_internal_vnode_disappear(vp, context, REVOKE_SOFT);
#if M_OSXFUSE_ENABLE_BIG_LOCK
            fuse_biglock_lock(data->biglock);
#endif
        }
        return err;
    }

    FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_current);

    fuse_abi_data_init(&foo, DATOI(data), fdi.answ);

    fufh->fh_id = fuse_open_out_get_fh(&foo);
    fufh->open_count = 1;
    fufh->open_flags = oflags;
    fufh->fuse_open_flags = fuse_open_out_get_open_flags(&foo);
    fufh->aux_count = 0;

    fuse_ticket_release(fdi.tick);

    return 0;
}
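/*
 * fuse_filehandle_xlate_to_oflags() is used above but not shown in these
 * excerpts. A minimal sketch of the assumed mapping from filehandle types
 * to open(2) flags, assuming the usual read/write/read-write filehandle
 * types; the function name is hypothetical:
 */
static int
example_xlate_to_oflags(fufh_type_t fufh_type)
{
    switch (fufh_type) {
    case FUFH_RDONLY:
        return O_RDONLY;
    case FUFH_WRONLY:
        return O_WRONLY;
    case FUFH_RDWR:
        return O_RDWR;
    default:
        return -1;  /* unknown filehandle type */
    }
}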
__private_extern__
int
fuse_internal_remove(vnode_t dvp, vnode_t vp, struct componentname *cnp,
                     enum fuse_opcode op, vfs_context_t context)
{
    struct fuse_dispatcher fdi;
    struct vnode_attr *vap = VTOVA(vp);
    int need_invalidate = 0;
    uint64_t target_nlink = 0;
    mount_t mp = vnode_mount(vp);
    int err = 0;

    fdisp_init(&fdi, cnp->cn_namelen + 1);
    fdisp_make_vp(&fdi, op, dvp, context);

    memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
    ((char *)fdi.indata)[cnp->cn_namelen] = '\0';

    if ((vap->va_nlink > 1) && vnode_isreg(vp)) {
        need_invalidate = 1;
        target_nlink = vap->va_nlink;
    }

    if (!(err = fdisp_wait_answ(&fdi))) {
        fuse_ticket_drop(fdi.tick);
    }

    fuse_invalidate_attr(dvp);
    fuse_invalidate_attr(vp);

    /*
     * XXX: M_MACFUSE_INVALIDATE_CACHED_VATTRS_UPON_UNLINK
     *
     * Consider the case where vap->va_nlink > 1 for the entity being
     * removed. In our world, other in-memory vnodes that share a link
     * count each with this one may not know right away that this one just
     * got deleted. We should let them know, say, through a vnode_iterate()
     * here and a callback that does fuse_invalidate_attr(vp) on each
     * relevant vnode.
     */
    if (need_invalidate && !err) {
        if (!vfs_busy(mp, LK_NOWAIT)) {
#if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK
            struct fuse_data *data = fuse_get_mpdata(mp);
            fuse_biglock_unlock(data->biglock);
#endif
            vnode_iterate(mp, 0, fuse_internal_remove_callback,
                          (void *)&target_nlink);
#if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK
            fuse_biglock_lock(data->biglock);
#endif
            vfs_unbusy(mp);
        } else {
            IOLog("MacFUSE: skipping link count fixup upon remove\n");
        }
    }

    return err;
}
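/*
 * fuse_internal_remove_callback() is referenced above but not shown in
 * these excerpts. A sketch of what the XXX comment above describes --
 * visit every vnode on the mount and invalidate cached attributes on
 * those sharing the removed entity's link count (reconstructed under
 * that assumption, not verbatim from the original sources):
 */
static int
example_remove_callback(vnode_t vp, void *cargs)
{
    uint64_t target_nlink = *(uint64_t *)cargs;
    struct vnode_attr *vap = VTOVA(vp);

    /* Only regular files with the matching link count are affected. */
    if ((vap->va_nlink == target_nlink) && vnode_isreg(vp)) {
        fuse_invalidate_attr(vp);
    }

    return VNODE_RETURNED;  /* continue iterating */
}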
__private_extern__
int
fuse_internal_access(vnode_t vp, int action, vfs_context_t context,
                     struct fuse_access_param *facp)
{
    int err = 0;
    int default_error = 0;
    uint32_t mask = 0;
    int dataflags;
    mount_t mp;
    struct fuse_dispatcher fdi;
    struct fuse_access_in *fai;
    struct fuse_data *data;

    fuse_trace_printf_func();

    mp = vnode_mount(vp);

    data = fuse_get_mpdata(mp);
    dataflags = data->dataflags;

    /* Allow for now; let checks be handled inline later. */
    if (fuse_isdeferpermissions_mp(mp)) {
        return 0;
    }

    if (facp->facc_flags & FACCESS_FROM_VNOP) {
        default_error = ENOTSUP;
    }

    /*
     * (action & KAUTH_VNODE_GENERIC_WRITE_BITS) on a read-only file system
     * would have been handled by higher layers.
     */
    if (!fuse_implemented(data, FSESS_NOIMPLBIT(ACCESS))) {
        return default_error;
    }

    /* Unless explicitly permitted, deny everyone except the fs owner. */
    if (!vnode_isvroot(vp) && !(facp->facc_flags & FACCESS_NOCHECKSPY)) {
        if (!(dataflags & FSESS_ALLOW_OTHER)) {
            int denied = fuse_match_cred(data->daemoncred,
                                         vfs_context_ucred(context));
            if (denied) {
                return EPERM;
            }
        }
        facp->facc_flags |= FACCESS_NOCHECKSPY;
    }

    if (!(facp->facc_flags & FACCESS_DO_ACCESS)) {
        return default_error;
    }

    if (vnode_isdir(vp)) {
        if (action & (KAUTH_VNODE_LIST_DIRECTORY |
                      KAUTH_VNODE_READ_EXTATTRIBUTES)) {
            mask |= R_OK;
        }
        if (action & (KAUTH_VNODE_ADD_FILE |
                      KAUTH_VNODE_ADD_SUBDIRECTORY |
                      KAUTH_VNODE_DELETE_CHILD)) {
            mask |= W_OK;
        }
        if (action & KAUTH_VNODE_SEARCH) {
            mask |= X_OK;
        }
    } else {
        if (action & (KAUTH_VNODE_READ_DATA |
                      KAUTH_VNODE_READ_EXTATTRIBUTES)) {
            mask |= R_OK;
        }
        if (action & (KAUTH_VNODE_WRITE_DATA |
                      KAUTH_VNODE_APPEND_DATA)) {
            mask |= W_OK;
        }
        if (action & KAUTH_VNODE_EXECUTE) {
            mask |= X_OK;
        }
    }

    if (action & (KAUTH_VNODE_WRITE_ATTRIBUTES |
                  KAUTH_VNODE_WRITE_EXTATTRIBUTES |
                  KAUTH_VNODE_WRITE_SECURITY)) {
        mask |= W_OK;
    }

    bzero(&fdi, sizeof(fdi));

    fdisp_init(&fdi, sizeof(*fai));
    fdisp_make_vp(&fdi, FUSE_ACCESS, vp, context);

    fai = fdi.indata;
    fai->mask = F_OK;
    fai->mask |= mask;

    if (!(err = fdisp_wait_answ(&fdi))) {
        fuse_ticket_drop(fdi.tick);
    }

    if (err == ENOSYS) {
        /*
         * Make sure we don't come in here again.
         */
        vfs_clearauthopaque(mp);
        fuse_clear_implemented(data, FSESS_NOIMPLBIT(ACCESS));
        err = default_error;
    }

    if (err == ENOENT) {

        const char *vname = NULL;

#if M_MACFUSE_ENABLE_UNSUPPORTED
        vname = vnode_getname(vp);
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

        IOLog("MacFUSE: disappearing vnode %p (name=%s type=%d action=%x)\n",
              vp, (vname) ? vname : "?", vnode_vtype(vp), action);

#if M_MACFUSE_ENABLE_UNSUPPORTED
        if (vname) {
            vnode_putname(vname);
        }
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

        /*
         * On 10.4, I think I can get Finder to lock because of
         * /.Trashes/<uid> unless I use REVOKE_NONE here.
         */
#if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK
        fuse_biglock_unlock(data->biglock);
#endif
        fuse_internal_vnode_disappear(vp, context, REVOKE_SOFT);
#if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK
        fuse_biglock_lock(data->biglock);
#endif
    }

    return err;
}
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err = 0;
    int mntopts = 0;
    bool mounted = false;
    uint32_t drandom = 0;
    uint32_t max_read = ~0;
    size_t len;
    fuse_device_t fdev = NULL;
    struct fuse_data *data = NULL;
    fuse_mount_args fusefs_args;
    struct vfsstatfs *vfsstatfsp = vfs_statfs(mp);
    kern_return_t kr;
    thread_t init_thread;

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_t *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

#if M_OSXFUSE_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (fusefs_args.altflags & FUSE_MOPT_FSTYPENAME) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if ((typenamelen <= 0) || (typenamelen > FUSE_FSTYPENAME_MAXLEN)) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 OSXFUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SLOW_STATFS) {
        mntopts |= FSESS_SLOW_STATFS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_BROWSE) {
        vfs_setflags(mp, MNT_DONTBROWSE);
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) == 0) && is_member) {
            mntopts |= FSESS_ALLOW_ROOT;
        } else {
            IOLog("OSXFUSE: caller not a member of OSXFUSE admin group (%d)\n",
                  fuse_admin_group);
            return EPERM;
        }
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t fsid;
        mount_t other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;
    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_LOCALCACHES) {
        mntopts |= FSESS_NO_ATTRCACHE;
        mntopts |= FSESS_NO_READAHEAD;
        mntopts |= FSESS_NO_UBC;
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        mntopts |= FSESS_LOCALVOL;
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        return EINVAL;
    }

    fuse_device_lock(fdev);

    drandom = fuse_device_get_random(fdev);
    if (fusefs_args.random != drandom) {
        fuse_device_unlock(fdev);
        IOLog("OSXFUSE: failing mount because of mismatched random\n");
        return EINVAL;
    }

    data = fuse_device_get_mpdata(fdev);

    if (!data) {
        fuse_device_unlock(fdev);
        return ENXIO;
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->mount_state != FM_NOTMOUNTED) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_device_unlock(fdev);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_device_unlock(fdev);
        err = ENXIO;
        goto out;
    }

    data->mount_state = FM_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_device_unlock(fdev);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("OSXFUSE: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) !=
        kauth_cred_getuid(data->daemoncred)) {
        fuse_device_unlock(fdev);
        err = EPERM;
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec = fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = (struct timespec *)0;
    }

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname, MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_device_unlock(fdev);

    /* Send a handshake message to the daemon. */
    kr = kernel_thread_start(fuse_internal_init, data, &init_thread);
    if (kr != KERN_SUCCESS) {
        IOLog("OSXFUSE: could not start init thread\n");
        err = ENOTCONN;
    } else {
        thread_deallocate(init_thread);
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_device_lock(fdev);
        data = fuse_device_get_mpdata(fdev); /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->mount_state = FM_NOTMOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_device_unlock(fdev);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        /*
         * Even though fuse_rootvp will not be reclaimed when calling
         * vnode_put, because we incremented its usecount by calling
         * vnode_ref, release biglock just to be safe.
         */
        fuse_biglock_unlock(biglock);
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */
        (void)vnode_put(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_lock(biglock);
#endif
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_maxreadcnt = ioattr.io_maxwritecnt = data->iosize;
            ioattr.io_segreadcnt = ioattr.io_segwritecnt =
                data->iosize / PAGE_SIZE;
            ioattr.io_maxsegreadsize = ioattr.io_maxsegwritesize =
                data->iosize;
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_device_lock(fdev);
    data = fuse_device_get_mpdata(fdev); /* ...and again */
    if (data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_device_unlock(fdev);
#endif

    return err;
}
static errno_t
fuse_vfsop_setattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t context)
{
    int error = 0;

    fuse_trace_printf_vfsop();

    kauth_cred_t cred = vfs_context_ucred(context);
    if (!fuse_vfs_context_issuser(context) &&
        (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner)) {
        return EACCES;
    }

    struct fuse_data *data = fuse_get_mpdata(mp);

    if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
        if (!fuse_implemented(data, FSESS_NOIMPLBIT(SETVOLNAME))) {
            error = ENOTSUP;
            goto out;
        }

        if (fsap->f_vol_name[0] == 0) {
            error = EINVAL;
            goto out;
        }

        size_t namelen = strlen(fsap->f_vol_name);
        if (namelen >= MAXPATHLEN) {
            error = ENAMETOOLONG;
            goto out;
        }

        vnode_t root_vp;
        error = fuse_vfsop_root(mp, &root_vp, context);
        if (error) {
            goto out;
        }

        struct fuse_dispatcher fdi;
        fdisp_init(&fdi, namelen + 1);
        fdisp_make_vp(&fdi, FUSE_SETVOLNAME, root_vp, context);
        memcpy((char *)fdi.indata, fsap->f_vol_name, namelen);
        ((char *)fdi.indata)[namelen] = '\0';

        error = fdisp_wait_answ(&fdi);
        if (!error) {
            fuse_ticket_release(fdi.tick);
        }

#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(data->biglock);
#endif
        (void)vnode_put(root_vp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_lock(data->biglock);
#endif

        if (error) {
            if (error == ENOSYS) {
                error = ENOTSUP;
                fuse_clear_implemented(data, FSESS_NOIMPLBIT(SETVOLNAME));
            }
            goto out;
        }

        copystr(fsap->f_vol_name, data->volname, MAXPATHLEN - 1, &namelen);
        bzero(data->volname + namelen, MAXPATHLEN - namelen);

        VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
    }

out:
    return error;
}
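/*
 * The dispatcher pattern above -- fdisp_init() sizes the request,
 * fdisp_make_vp() binds it to an opcode and vnode, fdisp_wait_answ()
 * blocks for the daemon's reply, and the ticket is released on success
 * -- recurs throughout these excerpts (see also fuse_internal_remove and
 * fuse_filehandle_get). A distilled sketch; the function name and its
 * generic payload parameters are hypothetical:
 */
static int
example_dispatch(vnode_t vp, enum fuse_opcode op, const void *payload,
                 size_t len, vfs_context_t context)
{
    struct fuse_dispatcher fdi;
    int err;

    fdisp_init(&fdi, len);                /* allocate an in-buffer of len bytes */
    fdisp_make_vp(&fdi, op, vp, context); /* address it to op on vp's nodeid */

    if (len) {
        memcpy(fdi.indata, payload, len); /* copy the request body, if any */
    }

    err = fdisp_wait_answ(&fdi);          /* round-trip to the daemon */
    if (!err) {
        fuse_ticket_release(fdi.tick);    /* reply consumed; release ticket */
    }
    return err;
}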