static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err = 0;
    int mntopts = 0;
    bool mounted = false;
    uint32_t max_read = ~0;
    size_t len;
    fuse_device_t fdev = NULL;
    struct fuse_data *data = NULL;
    fuse_mount_args fusefs_args;
    struct vfsstatfs *vfsstatfsp = vfs_statfs(mp);
#if M_FUSE4X_ENABLE_BIGLOCK
    lck_mtx_t *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

    err = ENOTSUP;

#if M_FUSE4X_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_FUSE4X_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (*fusefs_args.fstypename) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if (typenamelen > FUSE_FSTYPENAME_MAXLEN) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 FUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if (!*fusefs_args.fsname)
        return EINVAL;

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if ((fusefs_args.init_timeout > FUSE_MAX_INIT_TIMEOUT) ||
        (fusefs_args.init_timeout < FUSE_MIN_INIT_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) != 0) || !is_member) {
            log("fuse4x: caller is not a member of fuse4x admin group. "
                "Either add user (id=%d) to group (id=%d), "
                "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
            return EPERM;
        }
        mntopts |= FSESS_ALLOW_ROOT;
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                log("fuse4x: caller is not a member of fuse4x admin group. "
                    "Either add user (id=%d) to group (id=%d), "
                    "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                    kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;
    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            log("fuse4x: cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'\n");
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        log("fuse4x: invalid device file (number=%d)\n", fusefs_args.rdev);
        return EINVAL;
    }

    fuse_lck_mtx_lock(fdev->mtx);

    data = fdev->data;

    if (!data) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return ENXIO;
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->dataflags & FSESS_MOUNTED) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_lck_mtx_unlock(fdev->mtx);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENXIO;
        goto out;
    }

    data->dataflags |= FSESS_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("fuse4x: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = EPERM;
        log("fuse4x: fuse daemon running as user_id=%d does not have privileges to mount on directory %s owned by user_id=%d\n",
            kauth_cred_getuid(data->daemoncred), vfsstatfsp->f_mntonname,
            kauth_cred_getuid(vfs_context_ucred(context)));
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec  = fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = NULL;
    }

    data->init_timeout.tv_sec  = fusefs_args.init_timeout;
    data->init_timeout.tv_nsec = 0;

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname, MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_lck_mtx_unlock(fdev->mtx);

    /* Send a handshake message to the daemon. */
    fuse_send_init(data, context);

    struct vfs_attr vfs_attr;
    VFSATTR_INIT(&vfs_attr);
    // Our vfs_getattr() doesn't look at most *_IS_ACTIVE()'s
    err = fuse_vfsop_getattr(mp, &vfs_attr, context);
    if (!err) {
        vfsstatfsp->f_bsize  = vfs_attr.f_bsize;
        vfsstatfsp->f_iosize = vfs_attr.f_iosize;
        vfsstatfsp->f_blocks = vfs_attr.f_blocks;
        vfsstatfsp->f_bfree  = vfs_attr.f_bfree;
        vfsstatfsp->f_bavail = vfs_attr.f_bavail;
        vfsstatfsp->f_bused  = vfs_attr.f_bused;
        vfsstatfsp->f_files  = vfs_attr.f_files;
        vfsstatfsp->f_ffree  = vfs_attr.f_ffree;
        // vfsstatfsp->f_fsid already handled above
        vfsstatfsp->f_owner = kauth_cred_getuid(data->daemoncred);
        vfsstatfsp->f_flags = vfs_flags(mp);
        // vfsstatfsp->f_fstypename already handled above
        // vfsstatfsp->f_mntonname handled elsewhere
        // vfsstatfsp->f_mntfromname already handled above
        vfsstatfsp->f_fssubtype = data->fssubtype;
    }

    if (fusefs_args.altflags & FUSE_MOPT_BLOCKSIZE) {
        vfsstatfsp->f_bsize = data->blocksize;
    } else {
        //data->blocksize = vfsstatfsp->f_bsize;
    }

    if (fusefs_args.altflags & FUSE_MOPT_IOSIZE) {
        vfsstatfsp->f_iosize = data->iosize;
    } else {
        //data->iosize = (uint32_t)vfsstatfsp->f_iosize;
        vfsstatfsp->f_iosize = data->iosize;
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_lck_mtx_lock(fdev->mtx);
        data = fdev->data; /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->dataflags &= ~FSESS_MOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_FUSE4X_ENABLE_BIGLOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_lck_mtx_unlock(fdev->mtx);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
        (void)vnode_put(fuse_rootvp);
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_lck_mtx_lock(fdev->mtx);
    data = fdev->data; /* ...and again */
    if (data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_lck_mtx_unlock(fdev->mtx);
#endif

    return err;
}
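/*
 * Illustration only (not part of the fuse4x sources): a minimal sketch of the
 * user-space half of the handshake that fuse_vfsop_mount() above expects.  The
 * mounter opens a /dev/fuseN device, fills in a fuse_mount_args with the fields
 * the kernel reads above, and passes its address as the mount(2) data pointer,
 * which the kernel copyin()s.  The "fuse4x" vfc_name, the device path, the
 * option/timeout values, and the exact encoding of rdev are assumptions;
 * fuse_mount_args and the FUSE_MOPT_* bits come from the fuse4x user/kernel
 * shared headers, which are not shown here.
 */
#if 0   /* sketch, not compiled with the kext */
#include <fcntl.h>
#include <string.h>
#include <sys/mount.h>
#include <sys/stat.h>

static int
mount_fuse4x_sketch(const char *mntpath)
{
    struct fuse_mount_args args;
    struct stat st;
    int fd;

    fd = open("/dev/fuse0", O_RDWR);   /* device the daemon will serve */
    if (fd < 0 || fstat(fd, &st) < 0) {
        return -1;
    }

    memset(&args, 0, sizeof(args));
    args.rdev = st.st_rdev;            /* identifies the opened device; exact encoding per fuse4x headers */
    strlcpy(args.fsname, "daemon@example", sizeof(args.fsname));
    strlcpy(args.volname, "Example Volume", sizeof(args.volname));
    args.altflags = FUSE_MOPT_LOCALVOL | FUSE_MOPT_DEFAULT_PERMISSIONS;
    args.daemon_timeout = 60;          /* must satisfy the min/max check above */
    args.init_timeout = 10;            /* likewise */
    args.blocksize = 4096;             /* rounded by fuse_round_size() above */
    args.iosize = 65536;

    /* The kernel side copyin()s this struct from udata. */
    return mount("fuse4x", mntpath, MNT_NOSUID | MNT_NODEV, &args);
}
#endif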
static int
zfs_vfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data,
              vfs_context_t context)
{
    char   *osname = NULL;
    size_t  osnamelen = 0;
    int     error = 0;
    int     canwrite;

    /*
     * Get the objset name (the "special" mount argument).
     * The filesystem that we mount as root is defined in the
     * "zfs-bootfs" property.
     */
    if (data) {
        user_addr_t fspec = USER_ADDR_NULL;
#ifndef __APPLE__
        if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
                                   DDI_PROP_DONTPASS, "zfs-bootfs",
                                   &zfs_bootpath) != DDI_SUCCESS)
            return (EIO);

        error = parse_bootpath(zfs_bootpath, rootfs.bo_name);
        ddi_prop_free(zfs_bootpath);
#endif
        osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

        if (vfs_context_is64bit(context)) {
            if ( (error = copyin(data, (caddr_t)&fspec, sizeof(fspec))) )
                goto out;
        } else {
#ifdef ZFS_LEOPARD_ONLY
            char *tmp;
#else
            user32_addr_t tmp;
#endif
            if ( (error = copyin(data, (caddr_t)&tmp, sizeof(tmp))) )
                goto out;
            /* munge into LP64 addr */
            fspec = CAST_USER_ADDR_T(tmp);
        }
        if ( (error = copyinstr(fspec, osname, MAXPATHLEN, &osnamelen)) )
            goto out;
    }

#if 0
    if (mvp->v_type != VDIR)
        return (ENOTDIR);

    mutex_enter(&mvp->v_lock);
    if ((uap->flags & MS_REMOUNT) == 0 &&
        (uap->flags & MS_OVERLAY) == 0 &&
        (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
        mutex_exit(&mvp->v_lock);
        return (EBUSY);
    }
    mutex_exit(&mvp->v_lock);

    /*
     * ZFS does not support passing unparsed data in via MS_DATA.
     * Users should use the MS_OPTIONSTR interface; this means
     * that all option parsing is already done and the options struct
     * can be interrogated.
     */
    if ((uap->flags & MS_DATA) && uap->datalen > 0)
        return (EINVAL);

    /*
     * Get the objset name (the "special" mount argument).
     */
    if (error = pn_get(uap->spec, fromspace, &spn))
        return (error);

    osname = spn.pn_path;
#endif

    /*
     * Check for mount privilege?
     *
     * If we don't have privilege then see if
     * we have local permission to allow it
     */
#ifndef __APPLE__
    error = secpolicy_fs_mount(cr, mvp, vfsp);
    if (error) {
        error = dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr);
        if (error == 0) {
            vattr_t vattr;

            /*
             * Make sure user is the owner of the mount point
             * or has sufficient privileges.
             */
            vattr.va_mask = AT_UID;

            if (error = VOP_GETATTR(mvp, &vattr, 0, cr)) {
                goto out;
            }

            if (error = secpolicy_vnode_owner(cr, vattr.va_uid)) {
                goto out;
            }

            if (error = VOP_ACCESS(mvp, VWRITE, 0, cr)) {
                goto out;
            }

            secpolicy_fs_mount_clearopts(cr, vfsp);
        } else {
            goto out;
        }
    }
#endif

    error = zfs_domount(mp, 0, osname, context);
    if (error)
        printf("zfs_vfs_mount: error %d\n", error);
    if (error == 0) {
        zfsvfs_t *zfsvfs = NULL;

        /* Make the Finder treat sub file systems just like a folder */
        if (strpbrk(osname, "/"))
            vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DONTBROWSE));

        /* Indicate to VFS that we support ACLs. */
        vfs_setextendedsecurity(mp);

        /* Advisory locking should be handled at the VFS layer */
        vfs_setlocklocal(mp);

        /*
         * Mac OS X needs a file system modify time
         *
         * We use the mtime of the "com.apple.system.mtime"
         * extended attribute, which is associated with the
         * file system root directory.
         *
         * Here we need to take a ref on z_mtime_vp to keep it around.
         * If the attribute isn't there, attempt to create it.
         */
        zfsvfs = vfs_fsprivate(mp);
        if (zfsvfs->z_mtime_vp == NULL) {
            struct vnode *rvp;
            struct vnode *xdvp = NULLVP;
            struct vnode *xvp = NULLVP;
            znode_t *rootzp;
            timestruc_t modify_time;
            cred_t *cr;
            timestruc_t now;
            int flag;
            int result;

            if (zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp) != 0) {
                goto out;
            }
            rvp = ZTOV(rootzp);
            cr = (cred_t *)vfs_context_ucred(context);

            /* Grab the hidden attribute directory vnode. */
            result = zfs_get_xattrdir(rootzp, &xdvp, cr, CREATE_XATTR_DIR);
            vnode_put(rvp);  /* all done with root vnode */
            rvp = NULL;
            if (result) {
                goto out;
            }

            /*
             * HACK - workaround missing vnode_setnoflush() KPI...
             *
             * We tag zfsvfs so that zfs_attach_vnode() can then set
             * vnfs_marksystem when the vnode gets created.
             */
            zfsvfs->z_last_unmount_time = 0xBADC0DE;
            zfsvfs->z_last_mtime_synced = VTOZ(xdvp)->z_id;
            flag = vfs_isrdonly(mp) ? 0 : ZEXISTS;

            /* Lookup or create the named attribute. */
            if ( zfs_obtain_xattr(VTOZ(xdvp), ZFS_MTIME_XATTR,
                                  S_IRUSR | S_IWUSR, cr, &xvp, flag) ) {
                zfsvfs->z_last_unmount_time = 0;
                zfsvfs->z_last_mtime_synced = 0;
                vnode_put(xdvp);
                goto out;
            }
            gethrestime(&now);
            ZFS_TIME_ENCODE(&now, VTOZ(xvp)->z_phys->zp_mtime);

            vnode_put(xdvp);
            vnode_ref(xvp);

            zfsvfs->z_mtime_vp = xvp;
            ZFS_TIME_DECODE(&modify_time, VTOZ(xvp)->z_phys->zp_mtime);
            zfsvfs->z_last_unmount_time = modify_time.tv_sec;
            zfsvfs->z_last_mtime_synced = modify_time.tv_sec;

            /*
             * Keep this referenced vnode from impeding an unmount.
             *
             * XXX vnode_setnoflush() is MIA from KPI (see workaround above).
             */
#if 0
            vnode_setnoflush(xvp);
#endif
            vnode_put(xvp);
        }
    }
out:
    if (osname) {
        kmem_free(osname, MAXPATHLEN);
    }
    return (error);
}
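/*
 * Illustration only (not part of the ZFS sources): the user-space shape of the
 * copyin dance at the top of zfs_vfs_mount().  mount(2)'s data argument points
 * at a pointer-sized slot holding the address of the dataset name; the kernel
 * first copyin()s that slot (4 or 8 bytes depending on the caller's ABI) and
 * then copyinstr()s the name itself.  The "zfs" vfc_name and the sample names
 * are assumptions.
 */
#include <sys/mount.h>

static int
mount_zfs_dataset_sketch(const char *dataset, const char *mntpath)
{
    const char *spec = dataset;   /* the slot whose address becomes 'data' above */

    return mount("zfs", mntpath, 0, &spec);
}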
/*proto*/
int
devfs_mount(struct mount *mp, __unused vnode_t devvp, __unused user_addr_t data,
            vfs_context_t ctx)
{
    struct devfsmount *devfs_mp_p;  /* devfs specific mount info */
    int error;

    /*-
     *  If they just want to update, we don't need to do anything.
     */
    if (mp->mnt_flag & MNT_UPDATE) {
        return 0;
    }

    /* Advisory locking should be handled at the VFS layer */
    vfs_setlocklocal(mp);

    /*-
     *  Well, it's not an update, it's a real mount request.
     *  Time to get dirty.
     *  HERE we should check to see if we are already mounted here.
     */

    MALLOC(devfs_mp_p, struct devfsmount *, sizeof(struct devfsmount),
           M_DEVFSMNT, M_WAITOK);
    if (devfs_mp_p == NULL)
        return (ENOMEM);
    bzero(devfs_mp_p, sizeof(*devfs_mp_p));
    devfs_mp_p->mount = mp;

    /*-
     *  Fill out some fields
     */
    mp->mnt_data = (qaddr_t)devfs_mp_p;
    mp->mnt_vfsstat.f_fsid.val[0] = (int32_t)(uintptr_t)devfs_mp_p;
    mp->mnt_vfsstat.f_fsid.val[1] = vfs_typenum(mp);
    mp->mnt_flag |= MNT_LOCAL;

    DEVFS_LOCK();
    error = dev_dup_plane(devfs_mp_p);
    DEVFS_UNLOCK();

    if (error) {
        mp->mnt_data = (qaddr_t)0;
        FREE((caddr_t)devfs_mp_p, M_DEVFSMNT);
        return (error);
    } else
        DEVFS_INCR_MOUNTS();

    /*-
     *  Copy in the name of the directory the filesystem
     *  is to be mounted on.
     *  And we clear the remainder of the character strings
     *  to be tidy.
     */

    bzero(mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN);
    bcopy("devfs", mp->mnt_vfsstat.f_mntfromname, 5);
    (void)devfs_statfs(mp, &mp->mnt_vfsstat, ctx);

    return 0;
}
/*
 * Mount null layer
 */
static int
nullfs_mount(struct mount *mp, __unused vnode_t devvp, user_addr_t user_data,
             vfs_context_t ctx)
{
    int error                 = 0;
    struct vnode *lowerrootvp = NULL, *vp = NULL;
    struct vfsstatfs *sp      = NULL;
    struct null_mount *xmp    = NULL;
    char data[MAXPATHLEN];
    size_t count;
    struct vfs_attr vfa;
    /* set defaults (arbitrary since this file system is readonly) */
    uint32_t bsize  = BLKDEV_IOSIZE;
    size_t iosize   = BLKDEV_IOSIZE;
    uint64_t blocks = 4711 * 4711;
    uint64_t bfree  = 0;
    uint64_t bavail = 0;
    uint64_t bused  = 4711;
    uint64_t files  = 4711;
    uint64_t ffree  = 0;
    kauth_cred_t cred = vfs_context_ucred(ctx);

    NULLFSDEBUG("nullfs_mount(mp = %p) %llx\n", (void *)mp, vfs_flags(mp));

    if (vfs_flags(mp) & MNT_ROOTFS)
        return (EOPNOTSUPP);

    /*
     * Update is a no-op
     */
    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    /* check entitlement */
    if (!IOTaskHasEntitlement(current_task(), NULLFS_ENTITLEMENT)) {
        return EPERM;
    }

    /*
     * Get argument
     */
    error = copyinstr(user_data, data, MAXPATHLEN - 1, &count);
    if (error) {
        NULLFSDEBUG("nullfs: error copying data from user %d\n", error);
        goto error;
    }

    /* This could happen if the system is configured for 32 bit inodes instead of
     * 64 bit */
    if (count > MAX_MNT_FROM_LENGTH) {
        error = EINVAL;
        NULLFSDEBUG("nullfs: path to translocate too large for this system %d vs %d\n",
                    count, MAX_MNT_FROM_LENGTH);
        goto error;
    }

    error = vnode_lookup(data, 0, &lowerrootvp, ctx);
    if (error) {
        NULLFSDEBUG("lookup %s -> %d\n", data, error);
        goto error;
    }

    /* lowerrootvp has an iocount after vnode_lookup, drop that for a usecount.
       Keep this to signal what we want to keep around the thing we are mirroring.
       Drop it in unmount. */
    error = vnode_ref(lowerrootvp);
    vnode_put(lowerrootvp);
    if (error) {
        // If vnode_ref failed, then null it out so it can't be used anymore in cleanup.
        lowerrootvp = NULL;
        goto error;
    }

    NULLFSDEBUG("mount %s\n", data);

    MALLOC(xmp, struct null_mount *, sizeof(*xmp), M_TEMP, M_WAITOK | M_ZERO);
    if (xmp == NULL) {
        error = ENOMEM;
        goto error;
    }

    /*
     * Save reference to underlying FS
     */
    xmp->nullm_lowerrootvp  = lowerrootvp;
    xmp->nullm_lowerrootvid = vnode_vid(lowerrootvp);

    error = null_getnewvnode(mp, NULL, NULL, &vp, NULL, 1);
    if (error) {
        goto error;
    }

    /* vp has an iocount on it from vnode_create. drop that for a usecount. This
     * is our root vnode so we drop the ref in unmount
     *
     * Assuming for now that because we created this vnode and we aren't finished
     * mounting we can get a ref */
    vnode_ref(vp);
    vnode_put(vp);

    error = nullfs_init_lck(&xmp->nullm_lock);
    if (error) {
        goto error;
    }

    xmp->nullm_rootvp = vp;

    /* read the flags the user set, but then ignore some of them, we will only
       allow them if they are set on the lower file system */
    uint64_t flags      = vfs_flags(mp) & (~(MNT_IGNORE_OWNERSHIP | MNT_LOCAL));
    uint64_t lowerflags = vfs_flags(vnode_mount(lowerrootvp)) &
                          (MNT_LOCAL | MNT_QUARANTINE | MNT_IGNORE_OWNERSHIP | MNT_NOEXEC);

    if (lowerflags) {
        flags |= lowerflags;
    }

    /* force these flags */
    flags |= (MNT_DONTBROWSE | MNT_MULTILABEL | MNT_NOSUID | MNT_RDONLY);
    vfs_setflags(mp, flags);

    vfs_setfsprivate(mp, xmp);
    vfs_getnewfsid(mp);
    vfs_setlocklocal(mp);

    /* fill in the stat block */
    sp = vfs_statfs(mp);
    strlcpy(sp->f_mntfromname, data, MAX_MNT_FROM_LENGTH);

    sp->f_flags = flags;

    xmp->nullm_flags = NULLM_CASEINSENSITIVE; /* default to case insensitive */

    error = nullfs_vfs_getlowerattr(vnode_mount(lowerrootvp), &vfa, ctx);
    if (error == 0) {
        if (VFSATTR_IS_SUPPORTED(&vfa, f_bsize)) {
            bsize = vfa.f_bsize;
        }
        if (VFSATTR_IS_SUPPORTED(&vfa, f_iosize)) {
            iosize = vfa.f_iosize;
        }
        if (VFSATTR_IS_SUPPORTED(&vfa, f_blocks)) {
            blocks = vfa.f_blocks;
        }
        if (VFSATTR_IS_SUPPORTED(&vfa, f_bfree)) {
            bfree = vfa.f_bfree;
        }
        if (VFSATTR_IS_SUPPORTED(&vfa, f_bavail)) {
            bavail = vfa.f_bavail;
        }
        if (VFSATTR_IS_SUPPORTED(&vfa, f_bused)) {
            bused = vfa.f_bused;
        }
        if (VFSATTR_IS_SUPPORTED(&vfa, f_files)) {
            files = vfa.f_files;
        }
        if (VFSATTR_IS_SUPPORTED(&vfa, f_ffree)) {
            ffree = vfa.f_ffree;
        }
        if (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) {
            if ((vfa.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] &
                 (VOL_CAP_FMT_CASE_SENSITIVE)) &&
                (vfa.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] &
                 (VOL_CAP_FMT_CASE_SENSITIVE))) {
                xmp->nullm_flags &= ~NULLM_CASEINSENSITIVE;
            }
        }
    } else {
        goto error;
    }

    sp->f_bsize  = bsize;
    sp->f_iosize = iosize;
    sp->f_blocks = blocks;
    sp->f_bfree  = bfree;
    sp->f_bavail = bavail;
    sp->f_bused  = bused;
    sp->f_files  = files;
    sp->f_ffree  = ffree;

    /* Associate the mac label information from the mirrored filesystem with the
     * mirror */
    MAC_PERFORM(mount_label_associate, cred, vnode_mount(lowerrootvp), vfs_mntlabel(mp));

    NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n", sp->f_mntfromname,
                sp->f_mntonname);
    return (0);

error:
    if (xmp) {
        FREE(xmp, M_TEMP);
    }
    if (lowerrootvp) {
        vnode_getwithref(lowerrootvp);
        vnode_rele(lowerrootvp);
        vnode_put(lowerrootvp);
    }
    if (vp) {
        /* we made the root vnode but the mount failed, so clean it up */
        vnode_getwithref(vp);
        vnode_rele(vp);
        /* give vp back */
        vnode_recycle(vp);
        vnode_put(vp);
    }
    return error;
}
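/*
 * Illustration only (not part of the xnu sources): nullfs_mount() above
 * copyinstr()s the mount data directly as a path, so a user-space caller
 * (which must also carry NULLFS_ENTITLEMENT) passes the lower directory's
 * path itself as the data argument.  The "nullfs" vfc_name and the flags
 * chosen here are assumptions; the kernel forces MNT_RDONLY and
 * MNT_DONTBROWSE regardless of what the caller asks for.
 */
#include <sys/mount.h>

static int
mount_nullfs_sketch(const char *lowerdir, const char *mntpath)
{
    return mount("nullfs", mntpath, MNT_RDONLY | MNT_DONTBROWSE, (void *)lowerdir);
}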
static int
vfs_mount_9p(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
#pragma unused(devvp)
    struct sockaddr *addr, *authaddr;
    struct vfsstatfs *sp;
    char authkey[DESKEYLEN+1];
    kauth_cred_t cred;
    user_args_9p args;
    mount_9p *nmp;
    size_t size;
    fid_9p fid;
    qid_9p qid;
    char *vers;
    int e;

    TRACE();
    nmp = NULL;
    addr = NULL;
    authaddr = NULL;
    fid = NOFID;

    if (vfs_isupdate(mp))
        return ENOTSUP;

    if (vfs_context_is64bit(ctx)) {
        if ((e=copyin(data, &args, sizeof(args))))
            goto error;
    } else {
        args_9p args32;
        if ((e=copyin(data, &args32, sizeof(args32))))
            goto error;
        args.spec        = CAST_USER_ADDR_T(args32.spec);
        args.addr        = CAST_USER_ADDR_T(args32.addr);
        args.addrlen     = args32.addrlen;
        args.authaddr    = CAST_USER_ADDR_T(args32.authaddr);
        args.authaddrlen = args32.authaddrlen;
        args.volume      = CAST_USER_ADDR_T(args32.volume);
        args.uname       = CAST_USER_ADDR_T(args32.uname);
        args.aname       = CAST_USER_ADDR_T(args32.aname);
        args.authkey     = CAST_USER_ADDR_T(args32.authkey);
        args.flags       = args32.flags;
    }

    e = ENOMEM;
    nmp = malloc_9p(sizeof(*nmp));
    if (nmp == NULL)
        return e;

    nmp->mp = mp;
    TAILQ_INIT(&nmp->req);
    nmp->lck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
    nmp->reqlck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
    nmp->nodelck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
    nmp->node = hashinit(desiredvnodes, M_TEMP, &nmp->nodelen);
    if (nmp->lck==NULL || nmp->reqlck==NULL || nmp->nodelck==NULL || nmp->node==NULL)
        goto error;

    if ((e=nameget_9p(args.volume, &nmp->volume)))
        goto error;
    if ((e=nameget_9p(args.uname, &nmp->uname)))
        goto error;
    if ((e=nameget_9p(args.aname, &nmp->aname)))
        goto error;

    cred = vfs_context_ucred(ctx);
    if (IS_VALID_CRED(cred)) {
        nmp->uid = kauth_cred_getuid(cred);
        nmp->gid = kauth_cred_getgid(cred);
    } else {
        nmp->uid = KAUTH_UID_NONE;
        nmp->gid = KAUTH_GID_NONE;
    }

    vfs_getnewfsid(mp);
    vfs_setfsprivate(mp, nmp);

    nmp->flags = args.flags;
    if ((e=addrget_9p(args.addr, args.addrlen, &addr)))
        goto error;
    if ((e=connect_9p(nmp, addr)))
        goto error;

    vers = VERSION9P;
    if (ISSET(nmp->flags, FLAG_DOTU))
        vers = VERSION9PDOTU;
    if ((e=version_9p(nmp, vers, &nmp->version)))
        goto error;
    if (ISSET(nmp->flags, FLAG_DOTU) && strcmp(VERSION9PDOTU, nmp->version)==0)
        SET(nmp->flags, F_DOTU);

    nmp->afid = NOFID;
    if (args.authaddr && args.authaddrlen && args.authkey) {
        if ((e=copyin(args.authkey, authkey, DESKEYLEN)))
            goto error;
        if ((e=addrget_9p(args.authaddr, args.authaddrlen, &authaddr)))
            goto error;
        if ((e=auth_9p(nmp, nmp->uname, nmp->aname, nmp->uid, &nmp->afid, &qid)))
            goto error;
        if (nmp->afid!=NOFID &&
            (e=authp9any_9p(nmp, nmp->afid, authaddr, nmp->uname, authkey)))
            goto error;
        bzero(authkey, DESKEYLEN);
    }

    if ((e=attach_9p(nmp, nmp->uname, nmp->aname, nmp->afid, nmp->uid, &fid, &qid)))
        goto error;

    if ((e=nget_9p(nmp, fid, qid, NULL, &nmp->root, NULL, ctx)))
        goto error;

    nunlock_9p(NTO9P(nmp->root));
    e = vnode_ref(nmp->root);
    vnode_put(nmp->root);
    if (e)
        goto error;

    vfs_setauthopaque(mp);
    vfs_clearauthopaqueaccess(mp);
    vfs_setlocklocal(mp);

    // init stats
    sp = vfs_statfs(nmp->mp);
    copyinstr(args.spec, sp->f_mntfromname, MNAMELEN-1, &size);
    bzero(sp->f_mntfromname+size, MNAMELEN-size);
    sp->f_bsize = PAGE_SIZE;
    sp->f_iosize = nmp->msize-IOHDRSZ;
    sp->f_blocks = sp->f_bfree = sp->f_bavail = sp->f_bused = 0;
    sp->f_files = 65535;
    sp->f_ffree = sp->f_files-2;
    sp->f_flags = vfs_flags(mp);

    free_9p(addr);
    free_9p(authaddr);
    return 0;

error:
    bzero(authkey, DESKEYLEN);
    free_9p(addr);
    free_9p(authaddr);
    if (nmp->so) {
        clunk_9p(nmp, fid);
        disconnect_9p(nmp);
    }
    freemount_9p(nmp);
    vfs_setfsprivate(mp, NULL);
    return e;
}
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err = 0;
    int mntopts = 0;
    bool mounted = false;

    uint32_t drandom = 0;
    uint32_t max_read = ~0;

    size_t len;

    fuse_device_t fdev = NULL;
    struct fuse_data *data = NULL;
    fuse_mount_args fusefs_args;
    struct vfsstatfs *vfsstatfsp = vfs_statfs(mp);

    kern_return_t kr;
    thread_t init_thread;

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_t *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

#if M_OSXFUSE_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (fusefs_args.altflags & FUSE_MOPT_FSTYPENAME) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if ((typenamelen <= 0) || (typenamelen > FUSE_FSTYPENAME_MAXLEN)) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 OSXFUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SLOW_STATFS) {
        mntopts |= FSESS_SLOW_STATFS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_BROWSE) {
        vfs_setflags(mp, MNT_DONTBROWSE);
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) == 0) && is_member) {
            mntopts |= FSESS_ALLOW_ROOT;
        } else {
            IOLog("OSXFUSE: caller not a member of OSXFUSE admin group (%d)\n",
                  fuse_admin_group);
            return EPERM;
        }
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;
    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_LOCALCACHES) {
        mntopts |= FSESS_NO_ATTRCACHE;
        mntopts |= FSESS_NO_READAHEAD;
        mntopts |= FSESS_NO_UBC;
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        mntopts |= FSESS_LOCALVOL;
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        return EINVAL;
    }

    fuse_device_lock(fdev);

    drandom = fuse_device_get_random(fdev);
    if (fusefs_args.random != drandom) {
        fuse_device_unlock(fdev);
        IOLog("OSXFUSE: failing mount because of mismatched random\n");
        return EINVAL;
    }

    data = fuse_device_get_mpdata(fdev);

    if (!data) {
        fuse_device_unlock(fdev);
        return ENXIO;
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->mount_state != FM_NOTMOUNTED) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_device_unlock(fdev);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_device_unlock(fdev);
        err = ENXIO;
        goto out;
    }

    data->mount_state = FM_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_device_unlock(fdev);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("OSXFUSE: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_device_unlock(fdev);
        err = EPERM;
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec  = fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = (struct timespec *)0;
    }

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname, MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_device_unlock(fdev);

    /* Send a handshake message to the daemon. */
    kr = kernel_thread_start(fuse_internal_init, data, &init_thread);
    if (kr != KERN_SUCCESS) {
        IOLog("OSXFUSE: could not start init thread\n");
        err = ENOTCONN;
    } else {
        thread_deallocate(init_thread);
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_device_lock(fdev);
        data = fuse_device_get_mpdata(fdev); /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->mount_state = FM_NOTMOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_device_unlock(fdev);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        /*
         * Even though fuse_rootvp will not be reclaimed when calling vnode_put,
         * because we incremented its usecount by calling vnode_ref, release
         * biglock just to be safe.
         */
        fuse_biglock_unlock(biglock);
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */
        (void)vnode_put(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_lock(biglock);
#endif
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_maxreadcnt = ioattr.io_maxwritecnt = data->iosize;
            ioattr.io_segreadcnt = ioattr.io_segwritecnt = data->iosize / PAGE_SIZE;
            ioattr.io_maxsegreadsize = ioattr.io_maxsegwritesize = data->iosize;
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_device_lock(fdev);
    data = fuse_device_get_mpdata(fdev); /* ...and again */
    if (data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_device_unlock(fdev);
#endif

    return err;
}
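/*
 * Illustration only (not part of the OSXFUSE sources): the detached-thread
 * pattern used for the handshake above.  kernel_thread_start() hands back a
 * referenced thread_t; since the mount path does not keep the thread around,
 * the reference is dropped right away with thread_deallocate().  The worker
 * function and its body are hypothetical; only the start/deallocate pairing
 * mirrors the code above.
 */
#include <kern/thread.h>
#include <sys/errno.h>

static void
example_init_worker(void *parameter, __unused wait_result_t wr)
{
    /* ... perform the deferred handshake using 'parameter' ... */
    thread_terminate(current_thread());
}

static int
start_detached_worker(void *parameter)
{
    thread_t th;

    if (kernel_thread_start(example_init_worker, parameter, &th) != KERN_SUCCESS) {
        return ENOTCONN;       /* same error the mount path chooses above */
    }
    thread_deallocate(th);     /* drop the reference; the thread runs detached */
    return 0;
}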