Code example #1
File: VBoxVFS-VFSOps.cpp  Project: miguelinux/vbox
/**
 * Unmount VBoxVFS.
 *
 * @param mp        Mount data provided by VFS layer.
 * @param fFlags    Unmounting flags.
 * @param pContext  kAuth context needed in order to authenticate the unmount operation.
 *
 * @return 0 on success or BSD error code otherwise.
 */
static int
vboxvfs_unmount(struct mount *mp, int fFlags, vfs_context_t pContext)
{
    NOREF(pContext);

    vboxvfs_mount_t *pMount;
    int                  rc = EBUSY;
    int                  fFlush = (fFlags & MNT_FORCE) ? FORCECLOSE : 0;

    PDEBUG("Attempting to %s unmount a shared folder", (fFlags & MNT_FORCE) ? "forcibly" : "normally");

    AssertReturn(mp, EINVAL);

    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);

    AssertReturn(pMount,             EINVAL);
    AssertReturn(pMount->pRootVnode, EINVAL);

    /* Check if we can do unmount at the moment */
    if (!vnode_isinuse(pMount->pRootVnode, 1))
    {
        /* Flush child vnodes first */
        rc = vflush(mp, pMount->pRootVnode, fFlush);
        if (rc == 0)
        {
            /* Flush root vnode */
            rc = vflush(mp, NULL, fFlush);
            if (rc == 0)
            {
                vfs_setfsprivate(mp, NULL);

                rc = VbglR0SfUnmapFolder(&g_vboxSFClient, &pMount->pMap);
                if (RT_SUCCESS(rc))
                {
                    vboxvfs_destroy_internal_data(&pMount);
                    PDEBUG("A shared folder has been successfully unmounted");
                    return 0;
                }

                PDEBUG("Unable to unmount shared folder");
                rc = EPROTO;
            }
            else
                PDEBUG("Unable to flush filesystem before unmount, some data might be lost");
        }
        else
            PDEBUG("Unable to flush child vnodes");

    }
    else
        PDEBUG("Root vnode is in use, can't unmount");

    vnode_put(pMount->pRootVnode);

    return rc;
}
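
This unmount handler, and the ones in the later examples, follow the same Darwin VFS sequence: map MNT_FORCE to FORCECLOSE, vflush() everything except the root vnode, check the root with vnode_isinuse(), detach the private mount data via vfs_setfsprivate(mp, NULL), and drop the reference taken on the root at mount time. The skeleton below is only a distilled sketch of that shared shape; myfs_mount_t, myfs_unmount() and myfs_free() are hypothetical names, not taken from any of the projects quoted on this page.

/*
 * Minimal sketch of the common Darwin unmount flow (illustrative only).
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>

typedef struct myfs_mount {          /* hypothetical per-mount private data */
    vnode_t rootvp;                  /* root vnode, vnode_ref'd at mount time */
} myfs_mount_t;

static void myfs_free(myfs_mount_t *mnt);   /* hypothetical cleanup helper */

static int
myfs_unmount(struct mount *mp, int mntflags, __unused vfs_context_t ctx)
{
    myfs_mount_t *mnt = vfs_fsprivate(mp);
    int flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
    int error;

    /* Flush every vnode except the root; fails with EBUSY unless forced. */
    error = vflush(mp, mnt->rootvp, flags);
    if (error)
        return error;

    /* Refuse a non-forced unmount while the root is still in use. */
    if (vnode_isinuse(mnt->rootvp, 1) && !(flags & FORCECLOSE))
        return EBUSY;

    vnode_rele(mnt->rootvp);              /* drop the usecount from mount time */
    (void)vflush(mp, NULLVP, FORCECLOSE); /* now reclaim the root itself */

    vfs_setfsprivate(mp, NULL);           /* detach private data before freeing it */
    myfs_free(mnt);
    return 0;
}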
Code example #2
File: VBoxVFS-VFSOps.cpp  Project: miguelinux/vbox
/**
 * Mount VBoxVFS.
 *
 * @param mp        Mount data provided by VFS layer.
 * @param pDev      Device structure provided by VFS layer.
 * @param pUserData Mounting parameters provided by user space mount tool.
 * @param pContext  kAuth context needed in order to authenticate the mount operation.
 *
 * @return 0 on success or BSD error code otherwise.
 */
static int
vboxvfs_mount(struct mount *mp, vnode_t pDev, user_addr_t pUserData, vfs_context_t pContext)
{
    NOREF(pDev);
    NOREF(pContext);

    vboxvfs_mount_t *pMount;

    int rc = ENOMEM;

    PDEBUG("Mounting...");

    pMount = vboxvfs_alloc_internal_data(mp, pUserData);
    if (pMount)
    {
        rc = VbglR0SfMapFolder(&g_vboxSFClient, pMount->pShareName, &pMount->pMap);
        if (RT_SUCCESS(rc))
        {
            /* Private data must be set before the vboxvfs_create_vnode_internal() call
             * because it is needed in order to create the vnode. */
            vfs_setfsprivate(mp, pMount);

            /* Reset root vnode */
            pMount->pRootVnode = NULL;

            vfs_setflags(mp, MNT_RDONLY | MNT_SYNCHRONOUS | MNT_NOEXEC | MNT_NOSUID | MNT_NODEV);

            PDEBUG("VirtualBox shared folder successfully mounted");

            return 0;
        }

        PDEBUG("Unable to map shared folder");
        vboxvfs_destroy_internal_data(&pMount);
    }
    else
        PDEBUG("Unable to allocate internal data");

    return rc;
}
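
The mount side is the mirror image: reject update mounts with vfs_isupdate(), copy the user-space arguments in with copyin(), allocate the per-mount private data, publish it with vfs_setfsprivate() before anything that might need it, force the flags the file system requires, and pick an fsid. A hypothetical skeleton (the myfs_args layout is made up, and myfs_mount_t is the same illustrative type sketched after example #1):

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <string.h>

struct myfs_args {                   /* hypothetical user-space mount arguments */
    char fsname[MAXPATHLEN];
};

static int
myfs_mount(struct mount *mp, __unused vnode_t devvp, user_addr_t udata,
           __unused vfs_context_t ctx)
{
    struct myfs_args args;
    myfs_mount_t *mnt;
    int error;

    if (vfs_isupdate(mp))            /* remount/update is not supported */
        return ENOTSUP;

    error = copyin(udata, &args, sizeof(args));
    if (error)
        return EINVAL;
    args.fsname[MAXPATHLEN - 1] = '\0';   /* user data is untrusted */

    MALLOC(mnt, myfs_mount_t *, sizeof(*mnt), M_TEMP, M_WAITOK | M_ZERO);
    if (mnt == NULL)
        return ENOMEM;

    /* Publish private data early; later steps (e.g. creating the root vnode)
     * typically read it back with vfs_fsprivate(). */
    vfs_setfsprivate(mp, mnt);

    /* Force the flags this (read-only) file system requires and pick an fsid. */
    vfs_setflags(mp, MNT_RDONLY | MNT_NOSUID | MNT_NODEV);
    vfs_getnewfsid(mp);

    /* Record where the mount came from for statfs(2). */
    strlcpy(vfs_statfs(mp)->f_mntfromname, args.fsname, MNAMELEN);
    return 0;
}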
Code example #3
File: vfsops.c  Project: joushou/mac9p
static int
vfs_unmount_9p(mount_t mp, int mntflags, __unused vfs_context_t ctx)
{
	mount_9p *nmp;
	vnode_t vp;
	int e, flags;

	TRACE();
	nmp = MTO9P(mp);
	flags = 0;
	if(ISSET(mntflags,MNT_FORCE))
		SET(flags, FORCECLOSE);

	OSBitOrAtomic(F_UNMOUNTING, &nmp->flags);
	vp = nmp->root;
	if ((e=vflush(mp, vp, flags)))
		goto error;

	if (vnode_isinuse(vp, 1) && !ISSET(flags, FORCECLOSE)) {
		e = EBUSY;
		goto error;
	}

	clunk_9p(nmp, NTO9P(vp)->fid);
	vnode_rele(vp);
	vflush(mp, NULL, FORCECLOSE);
	vfs_setfsprivate(mp, NULL);
	disconnect_9p(nmp);
	cancelrpcs_9p(nmp);
	freemount_9p(nmp);
	return 0;

error:
	OSBitAndAtomic(~F_UNMOUNTING, &nmp->flags);
	return e;
}
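
vfs_unmount_9p also shows a small idiom worth isolating: mark the mount as "unmounting" with an atomic bit-set before tearing anything down, and clear the bit again on the error path so in-flight operations can carry on. A self-contained, hypothetical version of just that flagging (MYFS_F_UNMOUNTING and struct myfs_state are made-up names):

#include <libkern/OSAtomic.h>

#define MYFS_F_UNMOUNTING 0x00000001u    /* hypothetical flag bit */

struct myfs_state {
    volatile UInt32 flags;
};

/* Returns nonzero if this caller set the flag (i.e. no unmount was already
 * in progress).  OSBitOrAtomic returns the value *before* the OR. */
static int
myfs_begin_unmount(struct myfs_state *st)
{
    return !(OSBitOrAtomic(MYFS_F_UNMOUNTING, &st->flags) & MYFS_F_UNMOUNTING);
}

/* Undo the flag on the error path, as vfs_unmount_9p does before returning. */
static void
myfs_abort_unmount(struct myfs_state *st)
{
    OSBitAndAtomic((UInt32)~MYFS_F_UNMOUNTING, &st->flags);
}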
Code example #4
File: fuse_vfsops.c  Project: pwasmund/kext
static errno_t
fuse_vfsop_unmount(mount_t mp, int mntflags, vfs_context_t context)
{
    int   err        = 0;
    int   flags      = 0;

    fuse_device_t          fdev;
    struct fuse_data      *data;
    struct fuse_dispatcher fdi;

    vnode_t fuse_rootvp = NULLVP;

    fuse_trace_printf_vfsop();

    if (mntflags & MNT_FORCE) {
        flags |= FORCECLOSE;
    }

    data = fuse_get_mpdata(mp);
    if (!data) {
        panic("fuse4x: no mount private data in vfs_unmount");
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif

    fdev = data->fdev;

    if (fdata_dead_get(data)) {

        /*
         * If the file system daemon is dead, it's pointless to try to do
         * any unmount-time operations that go out to user space. Therefore,
         * we pretend that this is a force unmount. However, this isn't of much
         * use. That's because if any non-root vnode is in use, the vflush()
         * that the kernel does before calling our VFS_UNMOUNT will fail
         * if the original unmount wasn't forcible already. That earlier
         * vflush is called with SKIPROOT though, so it wouldn't bail out
         * on the root vnode being in use.
         *
         * If we want, we could set FORCECLOSE here so that a non-forced
         * unmount will be "upgraded" to a forced unmount if the root vnode
         * is busy (you are cd'd to the mount point, for example). It's not
         * quite pure to do that though.
         *
         *    flags |= FORCECLOSE;
         *    log("fuse4x: forcing unmount on a dead file system\n");
         */

    } else if (!(data->dataflags & FSESS_INITED)) {
        flags |= FORCECLOSE;
        log("fuse4x: forcing unmount on not-yet-alive file system\n");
        fdata_set_dead(data);
    }

    fuse_rootvp = data->rootvp;

    fuse_trace_printf("%s: Calling vflush(mp, fuse_rootvp, flags=0x%X);\n", __FUNCTION__, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    err = vflush(mp, fuse_rootvp, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);
    if (err) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return err;
    }

    if (vnode_isinuse(fuse_rootvp, 1) && !(flags & FORCECLOSE)) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return EBUSY;
    }

    if (fdata_dead_get(data)) {
        goto alreadydead;
    }

    fdisp_init(&fdi, 0 /* no data to send along */);
    fdisp_make(&fdi, FUSE_DESTROY, mp, FUSE_ROOT_ID, context);

    fuse_trace_printf("%s: Waiting for reply from FUSE_DESTROY.\n", __FUNCTION__);
    err = fdisp_wait_answ(&fdi);
    fuse_trace_printf("%s:   Reply received.\n", __FUNCTION__);
    if (!err) {
        fuse_ticket_drop(fdi.tick);
    }

    /*
     * Note that dounmount() signals a VQ_UNMOUNT VFS event.
     */

    fdata_set_dead(data);

alreadydead:

    fuse_trace_printf("%s: Calling vnode_rele(fuse_rootp);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    vnode_rele(fuse_rootvp); /* We got this reference in fuse_vfsop_mount(). */
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    data->rootvp = NULLVP;

    fuse_trace_printf("%s: Calling vflush(mp, NULLVP, FORCECLOSE);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    (void)vflush(mp, NULLVP, FORCECLOSE);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    fuse_lck_mtx_lock(fdev->mtx);

    vfs_setfsprivate(mp, NULL);
    data->dataflags &= ~FSESS_MOUNTED;
    OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif

    if (!(data->dataflags & FSESS_OPENED)) {

        /* fdev->data was left for us to clean up */

        fuse_device_close_final(fdev);

        /* fdev->data is gone now */
    }

    fuse_lck_mtx_unlock(fdev->mtx);

    return 0;
}
Code example #5
File: fuse_vfsops.c  Project: pwasmund/kext
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err      = 0;
    int mntopts  = 0;
    bool mounted = false;

    uint32_t max_read = ~0;

    size_t len;

    fuse_device_t      fdev = NULL;
    struct fuse_data  *data = NULL;
    fuse_mount_args    fusefs_args;
    struct vfsstatfs  *vfsstatfsp = vfs_statfs(mp);

#if M_FUSE4X_ENABLE_BIGLOCK
    lck_mtx_t         *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

    err = ENOTSUP;

#if M_FUSE4X_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_FUSE4X_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (*fusefs_args.fstypename) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if (typenamelen > FUSE_FSTYPENAME_MAXLEN) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 FUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if (!*fusefs_args.fsname)
        return EINVAL;

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
            (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if ((fusefs_args.init_timeout > FUSE_MAX_INIT_TIMEOUT) ||
            (fusefs_args.init_timeout < FUSE_MIN_INIT_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group, &is_member) != 0) || !is_member) {
            log("fuse4x: caller is not a member of fuse4x admin group. "
                "Either add user (id=%d) to group (id=%d), "
                "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
            return EPERM;
        }
        mntopts |= FSESS_ALLOW_ROOT;
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group, &is_member) != 0) || !is_member) {
                log("fuse4x: caller is not a member of fuse4x admin group. "
                    "Either add user (id=%d) to group (id=%d), "
                    "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                    kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            log("fuse4x: cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'\n");
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
            (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        vfs_setflags(mp, MNT_LOCAL);
    }
    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        log("fuse4x: invalid device file (number=%d)\n", fusefs_args.rdev);
        return EINVAL;
    }

    fuse_lck_mtx_lock(fdev->mtx);

    data = fdev->data;

    if (!data) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return ENXIO;
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->dataflags & FSESS_MOUNTED) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_lck_mtx_unlock(fdev->mtx);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENXIO;
        goto out;
    }

    data->dataflags |= FSESS_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("fuse4x: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
            kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = EPERM;
        log("fuse4x: fuse daemon running by user_id=%d does not have privileges to mount on directory %s owned by user_id=%d\n",
            kauth_cred_getuid(data->daemoncred), vfsstatfsp->f_mntonname, kauth_cred_getuid(vfs_context_ucred(context)));
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec =  fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = NULL;
    }

    data->init_timeout.tv_sec = fusefs_args.init_timeout;
    data->init_timeout.tv_nsec = 0;

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname,
            MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_lck_mtx_unlock(fdev->mtx);

    /* Send a handshake message to the daemon. */
    fuse_send_init(data, context);

    struct vfs_attr vfs_attr;
    VFSATTR_INIT(&vfs_attr);
    // Our vfs_getattr() doesn't look at most *_IS_ACTIVE()'s
    err = fuse_vfsop_getattr(mp, &vfs_attr, context);
    if (!err) {
        vfsstatfsp->f_bsize  = vfs_attr.f_bsize;
        vfsstatfsp->f_iosize = vfs_attr.f_iosize;
        vfsstatfsp->f_blocks = vfs_attr.f_blocks;
        vfsstatfsp->f_bfree  = vfs_attr.f_bfree;
        vfsstatfsp->f_bavail = vfs_attr.f_bavail;
        vfsstatfsp->f_bused  = vfs_attr.f_bused;
        vfsstatfsp->f_files  = vfs_attr.f_files;
        vfsstatfsp->f_ffree  = vfs_attr.f_ffree;
        // vfsstatfsp->f_fsid already handled above
        vfsstatfsp->f_owner  = kauth_cred_getuid(data->daemoncred);
        vfsstatfsp->f_flags  = vfs_flags(mp);
        // vfsstatfsp->f_fstypename already handled above
        // vfsstatfsp->f_mntonname handled elsewhere
        // vfsstatfsp->f_mnfromname already handled above
        vfsstatfsp->f_fssubtype = data->fssubtype;
    }
    if (fusefs_args.altflags & FUSE_MOPT_BLOCKSIZE) {
        vfsstatfsp->f_bsize = data->blocksize;
    } else {
        //data->blocksize = vfsstatfsp->f_bsize;
    }
    if (fusefs_args.altflags & FUSE_MOPT_IOSIZE) {
        vfsstatfsp->f_iosize = data->iosize;
    } else {
        //data->iosize = (uint32_t)vfsstatfsp->f_iosize;
        vfsstatfsp->f_iosize = data->iosize;
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_lck_mtx_lock(fdev->mtx);
        data = fdev->data; /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->dataflags &= ~FSESS_MOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_FUSE4X_ENABLE_BIGLOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_lck_mtx_unlock(fdev->mtx);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
        (void)vnode_put(fuse_rootvp);
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_lck_mtx_lock(fdev->mtx);
    data = fdev->data; /* ...and again */
    if(data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_lck_mtx_unlock(fdev->mtx);
#endif

    return err;
}
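
fuse_vfsop_mount finishes by adjusting the mount's I/O attributes once the root vnode has been referenced (the OSXFUSE variant further down this page sets a few more fields); the read-modify-write through vfs_ioattr()/vfs_setioattr() is the reusable part. A minimal, hypothetical helper showing that shape (field values are illustrative):

#include <sys/mount.h>

/* Advertise the device block size and transfer limits for a freshly
 * mounted file system.  Purely a sketch of the pattern above. */
static void
myfs_set_ioattr(mount_t mp, uint32_t blocksize, uint32_t iosize)
{
    struct vfsioattr ioattr;

    vfs_ioattr(mp, &ioattr);             /* start from the current settings */
    ioattr.io_devblocksize = blocksize;
    ioattr.io_maxreadcnt   = iosize;
    ioattr.io_maxwritecnt  = iosize;
    vfs_setioattr(mp, &ioattr);
}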
Code example #6
File: zfs_vfsops.c  Project: roddi/mac-zfs
static int
zfs_domount(struct mount *mp, dev_t mount_dev, char *osname, vfs_context_t ctx)
{
	uint64_t readonly;
	int error = 0;
	int mode;
	zfsvfs_t *zfsvfs;
	znode_t *zp = NULL;
	struct timeval tv;

	ASSERT(mp);
	ASSERT(osname);

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
	zfsvfs->z_vfs = mp;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_assign = TXG_NOWAIT;
	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rw_init(&zfsvfs->z_unmount_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_unmount_inactive_lock, NULL, RW_DEFAULT, NULL);
#ifndef __APPLE__
	/* Initialize the generic filesystem structure. */
	vfsp->vfs_bcount = 0;
	vfsp->vfs_data = NULL;

	if (zfs_create_unique_device(&mount_dev) == -1) {
		error = ENODEV;
		goto out;
	}
	ASSERT(vfs_devismounted(mount_dev) == 0);
#endif

	vfs_setfsprivate(mp, zfsvfs);

	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
		goto out;

	if (readonly) {
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_RDONLY));
	} else {
		mode = DS_MODE_PRIMARY;
	}
	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (error == EROFS) {
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
		    &zfsvfs->z_os);
	}

	if (error)
		goto out;

	if (error = zfs_init_fs(zfsvfs, &zp, (cred_t *) vfs_context_ucred(ctx)))
		goto out;

	/* The call to zfs_init_fs leaves the vnode held, release it here. */
	vnode_put(ZTOV(zp));

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		uint64_t xattr;

		ASSERT(mode & DS_MODE_READONLY);
#if 0
		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if (error = dsl_prop_get_integer(osname, "xattr", &xattr, NULL))
			goto out;
		xattr_changed_cb(zfsvfs, xattr);
#endif
		zfsvfs->z_issnap = B_TRUE;
	} else {
		
		if (!vfs_isrdonly(mp))
			zfs_unlinked_drain(zfsvfs);

#ifndef __APPLE__
		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain().  (Further note: ziltest doesn't
		 * use readonly mounts, where zfs_unlinked_drain() isn't
		 * called.)  This is because ziltest causes spa_sync()
		 * to think it's committed, but actually it is not, so
		 * the intent log contains many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated in
		 * a yet later txg.  This would write a "create object
		 * N" record to the intent log.  Normally, this would be
		 * fine because the spa_sync() would have written out
		 * the fact that object N is free, before we could write
		 * the "create object N" intent log record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk.  So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
		    zfs_replay_vector);

		if (!zil_disable)
			zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
#endif
	}

#if 0
	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);
#endif

	/*
	 * Record the mount time (for Spotlight)
	 */
	microtime(&tv);
	zfsvfs->z_mount_time = tv.tv_sec;
	
out:
	if (error) {
		if (zfsvfs->z_os)
			dmu_objset_close(zfsvfs->z_os);
		mutex_destroy(&zfsvfs->z_znodes_lock);
		list_destroy(&zfsvfs->z_all_znodes);
		rw_destroy(&zfsvfs->z_unmount_lock);
		rw_destroy(&zfsvfs->z_unmount_inactive_lock);
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
	} else {
		OSIncrementAtomic(&zfs_active_fs_count);
		(void) copystr(osname, vfs_statfs(mp)->f_mntfromname, MNAMELEN - 1, 0);
		vfs_getnewfsid(mp);
	}

	return (error);
}
Code example #7
File: null_vfsops.c  Project: aglab2/darwin-xnu
/*
 * Free reference to null layer
 */
static int
nullfs_unmount(struct mount * mp, int mntflags, __unused vfs_context_t ctx)
{
	struct null_mount * mntdata;
	struct vnode * vp;
	int error, flags;

	NULLFSDEBUG("nullfs_unmount: mp = %p\n", (void *)mp);

	/* check entitlement or superuser */
	if (!IOTaskHasEntitlement(current_task(), NULLFS_ENTITLEMENT) &&
		vfs_context_suser(ctx) != 0) {
		return EPERM;
	}

	if (mntflags & MNT_FORCE) {
		flags = FORCECLOSE;
	} else {
		flags = 0;
	}

	mntdata = MOUNTTONULLMOUNT(mp);
	vp      = mntdata->nullm_rootvp;

	// release our reference on the root before flushing.
	// it will get pulled out of the mount structure by reclaim
	vnode_getalways(vp);

	error = vflush(mp, vp, flags);
	if (error)
	{
		vnode_put(vp);
		return (error);
	}

	if (vnode_isinuse(vp,1) && flags == 0)
	{
		vnode_put(vp);
		return EBUSY;
	}

	vnode_rele(vp); // Drop reference taken by nullfs_mount
	vnode_put(vp); // Drop ref taken above

	//Force close to get rid of the last vnode
	(void)vflush(mp, NULL, FORCECLOSE);

	/* no more vnodes, so tear down the mountpoint */

	lck_mtx_lock(&mntdata->nullm_lock);

	vfs_setfsprivate(mp, NULL);

	vnode_getalways(mntdata->nullm_lowerrootvp);
	vnode_rele(mntdata->nullm_lowerrootvp);
	vnode_put(mntdata->nullm_lowerrootvp);

	lck_mtx_unlock(&mntdata->nullm_lock);

	nullfs_destroy_lck(&mntdata->nullm_lock);

	FREE(mntdata, M_TEMP);

	uint64_t vflags = vfs_flags(mp);
	vfs_setflags(mp, vflags & ~MNT_LOCAL);

	return (0);
}
Code example #8
File: null_vfsops.c  Project: aglab2/darwin-xnu
/*
 * Mount null layer
 */
static int
nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, vfs_context_t ctx)
{
	int error                 = 0;
	struct vnode *lowerrootvp = NULL, *vp = NULL;
	struct vfsstatfs * sp   = NULL;
	struct null_mount * xmp = NULL;
	char data[MAXPATHLEN];
	size_t count;
	struct vfs_attr vfa;
	/* set defaults (arbitrary since this file system is readonly) */
	uint32_t bsize  = BLKDEV_IOSIZE;
	size_t iosize   = BLKDEV_IOSIZE;
	uint64_t blocks = 4711 * 4711;
	uint64_t bfree  = 0;
	uint64_t bavail = 0;
	uint64_t bused  = 4711;
	uint64_t files  = 4711;
	uint64_t ffree  = 0;

	kauth_cred_t cred = vfs_context_ucred(ctx);

	NULLFSDEBUG("nullfs_mount(mp = %p) %llx\n", (void *)mp, vfs_flags(mp));

	if (vfs_flags(mp) & MNT_ROOTFS)
		return (EOPNOTSUPP);

	/*
	 * Update is a no-op
	 */
	if (vfs_isupdate(mp)) {
		return ENOTSUP;
	}

	/* check entitlement */
	if (!IOTaskHasEntitlement(current_task(), NULLFS_ENTITLEMENT)) {
		return EPERM;
	}

	/*
	 * Get argument
	 */
	error = copyinstr(user_data, data, MAXPATHLEN - 1, &count);
	if (error) {
		NULLFSDEBUG("nullfs: error copying data form user %d\n", error);
		goto error;
	}

	/* This could happen if the system is configured for 32 bit inodes instead of
	 * 64 bit */
	if (count > MAX_MNT_FROM_LENGTH) {
		error = EINVAL;
		NULLFSDEBUG("nullfs: path to translocate too large for this system %d vs %d\n", count, MAX_MNT_FROM_LENGTH);
		goto error;
	}

	error = vnode_lookup(data, 0, &lowerrootvp, ctx);
	if (error) {
		NULLFSDEBUG("lookup %s -> %d\n", data, error);
		goto error;
	}

	/* lowerrootvp has an iocount after vnode_lookup; drop that for a usecount.
	   Keep the usecount to signal that we want to keep around the thing we are mirroring.
	   Drop it in unmount. */
	error = vnode_ref(lowerrootvp);
	vnode_put(lowerrootvp);
	if (error)
	{
		// If vnode_ref failed, then null it out so it can't be used anymore in cleanup.
		lowerrootvp = NULL;
		goto error;
	}

	NULLFSDEBUG("mount %s\n", data);

	MALLOC(xmp, struct null_mount *, sizeof(*xmp), M_TEMP, M_WAITOK | M_ZERO);
	if (xmp == NULL) {
		error = ENOMEM;
		goto error;
	}

	/*
	 * Save reference to underlying FS
	 */
	xmp->nullm_lowerrootvp  = lowerrootvp;
	xmp->nullm_lowerrootvid = vnode_vid(lowerrootvp);

	error = null_getnewvnode(mp, NULL, NULL, &vp, NULL, 1);
	if (error) {
		goto error;
	}

	/* vp has an iocount on it from vnode_create. drop that for a usecount. This
	 * is our root vnode so we drop the ref in unmount
	 *
	 * Assuming for now that because we created this vnode and we aren't finished mounting we can get a ref*/
	vnode_ref(vp);
	vnode_put(vp);

	error = nullfs_init_lck(&xmp->nullm_lock);
	if (error) {
		goto error;
	}

	xmp->nullm_rootvp = vp;

	/* read the flags the user set, but then ignore some of them, we will only
	   allow them if they are set on the lower file system */
	uint64_t flags      = vfs_flags(mp) & (~(MNT_IGNORE_OWNERSHIP | MNT_LOCAL));
	uint64_t lowerflags = vfs_flags(vnode_mount(lowerrootvp)) & (MNT_LOCAL | MNT_QUARANTINE | MNT_IGNORE_OWNERSHIP | MNT_NOEXEC);

	if (lowerflags) {
		flags |= lowerflags;
	}

	/* force these flags */
	flags |= (MNT_DONTBROWSE | MNT_MULTILABEL | MNT_NOSUID | MNT_RDONLY);
	vfs_setflags(mp, flags);

	vfs_setfsprivate(mp, xmp);
	vfs_getnewfsid(mp);
	vfs_setlocklocal(mp);

	/* fill in the stat block */
	sp = vfs_statfs(mp);
	strlcpy(sp->f_mntfromname, data, MAX_MNT_FROM_LENGTH);

	sp->f_flags = flags;

	xmp->nullm_flags = NULLM_CASEINSENSITIVE; /* default to case insensitive */

	error = nullfs_vfs_getlowerattr(vnode_mount(lowerrootvp), &vfa, ctx);
	if (error == 0) {
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bsize)) {
			bsize = vfa.f_bsize;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_iosize)) {
			iosize = vfa.f_iosize;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_blocks)) {
			blocks = vfa.f_blocks;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bfree)) {
			bfree = vfa.f_bfree;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bavail)) {
			bavail = vfa.f_bavail;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bused)) {
			bused = vfa.f_bused;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_files)) {
			files = vfa.f_files;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_ffree)) {
			ffree = vfa.f_ffree;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) {
			if ((vfa.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & (VOL_CAP_FMT_CASE_SENSITIVE)) &&
			    (vfa.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & (VOL_CAP_FMT_CASE_SENSITIVE))) {
				xmp->nullm_flags &= ~NULLM_CASEINSENSITIVE;
			}
		}
	} else {
		goto error;
	}

	sp->f_bsize  = bsize;
	sp->f_iosize = iosize;
	sp->f_blocks = blocks;
	sp->f_bfree  = bfree;
	sp->f_bavail = bavail;
	sp->f_bused  = bused;
	sp->f_files  = files;
	sp->f_ffree  = ffree;

	/* Associate the mac label information from the mirrored filesystem with the
	 * mirror */
	MAC_PERFORM(mount_label_associate, cred, vnode_mount(lowerrootvp), vfs_mntlabel(mp));

	NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n", sp->f_mntfromname, sp->f_mntonname);
	return (0);

error:
	if (xmp) {
		FREE(xmp, M_TEMP);
	}
	if (lowerrootvp) {
		vnode_getwithref(lowerrootvp);
		vnode_rele(lowerrootvp);
		vnode_put(lowerrootvp);
	}
	if (vp) {
		/* we made the root vnode but the mount is failed, so clean it up */
		vnode_getwithref(vp);
		vnode_rele(vp);
		/* give vp back */
		vnode_recycle(vp);
		vnode_put(vp);
	}
	return error;
}
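
The long VFSATTR_IS_SUPPORTED() ladder in nullfs_mount is the standard way to consume a struct vfs_attr: every field must be requested before the getattr call (VFSATTR_INIT()/VFSATTR_WANTED() do that setup; example #5 above shows VFSATTR_INIT() in use) and then checked with VFSATTR_IS_SUPPORTED() before use, falling back to a default otherwise. A condensed, hypothetical helper that captures the copy-with-fallback half of the pattern:

#include <sys/mount.h>

/* Copy a few size fields from an already filled-in vfs_attr into the mount's
 * vfsstatfs, keeping the caller's defaults where the lower file system did
 * not report a value.  Illustrative only. */
static void
myfs_statfs_from_attr(mount_t mp, struct vfs_attr *vfa,
                      uint32_t def_bsize, size_t def_iosize)
{
    struct vfsstatfs *sp = vfs_statfs(mp);

    sp->f_bsize  = VFSATTR_IS_SUPPORTED(vfa, f_bsize)  ? vfa->f_bsize  : def_bsize;
    sp->f_iosize = VFSATTR_IS_SUPPORTED(vfa, f_iosize) ? vfa->f_iosize : def_iosize;
    if (VFSATTR_IS_SUPPORTED(vfa, f_blocks))
        sp->f_blocks = vfa->f_blocks;
    if (VFSATTR_IS_SUPPORTED(vfa, f_bfree))
        sp->f_bfree = vfa->f_bfree;
    if (VFSATTR_IS_SUPPORTED(vfa, f_bavail))
        sp->f_bavail = vfa->f_bavail;
}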
Code example #9
File: osi_vfsops.c  Project: adeason/openafs
int
afs_mount(struct mount *mp, char *path, caddr_t data, struct nameidata *ndp, CTX_TYPE ctx)
#endif
{
    /* ndp contains the mounted-from device.  Just ignore it.
     * we also don't care about our proc struct. */
    size_t size;
    int error;
#ifdef AFS_DARWIN80_ENV
    struct vfsioattr ioattr;
    /* vfs_statfs advertised as RO, but isn't */
    /* new api will be needed to initialize this information (nfs needs to
       set mntfromname too) */
#endif
    STATFS_TYPE *mnt_stat = vfs_statfs(mp); 

    if (vfs_isupdate(mp))
	return EINVAL;

    AFS_GLOCK();
    AFS_STATCNT(afs_mount);

    if (data == 0 && afs_globalVFS) {	/* Don't allow remounts. */
	AFS_GUNLOCK();
	return (EBUSY);
    }

    afs_globalVFS = mp;
#ifdef AFS_DARWIN80_ENV
    vfs_ioattr(mp, &ioattr);
    ioattr.io_devblocksize = (16 * 32768);
    vfs_setioattr(mp, &ioattr);
    /* f_iosize is handled in VFS_GETATTR */
#else
    mp->vfs_bsize = 8192;
    mp->mnt_stat.f_iosize = 8192;
#endif
    vfs_getnewfsid(mp);

#ifndef AFS_DARWIN80_ENV
    (void)copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
    memset(mp->mnt_stat.f_mntonname + size, 0, MNAMELEN - size);
#endif
    memset(mnt_stat->f_mntfromname, 0, MNAMELEN);

    if (data == 0) {
	strcpy(mnt_stat->f_mntfromname, "AFS");
	/* null terminated string "AFS" will fit, just leave it be. */
	vfs_setfsprivate(mp, NULL);
    } else {
	struct VenusFid *rootFid = NULL;
	struct volume *tvp;
	char volName[MNAMELEN];

	(void)copyinstr(data, volName, MNAMELEN - 1, &size);
	memset(volName + size, 0, MNAMELEN - size);

	if (volName[0] == 0) {
	    strcpy(mnt_stat->f_mntfromname, "AFS");
	    vfs_setfsprivate(mp, &afs_rootFid);
	} else {
	    struct cell *localcell = afs_GetPrimaryCell(READ_LOCK);
	    if (localcell == NULL) {
		AFS_GUNLOCK();
		return ENODEV;
	    }

	    /* Set the volume identifier to "AFS:volume.name" */
	    snprintf(mnt_stat->f_mntfromname, MNAMELEN - 1, "AFS:%s",
		     volName);
	    tvp =
		afs_GetVolumeByName(volName, localcell->cellNum, 1,
				    (struct vrequest *)0, READ_LOCK);

	    if (tvp) {
		int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
		MALLOC(rootFid, struct VenusFid *, sizeof(*rootFid), M_UFSMNT,
		       M_WAITOK);
		rootFid->Cell = localcell->cellNum;
		rootFid->Fid.Volume = volid;
		rootFid->Fid.Vnode = 1;
		rootFid->Fid.Unique = 1;
	    } else {
		AFS_GUNLOCK();
		return ENODEV;
	    }

	    vfs_setfsprivate(mp, &rootFid);
	}
    }
Code example #10
File: vfsops.c  Project: joushou/mac9p
static int
vfs_mount_9p(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
{
#pragma unused(devvp)
	struct sockaddr *addr, *authaddr;
	struct vfsstatfs *sp;
	char authkey[DESKEYLEN+1];
	kauth_cred_t cred;
	user_args_9p args;
	mount_9p *nmp;
	size_t size;
	fid_9p fid;
	qid_9p qid;
	char *vers;
	int e;

	TRACE();
	nmp = NULL;
	addr = NULL;
	authaddr = NULL;
	fid = NOFID;

	if (vfs_isupdate(mp))
		return ENOTSUP;

	if (vfs_context_is64bit(ctx)) {
		if ((e=copyin(data, &args, sizeof(args))))
			goto error;
	} else {
		args_9p args32;
		if ((e=copyin(data, &args32, sizeof(args32))))
			goto error;
		args.spec			= CAST_USER_ADDR_T(args32.spec);
		args.addr			= CAST_USER_ADDR_T(args32.addr);
		args.addrlen		= args32.addrlen;
		args.authaddr		= CAST_USER_ADDR_T(args32.authaddr);
		args.authaddrlen	= args32.authaddrlen;
		args.volume			= CAST_USER_ADDR_T(args32.volume);
		args.uname			= CAST_USER_ADDR_T(args32.uname);
		args.aname			= CAST_USER_ADDR_T(args32.aname);
		args.authkey		= CAST_USER_ADDR_T(args32.authkey);
		args.flags			= args32.flags;
	}
	e = ENOMEM;
	nmp = malloc_9p(sizeof(*nmp));
	if (nmp == NULL)
		return e;

	nmp->mp = mp;
	TAILQ_INIT(&nmp->req);
	nmp->lck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->reqlck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->nodelck = lck_mtx_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	nmp->node = hashinit(desiredvnodes, M_TEMP, &nmp->nodelen);
	if (nmp->lck==NULL || nmp->reqlck==NULL || nmp->nodelck==NULL || nmp->node==NULL)
		goto error;

	if ((e=nameget_9p(args.volume, &nmp->volume)))
		goto error;
	if ((e=nameget_9p(args.uname, &nmp->uname)))
		goto error;
	if ((e=nameget_9p(args.aname, &nmp->aname)))
		goto error;

	cred = vfs_context_ucred(ctx);
	if (IS_VALID_CRED(cred)) {
		nmp->uid = kauth_cred_getuid(cred);
		nmp->gid = kauth_cred_getgid(cred);
	} else {
		nmp->uid = KAUTH_UID_NONE;
		nmp->gid = KAUTH_GID_NONE;
	}
	
	vfs_getnewfsid(mp);
	vfs_setfsprivate(mp, nmp);
	
	nmp->flags = args.flags;
	if ((e=addrget_9p(args.addr, args.addrlen, &addr)))
		goto error;
	if ((e=connect_9p(nmp, addr)))
		goto error;

	vers = VERSION9P;
	if (ISSET(nmp->flags, FLAG_DOTU))
		vers = VERSION9PDOTU;
	if ((e=version_9p(nmp, vers, &nmp->version)))
		goto error;
	if (ISSET(nmp->flags, FLAG_DOTU) && strcmp(VERSION9PDOTU, nmp->version)==0)
		SET(nmp->flags, F_DOTU);

	nmp->afid = NOFID;
	if (args.authaddr && args.authaddrlen && args.authkey) {
		if ((e=copyin(args.authkey, authkey, DESKEYLEN)))
			goto error;
		if ((e=addrget_9p(args.authaddr, args.authaddrlen, &authaddr)))
			goto error;
		if ((e=auth_9p(nmp, nmp->uname, nmp->aname, nmp->uid, &nmp->afid, &qid)))
			goto error;
		if (nmp->afid!=NOFID &&
			(e=authp9any_9p(nmp, nmp->afid, authaddr, nmp->uname, authkey)))
			goto error;
		bzero(authkey, DESKEYLEN);
	}
	if ((e=attach_9p(nmp, nmp->uname, nmp->aname, nmp->afid, nmp->uid, &fid, &qid)))
		goto error;

	if ((e=nget_9p(nmp, fid, qid, NULL, &nmp->root, NULL, ctx)))
		goto error;

	nunlock_9p(NTO9P(nmp->root));
	e = vnode_ref(nmp->root);
	vnode_put(nmp->root);
	if (e)
		goto error;

	vfs_setauthopaque(mp);
	vfs_clearauthopaqueaccess(mp);
	vfs_setlocklocal(mp);

	// init stats
	sp = vfs_statfs(nmp->mp);
	copyinstr(args.spec, sp->f_mntfromname, MNAMELEN-1, &size);
	bzero(sp->f_mntfromname+size, MNAMELEN-size);
	sp->f_bsize = PAGE_SIZE;
	sp->f_iosize = nmp->msize-IOHDRSZ;
	sp->f_blocks = sp->f_bfree = sp->f_bavail = sp->f_bused = 0;
	sp->f_files = 65535;
	sp->f_ffree = sp->f_files-2;
	sp->f_flags = vfs_flags(mp);
	
	free_9p(addr);
	free_9p(authaddr);
	return 0;

error:
	bzero(authkey, DESKEYLEN);
	free_9p(addr);
	free_9p(authaddr);
	if (nmp->so) {
		clunk_9p(nmp, fid);
		disconnect_9p(nmp);
	}
	freemount_9p(nmp);
	vfs_setfsprivate(mp, NULL);
	return e;
}
Code example #11
File: fuse_vfsops.c  Project: arritjenof/kext
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err      = 0;
    int mntopts  = 0;
    bool mounted = false;

    uint32_t drandom  = 0;
    uint32_t max_read = ~0;

    size_t len;

    fuse_device_t      fdev = NULL;
    struct fuse_data  *data = NULL;
    fuse_mount_args    fusefs_args;
    struct vfsstatfs  *vfsstatfsp = vfs_statfs(mp);

    kern_return_t kr;
    thread_t      init_thread;

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_t    *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

#if M_OSXFUSE_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (fusefs_args.altflags & FUSE_MOPT_FSTYPENAME) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if ((typenamelen <= 0) || (typenamelen > FUSE_FSTYPENAME_MAXLEN)) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 OSXFUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SLOW_STATFS) {
        mntopts |= FSESS_SLOW_STATFS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_BROWSE) {
        vfs_setflags(mp, MNT_DONTBROWSE);
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) == 0) && is_member) {
            mntopts |= FSESS_ALLOW_ROOT;
        } else {
            IOLog("OSXFUSE: caller not a member of OSXFUSE admin group (%d)\n",
                  fuse_admin_group);
            return EPERM;
        }
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_LOCALCACHES) {
        mntopts |= FSESS_NO_ATTRCACHE;
        mntopts |= FSESS_NO_READAHEAD;
        mntopts |= FSESS_NO_UBC;
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        mntopts |= FSESS_LOCALVOL;
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        return EINVAL;
    }

    fuse_device_lock(fdev);

    drandom = fuse_device_get_random(fdev);
    if (fusefs_args.random != drandom) {
        fuse_device_unlock(fdev);
        IOLog("OSXFUSE: failing mount because of mismatched random\n");
        return EINVAL;
    }

    data = fuse_device_get_mpdata(fdev);

    if (!data) {
        fuse_device_unlock(fdev);
        return ENXIO;
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->mount_state != FM_NOTMOUNTED) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_device_unlock(fdev);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_device_unlock(fdev);
        err = ENXIO;
        goto out;
    }

    data->mount_state = FM_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_device_unlock(fdev);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("OSXFUSE: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_device_unlock(fdev);
        err = EPERM;
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec =  fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = (struct timespec *)0;
    }

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname,
            MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_device_unlock(fdev);

    /* Send a handshake message to the daemon. */
    kr = kernel_thread_start(fuse_internal_init, data, &init_thread);
    if (kr != KERN_SUCCESS) {
        IOLog("OSXFUSE: could not start init thread\n");
        err = ENOTCONN;
    } else {
        thread_deallocate(init_thread);
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_device_lock(fdev);
        data = fuse_device_get_mpdata(fdev); /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->mount_state = FM_NOTMOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_device_unlock(fdev);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        /*
         * Even though fuse_rootvp will not be reclaimed by the vnode_put below,
         * because we incremented its usecount by calling vnode_ref, release
         * biglock just to be safe.
         */
        fuse_biglock_unlock(biglock);
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */
        (void)vnode_put(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_lock(biglock);
#endif
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_maxreadcnt = ioattr.io_maxwritecnt = data->iosize;
            ioattr.io_segreadcnt = ioattr.io_segwritecnt = data->iosize / PAGE_SIZE;
            ioattr.io_maxsegreadsize = ioattr.io_maxsegwritesize = data->iosize;
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_device_lock(fdev);
    data = fuse_device_get_mpdata(fdev); /* ...and again */
    if(data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_device_unlock(fdev);
#endif

    return err;
}