Example #1
0
static int
fuse_device_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct fuse_data *data;
	struct fuse_ticket *tick;
	int error;

	error = devfs_get_cdevpriv((void **)&data);
	if (error != 0)
		return (error);
	if (!data)
		panic("no fuse data upon fuse device close");
	fdata_set_dead(data);

	FUSE_LOCK();
	fuse_lck_mtx_lock(data->aw_mtx);
	/* wake up poll()ers */
	selwakeuppri(&data->ks_rsel, PZERO + 1);
	/* Don't let syscall handlers wait in vain */
	while ((tick = fuse_aw_pop(data))) {
		fuse_lck_mtx_lock(tick->tk_aw_mtx);
		fticket_set_answered(tick);
		tick->tk_aw_errno = ENOTCONN;
		wakeup(tick);
		fuse_lck_mtx_unlock(tick->tk_aw_mtx);
		FUSE_ASSERT_AW_DONE(tick);
		fuse_ticket_drop(tick);
	}
	fuse_lck_mtx_unlock(data->aw_mtx);
	FUSE_UNLOCK();

	SDT_PROBE2(fuse, , device, trace, 1, "device close");
	return (0);
}
Example #2
0
int
fuse_internal_init_callback(struct fuse_ticket *tick, struct uio *uio)
{
	int err = 0;
	struct fuse_data *data = tick->tk_data;
	struct fuse_init_out *fiio;

	if ((err = tick->tk_aw_ohead.error)) {
		goto out;
	}
	if ((err = fticket_pull(tick, uio))) {
		goto out;
	}
	fiio = fticket_resp(tick)->base;

	/* XXX: Do we want to check anything further besides this? */
	if (fiio->major < 7) {
		debug_printf("userpace version too low\n");
		err = EPROTONOSUPPORT;
		goto out;
	}
	data->fuse_libabi_major = fiio->major;
	data->fuse_libabi_minor = fiio->minor;

	if (fuse_libabi_geq(data, 7, 5)) {
		if (fticket_resp(tick)->len == sizeof(struct fuse_init_out)) {
			data->max_write = fiio->max_write;
		} else {
			err = EINVAL;
		}
	} else {
		/* Old fixed values */
		data->max_write = 4096;
	}

out:
	if (err) {
		fdata_set_dead(data);
	}
	FUSE_LOCK();
	data->dataflags |= FSESS_INITED;
	wakeup(&data->ticketer);
	FUSE_UNLOCK();

	return 0;
}
Example #3
0
static int
fticket_wait_answer(struct fuse_ticket *ftick)
{
	sigset_t tset;
	int err = 0;
	struct fuse_data *data;

	debug_printf("ftick=%p\n", ftick);
	fuse_lck_mtx_lock(ftick->tk_aw_mtx);

	if (fticket_answered(ftick)) {
		goto out;
	}
	data = ftick->tk_data;

	if (fdata_get_dead(data)) {
		err = ENOTCONN;
		fticket_set_answered(ftick);
		goto out;
	}
	fuse_block_sigs(&tset);
	err = msleep(ftick, &ftick->tk_aw_mtx, PCATCH, "fu_ans",
	    data->daemon_timeout * hz);
	fuse_restore_sigs(&tset);
	if (err == EAGAIN) {		/* same as EWOULDBLOCK */
#ifdef XXXIP				/* die conditionally */
		if (!fdata_get_dead(data)) {
			fdata_set_dead(data);
		}
#endif
		err = ETIMEDOUT;
		fticket_set_answered(ftick);
	}
out:
	if (!(err || fticket_answered(ftick))) {
		debug_printf("FUSE: requester was woken up but still no answer");
		err = ENXIO;
	}
	fuse_lck_mtx_unlock(ftick->tk_aw_mtx);

	return err;
}
Example #4
0
struct fuse_ticket *
fuse_ticket_fetch(struct fuse_data *data)
{
	int err = 0;
	struct fuse_ticket *ftick;

	debug_printf("data=%p\n", data);

	ftick = fticket_alloc(data);

	if (!(data->dataflags & FSESS_INITED)) {
		/* Sleep until we get an answer to the INIT message */
		FUSE_LOCK();
		if (!(data->dataflags & FSESS_INITED) && data->ticketer > 2) {
			err = msleep(&data->ticketer, &fuse_mtx, PCATCH | PDROP,
			    "fu_ini", 0);
			if (err)
				fdata_set_dead(data);
		} else
			FUSE_UNLOCK();
	}
	return ftick;
}
Example #5
0
static errno_t
fuse_vfsop_unmount(mount_t mp, int mntflags, vfs_context_t context)
{
    int   err        = 0;
    int   flags      = 0;

    fuse_device_t          fdev;
    struct fuse_data      *data;
    struct fuse_dispatcher fdi;

    vnode_t fuse_rootvp = NULLVP;

    fuse_trace_printf_vfsop();

    if (mntflags & MNT_FORCE) {
        flags |= FORCECLOSE;
    }

    data = fuse_get_mpdata(mp);
    if (!data) {
        panic("fuse4x: no mount private data in vfs_unmount");
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif

    fdev = data->fdev;

    if (fdata_dead_get(data)) {

        /*
         * If the file system daemon is dead, it's pointless to try to do
         * any unmount-time operations that go out to user space. Therefore,
         * we pretend that this is a force unmount. However, this isn't of much
         * use. That's because if any non-root vnode is in use, the vflush()
         * that the kernel does before calling our VFS_UNMOUNT will fail
         * if the original unmount wasn't forcible already. That earlier
         * vflush is called with SKIPROOT though, so it wouldn't bail out
         * on the root vnode being in use.
         *
         * If we want, we could set FORCECLOSE here so that a non-forced
         * unmount will be "upgraded" to a forced unmount if the root vnode
         * is busy (you are cd'd to the mount point, for example). It's not
         * quite pure to do that though.
         *
         *    flags |= FORCECLOSE;
         *    log("fuse4x: forcing unmount on a dead file system\n");
         */

    } else if (!(data->dataflags & FSESS_INITED)) {
        flags |= FORCECLOSE;
        log("fuse4x: forcing unmount on not-yet-alive file system\n");
        fdata_set_dead(data);
    }

    fuse_rootvp = data->rootvp;

    fuse_trace_printf("%s: Calling vflush(mp, fuse_rootvp, flags=0x%X);\n", __FUNCTION__, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    err = vflush(mp, fuse_rootvp, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);
    if (err) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return err;
    }

    if (vnode_isinuse(fuse_rootvp, 1) && !(flags & FORCECLOSE)) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return EBUSY;
    }

    if (fdata_dead_get(data)) {
        goto alreadydead;
    }

    fdisp_init(&fdi, 0 /* no data to send along */);
    fdisp_make(&fdi, FUSE_DESTROY, mp, FUSE_ROOT_ID, context);

    fuse_trace_printf("%s: Waiting for reply from FUSE_DESTROY.\n", __FUNCTION__);
    err = fdisp_wait_answ(&fdi);
    fuse_trace_printf("%s:   Reply received.\n", __FUNCTION__);
    if (!err) {
        fuse_ticket_drop(fdi.tick);
    }

    /*
     * Note that dounmount() signals a VQ_UNMOUNT VFS event.
     */

    fdata_set_dead(data);

alreadydead:

    fuse_trace_printf("%s: Calling vnode_rele(fuse_rootp);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    vnode_rele(fuse_rootvp); /* We got this reference in fuse_vfsop_mount(). */
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    data->rootvp = NULLVP;

    fuse_trace_printf("%s: Calling vflush(mp, NULLVP, FORCECLOSE);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    (void)vflush(mp, NULLVP, FORCECLOSE);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    fuse_lck_mtx_lock(fdev->mtx);

    vfs_setfsprivate(mp, NULL);
    data->dataflags &= ~FSESS_MOUNTED;
    OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif

    if (!(data->dataflags & FSESS_OPENED)) {

        /* fdev->data was left for us to clean up */

        fuse_device_close_final(fdev);

        /* fdev->data is gone now */
    }

    fuse_lck_mtx_unlock(fdev->mtx);

    return 0;
}
Example #6
0
/*
    struct vop_getattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
	struct thread *a_td;
    };
*/
static int
fuse_vnop_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = curthread;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	int err = 0;
	int dataflags;
	struct fuse_dispatcher fdi;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;

	/* Note that we are not bailing out on a dead file system just yet. */

	if (!(dataflags & FSESS_INITED)) {
		if (!vnode_isvroot(vp)) {
			fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
			err = ENOTCONN;
			debug_printf("fuse_getattr b: returning ENOTCONN\n");
			return err;
		} else {
			goto fake;
		}
	}
	fdisp_init(&fdi, 0);
	if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
		if ((err == ENOTCONN) && vnode_isvroot(vp)) {
			/* see comment at similar place in fuse_statfs() */
			fdisp_destroy(&fdi);
			goto fake;
		}
		if (err == ENOENT) {
			fuse_internal_vnode_disappear(vp);
		}
		goto out;
	}
	cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
	if (vap != VTOVA(vp)) {
		memcpy(vap, VTOVA(vp), sizeof(*vap));
	}
	if (vap->va_type != vnode_vtype(vp)) {
		fuse_internal_vnode_disappear(vp);
		err = ENOENT;
		goto out;
	}
	if ((fvdat->flag & FN_SIZECHANGE) != 0)
		vap->va_size = fvdat->filesize;

	if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
		/*
	         * This is for those cases when the file size changed without us
	         * knowing, and we want to catch up.
	         */
		off_t new_filesize = ((struct fuse_attr_out *)
				      fdi.answ)->attr.size;

		if (fvdat->filesize != new_filesize) {
			fuse_vnode_setsize(vp, cred, new_filesize);
		}
	}
	debug_printf("fuse_getattr e: returning 0\n");

out:
	fdisp_destroy(&fdi);
	return err;

fake:
	bzero(vap, sizeof(*vap));
	vap->va_type = vnode_vtype(vp);

	return 0;
}
Example #7
0
static int
fuse_vfsop_unmount(struct mount *mp, int mntflags)
{
	int err = 0;
	int flags = 0;

	struct cdev *fdev;
	struct fuse_data *data;
	struct fuse_dispatcher fdi;
	struct thread *td = curthread;

	fuse_trace_printf_vfsop();

	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
	}
	data = fuse_get_mpdata(mp);
	if (!data) {
		panic("no private data for mount point?");
	}
	/* There is 1 extra root vnode reference (mp->mnt_data). */
	FUSE_LOCK();
	if (data->vroot != NULL) {
		struct vnode *vroot = data->vroot;

		data->vroot = NULL;
		FUSE_UNLOCK();
		vrele(vroot);
	} else
		FUSE_UNLOCK();
	err = vflush(mp, 0, flags, td);
	if (err) {
		debug_printf("vflush failed");
		return err;
	}
	if (fdata_get_dead(data)) {
		goto alreadydead;
	}
	fdisp_init(&fdi, 0);
	fdisp_make(&fdi, FUSE_DESTROY, mp, 0, td, NULL);

	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);

	fdata_set_dead(data);

alreadydead:
	FUSE_LOCK();
	data->mp = NULL;
	fdev = data->fdev;
	fdata_trydestroy(data);
	FUSE_UNLOCK();

	MNT_ILOCK(mp);
	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	dev_rel(fdev);

	return 0;
}
Example #8
0
__private_extern__
int
fuse_internal_init_synchronous(struct fuse_ticket *ftick)
{
    int err = 0;
    struct fuse_init_out *fiio;
    struct fuse_data *data = ftick->tk_data;

    if ((err = ftick->tk_aw_ohead.error)) {
        goto out;
    }

    fiio = fticket_resp(ftick)->base;

    if ((fiio->major < MACFUSE_MIN_USER_VERSION_MAJOR) ||
        (fiio->minor < MACFUSE_MIN_USER_VERSION_MINOR)){
        IOLog("MacFUSE: user-space library has too low a version\n");
        err = EPROTONOSUPPORT;
        goto out;
    }

    data->fuse_libabi_major = fiio->major;
    data->fuse_libabi_minor = fiio->minor;

    if (fuse_libabi_geq(data, MACFUSE_MIN_USER_VERSION_MAJOR,
                              MACFUSE_MIN_USER_VERSION_MINOR)) {
        if (fticket_resp(ftick)->len == sizeof(struct fuse_init_out)) {
            data->max_write = fiio->max_write;
        } else {
            err = EINVAL;
        }
    } else {
        /* Old fixed values */
        data->max_write = 4096;
    }

    if (fiio->flags & FUSE_CASE_INSENSITIVE) {
        data->dataflags |= FSESS_CASE_INSENSITIVE;
    }

    if (fiio->flags & FUSE_VOL_RENAME) {
        data->dataflags |= FSESS_VOL_RENAME;
    }

    if (fiio->flags & FUSE_XTIMES) {
        data->dataflags |= FSESS_XTIMES;
    }

out:
    fuse_ticket_drop(ftick);

    if (err) {
        fdata_set_dead(data);
    }

    fuse_lck_mtx_lock(data->ticket_mtx);
    data->dataflags |= FSESS_INITED;
    fuse_wakeup(&data->ticketer);
    fuse_lck_mtx_unlock(data->ticket_mtx);

    return 0;
}