Example #1
static int
fuse_internal_print_vnodes_callback(vnode_t vp, __unused void *cargs)
{
    const char *vname = NULL;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);

#if M_MACFUSE_ENABLE_UNSUPPORTED
    vname = vnode_getname(vp);
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

    if (vname) {
        IOLog("MacFUSE: vp=%p ino=%lld parent=%lld inuse=%d %s\n",
              vp, fvdat->nodeid, fvdat->parent_nodeid,
              vnode_isinuse(vp, 0), vname);
    } else {
        if (fvdat->nodeid == FUSE_ROOT_ID) {
            IOLog("MacFUSE: vp=%p ino=%lld parent=%lld inuse=%d /\n",
                  vp, fvdat->nodeid, fvdat->parent_nodeid,
                  vnode_isinuse(vp, 0));
        } else {
            IOLog("MacFUSE: vp=%p ino=%lld parent=%lld inuse=%d\n",
                  vp, fvdat->nodeid, fvdat->parent_nodeid,
                  vnode_isinuse(vp, 0));
        }
    }

#if M_MACFUSE_ENABLE_UNSUPPORTED
    if (vname) {
        vnode_putname(vname);
    }
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */
 
    return VNODE_RETURNED;
}
Example #2
static int
vboxvfs_vnode_close(struct vnop_close_args *args)
{
    vnode_t          vnode;
    mount_t          mp;
    vboxvfs_vnode_t *pVnodeData;
    vboxvfs_mount_t *pMount;

    int rc;

    PDEBUG("Closing vnode...");

    AssertReturn(args, EINVAL);

    vnode           = args->a_vp;                              AssertReturn(vnode,      EINVAL);
    pVnodeData      = (vboxvfs_vnode_t *)vnode_fsnode(vnode);  AssertReturn(pVnodeData, EINVAL);
    mp              = vnode_mount(vnode);                      AssertReturn(mp,         EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);             AssertReturn(pMount,     EINVAL);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    if (vnode_isinuse(vnode, 0))
    {
        PDEBUG("vnode '%s' (handle 0x%X) is still in use, just return ok",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* At this point, make sure the vnode has a VBoxVFS object handle assigned */
    if (pVnodeData->pHandle == SHFL_HANDLE_NIL)
    {
        PDEBUG("vnode has no active VBoxVFS object handle set, aborting");
        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return EINVAL;
    }

    rc = vboxvfs_close_internal(pMount, pVnodeData->pHandle);
    if (rc == 0)
    {
        PDEBUG("Close success: '%s' (handle 0x%X)",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        /* Forget about previously assigned VBoxVFS object handle */
        pVnodeData->pHandle = SHFL_HANDLE_NIL;
    }
    else
    {
        PDEBUG("Unable to close: '%s' (handle 0x%X): %d",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle, rc);
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
}
Example #3
static int
vnop_remove_9p(struct vnop_remove_args *ap)
{
	vnode_t dvp, vp;
	node_9p *dnp, *np;
	int e;

	TRACE();
	dvp = ap->a_dvp;
	vp = ap->a_vp;
	dnp = NTO9P(dvp);
	np = NTO9P(vp);

	if (dvp == vp) {
		panic("parent == node");
		return EINVAL;
	}

	/* Honor VNODE_REMOVE_NODELETEBUSY: refuse to delete a file that is still in use. */
	if (ISSET(ap->a_flags, VNODE_REMOVE_NODELETEBUSY) &&
		vnode_isinuse(vp, 0))
		return EBUSY;

	nlock_9p(dnp, NODE_LCK_EXCLUSIVE);
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	if ((e=remove_9p(np->nmp, np->fid)))
		goto error;

	/* The remove succeeded on the server: purge cached names and let VFS recycle the vnode. */
	cache_purge(vp);
	vnode_recycle(vp);

error:
	nunlock_9p(np);
	nunlock_9p(dnp);
	return e;
}
Example #4
/**
 * Unmount VBoxVFS.
 *
 * @param mp        Mount data provided by VFS layer.
 * @param fFlags    Unmounting flags.
 * @param pContext  kAuth context needed in order to authenticate the unmount operation.
 *
 * @return 0 on success or BSD error code otherwise.
 */
static int
vboxvfs_unmount(struct mount *mp, int fFlags, vfs_context_t pContext)
{
    NOREF(pContext);

    vboxvfs_mount_t *pMount;
    int                  rc = EBUSY;
    int                  fFlush = (fFlags & MNT_FORCE) ? FORCECLOSE : 0;

    PDEBUG("Attempting to %s unmount a shared folder", (fFlags & MNT_FORCE) ? "forcibly" : "normally");

    AssertReturn(mp, EINVAL);

    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);

    AssertReturn(pMount,             EINVAL);
    AssertReturn(pMount->pRootVnode, EINVAL);

    /* Check if we can do unmount at the moment */
    if (!vnode_isinuse(pMount->pRootVnode, 1))
    {
        /* Flush child vnodes first */
        rc = vflush(mp, pMount->pRootVnode, fFlush);
        if (rc == 0)
        {
            /* Flush root vnode */
            rc = vflush(mp, NULL, fFlush);
            if (rc == 0)
            {
                vfs_setfsprivate(mp, NULL);

                rc = VbglR0SfUnmapFolder(&g_vboxSFClient, &pMount->pMap);
                if (RT_SUCCESS(rc))
                {
                    vboxvfs_destroy_internal_data(&pMount);
                    PDEBUG("A shared folder has been successfully unmounted");
                    return 0;
                }

                PDEBUG("Unable to unmount shared folder");
                rc = EPROTO;
            }
            else
                PDEBUG("Unable to flush filesystem before unmount, some data might be lost");
        }
        else
            PDEBUG("Unable to flush child vnodes");

    }
    else
        PDEBUG("Root vnode is in use, can't unmount");

    vnode_put(pMount->pRootVnode);

    return rc;
}
Example #5
static int
devfsspec_close(struct vnop_close_args *ap)
        /* struct vnop_close_args {
		struct vnode *a_vp;
		int  a_fflag;
		vfs_context_t a_context;
	} */
{
    	struct vnode *	    	vp = ap->a_vp;
	register devnode_t * 	dnp;
	struct timeval now;

	/* If the vnode is still open elsewhere, refresh the devnode's timestamps before closing. */
	if (vnode_isinuse(vp, 1)) {
	    DEVFS_LOCK();
	    microtime(&now);
	    dnp = VTODN(vp);
	    dn_times(dnp, &now, &now, &now);
	    DEVFS_UNLOCK();
	}
	/* Pass the close on to the common special-file (specfs) vnode operations. */
	return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
}
Example #6
static int
vfs_unmount_9p(mount_t mp, int mntflags, __unused vfs_context_t ctx)
{
	mount_9p *nmp;
	vnode_t vp;
	int e, flags;

	TRACE();
	nmp = MTO9P(mp);
	flags = 0;
	if(ISSET(mntflags,MNT_FORCE))
		SET(flags, FORCECLOSE);

	OSBitOrAtomic(F_UNMOUNTING, &nmp->flags);
	vp = nmp->root;
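	/* Flush all vnodes except the root; without FORCECLOSE this fails if any are still busy. */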
	if ((e=vflush(mp, vp, flags)))
		goto error;

	if (vnode_isinuse(vp, 1) && !ISSET(flags, FORCECLOSE)) {
		e = EBUSY;
		goto error;
	}

	clunk_9p(nmp, NTO9P(vp)->fid);
	vnode_rele(vp);
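	/* Force-flush the remaining vnodes (including the root) and tear down the mount state. */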
	vflush(mp, NULL, FORCECLOSE);
	vfs_setfsprivate(mp, NULL);
	disconnect_9p(nmp);
	cancelrpcs_9p(nmp);
	freemount_9p(nmp);
	return 0;

error:
	OSBitAndAtomic(~F_UNMOUNTING, &nmp->flags);
	return e;
}
Example #7
static errno_t
fuse_vfsop_unmount(mount_t mp, int mntflags, vfs_context_t context)
{
    int   err        = 0;
    int   flags      = 0;

    fuse_device_t          fdev;
    struct fuse_data      *data;
    struct fuse_dispatcher fdi;

    vnode_t fuse_rootvp = NULLVP;

    fuse_trace_printf_vfsop();

    if (mntflags & MNT_FORCE) {
        flags |= FORCECLOSE;
    }

    data = fuse_get_mpdata(mp);
    if (!data) {
        panic("fuse4x: no mount private data in vfs_unmount");
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif

    fdev = data->fdev;

    if (fdata_dead_get(data)) {

        /*
         * If the file system daemon is dead, it's pointless to try to do
         * any unmount-time operations that go out to user space. Therefore,
         * we pretend that this is a force unmount. However, this isn't of much
         * use. That's because if any non-root vnode is in use, the vflush()
         * that the kernel does before calling our VFS_UNMOUNT will fail
         * if the original unmount wasn't forcible already. That earlier
         * vflush is called with SKIPROOT though, so it wouldn't bail out
         * on the root vnode being in use.
         *
         * If we want, we could set FORCECLOSE here so that a non-forced
         * unmount will be "upgraded" to a forced unmount if the root vnode
         * is busy (you are cd'd to the mount point, for example). It's not
         * quite pure to do that though.
         *
         *    flags |= FORCECLOSE;
         *    log("fuse4x: forcing unmount on a dead file system\n");
         */

    } else if (!(data->dataflags & FSESS_INITED)) {
        flags |= FORCECLOSE;
        log("fuse4x: forcing unmount on not-yet-alive file system\n");
        fdata_set_dead(data);
    }

    fuse_rootvp = data->rootvp;

    fuse_trace_printf("%s: Calling vflush(mp, fuse_rootvp, flags=0x%X);\n", __FUNCTION__, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    err = vflush(mp, fuse_rootvp, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);
    if (err) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return err;
    }

    if (vnode_isinuse(fuse_rootvp, 1) && !(flags & FORCECLOSE)) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return EBUSY;
    }

    if (fdata_dead_get(data)) {
        goto alreadydead;
    }

    fdisp_init(&fdi, 0 /* no data to send along */);
    fdisp_make(&fdi, FUSE_DESTROY, mp, FUSE_ROOT_ID, context);

    fuse_trace_printf("%s: Waiting for reply from FUSE_DESTROY.\n", __FUNCTION__);
    err = fdisp_wait_answ(&fdi);
    fuse_trace_printf("%s:   Reply received.\n", __FUNCTION__);
    if (!err) {
        fuse_ticket_drop(fdi.tick);
    }

    /*
     * Note that dounmount() signals a VQ_UNMOUNT VFS event.
     */

    fdata_set_dead(data);

alreadydead:

    fuse_trace_printf("%s: Calling vnode_rele(fuse_rootp);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    vnode_rele(fuse_rootvp); /* We got this reference in fuse_vfsop_mount(). */
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    data->rootvp = NULLVP;

    fuse_trace_printf("%s: Calling vflush(mp, NULLVP, FORCECLOSE);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    (void)vflush(mp, NULLVP, FORCECLOSE);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    fuse_lck_mtx_lock(fdev->mtx);

    vfs_setfsprivate(mp, NULL);
    data->dataflags &= ~FSESS_MOUNTED;
    OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif

    if (!(data->dataflags & FSESS_OPENED)) {

        /* fdev->data was left for us to clean up */

        fuse_device_close_final(fdev);

        /* fdev->data is gone now */
    }

    fuse_lck_mtx_unlock(fdev->mtx);

    return 0;
}
Example #8
static int
vboxvfs_vnode_open(struct vnop_open_args *args)
{
    vnode_t           vnode;
    vboxvfs_vnode_t  *pVnodeData;
    uint32_t          fHostFlags;
    mount_t           mp;
    vboxvfs_mount_t  *pMount;

    int rc;

    PDEBUG("Opening vnode...");

    AssertReturn(args, EINVAL);

    vnode           = args->a_vp;                              AssertReturn(vnode,      EINVAL);
    pVnodeData      = (vboxvfs_vnode_t *)vnode_fsnode(vnode);  AssertReturn(pVnodeData, EINVAL);
    mp              = vnode_mount(vnode);                      AssertReturn(mp,         EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);             AssertReturn(pMount,     EINVAL);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    if (vnode_isinuse(vnode, 0))
    {
        PDEBUG("vnode '%s' (handle 0x%X) already has VBoxVFS object handle assigned, just return ok",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* At this point, make sure that nobody is already using the VBoxVFS object handle */
    //if (pVnodeData->Handle != SHFL_HANDLE_NIL)
    //{
    //    PDEBUG("vnode has active VBoxVFS object handle set, aborting");
    //    lck_rw_unlock_exclusive(pVnodeData->pLock);
    //    return EINVAL;
    //}

    fHostFlags  = vboxvfs_g2h_mode_inernal(args->a_mode);
    fHostFlags |= (vnode_isdir(vnode) ? SHFL_CF_DIRECTORY : 0);

    SHFLHANDLE Handle;
    rc = vboxvfs_open_internal(pMount, pVnodeData->pPath, fHostFlags, &Handle);
    if (rc == 0)
    {
        PDEBUG("Open success: '%s' (handle 0x%X)",
               (char *)pVnodeData->pPath->String.utf8,
               (int)Handle);

        pVnodeData->pHandle = Handle;
    }
    else
    {
        PDEBUG("Unable to open: '%s': %d",
               (char *)pVnodeData->pPath->String.utf8,
               rc);
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
}
Example #9
/*
 * Free reference to null layer
 */
static int
nullfs_unmount(struct mount * mp, int mntflags, __unused vfs_context_t ctx)
{
	struct null_mount * mntdata;
	struct vnode * vp;
	int error, flags;

	NULLFSDEBUG("nullfs_unmount: mp = %p\n", (void *)mp);

	/* check entitlement or superuser*/
	if (!IOTaskHasEntitlement(current_task(), NULLFS_ENTITLEMENT) &&
		vfs_context_suser(ctx) != 0) {
		return EPERM;
	}

	if (mntflags & MNT_FORCE) {
		flags = FORCECLOSE;
	} else {
		flags = 0;
	}

	mntdata = MOUNTTONULLMOUNT(mp);
	vp      = mntdata->nullm_rootvp;

	// release our reference on the root before flushing.
	// it will get pulled out of the mount structure by reclaim
	vnode_getalways(vp);

	error = vflush(mp, vp, flags);
	if (error)
	{
		vnode_put(vp);
		return (error);
	}

	if (vnode_isinuse(vp,1) && flags == 0)
	{
		vnode_put(vp);
		return EBUSY;
	}

	vnode_rele(vp); // Drop reference taken by nullfs_mount
	vnode_put(vp); // Drop ref taken above

	//Force close to get rid of the last vnode
	(void)vflush(mp, NULL, FORCECLOSE);

	/* no more vnodes, so tear down the mountpoint */

	lck_mtx_lock(&mntdata->nullm_lock);

	vfs_setfsprivate(mp, NULL);

	vnode_getalways(mntdata->nullm_lowerrootvp);
	vnode_rele(mntdata->nullm_lowerrootvp);
	vnode_put(mntdata->nullm_lowerrootvp);

	lck_mtx_unlock(&mntdata->nullm_lock);

	nullfs_destroy_lck(&mntdata->nullm_lock);

	FREE(mntdata, M_TEMP);

	uint64_t vflags = vfs_flags(mp);
	vfs_setflags(mp, vflags & ~MNT_LOCAL);

	return (0);
}
Example #10
int
SRXAFSCB_GetCE(struct rx_call *a_call, afs_int32 a_index,
	       struct AFSDBCacheEntry *a_result)
{

    int i;		/*Loop variable */
    struct vcache *tvc;	/*Ptr to current cache entry */
    int code;			/*Return code */
    XSTATS_DECLS;

    RX_AFS_GLOCK();

    XSTATS_START_CMTIME(AFS_STATS_CM_RPCIDX_GETCE);

    AFS_STATCNT(SRXAFSCB_GetCE);
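    /* Walk the vcache hash chains, decrementing a_index until the requested entry is found. */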
    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    if (a_index == 0)
		goto searchDone;
	    a_index--;
	}			/*Zip through current hash chain */
    }				/*Zip through hash chains */

  searchDone:
    if (tvc == NULL) {
	/*Past EOF */
	code = 1;
	goto fcnDone;
    }

    /*
     * Copy out the located entry.
     */
    a_result->addr = afs_data_pointer_to_int32(tvc);
    a_result->cell = tvc->f.fid.Cell;
    a_result->netFid.Volume = tvc->f.fid.Fid.Volume;
    a_result->netFid.Vnode = tvc->f.fid.Fid.Vnode;
    a_result->netFid.Unique = tvc->f.fid.Fid.Unique;
    a_result->lock.waitStates = tvc->lock.wait_states;
    a_result->lock.exclLocked = tvc->lock.excl_locked;
    a_result->lock.readersReading = tvc->lock.readers_reading;
    a_result->lock.numWaiting = tvc->lock.num_waiting;
#if defined(INSTRUMENT_LOCKS)
    a_result->lock.pid_last_reader = MyPidxx2Pid(tvc->lock.pid_last_reader);
    a_result->lock.pid_writer = MyPidxx2Pid(tvc->lock.pid_writer);
    a_result->lock.src_indicator = tvc->lock.src_indicator;
#else
    /* On osf20, the vcache does not maintain these three fields */
    a_result->lock.pid_last_reader = 0;
    a_result->lock.pid_writer = 0;
    a_result->lock.src_indicator = 0;
#endif /* INSTRUMENT_LOCKS */
#ifdef AFS_64BIT_CLIENT
    a_result->Length = (afs_int32) tvc->f.m.Length & 0xffffffff;
#else /* AFS_64BIT_CLIENT */
    a_result->Length = tvc->f.m.Length;
#endif /* AFS_64BIT_CLIENT */
    a_result->DataVersion = hgetlo(tvc->f.m.DataVersion);
    a_result->callback = afs_data_pointer_to_int32(tvc->callback);	/* XXXX Now a pointer; change it XXXX */
    a_result->cbExpires = tvc->cbExpires;
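    /* A vcache that is still being initialized is reported with a single reference. */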
    if (tvc->f.states & CVInit) {
        a_result->refCount = 1;
    } else {
#ifdef AFS_DARWIN80_ENV
    a_result->refCount = vnode_isinuse(AFSTOV(tvc),0)?1:0; /* XXX fix */
#else
    a_result->refCount = VREFCOUNT(tvc);
#endif
    }
    a_result->opens = tvc->opens;
    a_result->writers = tvc->execsOrWriters;
    a_result->mvstat = tvc->mvstat;
    a_result->states = tvc->f.states;
    code = 0;

    /*
     * Return our results.
     */
  fcnDone:
    XSTATS_END_TIME;

    RX_AFS_GUNLOCK();

    return (code);

}				/*SRXAFSCB_GetCE */