static int
zfsctl_unmount_snap(zfs_snapentry_t *sep, int fflags, cred_t *cr)
{
	vnode_t *svp = sep->se_root;
	int error;

	ASSERT(vn_ismntpt(svp));

	/* this will be dropped by dounmount() */
	if ((error = vn_vfswlock(svp)) != 0)
		return (error);

	VN_HOLD(svp);
	error = dounmount(vn_mountedvfs(svp), fflags, cr);
	if (error) {
		VN_RELE(svp);
		return (error);
	}

	/*
	 * We can't use VN_RELE(), as that will try to invoke
	 * zfsctl_snapdir_inactive(), which would cause us to destroy
	 * the sd_lock mutex held by our caller.
	 */
	ASSERT(svp->v_count == 1);
	gfs_vop_inactive(svp, cr, NULL);

	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	kmem_free(sep, sizeof (zfs_snapentry_t));

	return (0);
}
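
The lock-then-unmount handshake above recurs throughout the examples on this page: the caller takes the vfswlock on the mountpoint vnode, and dounmount() consumes that lock whether it succeeds or fails. A minimal sketch of just that convention, using the same illumos-style calls as the example above (the helper name unmount_mntpt is hypothetical):

static int
unmount_mntpt(vnode_t *mvp, int fflags, cred_t *cr)
{
	int error;

	ASSERT(vn_ismntpt(mvp));

	/* dounmount() drops this lock on both success and failure. */
	if ((error = vn_vfswlock(mvp)) != 0)
		return (error);

	VN_HOLD(mvp);
	if ((error = dounmount(vn_mountedvfs(mvp), fflags, cr)) != 0)
		VN_RELE(mvp);	/* unmount failed; give the hold back */

	/*
	 * On success the hold is still ours.  zfsctl_unmount_snap()
	 * above deliberately disposes of it with gfs_vop_inactive()
	 * rather than VN_RELE(), to avoid destroying its caller's
	 * sd_lock from inside the inactive callback.
	 */
	return (error);
}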
Example #2
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	caddr_t base;
	int sleepreturn = 0;

	base = mfsp->mfs_baseoff;
	while (mfsp->mfs_buflist != (struct buf *)-1) {
		while ((bp = mfsp->mfs_buflist) != NULL) {
			mfsp->mfs_buflist = bp->b_actf;
			mfs_doio(bp, base);
			wakeup((caddr_t)bp);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp, 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
Example #3
static OSKIT_COMDECL_U filesystem_release(oskit_filesystem_t *f)
{
    struct gfilesystem *fs;
    struct proc *p;
    dev_t dev;
    int rc;
    unsigned newcount;
    
    fs = (struct gfilesystem*)f; 
    if (fs == NULL)
	    panic("%s:%d: null filesystem", __FILE__, __LINE__);
    if (fs->count == 0)
	    panic("%s:%d: bad count", __FILE__, __LINE__);    

    /* Drop our reference; tear everything down when the last one goes. */
    newcount = --fs->count;
    if (newcount == 0)
    {
	rc = getproc(&p);
	assert(rc == 0);
	if (fs->mp)
	{
	    /* Remember the dev_t so we can release its blkio after unmount. */
	    dev = ((struct ufsmount *)fs->mp->mnt_data)->um_dev;
	    rc = dounmount(fs->mp, MNT_FORCE, p);
	    assert(rc == 0);
	    oskit_blkio_release((oskit_blkio_t *)dev);
	}
	prfree(p);
	free(fs, M_TEMP);
    }

    return newcount;
}
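Example #4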
/*
 * Reopen zfsvfs_t::z_os and release VOPs.
 */
int
zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname, int mode)
{
	int err;

	ASSERT(RRW_WRITE_HELD(&zfsvfs->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));

	err = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (err) {
		zfsvfs->z_os = NULL;
	} else {
		znode_t *zp;

		VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);

		/*
		 * Attempt to re-establish all the active znodes with
		 * their dbufs.  If a zfs_rezget() fails, then we'll let
		 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
		 * when they try to use their znode.
		 */
		mutex_enter(&zfsvfs->z_znodes_lock);
		for (zp = list_head(&zfsvfs->z_all_znodes); zp;
		    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
			(void) zfs_rezget(zp);
		}
		mutex_exit(&zfsvfs->z_znodes_lock);
	}

	/* release the VOPs */
	rw_exit(&zfsvfs->z_teardown_inactive_lock);
	rrw_exit(&zfsvfs->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't reopen zfsvfs::z_os, force
		 * unmount this file system.
		 */
		if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0)
			(void) dounmount(zfsvfs->z_vfs, MS_FORCE, curthread);
	}
	return (err);
}
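
zfs_resume_fs() is the second half of a suspend/resume pair. A hedged sketch of how a caller might bracket an objset-level operation with it, assuming the matching three-argument zfs_suspend_fs(zfsvfs, osname, &mode) from the same vintage of the code base (the helper name with_fs_suspended is hypothetical):

static int
with_fs_suspended(zfsvfs_t *zfsvfs, const char *osname,
    int (*op)(const char *))
{
	int mode, error, resume_err;

	/* Tear down z_os and take both teardown locks as writer. */
	if ((error = zfs_suspend_fs(zfsvfs, osname, &mode)) != 0)
		return (error);

	error = op(osname);

	/*
	 * Always attempt the resume; on failure zfs_resume_fs()
	 * force-unmounts the file system for us, as shown above.
	 */
	resume_err = zfs_resume_fs(zfsvfs, osname, mode);
	return (error != 0 ? error : resume_err);
}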
Example #5
static OSKIT_COMDECL filesystem_unmount(oskit_filesystem_t *f)
{
    struct gfilesystem *fs = (struct gfilesystem *) f; 
    struct mount *mp;
    dev_t dev;
    struct proc *p;
    oskit_error_t ferror;
    int error;

    if (!fs || !fs->count || !fs->mp)
	    return OSKIT_E_INVALIDARG;

    ferror = getproc(&p);
    if (ferror)
	    return ferror;

    mp = fs->mp;

    /*
     * Only root, or the user that did the original mount is
     * permitted to forcibly unmount it.
     */
    if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
	(error = suser(p->p_ucred, 0))) 
    {
	prfree(p);
	return errno_to_oskit_error(error);
    }

    /* Get the blkio "pointed" to by the dev_t so we can release it below. */
    dev = ((struct ufsmount *)mp->mnt_data)->um_dev;

    error = dounmount(fs->mp, MNT_FORCE, p);
    fs->mp = 0;
    prfree(p);
    oskit_blkio_release((oskit_blkio_t *)dev);
    if (error)
	    return errno_to_oskit_error(error);	

    return 0;    
}
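
The owner-or-superuser gate above is the standard 4.4BSD unmount permission check. Factored out as a hedged sketch, reusing only the calls and fields from the example (the helper name may_unmount is hypothetical):

static int
may_unmount(struct mount *mp, struct proc *p)
{
	/* The original mounter may always force the unmount... */
	if (mp->mnt_stat.f_owner == p->p_ucred->cr_uid)
		return (0);
	/* ...anyone else needs superuser credentials. */
	return (suser(p->p_ucred, 0));
}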
Example #6
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	int sleepreturn = 0;

	while (1) {
		while (1) {
			if (mfsp->mfs_shutdown == 1)
				break;
			bp = bufq_dequeue(&mfsp->mfs_bufq);
			if (bp == NULL)
				break;
			mfs_doio(mfsp, bp);
			wakeup(bp);
		}
		if (mfsp->mfs_shutdown == 1)
			break;

		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp,
			    (CURSIG(p) == SIGKILL) ? MNT_FORCE : 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
Example #7
/*
 * dsl_crypto_key_unload
 *
 * Remove the key from the in memory keystore.
 *
 * First we have to remove the minor node for a ZVOL or unmount
 * the filesystem.  This flushes all pending IO for it to disk, so
 * nothing will still need to be encrypted with this key.  Anything
 * in flight should already have a lock on the keys it needs.
 * We can't assume that userland has already successfully unmounted
 * the dataset, though in many cases it will have.
 *
 * If the key can't be removed return the failure back to our caller.
 */
int
dsl_crypto_key_unload(const char *dsname)
{
    dsl_dataset_t *ds;
    objset_t *os;
    int error;
    spa_t *spa;
    dsl_pool_t *dp;
#ifdef _KERNEL
    dmu_objset_type_t os_type;
    //vfs_t *vfsp;
    struct vfsmount *vfsp;
#endif /* _KERNEL */

    error = dsl_pool_hold(dsname, FTAG, &dp);
    if (error != 0)
        return (error);

    /* XXX - should we use own_exclusive() here? */
    if ((error = dsl_dataset_hold(dp, dsname, FTAG, &ds)) != 0) {
        dsl_pool_rele(dp, FTAG);
        return (error);
    }

    if ((error = dmu_objset_from_ds(ds, &os)) != 0) {
        dsl_dataset_rele(ds, FTAG);
        dsl_pool_rele(dp, FTAG);
        return (error);
    }

#ifdef _KERNEL
    /*
     * Make sure that the device node has gone for ZVOLs
     * and that filesystems are umounted.
     */
#if 0 // FIXME
    os_type = dmu_objset_type(os);
    if (os_type == DMU_OST_ZVOL) {
        error = zvol_remove_minor(dsname);
        if (error == ENXIO)
            error = 0;
    } else if (os_type == DMU_OST_ZFS) {
        vfsp = zfs_get_vfs(dsname);
        if (vfsp != NULL) {
            error = vn_vfswlock(vfsp->vfs_vnodecovered);
            VFS_RELE(vfsp);
            if (error == 0)
                error = dounmount(vfsp, 0, CRED());
        }
    }
    if (error != 0) {
        dsl_dataset_rele(ds, FTAG);
        return (error);
    }
#endif

#endif /* _KERNEL */

    /*
     * Make sure all dbufs are synced.
     *
     * It is essential for encrypted datasets to ensure that
     * there is no further pending IO before removing the key.
     */
    if (dmu_objset_is_dirty(os, 0)) // FIXME, 0?
        txg_wait_synced(dmu_objset_pool(os), 0);
    dmu_objset_evict_dbufs(os);

    spa = dsl_dataset_get_spa(ds);
    error = zcrypt_keystore_remove(spa, ds->ds_object);

    dsl_dataset_rele(ds, FTAG);
    dsl_pool_rele(dp, FTAG);
    return (error);
}
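
The hold/rele discipline in dsl_crypto_key_unload() is worth isolating: the pool hold is taken first, the dataset hold second, and every exit path releases them in reverse order. A minimal sketch of the same bracket, using only calls that appear above (the helper name with_dataset is hypothetical):

static int
with_dataset(const char *dsname, int (*op)(dsl_dataset_t *))
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	if ((error = dsl_pool_hold(dsname, FTAG, &dp)) != 0)
		return (error);

	if ((error = dsl_dataset_hold(dp, dsname, FTAG, &ds)) != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}

	error = op(ds);

	/* Release in the reverse of the order the holds were taken. */
	dsl_dataset_rele(ds, FTAG);
	dsl_pool_rele(dp, FTAG);
	return (error);
}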
Example #8
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags)
{
	struct vnode *vp;
	struct mfsnode *mfsp;
	struct proc *p;
	struct buf *bp;
	void *base;
	int sleepreturn = 0, refcnt, error;
	ksiginfoq_t kq;

	/*
	 * Ensure that file system is still mounted when getting mfsnode.
	 * Add a reference to the mfsnode to prevent it disappearing in
	 * this routine.
	 */
	if ((error = vfs_busy(mp, NULL)) != 0)
		return error;
	vp = VFSTOUFS(mp)->um_devvp;
	mfsp = VTOMFS(vp);
	mutex_enter(&mfs_lock);
	mfsp->mfs_refcnt++;
	mutex_exit(&mfs_lock);
	vfs_unbusy(mp, false, NULL);

	base = mfsp->mfs_baseoff;
	mutex_enter(&mfs_lock);
	while (mfsp->mfs_shutdown != 1) {
		while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL) {
			mutex_exit(&mfs_lock);
			mfs_doio(bp, base);
			mutex_enter(&mfs_lock);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, or the filesystem is already in the
		 * process of being unmounted, clear the signal (it has been
		 * "processed"), otherwise we will loop here, as tsleep
		 * will always return EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			mutex_exit(&mfs_lock);
			if (dounmount(mp, 0, curlwp) != 0) {
				p = curproc;
				ksiginfo_queue_init(&kq);
				mutex_enter(p->p_lock);
				sigclearall(p, NULL, &kq);
				mutex_exit(p->p_lock);
				ksiginfo_queue_drain(&kq);
			}
			sleepreturn = 0;
			mutex_enter(&mfs_lock);
			continue;
		}

		sleepreturn = cv_wait_sig(&mfsp->mfs_cv, &mfs_lock);
	}
	KASSERT(bufq_peek(mfsp->mfs_buflist) == NULL);
	refcnt = --mfsp->mfs_refcnt;
	mutex_exit(&mfs_lock);
	if (refcnt == 0) {
		bufq_free(mfsp->mfs_buflist);
		cv_destroy(&mfsp->mfs_cv);
		kmem_free(mfsp, sizeof(*mfsp));
	}
	return (sleepreturn);
}
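Example #9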
/*
 * Force the unmounting of a file descriptor from ALL of the nodes
 * that it was mounted to.
 * At the present time, the only usage for this routine is in the
 * event one end of a pipe was mounted. At the time the unmounted
 * end gets closed down, the mounted end is forced to be unmounted.
 *
 * This routine searches the namenode hash list for all namenodes
 * that have a nm_filevp field equal to vp. Each time one is found,
 * the dounmount() routine is called. This causes the nm_unmount()
 * routine to be called and thus, the file descriptor is unmounted
 * from the node.
 *
 * At the start of this routine, the reference count for vp is
 * incremented to protect the vnode from being released in the
 * event the mount was the only thing keeping the vnode active.
 * If that is the case, the VOP_CLOSE operation is applied to
 * the vnode, prior to it being released.
 */
static int
nm_umountall(vnode_t *vp, cred_t *crp)
{
	vfs_t *vfsp;
	struct namenode *nodep;
	int error = 0;
	int realerr = 0;

	/*
	 * For each namenode that is associated with the file:
	 * If the v_vfsp field is not namevfs, dounmount it.  Otherwise,
	 * it was created in nm_open() and will be released in time.
	 * The following loop replicates some code from nm_find.  That
	 * routine can't be used as is since the list isn't strictly
	 * consumed as it is traversed.
	 */
	mutex_enter(&ntable_lock);
	nodep = *NM_FILEVP_HASH(vp);
	while (nodep) {
		if (nodep->nm_filevp == vp &&
		    (vfsp = NMTOV(nodep)->v_vfsp) != NULL &&
		    vfsp != &namevfs && (NMTOV(nodep)->v_flag & VROOT)) {

			/*
			 * If the vn_vfswlock fails, skip the vfs since
			 * somebody else may be unmounting it.
			 */
			if (vn_vfswlock(vfsp->vfs_vnodecovered)) {
				realerr = EBUSY;
				nodep = nodep->nm_nextp;
				continue;
			}

			/*
			 * Can't hold ntable_lock across the call to dounmount()
			 * because nm_unmount() tries to acquire it.  This means
			 * there is a window where another mount of vp can
			 * happen, so it is possible that after nm_umountall()
			 * there are still some mounts.  This situation existed
			 * without MT locking because dounmount can sleep
			 * so another mount could happen during that time.
			 * This situation is unlikely and doesn't really cause
			 * any problems.
			 */
			mutex_exit(&ntable_lock);
			if ((error = dounmount(vfsp, 0, crp)) != 0)
				realerr = error;
			mutex_enter(&ntable_lock);
			/*
			 * Since we dropped the ntable_lock, we
			 * have to start over from the beginning.
			 * If dounmount() fails for some reason, starting
			 * from the beginning means that we will keep on
			 * trying unless another thread unmounts it for us.
			 */
			nodep = *NM_FILEVP_HASH(vp);
		} else
			nodep = nodep->nm_nextp;
	}
	mutex_exit(&ntable_lock);
	return (realerr);
}
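
nm_umountall() illustrates a classic pattern: a lock that protects a hash chain cannot be held across a blocking call (dounmount() may sleep), so the lock is dropped, the call made, and the scan restarted from the head because the chain may have changed meanwhile. A self-contained, userland sketch of the same pattern (all names hypothetical; pthreads stands in for the kernel mutex, and do_blocking_work() stands in for dounmount()):

#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
	int needs_work;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for dounmount(): may block, so no locks may be held. */
static int
do_blocking_work(struct node *np)
{
	np->needs_work = 0;	/* must make progress, or we loop forever */
	return (0);
}

static int
process_all(struct node *head)
{
	struct node *np;
	int error, realerr = 0;

	pthread_mutex_lock(&table_lock);
	np = head;
	while (np != NULL) {
		if (np->needs_work) {
			/* Drop the lock across the blocking call... */
			pthread_mutex_unlock(&table_lock);
			if ((error = do_blocking_work(np)) != 0)
				realerr = error;
			pthread_mutex_lock(&table_lock);
			/* ...then restart: the list may have changed. */
			np = head;
		} else
			np = np->next;
	}
	pthread_mutex_unlock(&table_lock);
	return (realerr);
}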