Example #1
/*
 * Called when an unmount is requested and certain sanity checks have
 * already passed.  At this point no dentries or inodes have been reclaimed
 * from their respective caches.  We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed.  All snapshots
 * must already have been unmounted to reach this point.
 */
void
zfs_preumount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	/* zsb is NULL when zfs_domount fails during mount */
	if (zsb) {
		zfsctl_destroy(sb->s_fs_info);
		/*
		 * Wait for iput_async before entering evict_inodes in
		 * generic_shutdown_super. We must finish before evict_inodes
		 * because, when lazytime is on or when zfs_purgedir calls
		 * zfs_zget, iput can bump i_count from 0 to 1. That would
		 * race with the i_count check in evict_inodes, which could
		 * then destroy the inode while we are still using it.
		 *
		 * We wait for two passes: xattr directories handled in the
		 * first pass may queue additional entries via zfs_purgedir,
		 * and the second pass waits for those. We don't use taskq_wait
		 * here because it is a pool-wide taskq; other mounted
		 * filesystems can constantly dispatch iput_async tasks, so
		 * there's no guarantee the taskq would ever be empty.
		 */
		taskq_wait_outstanding(dsl_pool_iput_taskq(
		    dmu_objset_pool(zsb->z_os)), 0);
		taskq_wait_outstanding(dsl_pool_iput_taskq(
		    dmu_objset_pool(zsb->z_os)), 0);
	}
}
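
The two back-to-back taskq_wait_outstanding() calls above implement the two-pass drain the comment describes. What follows is a minimal, single-threaded userspace sketch of why one pass is not enough; every name in it is hypothetical, and it models only the one semantic that matters here: a wait processes the tasks dispatched before it began, not the tasks those tasks go on to dispatch, mirroring taskq_wait_outstanding(tq, 0).

#include <stdio.h>
#include <stdlib.h>

typedef void (*task_func_t)(void *);

typedef struct task {
	unsigned long id;
	task_func_t func;
	void *arg;
	struct task *next;
} task_t;

static task_t *queue_head;
static task_t **queue_tail = &queue_head;
static unsigned long next_id = 1;

/* Append a task; ids increase monotonically, like taskq dispatch ids. */
static void
dispatch(task_func_t func, void *arg)
{
	task_t *t = malloc(sizeof (task_t));

	t->id = next_id++;
	t->func = func;
	t->arg = arg;
	t->next = NULL;
	*queue_tail = t;
	queue_tail = &t->next;
}

/*
 * Run only the tasks queued before this call began, the way
 * taskq_wait_outstanding(tq, 0) waits only for already-dispatched tasks.
 */
static void
wait_outstanding(void)
{
	unsigned long cutoff = next_id;

	while (queue_head != NULL && queue_head->id < cutoff) {
		task_t *t = queue_head;

		queue_head = t->next;
		if (queue_head == NULL)
			queue_tail = &queue_head;
		t->func(t->arg);
		free(t);
	}
}

static void
purge_entry(void *arg)
{
	printf("pass 2: purged xattr entry under %s\n", (char *)arg);
}

/* An iput-style task on an xattr dir queues follow-up purge work. */
static void
iput_xattr_dir(void *arg)
{
	printf("pass 1: released xattr dir %s\n", (char *)arg);
	dispatch(purge_entry, arg);
}

int
main(void)
{
	dispatch(iput_xattr_dir, "xattrdir");

	wait_outstanding();	/* first pass: may queue more work */
	wait_outstanding();	/* second pass: drains what pass 1 queued */

	printf("queue empty: %s\n", queue_head == NULL ? "yes" : "no");
	return (0);
}

The second wait_outstanding() picks up the purge task queued during the first pass and leaves the queue empty, just as the second kernel call catches the xattr entries queued by zfs_purgedir.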
Example #2
File: zfs_vfsops.c Project: nwf/zfs
/*
 * Teardown the zfs_sb_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
	znode_t	*zp;

	/*
	 * If someone has not already unmounted this file system,
	 * drain the iput_taskq to ensure all active references to the
	 * zfs_sb_t have been handled; only then can it be safely destroyed.
	 */
	if (zsb->z_os) {
		/*
		 * If we're unmounting we have to wait for the list to
		 * drain completely.
		 *
		 * If we're not unmounting there's no guarantee the list
		 * will drain completely, but iputs run from the taskq
		 * may add the parents of dir-based xattrs to the taskq,
		 * so we want to wait for these.
		 *
		 * We can safely read z_nr_znodes without locking because the
		 * VFS has already blocked operations which add to the
		 * z_all_znodes list and thus increment z_nr_znodes.
		 */
		int round = 0;
		while (zsb->z_nr_znodes > 0) {
			taskq_wait_outstanding(dsl_pool_iput_taskq(
			    dmu_objset_pool(zsb->z_os)), 0);
			if (++round > 1 && !unmounting)
				break;
		}
	}

	rrm_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block.  Note, 'z_parent' is self-referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zsb->z_parent->z_sb);
	}

	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zsb->z_log) {
		zil_close(zsb->z_log);
		zsb->z_log = NULL;
	}

	rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (i.e. online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
		rw_exit(&zsb->z_teardown_inactive_lock);
		rrm_exit(&zsb->z_teardown_lock, FTAG);
		return (SET_ERROR(EIO));
	}

	/*
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	if (!unmounting) {
		mutex_enter(&zsb->z_znodes_lock);
		for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
		    zp = list_next(&zsb->z_all_znodes, zp)) {
			if (zp->z_sa_hdl)
				zfs_znode_dmu_fini(zp);
		}
		mutex_exit(&zsb->z_znodes_lock);
	}

	/*
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
	 */
	if (unmounting) {
		zsb->z_unmounted = B_TRUE;
		rrm_exit(&zsb->z_teardown_lock, FTAG);
		rw_exit(&zsb->z_teardown_inactive_lock);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zsb, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zsb->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zsb);

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
	    !zfs_is_readonly(zsb))
		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
	dmu_objset_evict_dbufs(zsb->z_os);

	return (0);
}
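
The EIO bail-out above and the comment about new VFS ops failing describe a barrier built from z_teardown_lock and the z_unmounted flag. Below is a minimal userspace sketch of that barrier, assuming a plain pthread rwlock in place of the re-entrant rrm lock (the re-entrancy that lets zfs_inactive run during teardown is not modeled) and hypothetical names throughout.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for zfs_sb_t's z_teardown_lock and z_unmounted. */
typedef struct {
	pthread_rwlock_t teardown_lock;
	bool unmounted;
} fs_t;

/* Every VFS op enters as a reader and bails with EIO once unmounted. */
static int
vfs_op(fs_t *fs)
{
	pthread_rwlock_rdlock(&fs->teardown_lock);
	if (fs->unmounted) {
		pthread_rwlock_unlock(&fs->teardown_lock);
		return (EIO);
	}
	/* ... the operation proper runs here, with teardown excluded ... */
	pthread_rwlock_unlock(&fs->teardown_lock);
	return (0);
}

/*
 * Teardown takes the lock as writer: it blocks until in-flight ops have
 * drained, then flips the flag so every later op fails fast.
 */
static void
teardown(fs_t *fs)
{
	pthread_rwlock_wrlock(&fs->teardown_lock);
	fs->unmounted = true;
	pthread_rwlock_unlock(&fs->teardown_lock);
}

int
main(void)	/* build: cc barrier.c -lpthread */
{
	fs_t fs = { PTHREAD_RWLOCK_INITIALIZER, false };

	printf("before teardown: %d\n", vfs_op(&fs));	/* 0 */
	teardown(&fs);
	printf("after teardown:  %d\n", vfs_op(&fs));	/* EIO */
	return (0);
}

Operations serialize against teardown by entering as readers; the writer acquisition in teardown() waits for in-flight readers to drain before flipping the flag, after which every later operation fails fast with EIO.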
Example #3
/*
 * Teardown the zfs_sb_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
	znode_t	*zp;

	rrw_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
		 * super block.  Note, 'z_parent' is self-referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zsb->z_parent->z_sb);
	}

	/*
	 * If someone has not already unmounted this file system,
	 * drain the iput_taskq to ensure all active references to the
	 * zfs_sb_t have been handled; only then can it be safely destroyed.
	 */
	if (zsb->z_os)
		taskq_wait(dsl_pool_iput_taskq(dmu_objset_pool(zsb->z_os)));

	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zsb->z_log) {
		zil_close(zsb->z_log);
		zsb->z_log = NULL;
	}

	rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (i.e. online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
		rw_exit(&zsb->z_teardown_inactive_lock);
		rrw_exit(&zsb->z_teardown_lock, FTAG);
		return (EIO);
	}

	/*
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		if (zp->z_sa_hdl) {
			ASSERT(atomic_read(&ZTOI(zp)->i_count) > 0);
			zfs_znode_dmu_fini(zp);
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

	/*
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
	 */
	if (unmounting) {
		zsb->z_unmounted = B_TRUE;
		rrw_exit(&zsb->z_teardown_lock, FTAG);
		rw_exit(&zsb->z_teardown_inactive_lock);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zsb, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zsb->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zsb);

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
	    !zfs_is_readonly(zsb))
		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
	dmu_objset_evict_dbufs(zsb->z_os);

	return (0);
}
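
The walk over z_all_znodes above relies on z_znodes_lock to keep the list stable while each znode's dbuf holds are dropped. Below is a minimal userspace sketch of that walk-and-finalize pattern; node_t, node_list_t, and node_fini() are hypothetical stand-ins for the znode list, its mutex, and zfs_znode_dmu_fini().

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a znode carrying an optional SA handle. */
typedef struct node {
	struct node *next;
	void *handle;			/* models zp->z_sa_hdl */
} node_t;

typedef struct {
	pthread_mutex_t lock;		/* models z_znodes_lock */
	node_t *head;			/* models z_all_znodes */
} node_list_t;

/* Models zfs_znode_dmu_fini(): drop the hold but keep the node listed. */
static void
node_fini(node_t *n)
{
	free(n->handle);
	n->handle = NULL;
}

/*
 * Walk the list under the mutex so concurrent inserts and removals are
 * excluded, finalizing only nodes that still hold a handle.
 */
static void
release_all_holds(node_list_t *l)
{
	node_t *n;

	pthread_mutex_lock(&l->lock);
	for (n = l->head; n != NULL; n = n->next) {
		if (n->handle != NULL)
			node_fini(n);
	}
	pthread_mutex_unlock(&l->lock);
}

int
main(void)
{
	node_t b = { NULL, malloc(16) };
	node_t a = { &b, NULL };	/* no handle, like z_sa_hdl == NULL */
	node_list_t l = { PTHREAD_MUTEX_INITIALIZER, &a };

	release_all_holds(&l);
	printf("a.handle=%p b.handle=%p\n", a.handle, b.handle);
	return (0);
}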
Example #4
void
zfs_rmnode(znode_t *zp)
{
    zfs_sb_t	*zsb = ZTOZSB(zp);
    objset_t	*os = zsb->z_os;
    znode_t		*xzp = NULL;
    dmu_tx_t	*tx;
    uint64_t	acl_obj;
    uint64_t	xattr_obj;
    uint64_t	count;
    int		error;

    ASSERT(zp->z_links == 0);
    ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);

    /*
     * If this is an attribute directory, purge its contents.
     */
    if (S_ISDIR(ZTOI(zp)->i_mode) && (zp->z_pflags & ZFS_XATTR)) {
        error = zap_count(os, zp->z_id, &count);
        if (error) {
            zfs_znode_dmu_fini(zp);
            return;
        }

        if (count > 0) {
            taskq_t *taskq;

            /*
             * There are still directory entries in this xattr
             * directory.  Let zfs_unlinked_drain() deal with
             * them to avoid deadlocking this process in the
             * zfs_purgedir()->zfs_zget()->ilookup() callpath
             * on the xattr inode's I_FREEING bit.
             */
            taskq = dsl_pool_iput_taskq(dmu_objset_pool(os));
            taskq_dispatch(taskq, (task_func_t *)
                           zfs_unlinked_drain, zsb, TQ_SLEEP);

            zfs_znode_dmu_fini(zp);
            return;
        }
    }

    /*
     * Free up all the data in the file.
     */
    error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
    if (error) {
        /*
         * Not enough space.  Leave the file in the unlinked set.
         */
        zfs_znode_dmu_fini(zp);
        return;
    }

    /*
     * If the file has extended attributes, we're going to unlink
     * the xattr dir.
     */
    error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
                      &xattr_obj, sizeof (xattr_obj));
    if (error == 0 && xattr_obj) {
        error = zfs_zget(zsb, xattr_obj, &xzp);
        ASSERT(error == 0);
    }

    acl_obj = zfs_external_acl(zp);

    /*
     * Set up the final transaction.
     */
    tx = dmu_tx_create(os);
    dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
    dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
    if (xzp) {
        dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, TRUE, NULL);
        dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
    }
    if (acl_obj)
        dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);

    zfs_sa_upgrade_txholds(tx, zp);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        /*
         * Not enough space to delete the file.  Leave it in the
         * unlinked set, leaking it until the fs is remounted (at
         * which point we'll call zfs_unlinked_drain() to process it).
         */
        dmu_tx_abort(tx);
        zfs_znode_dmu_fini(zp);
        goto out;
    }

    if (xzp) {
        ASSERT(error == 0);
        mutex_enter(&xzp->z_lock);
        xzp->z_unlinked = B_TRUE;	/* mark xzp for deletion */
        xzp->z_links = 0;	/* no more links to it */
        VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zsb),
                              &xzp->z_links, sizeof (xzp->z_links), tx));
        mutex_exit(&xzp->z_lock);
        zfs_unlinked_add(xzp, tx);
    }

    /* Remove this znode from the unlinked set */
    VERIFY3U(0, ==,
             zap_remove_int(zsb->z_os, zsb->z_unlinkedobj, zp->z_id, tx));

    zfs_znode_delete(zp, tx);

    dmu_tx_commit(tx);
out:
    if (xzp)
        iput(ZTOI(xzp));
}
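
The transaction handling at the tail of zfs_rmnode() follows a fixed shape: declare holds, try to assign, abort and defer on failure, otherwise apply the changes and commit. The sketch below models only that control flow with hypothetical stubs; it is not the real DMU API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for dmu_tx_t and its lifecycle calls. */
typedef struct tx {
	int nholds;
} tx_t;

static tx_t *
tx_create(void)
{
	return (calloc(1, sizeof (tx_t)));
}

/* Models the dmu_tx_hold_*() declarations made before assignment. */
static void
tx_hold(tx_t *tx)
{
	tx->nholds++;
}

/* Models dmu_tx_assign(tx, TXG_WAIT); fails here only when asked to. */
static int
tx_assign(tx_t *tx, int simulate_enospc)
{
	(void) tx;
	return (simulate_enospc ? ENOSPC : 0);
}

static void
tx_abort(tx_t *tx)
{
	free(tx);		/* an aborted tx has modified nothing */
}

static void
tx_commit(tx_t *tx)
{
	free(tx);		/* a committed tx applies its changes */
}

/*
 * Models the tail of zfs_rmnode(): on assignment failure the object is
 * left in the unlinked set for a later zfs_unlinked_drain() to retry.
 */
static int
remove_object(int simulate_enospc)
{
	tx_t *tx = tx_create();

	tx_hold(tx);		/* free of the object's data */
	tx_hold(tx);		/* update of the unlinked-set ZAP */

	if (tx_assign(tx, simulate_enospc) != 0) {
		tx_abort(tx);	/* defer; nothing was modified */
		return (ENOSPC);
	}

	/* ... apply the updates covered by the holds ... */
	tx_commit(tx);
	return (0);
}

int
main(void)
{
	printf("normal: %d\n", remove_object(0));
	printf("enospc: %d\n", remove_object(1));
	return (0);
}

The property mirrored here is that an aborted transaction has made no changes, so the object can safely stay in the unlinked set until a later zfs_unlinked_drain() pass retries the removal.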