Example #1
/*
 * Called when an unmount is requested and certain sanity checks have
 * already passed.  At this point no dentries or inodes have been reclaimed
 * from their respective caches.  We drop the extra reference on the .zfs
 * control directory to allow everything to be reclaimed.  All snapshots
 * must already have been unmounted to reach this point.
 */
void
zfs_preumount(struct super_block *sb)
{
	zfs_sb_t *zsb = sb->s_fs_info;

	/* zsb is NULL when zfs_domount fails during mount */
	if (zsb) {
		zfsctl_destroy(sb->s_fs_info);
		/*
		 * Wait for iput_async before entering evict_inodes in
		 * generic_shutdown_super. We must finish before evict_inodes
		 * because when lazytime is on, or when zfs_purgedir calls
		 * zfs_zget, iput would bump i_count from 0 to 1. That would
		 * race with the i_count check in evict_inodes, which could
		 * then destroy the inode while we are still using it.
		 *
		 * We wait for two passes: xattr directories handled in the
		 * first pass may queue additional iputs for their entries
		 * via zfs_purgedir, and the second pass waits for those. We
		 * don't use taskq_wait here because it is a pool-wide taskq;
		 * other mounted filesystems can constantly do iput_async and
		 * there is no guarantee when the taskq will be empty.
		 */
		taskq_wait_outstanding(dsl_pool_iput_taskq(
		    dmu_objset_pool(zsb->z_os)), 0);
		taskq_wait_outstanding(dsl_pool_iput_taskq(
		    dmu_objset_pool(zsb->z_os)), 0);
	}
}
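
The double wait is the notable idiom here: taskq_wait_outstanding(tq, 0) only waits for tasks already dispatched, so work queued by those tasks needs a second pass. A minimal sketch of the idiom against the SPL taskq API (taskq_drain_two_pass is an illustrative name, not from the source):

/*
 * Drain a taskq whose tasks may dispatch one more level of follow-up
 * work, as the xattr-directory iputs above do.  An id of 0 waits for
 * every task dispatched so far.
 */
static void
taskq_drain_two_pass(taskq_t *tq)
{
	taskq_wait_outstanding(tq, 0);	/* original tasks */
	taskq_wait_outstanding(tq, 0);	/* tasks the first pass enqueued */
}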
Example #2
static int
splat_taskq_test7_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	splat_taskq_arg_t *tq_arg;
	taskq_ent_t *tqe;
	int error;

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	             "Taskq '%s' creating (%s dispatch)\n",
	             SPLAT_TASKQ_TEST7_NAME,
	             prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST7_NAME, 1, maxclsyspri,
	                       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST7_NAME);
		return -EINVAL;
	}

	tq_arg = kmem_alloc(sizeof (splat_taskq_arg_t), KM_SLEEP);
	tqe = kmem_alloc(sizeof (taskq_ent_t), KM_SLEEP);

	tq_arg->depth = 0;
	tq_arg->flag  = 0;
	tq_arg->id    = 0;
	tq_arg->file  = file;
	tq_arg->name  = SPLAT_TASKQ_TEST7_NAME;
	tq_arg->tq    = tq;

	if (prealloc) {
		taskq_init_ent(tqe);
		tq_arg->tqe = tqe;
	} else {
		tq_arg->tqe = NULL;
	}

	splat_taskq_test7_func(tq_arg);

	if (tq_arg->flag == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' waiting\n", tq_arg->name);
		taskq_wait_outstanding(tq, SPLAT_TASKQ_DEPTH_MAX);
	}

	error = (tq_arg->depth == SPLAT_TASKQ_DEPTH_MAX ? 0 : -EINVAL);

	/*
	 * Log before freeing tq_arg; reading tq_arg->name afterwards
	 * would be a use-after-free.
	 */
	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	             "Taskq '%s' destroying\n", tq_arg->name);

	kmem_free(tqe, sizeof (taskq_ent_t));
	kmem_free(tq_arg, sizeof (splat_taskq_arg_t));

	taskq_destroy(tq);

	return (error);
}
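
Unlike the unmount path, this test passes a specific id: taskq_wait_outstanding(tq, id) returns once every task whose id is at or below id has completed. A short sketch of that usage, assuming the SPL API (noop_func and wait_for_id are illustrative names):

static void
noop_func(void *arg)
{
	(void) arg;
}

static int
wait_for_id(taskq_t *tq)
{
	taskqid_t id;

	id = taskq_dispatch(tq, noop_func, NULL, TQ_SLEEP);
	if (id == 0)
		return (-EINVAL);	/* dispatch failed */

	taskq_wait_outstanding(tq, id);	/* blocks until this id has run */
	return (0);
}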
Example #3
File: zfs_vfsops.c Project: nwf/zfs
/*
 * Teardown the zfs_sb_t.
 *
 * Note, if 'unmounting' is FALSE, we return with the 'z_teardown_lock'
 * and 'z_teardown_inactive_lock' held.
 */
int
zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
{
	znode_t	*zp;

	/*
	 * If someone has not already unmounted this file system,
	 * drain the iput_taskq to ensure all active references to the
	 * zfs_sb_t have been handled; only then can it be safely destroyed.
	 */
	if (zsb->z_os) {
		/*
		 * If we're unmounting we have to wait for the list to
		 * drain completely.
		 *
		 * If we're not unmounting there's no guarantee the list
		 * will drain completely, but iputs run from the taskq
		 * may add the parents of dir-based xattrs to the taskq
		 * so we want to wait for these.
		 *
		 * We can safely read z_nr_znodes without locking because the
		 * VFS has already blocked operations which add to the
		 * z_all_znodes list and thus increment z_nr_znodes.
		 */
		int round = 0;
		while (zsb->z_nr_znodes > 0) {
			taskq_wait_outstanding(dsl_pool_iput_taskq(
			    dmu_objset_pool(zsb->z_os)), 0);
			if (++round > 1 && !unmounting)
				break;
		}
	}

	rrm_enter(&zsb->z_teardown_lock, RW_WRITER, FTAG);

	if (!unmounting) {
		/*
		 * We purge the parent filesystem's super block as the
		 * parent filesystem and all of its snapshots have their
		 * inode's super block set to the parent's filesystem's
	 * super block.  Note, 'z_parent' is self-referential
		 * for non-snapshots.
		 */
		shrink_dcache_sb(zsb->z_parent->z_sb);
	}

	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zsb->z_log) {
		zil_close(zsb->z_log);
		zsb->z_log = NULL;
	}

	rw_enter(&zsb->z_teardown_inactive_lock, RW_WRITER);

	/*
	 * If we are not unmounting (i.e. online recv) and someone already
	 * unmounted this file system while we were doing the switcheroo,
	 * or a reopen of z_os failed then just bail out now.
	 */
	if (!unmounting && (zsb->z_unmounted || zsb->z_os == NULL)) {
		rw_exit(&zsb->z_teardown_inactive_lock);
		rrm_exit(&zsb->z_teardown_lock, FTAG);
		return (SET_ERROR(EIO));
	}

	/*
	 * At this point there are no VFS ops active, and any new VFS ops
	 * will fail with EIO since we have z_teardown_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 */
	if (!unmounting) {
		mutex_enter(&zsb->z_znodes_lock);
		for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
		    zp = list_next(&zsb->z_all_znodes, zp)) {
			if (zp->z_sa_hdl)
				zfs_znode_dmu_fini(zp);
		}
		mutex_exit(&zsb->z_znodes_lock);
	}

	/*
	 * If we are unmounting, set the unmounted flag and let new VFS ops
	 * unblock.  zfs_inactive will have the unmounted behavior, and all
	 * other VFS ops will fail with EIO.
	 */
	if (unmounting) {
		zsb->z_unmounted = B_TRUE;
		rrm_exit(&zsb->z_teardown_lock, FTAG);
		rw_exit(&zsb->z_teardown_inactive_lock);
	}

	/*
	 * z_os will be NULL if there was an error in attempting to reopen
	 * zsb, so just return as the properties had already been
	 * unregistered and cached data had been evicted before.
	 */
	if (zsb->z_os == NULL)
		return (0);

	/*
	 * Unregister properties.
	 */
	zfs_unregister_callbacks(zsb);

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zsb->z_os)) &&
	    !zfs_is_readonly(zsb))
		txg_wait_synced(dmu_objset_pool(zsb->z_os), 0);
	dmu_objset_evict_dbufs(zsb->z_os);

	return (0);
}
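
The drain loop at the top of zfs_sb_teardown is the part worth isolating: it re-waits while a counter is nonzero, but caps itself at two rounds when a full drain is not required. A hedged sketch of that shape (taskq_drain_bounded, pending, and must_drain are illustrative names, not from the source):

/*
 * Wait on a taskq until *pending reaches zero.  When must_drain is
 * false, give up after two rounds: the first runs the queued iputs,
 * the second catches the xattr parents those iputs may enqueue.
 */
static void
taskq_drain_bounded(taskq_t *tq, volatile int *pending, boolean_t must_drain)
{
	int round = 0;

	while (*pending > 0) {
		taskq_wait_outstanding(tq, 0);
		if (++round > 1 && !must_drain)
			break;
	}
}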
Example #4
static int
splat_taskq_test6_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order[SPLAT_TASKQ_ORDER_MAX] = { 1,2,3,6,7,8,4,5 };
	taskq_ent_t *tqes;
	int i, rc = 0;
	uint_t tflags;

	tqes = kmem_alloc(sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX, KM_SLEEP);
	memset(tqes, 0, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
		     SPLAT_TASKQ_TEST6_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST6_NAME, 3, maxclsyspri,
		               50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST6_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST6_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;
		tflags = TQ_SLEEP;
		if (i > 4)
			tflags |= TQ_FRONT;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test6_func,
			                   &tq_id[i], tflags, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test6_func,
					    &tq_id[i], tflags);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			    "Taskq '%s' function '%s' dispatch failed\n",
			    tq_arg.name, sym2str(splat_taskq_test6_func));
			rc = -EINVAL;
			goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			    "Taskq '%s' expected taskqid %d got %d\n",
			    tq_arg.name, (int)tq_id[i].id, (int)id);
			rc = -EINVAL;
			goto out;
		}
		/* Sleep to let tasks 1-3 start executing. */
		if (i == 2)
			msleep(100);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name,
		     SPLAT_TASKQ_ORDER_MAX);
	taskq_wait_outstanding(tq, SPLAT_TASKQ_ORDER_MAX);
	rc = splat_taskq_test_order(&tq_arg, order);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		     "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	kmem_free(tqes, sizeof(*tqes) * SPLAT_TASKQ_ORDER_MAX);

	return rc;
}
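
Two details here generalize. A preallocated taskq_ent_t moves the allocation out of the dispatch path, and the id is read back from tqent_id rather than a return value; TQ_FRONT queues a task at the head of the pending list, which is why tasks 6-8 overtake 4-5 in the expected order above. A sketch of both dispatch styles, assuming the SPL API (dispatch_either and demo_func are illustrative names):

static void
demo_func(void *arg)
{
	(void) arg;
}

static taskqid_t
dispatch_either(taskq_t *tq, boolean_t prealloc, taskq_ent_t *tqe)
{
	if (prealloc) {
		/* No allocation at dispatch time; tqe must outlive the task. */
		taskq_init_ent(tqe);
		taskq_dispatch_ent(tq, demo_func, NULL, TQ_SLEEP, tqe);
		return (tqe->tqent_id);
	}

	/* Dynamic dispatch allocates internally; an id of 0 means failure. */
	return (taskq_dispatch(tq, demo_func, NULL, TQ_SLEEP));
}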