Example #1
void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}
Example #2
static int
splat_taskq_test1_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t tqe;

	taskq_init_ent(&tqe);

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
	             SPLAT_TASKQ_TEST1_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST1_NAME, 1, maxclsyspri,
			       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		           "Taskq '%s' create failed\n",
		           SPLAT_TASKQ_TEST1_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	tq_arg.id   = 0;
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST1_NAME;

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
	           "Taskq '%s' function '%s' dispatching\n",
	           tq_arg.name, sym2str(splat_taskq_test13_func));
	if (prealloc) {
		taskq_dispatch_ent(tq, splat_taskq_test13_func,
		                   &tq_arg, TQ_SLEEP, &tqe);
		id = tqe.tqent_id;
	} else {
		id = taskq_dispatch(tq, splat_taskq_test13_func,
				    &tq_arg, TQ_SLEEP);
	}

	if (id == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		             "Taskq '%s' function '%s' dispatch failed\n",
		             tq_arg.name, sym2str(splat_taskq_test13_func));
		taskq_destroy(tq);
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
	           tq_arg.name);
	taskq_wait(tq);
	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
	           tq_arg.name);

	taskq_destroy(tq);

	return (tq_arg.flag) ? 0 : -EINVAL;
}
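Example #2 exercises both SPL dispatch paths: taskq_dispatch(), which allocates the internal task entry at dispatch time, and taskq_dispatch_ent(), which reuses a caller-supplied taskq_ent_t so the dispatch itself cannot fail for lack of memory. The sketch below isolates just the preallocated path; it assumes the SPL taskq API used above, and my_func() is a hypothetical worker, not part of the example.

/*
 * Minimal sketch of preallocated-entry dispatch (SPL API).
 * my_func() is a hypothetical worker used only for illustration.
 */
static void
my_func(void *arg)
{
	/* the actual work would happen here */
}

static int
my_dispatch_prealloc(taskq_t *tq, void *arg)
{
	taskq_ent_t tqe;

	/* the entry must remain valid until the dispatched task has run */
	taskq_init_ent(&tqe);

	/* never fails for lack of memory; the id is stored in the entry */
	taskq_dispatch_ent(tq, my_func, arg, TQ_SLEEP, &tqe);
	if (tqe.tqent_id == 0)
		return (-EINVAL);

	/* wait so the stack-allocated entry is not freed while still queued */
	taskq_wait(tq);

	return (0);
}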
Example #3
/*
 * pppt_disable_svc
 *
 * clean up all existing sessions and deregister targets from STMF
 */
static void
pppt_disable_svc(void)
{
	pppt_tgt_t	*tgt, *next_tgt;
	avl_tree_t	delete_target_list;

	ASSERT(pppt_global.global_svc_state == PSS_DISABLING);

	avl_create(&delete_target_list,
	    pppt_tgt_avl_compare, sizeof (pppt_tgt_t),
	    offsetof(pppt_tgt_t, target_global_ln));

	PPPT_GLOBAL_LOCK();
	for (tgt = avl_first(&pppt_global.global_target_list);
	    tgt != NULL;
	    tgt = next_tgt) {
		next_tgt = AVL_NEXT(&pppt_global.global_target_list, tgt);
		avl_remove(&pppt_global.global_target_list, tgt);
		avl_add(&delete_target_list, tgt);
		pppt_tgt_async_delete(tgt);
	}
	PPPT_GLOBAL_UNLOCK();

	for (tgt = avl_first(&delete_target_list);
	    tgt != NULL;
	    tgt = next_tgt) {
		next_tgt = AVL_NEXT(&delete_target_list, tgt);
		mutex_enter(&tgt->target_mutex);
		while ((tgt->target_refcount > 0) ||
		    (tgt->target_state != TS_DELETING)) {
			cv_wait(&tgt->target_cv, &tgt->target_mutex);
		}
		mutex_exit(&tgt->target_mutex);

		avl_remove(&delete_target_list, tgt);
		pppt_tgt_destroy(tgt);
	}

	taskq_destroy(pppt_global.global_sess_taskq);

	taskq_destroy(pppt_global.global_dispatch_taskq);

	avl_destroy(&pppt_global.global_sess_list);
	avl_destroy(&pppt_global.global_target_list);

	(void) stmf_deregister_port_provider(pppt_global.global_pp);

	stmf_free(pppt_global.global_dbuf_store);
	pppt_global.global_dbuf_store = NULL;

	stmf_free(pppt_global.global_pp);
	pppt_global.global_pp = NULL;
}
Example #4
void
mdeg_fini(void)
{
	/*
	 * Flip the enabled switch off to make sure that
	 * no events get dispatched while things are being
	 * torn down.
	 */
	mdeg.enabled = B_FALSE;

	/* destroy the task queue */
	taskq_destroy(mdeg.taskq);

	/*
	 * Deallocate the table of registered clients
	 */
	kmem_free(mdeg.tbl, mdeg.maxclnts * sizeof (mdeg_clnt_t));
	rw_destroy(&mdeg.rwlock);

	/*
	 * Free up the cached MDs.
	 */
	if (mdeg.md_curr)
		(void) md_fini_handle(mdeg.md_curr);

	if (mdeg.md_prev)
		(void) md_fini_handle(mdeg.md_prev);

	mutex_destroy(&mdeg.lock);
}
Example #5
/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}
Example #6
int
zvol_init(void)
{
	int error;

	zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
		                  zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
	if (zvol_taskq == NULL) {
		printk(KERN_INFO "ZFS: taskq_create() failed\n");
		return (-ENOMEM);
	}

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		taskq_destroy(zvol_taskq);
		return (error);
	}

	blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
	                    THIS_MODULE, zvol_probe, NULL, NULL);

	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zvol_state_list, sizeof (zvol_state_t),
	            offsetof(zvol_state_t, zv_next));

	(void) zvol_create_minors(NULL);

	return (0);
}
Example #7
static void
system_taskq_fini(void *arg)
{

	taskq_destroy(system_taskq);
	uma_zdestroy(taskq_zone);
}
Example #8
static int
ipmi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	if (ipmi_found == B_FALSE)
		return (DDI_SUCCESS);

	if (!list_is_empty(&dev_list))
		return (DDI_FAILURE);

	/* poke the taskq so that it can terminate */
	sc->ipmi_detaching = 1;
	cv_signal(&sc->ipmi_request_added);

	ddi_remove_minor_node(dip, NULL);
	ipmi_dip = NULL;

	taskq_destroy(sc->ipmi_kthread);
	list_destroy(&dev_list);
	id_space_destroy(minor_ids);

	ipmi_attached = B_FALSE;
	return (DDI_SUCCESS);
}
Example #9
void
ipmi_shutdown(struct ipmi_softc *sc)
{
	taskq_destroy(sc->ipmi_kthread);

	cv_destroy(&sc->ipmi_request_added);
	mutex_destroy(&sc->ipmi_lock);
}
Example #10
static int
splat_rwlock_test2(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	taskq_t *tq;
	int i, rc = 0, tq_count = 256;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	/* Create several threads allowing tasks to race with each other */
	tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(),
			  maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
	if (tq == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Schedule N work items to the work queue each of which enters the
	 * writer rwlock, sleeps briefly, then exits the writer rwlock.  On a
	 * multiprocessor box these work items will be handled by all available
	 * CPUs.  The task function checks to ensure the tracked shared variable
	 * is always only incremented by one.  Additionally, the rwlock itself
	 * is instrumented such that if any two processors are in the
	 * critical region at the same time the system will panic.  If the
	 * rwlock is implemented correctly this will never happen; that's a pass.
	 */
	for (i = 0; i < tq_count; i++) {
		if (!taskq_dispatch(tq,splat_rwlock_test2_func,rwp,TQ_SLEEP)) {
			splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME,
				     "Failed to queue task %d\n", i);
			rc = -EINVAL;
		}
	}

	taskq_wait(tq);

	if (rwp->rw_rc == tq_count) {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
			     "correctly entered/exited the rwlock %d times\n",
			     num_online_cpus(), rwp->rw_rc);
	} else {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
			     "only processed %d/%d w rwlock work items\n",
			     num_online_cpus(), rwp->rw_rc, tq_count);
		rc = -EINVAL;
	}

	taskq_destroy(tq);
	rw_destroy(&(rwp->rw_rwlock));
out:
	kfree(rwp);
	return rc;
}
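Most of the examples follow the same lifecycle as the test above: create a taskq, dispatch work into it, wait for the queue to drain, and only then call taskq_destroy(). Below is a minimal sketch of that pattern, assuming the illumos/SPL-style API (taskq_create/taskq_dispatch/taskq_wait/taskq_destroy); my_worker() and my_arg_t are hypothetical names used only for illustration.

/*
 * Minimal create/dispatch/wait/destroy sketch (illumos/SPL-style API).
 * my_worker() and my_arg_t are hypothetical placeholders.
 */
typedef struct my_arg {
	int	ma_count;
} my_arg_t;

static void
my_worker(void *arg)
{
	my_arg_t *ma = arg;

	ma->ma_count++;
}

static int
my_taskq_lifecycle(void)
{
	taskq_t *tq;
	my_arg_t ma = { 0 };

	/* single thread, prepopulated entries, like most examples above */
	tq = taskq_create("my_taskq", 1, maxclsyspri, 50, INT_MAX,
	    TASKQ_PREPOPULATE);
	if (tq == NULL)
		return (ENOMEM);

	if (taskq_dispatch(tq, my_worker, &ma, TQ_SLEEP) == 0) {
		taskq_destroy(tq);
		return (EINVAL);
	}

	/* ensure my_worker() finished with &ma before it goes out of scope */
	taskq_wait(tq);
	taskq_destroy(tq);

	return (0);
}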
Example #11
static int
splat_taskq_test7_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	splat_taskq_arg_t *tq_arg;
	taskq_ent_t *tqe;
	int error;

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	             "Taskq '%s' creating (%s dispatch)\n",
	             SPLAT_TASKQ_TEST7_NAME,
	             prealloc ? "prealloc" :  "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST7_NAME, 1, maxclsyspri,
	                       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST7_NAME);
		return -EINVAL;
	}

	tq_arg = kmem_alloc(sizeof (splat_taskq_arg_t), KM_SLEEP);
	tqe = kmem_alloc(sizeof (taskq_ent_t), KM_SLEEP);

	tq_arg->depth = 0;
	tq_arg->flag  = 0;
	tq_arg->id    = 0;
	tq_arg->file  = file;
	tq_arg->name  = SPLAT_TASKQ_TEST7_NAME;
	tq_arg->tq    = tq;

	if (prealloc) {
		taskq_init_ent(tqe);
		tq_arg->tqe = tqe;
	} else {
		tq_arg->tqe = NULL;
	}

	splat_taskq_test7_func(tq_arg);

	if (tq_arg->flag == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' waiting\n", tq_arg->name);
		taskq_wait_outstanding(tq, SPLAT_TASKQ_DEPTH_MAX);
	}

	error = (tq_arg->depth == SPLAT_TASKQ_DEPTH_MAX ? 0 : -EINVAL);

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	              "Taskq '%s' destroying\n", tq_arg->name);

	kmem_free(tqe, sizeof (taskq_ent_t));
	kmem_free(tq_arg, sizeof (splat_taskq_arg_t));

	taskq_destroy(tq);

	return (error);
}
Example #12
void
zvol_fini(void)
{
	zvol_remove_minors(NULL);
	blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
	unregister_blkdev(zvol_major, ZVOL_DRIVER);
	taskq_destroy(zvol_taskq);
	mutex_destroy(&zvol_state_lock);
	list_destroy(&zvol_state_list);
}
Example #13
int
_fini(void)
{
    int ret;

    SCKM_DEBUG0(D_INIT, "in _fini");

    if ((ret = mod_remove(&modlinkage)) != 0) {
        return (ret);
    }

    /*
     * Wait for scheduled tasks to complete, then destroy task queue.
     */
    mutex_enter(&sckm_taskq_ptr_mutex);
    if (sckm_taskq != NULL) {
        taskq_destroy(sckm_taskq);
        sckm_taskq = NULL;
    }
    mutex_exit(&sckm_taskq_ptr_mutex);

    /*
     * Terminate incoming and outgoing IOSRAM mailboxes
     */
    mboxsc_fini(KEY_KDSC);
    mboxsc_fini(KEY_SCKD);

    /*
     * Destroy module synchronization objects and free memory
     */
    mutex_destroy(&sckm_task_mutex);
    mutex_destroy(&sckm_taskq_ptr_mutex);
    mutex_destroy(&sckm_umutex);
    cv_destroy(&sckm_cons_cv);

    if (sckm_udata.buf != NULL) {
        kmem_free(sckm_udata.buf, SCKM_SCKD_MAXDATA);
        sckm_udata.buf = NULL;
    }
    if (rep_data != NULL) {
        kmem_free(rep_data, SCKM_KDSC_MAXDATA);
        rep_data = NULL;
    }
    if (req_data != NULL) {
        kmem_free(req_data, SCKM_SCKD_MAXDATA);
        req_data = NULL;
    }

    return (ret);
}
Example #14
/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct emul64	*emul64;
	scsi_hba_tran_t	*tran;
	int		instance = ddi_get_instance(dip);


	/* get transport structure pointer from the dip */
	if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
		return (DDI_FAILURE);
	}

	/* get soft state from transport structure */
	emul64 = TRAN2EMUL64(tran);

	if (!emul64) {
		return (DDI_FAILURE);
	}

	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

	switch (cmd) {
	case DDI_DETACH:
		EMUL64_MUTEX_ENTER(emul64);

		taskq_destroy(emul64->emul64_taskq);
		(void) scsi_hba_detach(dip);

		scsi_hba_tran_free(emul64->emul64_tran);


		EMUL64_MUTEX_EXIT(emul64);

		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));


		EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
		ddi_soft_state_free(emul64_state, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
Example #15
static int test_taskq(void)
{
	struct taskq *tq;

	tq = taskq_create("test_taskq", 1, 0, 0, 0, 0);
	if (!tq) {
		kprintf("failed to create test taskq\n");
		return 1;
	}

	if (do_test(tq, "test taskq"))
		return 1;

	taskq_destroy(tq);
	return 0;
}
Example #16
void
do_cpu_config_watch(int state)
{
	static struct xenbus_watch cpu_config_watch;

	if (state != XENSTORE_UP)
		return;
	cpu_config_watch.node = "cpu";
	cpu_config_watch.callback = vcpu_config_event;
	if (register_xenbus_watch(&cpu_config_watch)) {
		taskq_destroy(cpu_config_tq);
		cmn_err(CE_WARN, "do_cpu_config_watch: "
		    "failed to set vcpu config watch");
	}

}
Example #17
static int
splat_taskq_test7_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskq_ent_t tqe;
	splat_taskq_arg_t tq_arg;

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	             "Taskq '%s' creating (%s dispatch)\n",
	             SPLAT_TASKQ_TEST7_NAME,
	             prealloc ? "prealloc" :  "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST7_NAME, 1, maxclsyspri,
	                       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST7_NAME);
		return -EINVAL;
	}

	tq_arg.depth = 0;
	tq_arg.flag  = 0;
	tq_arg.id    = 0;
	tq_arg.file  = file;
	tq_arg.name  = SPLAT_TASKQ_TEST7_NAME;
	tq_arg.tq    = tq;

	if (prealloc) {
		taskq_init_ent(&tqe);
		tq_arg.tqe = &tqe;
	} else {
		tq_arg.tqe = NULL;
	}

	splat_taskq_test7_func(&tq_arg);

	if (tq_arg.flag == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' waiting\n", tq_arg.name);
		taskq_wait_id(tq, SPLAT_TASKQ_DEPTH_MAX);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	              "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return tq_arg.depth == SPLAT_TASKQ_DEPTH_MAX ? 0 : -EINVAL;
}
Example #18
void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_vnrele_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}
Example #19
static int
splat_rwlock_test4(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	taskq_t *tq;
	int rc = 0, rc1, rc2, rc3, rc4, rc5, rc6;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, 1, maxclsyspri,
			  50, INT_MAX, TASKQ_PREPOPULATE);
	if (tq == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	splat_init_rw_priv(rwp, file);

	/* Validate all combinations of rw_tryenter() contention */
	rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER);
	rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER);
	rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER);
	rc4 = splat_rwlock_test4_type(tq, rwp, 0,      RW_READER, RW_READER);
	rc5 = splat_rwlock_test4_type(tq, rwp, 0,      RW_NONE,   RW_WRITER);
	rc6 = splat_rwlock_test4_type(tq, rwp, 0,      RW_NONE,   RW_READER);

	if (rc1 || rc2 || rc3 || rc4 || rc5 || rc6)
		rc = -EINVAL;

	taskq_destroy(tq);
out:
	rw_destroy(&(rwp->rw_rwlock));
	kfree(rwp);

	return rc;
}
Example #20
void
idm_conn_sm_fini(idm_conn_t *ic)
{

	/*
	 * The connection may only be partially created. If there
	 * is no taskq, then the connection SM was not initialized.
	 */
	if (ic->ic_state_taskq == NULL) {
		return;
	}

	taskq_destroy(ic->ic_state_taskq);

	cv_destroy(&ic->ic_state_cv);
	/*
	 * The thread that generated the event that got us here may still
	 * hold the ic_state_mutex. Once it is released we can safely
	 * destroy it since there is no way to locate the object now.
	 */
	mutex_enter(&ic->ic_state_mutex);
	mutex_destroy(&ic->ic_state_mutex);
}
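The NULL check in idm_conn_sm_fini() relies on the matching init path publishing the taskq pointer only after taskq_create() succeeds, so a partially constructed connection can still be torn down safely. A small sketch of that init/fini pairing follows; my_obj_t and its fields are hypothetical names, not part of the idm code.

/*
 * Sketch of a guarded init/fini pair; my_obj_t and mo_taskq are
 * hypothetical names used only for illustration.
 */
typedef struct my_obj {
	taskq_t	*mo_taskq;
	/* ... other per-object state ... */
} my_obj_t;

static int
my_obj_init(my_obj_t *mo)
{
	/* mo_taskq stays NULL unless creation succeeds */
	mo->mo_taskq = taskq_create("my_obj_taskq", 1, minclsyspri,
	    4, INT_MAX, TASKQ_PREPOPULATE);

	return (mo->mo_taskq == NULL ? ENOMEM : 0);
}

static void
my_obj_fini(my_obj_t *mo)
{
	/* the object may be only partially constructed */
	if (mo->mo_taskq == NULL)
		return;

	taskq_destroy(mo->mo_taskq);
	mo->mo_taskq = NULL;
}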
Example #21
File: zvol.c Project: avg-I/zfs
int
zvol_init(void)
{
    int error;

    list_create(&zvol_state_list, sizeof (zvol_state_t),
                offsetof(zvol_state_t, zv_next));

    mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);

    zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
                              zvol_threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
    if (zvol_taskq == NULL) {
        printk(KERN_INFO "ZFS: taskq_create() failed\n");
        error = -ENOMEM;
        goto out1;
    }

    error = register_blkdev(zvol_major, ZVOL_DRIVER);
    if (error) {
        printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
        goto out2;
    }

    blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
                        THIS_MODULE, zvol_probe, NULL, NULL);

    return (0);

out2:
    taskq_destroy(zvol_taskq);
out1:
    mutex_destroy(&zvol_state_lock);
    list_destroy(&zvol_state_list);

    return (SET_ERROR(error));
}
Example #22
File: zil.c Project: harshada/zfs
/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	/*
	 * If the log isn't already committed, mark the objset dirty
	 * (so zil_sync() will be called) and wait for that txg to sync.
	 */
	if (!zil_is_committed(zilog)) {
		uint64_t txg;
		dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
		(void) dmu_tx_assign(tx, TXG_WAIT);
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
		txg = dmu_tx_get_txg(tx);
		dmu_tx_commit(tx);
		txg_wait_synced(zilog->zl_dmu_pool, txg);
	}

	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}
Example #23
void
vdev_file_fini(void)
{
	taskq_destroy(vdev_file_taskq);
}
Example #24
static int
dtrace_unload()
{
	dtrace_state_t *state;
	int error = 0;

#if __FreeBSD_version < 800039
	/*
	 * Check if there is still an event handler callback
	 * registered.
	 */
	if (eh_tag != 0) {
		/* De-register the device cloning event handler. */
		EVENTHANDLER_DEREGISTER(dev_clone, eh_tag);
		eh_tag = 0;

		/* Stop device cloning. */
		clone_cleanup(&dtrace_clones);
	}
#else
	destroy_dev(dtrace_dev);
	destroy_dev(helper_dev);
#endif

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	mutex_enter(&cpu_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&dtrace_provider_lock);
		return (EBUSY);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&cpu_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&dtrace_provider_lock);
		return (EBUSY);
	}

	dtrace_provider = NULL;
	EVENTHANDLER_DEREGISTER(kld_load, dtrace_kld_load_tag);
	EVENTHANDLER_DEREGISTER(kld_unload_try, dtrace_kld_unload_try_tag);

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * have not been allowed to detach; assert that there is
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, 0);
		dtrace_helptrace_buffer = NULL;
	}

	if (dtrace_probes != NULL) {
		kmem_free(dtrace_probes, 0);
		dtrace_probes = NULL;
		dtrace_nprobes = 0;
	}

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);

	delete_unrhdr(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange, 0);
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	mutex_destroy(&dtrace_meta_lock);
	mutex_destroy(&dtrace_provider_lock);
	mutex_destroy(&dtrace_lock);
#ifdef DEBUG
	mutex_destroy(&dtrace_errlock);
#endif

	taskq_destroy(dtrace_taskq);

	/* Reset our hook for exceptions. */
	dtrace_invop_uninit();

	/*
	 * Reset our hook for thread switches, but ensure that vtime isn't
	 * active first.
	 */
	dtrace_vtime_active = 0;
	dtrace_vtime_switch_func = NULL;

	/* Unhook from the trap handler. */
	dtrace_trap_func = NULL;

	return (error);
}
Example #25
/*
 * Cleanup the various pieces we needed for .zfs directories.  In particular
 * ensure the expiry timer is canceled safely.
 */
void
zfsctl_fini(void)
{
	taskq_destroy(zfs_expire_taskq);
}
Example #26
static int
splat_taskq_test4_common(struct file *file, void *arg, int minalloc,
                         int maxalloc, int nr_tasks, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t *tqes;
	int i, j, rc = 0;

	tqes = kmalloc(sizeof(*tqes) * nr_tasks, GFP_KERNEL);
	if (tqes == NULL)
		return -ENOMEM;

	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		     "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
		     SPLAT_TASKQ_TEST4_NAME,
		     prealloc ? "prealloc" : "dynamic",
		     minalloc, maxalloc, nr_tasks);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST4_NAME, 1, maxclsyspri,
		               minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST4_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST4_NAME;

	for (i = 1; i <= nr_tasks; i *= 2) {
		atomic_set(&tq_arg.count, 0);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		             "Taskq '%s' function '%s' dispatched %d times\n",
		             tq_arg.name, sym2str(splat_taskq_test4_func), i);

		for (j = 0; j < i; j++) {
			taskq_init_ent(&tqes[j]);

			if (prealloc) {
				taskq_dispatch_ent(tq, splat_taskq_test4_func,
				                   &tq_arg, TQ_SLEEP, &tqes[j]);
				id = tqes[j].tqent_id;
			} else {
				id = taskq_dispatch(tq, splat_taskq_test4_func,
						    &tq_arg, TQ_SLEEP);
			}

			if (id == 0) {
				splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
				        "Taskq '%s' function '%s' dispatch "
					"%d failed\n", tq_arg.name,
					sym2str(splat_taskq_test4_func), j);
					rc = -EINVAL;
					goto out;
			}
		}

		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
			     "waiting for %d dispatches\n", tq_arg.name, i);
		taskq_wait(tq);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
			     "%d/%d dispatches finished\n", tq_arg.name,
			     atomic_read(&tq_arg.count), i);
		if (atomic_read(&tq_arg.count) != i) {
			rc = -ERANGE;
			goto out;

		}
	}
out:
	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' destroying\n",
	           tq_arg.name);
	taskq_destroy(tq);

out_free:
	kfree(tqes);

	return rc;
}
Example #27
/*
 * pppt_enable_svc
 *
 * registers all the configured targets and target portals with STMF
 */
static int
pppt_enable_svc(void)
{
	stmf_port_provider_t	*pp;
	stmf_dbuf_store_t	*dbuf_store;
	int			rc = 0;

	ASSERT(pppt_global.global_svc_state == PSS_ENABLING);

	/*
	 * Make sure we can tell if we have partially allocated
	 * in case we need to exit and tear down anything allocated.
	 */
	pppt_global.global_dbuf_store = NULL;
	pp = NULL;
	pppt_global.global_pp = NULL;
	pppt_global.global_dispatch_taskq = NULL;
	pppt_global.global_sess_taskq = NULL;

	avl_create(&pppt_global.global_target_list,
	    pppt_tgt_avl_compare, sizeof (pppt_tgt_t),
	    offsetof(pppt_tgt_t, target_global_ln));

	avl_create(&pppt_global.global_sess_list,
	    pppt_sess_avl_compare_by_id, sizeof (pppt_sess_t),
	    offsetof(pppt_sess_t, ps_global_ln));

	/*
	 * Set up the STMF dbuf store.  If buffers are associated with a
	 * particular lport (FC, SRP) then the dbuf_store should be stored
	 * in the lport context; otherwise (iSCSI) the dbuf_store should
	 * be global.
	 */
	dbuf_store = stmf_alloc(STMF_STRUCT_DBUF_STORE, 0, 0);
	if (dbuf_store == NULL) {
		rc = ENOMEM;
		goto tear_down_and_return;
	}
	dbuf_store->ds_alloc_data_buf = pppt_dbuf_alloc;
	dbuf_store->ds_free_data_buf = pppt_dbuf_free;
	dbuf_store->ds_port_private = NULL;
	pppt_global.global_dbuf_store = dbuf_store;

	/* Register port provider */
	pp = stmf_alloc(STMF_STRUCT_PORT_PROVIDER, 0, 0);
	if (pp == NULL) {
		rc = ENOMEM;
		goto tear_down_and_return;
	}

	pp->pp_portif_rev = PORTIF_REV_1;
	pp->pp_instance = 0;
	pp->pp_name = PPPT_MODNAME;
	pp->pp_cb = NULL;

	pppt_global.global_pp = pp;

	if (stmf_register_port_provider(pp) != STMF_SUCCESS) {
		rc = EIO;
		goto tear_down_and_return;
	}

	pppt_global.global_dispatch_taskq = taskq_create("pppt_dispatch",
	    1, minclsyspri, 1, INT_MAX, TASKQ_PREPOPULATE);

	pppt_global.global_sess_taskq = taskq_create("pppt_session",
	    1, minclsyspri, 1, INT_MAX, TASKQ_PREPOPULATE);

	return (0);

tear_down_and_return:

	if (pppt_global.global_sess_taskq) {
		taskq_destroy(pppt_global.global_sess_taskq);
		pppt_global.global_sess_taskq = NULL;
	}

	if (pppt_global.global_dispatch_taskq) {
		taskq_destroy(pppt_global.global_dispatch_taskq);
		pppt_global.global_dispatch_taskq = NULL;
	}

	if (pppt_global.global_pp)
		pppt_global.global_pp = NULL;

	if (pp)
		stmf_free(pp);

	if (pppt_global.global_dbuf_store) {
		stmf_free(pppt_global.global_dbuf_store);
		pppt_global.global_dbuf_store = NULL;
	}

	avl_destroy(&pppt_global.global_sess_list);
	avl_destroy(&pppt_global.global_target_list);

	return (rc);
}
Example #28
static int
splat_taskq_test5_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order1[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,0,0,0 };
	int order2[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,8,6,7 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
		     SPLAT_TASKQ_TEST5_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST5_NAME, 3, maxclsyspri,
		               50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST5_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST5_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test5_func,
			               &tq_id[i], TQ_SLEEP, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test5_func,
					    &tq_id[i], TQ_SLEEP);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			        "Taskq '%s' function '%s' dispatch failed\n",
				tq_arg.name, sym2str(splat_taskq_test5_func));
				rc = -EINVAL;
				goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			        "Taskq '%s' expected taskqid %d got %d\n",
				tq_arg.name, (int)tq_id[i].id, (int)id);
				rc = -EINVAL;
				goto out;
		}
	}

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name, 3);
	taskq_wait_id(tq, 3);
	if ((rc = splat_taskq_test_order(&tq_arg, order1)))
		goto out;

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name, 8);
	taskq_wait_id(tq, 8);
	rc = splat_taskq_test_order(&tq_arg, order2);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		     "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}
Example #29
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		taskq_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}

		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = taskq_create("z_import", 2 * max_ncpus, defclsyspri,
		    2 * max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) taskq_dispatch(t, zpool_open_func, slice,
			    TQ_SLEEP);
		taskq_wait(t);
		taskq_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path, i+1,
					    slice->rn_num_labels, config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}
Example #30
static int
splat_taskq_test6_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order[SPLAT_TASKQ_ORDER_MAX] = { 1,2,3,6,7,8,4,5 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;
	uint_t tflags;

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
		     SPLAT_TASKQ_TEST6_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST6_NAME, 3, maxclsyspri,
		               50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST6_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST6_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;
		tflags = TQ_SLEEP;
		if (i > 4)
			tflags |= TQ_FRONT;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test6_func,
			                   &tq_id[i], tflags, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test6_func,
					    &tq_id[i], tflags);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			        "Taskq '%s' function '%s' dispatch failed\n",
				tq_arg.name, sym2str(splat_taskq_test6_func));
				rc = -EINVAL;
				goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			        "Taskq '%s' expected taskqid %d got %d\n",
				tq_arg.name, (int)tq_id[i].id, (int)id);
				rc = -EINVAL;
				goto out;
		}
		/* Sleep to let tasks 1-3 start executing. */
		if (i == 2)
			msleep(100);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name,
		     SPLAT_TASKQ_ORDER_MAX);
	taskq_wait_id(tq, SPLAT_TASKQ_ORDER_MAX);
	rc = splat_taskq_test_order(&tq_arg, order);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		     "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}