Code example #1
File: taskq.c Project: jpeach/zfsd
/* Like taskq_create(), but creates the taskq threads in the specified
 * system process.  If proc != &p0, this must be called from a thread
 * in that process.
 */
taskq_t *
taskq_create_proc(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, struct proc *proc, uint_t flags)
{
	// NOTE: proc is ignored.
	return taskq_create(name, nthreads, pri, minalloc, maxalloc, flags);
}
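Since this port ignores the proc argument, every caller gets an ordinary taskq no matter which process it names. A hypothetical call site might look like the sketch below; the queue name, thread count, and alloc bounds are illustrative assumptions, not taken from the project above.

/* Hypothetical usage sketch: on this port the proc argument is accepted
 * but ignored, so passing &p0 behaves exactly like plain taskq_create(). */
taskq_t *tq = taskq_create_proc("example_taskq", 4, minclsyspri,
    4, 8, &p0, TASKQ_PREPOPULATE);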
Code example #2
File: dsl_pool.c Project: ChaosJohn/freebsd
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}
Code example #3
File: taskq.c Project: jpeach/zfsd
/* Like taskq_create_proc(), but the taskq threads will use the
 * System Duty Cycle (SDC) scheduling class with a duty cycle of dc.
 */
taskq_t	*
taskq_create_sysdc(const char *name, int nthreads, int minalloc,
    int maxalloc, struct proc *proc, uint_t dc, uint_t flags)
{
	// NOTE: proc and dc are ignored.
	return taskq_create(name, nthreads, minclsyspri, minalloc, maxalloc, flags);
}
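The SDC variant additionally takes a duty cycle, the percentage of CPU the taskq threads may consume, which this port likewise drops. A hypothetical call site follows; the name, sizing, and the 20 percent duty cycle are illustrative assumptions.

/* Hypothetical usage sketch: dc requests a 20% duty cycle for the taskq
 * threads; on this port it is ignored and they simply run at minclsyspri. */
taskq_t *tq = taskq_create_sysdc("example_sysdc_tq", 8, 4, 16,
    curproc, 20, 0);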
Code example #4
File: dsl_pool.c Project: 64116278/zfs
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}
Code example #5
File: txg.c Project: Zak-Adelman/zfs
/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/* No need to lock tx_cpu_t at this point */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    100, minclsyspri, max_ncpus, INT_MAX,
			    TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
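The dispatched worker, txg_do_callbacks(), is not part of this excerpt. In ZFS it essentially runs the commit callbacks that were moved onto cb_list and then releases the list; a minimal sketch of that shape, assuming the usual dmu_tx_do_callbacks(), list_destroy(), and kmem_free() interfaces, would be:

/* Sketch of the dispatched worker: invoke every commit callback moved onto
 * cb_list with error 0, then tear down and free the list allocated above. */
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}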
Code example #6
idm_status_t
idm_conn_sm_init(idm_conn_t *ic)
{
	char taskq_name[32];

	/*
	 * Caller should have assigned a unique connection ID.  Use this
	 * connection ID to create a unique connection name string.
	 */
	ASSERT(ic->ic_internal_cid != 0);
	(void) snprintf(taskq_name, sizeof (taskq_name) - 1, "conn_sm%08x",
	    ic->ic_internal_cid);

	ic->ic_state_taskq = taskq_create(taskq_name, 1, minclsyspri, 4, 16384,
	    TASKQ_PREPOPULATE);
	if (ic->ic_state_taskq == NULL) {
		return (IDM_STATUS_FAIL);
	}

	idm_sm_audit_init(&ic->ic_state_audit);
	mutex_init(&ic->ic_state_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ic->ic_state_cv, NULL, CV_DEFAULT, NULL);

	ic->ic_state = CS_S1_FREE;
	ic->ic_last_state = CS_S1_FREE;

	return (IDM_STATUS_SUCCESS);
}
Code example #7
int
zvol_init(void)
{
	int error;

	zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
		                  zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
	if (zvol_taskq == NULL) {
		printk(KERN_INFO "ZFS: taskq_create() failed\n");
		return (-ENOMEM);
	}

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		taskq_destroy(zvol_taskq);
		return (error);
	}

	blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
	                    THIS_MODULE, zvol_probe, NULL, NULL);

	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zvol_state_list, sizeof (zvol_state_t),
	            offsetof(zvol_state_t, zv_next));

	(void) zvol_create_minors(NULL);

	return (0);
}
Code example #8
File: xpv_support.c Project: bahamas10/openzfs
static int
xpv_drv_init(void)
{
	if (xpv_feature(XPVF_HYPERCALLS) < 0 ||
	    xpv_feature(XPVF_SHARED_INFO) < 0)
		return (-1);

	/* Set up the grant tables.  */
	gnttab_init();

	/* Set up event channel support */
	if (ec_init() != 0)
		return (-1);

	/* Set up xenbus */
	xb_addr = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
	xs_early_init();
	xs_domu_init();

	/* Set up for suspend/resume/migrate */
	xen_shutdown_tq = taskq_create("shutdown_taskq", 1,
	    maxclsyspri - 1, 1, 1, TASKQ_PREPOPULATE);
	shutdown_watch.node = "control/shutdown";
	shutdown_watch.callback = xen_shutdown_handler;
	if (register_xenbus_watch(&shutdown_watch))
		cmn_err(CE_WARN, "Failed to set shutdown watcher");

	return (0);
}
Code example #9
void
vdev_file_init(void)
{
	vdev_file_taskq = taskq_create("vdev_file_taskq", 100, minclsyspri,
	    max_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	VERIFY(vdev_file_taskq);
}
Code example #10
File: splat-rwlock.c Project: harshada/spl
static int
splat_rwlock_test2(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	taskq_t *tq;
	int i, rc = 0, tq_count = 256;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	/* Create several threads allowing tasks to race with each other */
	tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(),
			  maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
	if (tq == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Schedule N work items to the work queue each of which enters the
	 * writer rwlock, sleeps briefly, then exits the writer rwlock.  On a
	 * multiprocessor box these work items will be handled by all available
	 * CPUs.  The task function checks to ensure the tracked shared variable
	 * is always only incremented by one.  Additionally, the rwlock itself
	 * is instrumented such that if any two processors are in the
	 * critical region at the same time the system will panic.  If the
	 * rwlock is implemented correctly this will never happen; that's a pass.
	 */
	for (i = 0; i < tq_count; i++) {
		if (!taskq_dispatch(tq, splat_rwlock_test2_func, rwp, TQ_SLEEP)) {
			splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME,
				     "Failed to queue task %d\n", i);
			rc = -EINVAL;
		}
	}

	taskq_wait(tq);

	if (rwp->rw_rc == tq_count) {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
			     "correctly entered/exited the rwlock %d times\n",
			     num_online_cpus(), rwp->rw_rc);
	} else {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
			     "only processed %d/%d w rwlock work items\n",
			     num_online_cpus(), rwp->rw_rc, tq_count);
		rc = -EINVAL;
	}

	taskq_destroy(tq);
	rw_destroy(&(rwp->rw_rwlock));
out:
	kfree(rwp);
	return rc;
}
Code example #11
static void
system_taskq_init(void *arg)
{

	taskq_zone = uma_zcreate("taskq_zone", sizeof(taskq_ent_t),
	    NULL, NULL, NULL, NULL, 0, 0);
	system_taskq = taskq_create("system_taskq", mp_ncpus, minclsyspri,
	    0, 0, 0);
}
Code example #12
File: zil.c Project: harshada/zfs
/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);
	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);
	return (zilog);
}
Code example #13
File: splat-taskq.c Project: BjoKaSH/spl-osx
static int
splat_taskq_test1_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t tqe;

	taskq_init_ent(&tqe);

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
	             SPLAT_TASKQ_TEST1_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST1_NAME, 1, maxclsyspri,
			       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		           "Taskq '%s' create failed\n",
		           SPLAT_TASKQ_TEST1_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	tq_arg.id   = 0;
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST1_NAME;

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
	           "Taskq '%s' function '%s' dispatching\n",
	           tq_arg.name, sym2str(splat_taskq_test13_func));
	if (prealloc) {
		taskq_dispatch_ent(tq, splat_taskq_test13_func,
		                   &tq_arg, TQ_SLEEP, &tqe);
		id = tqe.tqent_id;
	} else {
		id = taskq_dispatch(tq, splat_taskq_test13_func,
				    &tq_arg, TQ_SLEEP);
	}

	if (id == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		             "Taskq '%s' function '%s' dispatch failed\n",
		             tq_arg.name, sym2str(splat_taskq_test13_func));
		taskq_destroy(tq);
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
	           tq_arg.name);
	taskq_wait(tq);
	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
	           tq_arg.name);

	taskq_destroy(tq);

	return (tq_arg.flag) ? 0 : -EINVAL;
}
Code example #14
/*
 * This routine is called after all the "normal" MP startup has
 * been done; a good place to start watching xen store for virtual
 * cpu hot plug events.
 */
void
mach_cpucontext_fini(void)
{

	cpu_config_tq = taskq_create("vcpu config taskq", 1,
	    maxclsyspri - 1, 1, 1, TASKQ_PREPOPULATE);

	(void) xs_register_xenbus_callback(do_cpu_config_watch);
}
Code example #15
File: splat-taskq.c Project: bprotopopov/spl
static int
splat_taskq_test7_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	splat_taskq_arg_t *tq_arg;
	taskq_ent_t *tqe;
	int error;

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	             "Taskq '%s' creating (%s dispatch)\n",
	             SPLAT_TASKQ_TEST7_NAME,
	             prealloc ? "prealloc" :  "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST7_NAME, 1, maxclsyspri,
	                       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST7_NAME);
		return -EINVAL;
	}

	tq_arg = kmem_alloc(sizeof (splat_taskq_arg_t), KM_SLEEP);
	tqe = kmem_alloc(sizeof (taskq_ent_t), KM_SLEEP);

	tq_arg->depth = 0;
	tq_arg->flag  = 0;
	tq_arg->id    = 0;
	tq_arg->file  = file;
	tq_arg->name  = SPLAT_TASKQ_TEST7_NAME;
	tq_arg->tq    = tq;

	if (prealloc) {
		taskq_init_ent(tqe);
		tq_arg->tqe = tqe;
	} else {
		tq_arg->tqe = NULL;
	}

	splat_taskq_test7_func(tq_arg);

	if (tq_arg->flag == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' waiting\n", tq_arg->name);
		taskq_wait_outstanding(tq, SPLAT_TASKQ_DEPTH_MAX);
	}

	error = (tq_arg->depth == SPLAT_TASKQ_DEPTH_MAX ? 0 : -EINVAL);

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	              "Taskq '%s' destroying\n", tq_arg->name);

	/* Free the allocations only after tq_arg->name is no longer needed. */
	kmem_free(tqe, sizeof (taskq_ent_t));
	kmem_free(tq_arg, sizeof (splat_taskq_arg_t));

	taskq_destroy(tq);

	return (error);
}
Code example #16
File: zfs_osx.cpp Project: JHUDSSJC/osx-zfs-crypto
void
system_taskq_init(void)
{

    system_taskq = taskq_create("system_taskq",
                                system_taskq_size * max_ncpus,
                                minclsyspri, 4, 512,
                                TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
}
Code example #17
File: tst-solaris-taskq.c Project: Meeuwisse/osmu
static int test_taskq(void)
{
	struct taskq *tq;

	tq = taskq_create("test_taskq", 1, 0, 0, 0, 0);
	if (!tq) {
		kprintf("failed to create test taskq\n");
		return 1;
	}

	if (do_test(tq, "test taskq"))
		return 1;

	taskq_destroy(tq);
	return 0;
}
Code example #18
File: splat-taskq.c Project: BjoKaSH/spl-osx
static int
splat_taskq_test7_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskq_ent_t tqe;
	splat_taskq_arg_t tq_arg;

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	             "Taskq '%s' creating (%s dispatch)\n",
	             SPLAT_TASKQ_TEST7_NAME,
	             prealloc ? "prealloc" :  "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST7_NAME, 1, maxclsyspri,
	                       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST7_NAME);
		return -EINVAL;
	}

	tq_arg.depth = 0;
	tq_arg.flag  = 0;
	tq_arg.id    = 0;
	tq_arg.file  = file;
	tq_arg.name  = SPLAT_TASKQ_TEST7_NAME;
	tq_arg.tq    = tq;

	if (prealloc) {
		taskq_init_ent(&tqe);
		tq_arg.tqe = &tqe;
	} else {
		tq_arg.tqe = NULL;
	}

	splat_taskq_test7_func(&tq_arg);

	if (tq_arg.flag == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' waiting\n", tq_arg.name);
		taskq_wait_id(tq, SPLAT_TASKQ_DEPTH_MAX);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST7_NAME,
	              "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return tq_arg.depth == SPLAT_TASKQ_DEPTH_MAX ? 0 : -EINVAL;
}
Code example #19
void
xen_late_startup(void)
{
	if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
		xen_shutdown_tq = taskq_create("shutdown_taskq", 1,
		    maxclsyspri - 1, 1, 1, TASKQ_PREPOPULATE);
		shutdown_watch.node = "control/shutdown";
		shutdown_watch.callback = xen_shutdown_handler;
		if (register_xenbus_watch(&shutdown_watch))
			cmn_err(CE_WARN, "Failed to set shutdown watcher");

		sysrq_watch.node = "control/sysrq";
		sysrq_watch.callback = xen_sysrq_handler;
		if (register_xenbus_watch(&sysrq_watch))
			cmn_err(CE_WARN, "Failed to set sysrq watcher");
	}
	balloon_init(xen_info->nr_pages);
}
Code example #20
File: mdeg.c Project: apprisi/illumos-gate
int
mdeg_init(void)
{
	int	tblsz;

	/*
	 * Grab the current MD
	 */
	if ((mdeg.md_curr = md_get_handle()) == NULL) {
		cmn_err(CE_WARN, "unable to cache snapshot of MD");
		return (-1);
	}

	/*
	 * Initialize table of registered clients
	 */
	mdeg.maxclnts = MDEG_MAX_CLNTS_INIT;

	tblsz = mdeg.maxclnts * sizeof (mdeg_clnt_t);
	mdeg.tbl = kmem_zalloc(tblsz, KM_SLEEP);

	rw_init(&mdeg.rwlock, NULL, RW_DRIVER, NULL);

	mdeg.nclnts = 0;

	/*
	 * Initialize global lock
	 */
	mutex_init(&mdeg.lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * Initialize the task queue
	 */
	mdeg.taskq = taskq_create("mdeg_taskq", 1, minclsyspri, 1,
	    MDEG_MAX_TASKQ_THR, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	/* ready to begin handling clients */
	mdeg.enabled = B_TRUE;

	return (0);
}
Code example #21
File: splat-rwlock.c Project: harshada/spl
static int
splat_rwlock_test4(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	taskq_t *tq;
	int rc = 0, rc1, rc2, rc3, rc4, rc5, rc6;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, 1, maxclsyspri,
			  50, INT_MAX, TASKQ_PREPOPULATE);
	if (tq == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	splat_init_rw_priv(rwp, file);

	/* Validate all combinations of rw_tryenter() contention */
	rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER);
	rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER);
	rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER);
	rc4 = splat_rwlock_test4_type(tq, rwp, 0,      RW_READER, RW_READER);
	rc5 = splat_rwlock_test4_type(tq, rwp, 0,      RW_NONE,   RW_WRITER);
	rc6 = splat_rwlock_test4_type(tq, rwp, 0,      RW_NONE,   RW_READER);

	if (rc1 || rc2 || rc3 || rc4 || rc5 || rc6)
		rc = -EINVAL;

	taskq_destroy(tq);
out:
	rw_destroy(&(rwp->rw_rwlock));
	kfree(rwp);

	return rc;
}
Code example #22
File: zvol.c Project: avg-I/zfs
int
zvol_init(void)
{
    int error;

    list_create(&zvol_state_list, sizeof (zvol_state_t),
                offsetof(zvol_state_t, zv_next));

    mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);

    zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
                              zvol_threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
    if (zvol_taskq == NULL) {
        printk(KERN_INFO "ZFS: taskq_create() failed\n");
        error = -ENOMEM;
        goto out1;
    }

    error = register_blkdev(zvol_major, ZVOL_DRIVER);
    if (error) {
        printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
        goto out2;
    }

    blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
                        THIS_MODULE, zvol_probe, NULL, NULL);

    return (0);

out2:
    taskq_destroy(zvol_taskq);
out1:
    mutex_destroy(&zvol_state_lock);
    list_destroy(&zvol_state_list);

    return (SET_ERROR(error));
}
Code example #23
File: libzfs_import.c Project: cbreak-black/zfs
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no search
 * directories are given (iarg->paths is 0), the default import paths are used
 * (after attempting a libblkid scan where available).
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		taskq_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}

		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = taskq_create("z_import", 2 * max_ncpus, defclsyspri,
		    2 * max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) taskq_dispatch(t, zpool_open_func, slice,
			    TQ_SLEEP);
		taskq_wait(t);
		taskq_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path, i+1,
					    slice->rn_num_labels, config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}
Code example #24
File: pppt.c Project: madhavsuresh/illumos-gate
/*
 * pppt_enable_svc
 *
 * registers all the configured targets and target portals with STMF
 */
static int
pppt_enable_svc(void)
{
	stmf_port_provider_t	*pp;
	stmf_dbuf_store_t	*dbuf_store;
	int			rc = 0;

	ASSERT(pppt_global.global_svc_state == PSS_ENABLING);

	/*
	 * Make sure we can tell whether we have partially allocated,
	 * in case we need to exit and tear down anything already allocated.
	 */
	pppt_global.global_dbuf_store = NULL;
	pp = NULL;
	pppt_global.global_pp = NULL;
	pppt_global.global_dispatch_taskq = NULL;
	pppt_global.global_sess_taskq = NULL;

	avl_create(&pppt_global.global_target_list,
	    pppt_tgt_avl_compare, sizeof (pppt_tgt_t),
	    offsetof(pppt_tgt_t, target_global_ln));

	avl_create(&pppt_global.global_sess_list,
	    pppt_sess_avl_compare_by_id, sizeof (pppt_sess_t),
	    offsetof(pppt_sess_t, ps_global_ln));

	/*
	 * Set up the STMF dbuf store.  If buffers are associated with a
	 * particular lport (FC, SRP) then the dbuf_store should be stored in
	 * the lport context; otherwise (iSCSI) the dbuf_store should be global.
	 */
	dbuf_store = stmf_alloc(STMF_STRUCT_DBUF_STORE, 0, 0);
	if (dbuf_store == NULL) {
		rc = ENOMEM;
		goto tear_down_and_return;
	}
	dbuf_store->ds_alloc_data_buf = pppt_dbuf_alloc;
	dbuf_store->ds_free_data_buf = pppt_dbuf_free;
	dbuf_store->ds_port_private = NULL;
	pppt_global.global_dbuf_store = dbuf_store;

	/* Register port provider */
	pp = stmf_alloc(STMF_STRUCT_PORT_PROVIDER, 0, 0);
	if (pp == NULL) {
		rc = ENOMEM;
		goto tear_down_and_return;
	}

	pp->pp_portif_rev = PORTIF_REV_1;
	pp->pp_instance = 0;
	pp->pp_name = PPPT_MODNAME;
	pp->pp_cb = NULL;

	pppt_global.global_pp = pp;

	if (stmf_register_port_provider(pp) != STMF_SUCCESS) {
		rc = EIO;
		goto tear_down_and_return;
	}

	pppt_global.global_dispatch_taskq = taskq_create("pppt_dispatch",
	    1, minclsyspri, 1, INT_MAX, TASKQ_PREPOPULATE);

	pppt_global.global_sess_taskq = taskq_create("pppt_session",
	    1, minclsyspri, 1, INT_MAX, TASKQ_PREPOPULATE);

	return (0);

tear_down_and_return:

	if (pppt_global.global_sess_taskq) {
		taskq_destroy(pppt_global.global_sess_taskq);
		pppt_global.global_sess_taskq = NULL;
	}

	if (pppt_global.global_dispatch_taskq) {
		taskq_destroy(pppt_global.global_dispatch_taskq);
		pppt_global.global_dispatch_taskq = NULL;
	}

	if (pppt_global.global_pp)
		pppt_global.global_pp = NULL;

	if (pp)
		stmf_free(pp);

	if (pppt_global.global_dbuf_store) {
		stmf_free(pppt_global.global_dbuf_store);
		pppt_global.global_dbuf_store = NULL;
	}

	avl_destroy(&pppt_global.global_sess_list);
	avl_destroy(&pppt_global.global_target_list);

	return (rc);
}
Code example #25
File: zfs_ctldir.c Project: EW1/zfs
/*
 * Initialize the various pieces we'll need to create and manipulate .zfs
 * directories.  Currently this is unused but available.
 */
void
zfsctl_init(void)
{
	zfs_expire_taskq = taskq_create("z_unmount", 1, maxclsyspri,
	    1, 8, TASKQ_PREPOPULATE);
}
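The matching teardown presumably just destroys this taskq when .zfs support is shut down; a minimal sketch of such a zfsctl_fini() counterpart, assumed rather than quoted from the project above:

/* Sketch of the matching teardown for the taskq created in zfsctl_init(). */
void
zfsctl_fini(void)
{
	taskq_destroy(zfs_expire_taskq);
}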
Code example #26
File: splat-taskq.c Project: BjoKaSH/spl-osx
static int
splat_taskq_test2_impl(struct file *file, void *arg, boolean_t prealloc) {
	taskq_t *tq[TEST2_TASKQS] = { NULL };
	taskqid_t id;
	splat_taskq_arg_t tq_args[TEST2_TASKQS];
	taskq_ent_t *func1_tqes = NULL;
	taskq_ent_t *func2_tqes = NULL;
	int i, rc = 0;

	func1_tqes = kmalloc(sizeof(*func1_tqes) * TEST2_TASKQS, GFP_KERNEL);
	if (func1_tqes == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	func2_tqes = kmalloc(sizeof(*func2_tqes) * TEST2_TASKQS, GFP_KERNEL);
	if (func2_tqes == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < TEST2_TASKQS; i++) {
		taskq_init_ent(&func1_tqes[i]);
		taskq_init_ent(&func2_tqes[i]);

		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			     "Taskq '%s/%d' creating (%s dispatch)\n",
			     SPLAT_TASKQ_TEST2_NAME, i,
			     prealloc ? "prealloc" : "dynamic");
		if ((tq[i] = taskq_create(SPLAT_TASKQ_TEST2_NAME,
			                  TEST2_THREADS_PER_TASKQ,
					  maxclsyspri, 50, INT_MAX,
					  TASKQ_PREPOPULATE)) == NULL) {
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			           "Taskq '%s/%d' create failed\n",
				   SPLAT_TASKQ_TEST2_NAME, i);
			rc = -EINVAL;
			break;
		}

		tq_args[i].flag = i;
		tq_args[i].id   = i;
		tq_args[i].file = file;
		tq_args[i].name = SPLAT_TASKQ_TEST2_NAME;

		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
		           "Taskq '%s/%d' function '%s' dispatching\n",
			   tq_args[i].name, tq_args[i].id,
		           sym2str(splat_taskq_test2_func1));
		if (prealloc) {
			taskq_dispatch_ent(tq[i], splat_taskq_test2_func1,
			                 &tq_args[i], TQ_SLEEP, &func1_tqes[i]);
			id = func1_tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq[i], splat_taskq_test2_func1,
					    &tq_args[i], TQ_SLEEP);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			           "Taskq '%s/%d' function '%s' dispatch "
			           "failed\n", tq_args[i].name, tq_args[i].id,
			           sym2str(splat_taskq_test2_func1));
			rc = -EINVAL;
			break;
		}

		splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
		           "Taskq '%s/%d' function '%s' dispatching\n",
			   tq_args[i].name, tq_args[i].id,
		           sym2str(splat_taskq_test2_func2));
		if (prealloc) {
			taskq_dispatch_ent(tq[i], splat_taskq_test2_func2,
			                &tq_args[i], TQ_SLEEP, &func2_tqes[i]);
			id = func2_tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq[i], splat_taskq_test2_func2,
			                    &tq_args[i], TQ_SLEEP);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME, "Taskq "
				     "'%s/%d' function '%s' dispatch failed\n",
			             tq_args[i].name, tq_args[i].id,
			             sym2str(splat_taskq_test2_func2));
			rc = -EINVAL;
			break;
		}
	}

	/* When rc is set we're effectively just doing cleanup here, so
	 * ignore new errors in that case.  They just cause noise. */
	for (i = 0; i < TEST2_TASKQS; i++) {
		if (tq[i] != NULL) {
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			           "Taskq '%s/%d' waiting\n",
			           tq_args[i].name, tq_args[i].id);
			taskq_wait(tq[i]);
			splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
			           "Taskq '%s/%d; destroying\n",
			          tq_args[i].name, tq_args[i].id);

			taskq_destroy(tq[i]);

			if (!rc && tq_args[i].flag != ((i * 2) + 1)) {
				splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
				           "Taskq '%s/%d' processed tasks "
				           "out of order; %d != %d\n",
				           tq_args[i].name, tq_args[i].id,
				           tq_args[i].flag, i * 2 + 1);
				rc = -EINVAL;
			} else {
				splat_vprint(file, SPLAT_TASKQ_TEST2_NAME,
				           "Taskq '%s/%d' processed tasks "
					   "in the correct order; %d == %d\n",
				           tq_args[i].name, tq_args[i].id,
				           tq_args[i].flag, i * 2 + 1);
			}
		}
	}
out:
	if (func1_tqes)
		kfree(func1_tqes);

	if (func2_tqes)
		kfree(func2_tqes);

	return rc;
}
Code example #27
File: emul64.c Project: apprisi/illumos-gate
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		mutex_initted = 0;
	struct emul64	*emul64;
	int		instance;
	scsi_hba_tran_t	*tran = NULL;
	ddi_dma_attr_t	tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}


	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran			= tran;
	emul64->emul64_dip			= dip;

	tran->tran_hba_private		= emul64;
	tran->tran_tgt_private		= NULL;
	tran->tran_tgt_init		= emul64_tran_tgt_init;
	tran->tran_tgt_probe		= scsi_hba_probe;
	tran->tran_tgt_free		= NULL;

	tran->tran_start		= emul64_scsi_start;
	tran->tran_abort		= emul64_scsi_abort;
	tran->tran_reset		= emul64_scsi_reset;
	tran->tran_getcap		= emul64_scsi_getcap;
	tran->tran_setcap		= emul64_scsi_setcap;
	tran->tran_init_pkt		= emul64_scsi_init_pkt;
	tran->tran_destroy_pkt		= emul64_scsi_destroy_pkt;
	tran->tran_dmafree		= emul64_scsi_dmafree;
	tran->tran_sync_pkt		= emul64_scsi_sync_pkt;
	tran->tran_reset_notify 	= emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);


	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);


	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}
Code example #28
File: splat-taskq.c Project: BjoKaSH/spl-osx
static int
splat_taskq_test4_common(struct file *file, void *arg, int minalloc,
                         int maxalloc, int nr_tasks, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t *tqes;
	int i, j, rc = 0;

	tqes = kmalloc(sizeof(*tqes) * nr_tasks, GFP_KERNEL);
	if (tqes == NULL)
		return -ENOMEM;

	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		     "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
		     SPLAT_TASKQ_TEST4_NAME,
		     prealloc ? "prealloc" : "dynamic",
		     minalloc, maxalloc, nr_tasks);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST4_NAME, 1, maxclsyspri,
		               minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST4_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST4_NAME;

	for (i = 1; i <= nr_tasks; i *= 2) {
		atomic_set(&tq_arg.count, 0);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		             "Taskq '%s' function '%s' dispatched %d times\n",
		             tq_arg.name, sym2str(splat_taskq_test4_func), i);

		for (j = 0; j < i; j++) {
			taskq_init_ent(&tqes[j]);

			if (prealloc) {
				taskq_dispatch_ent(tq, splat_taskq_test4_func,
				                   &tq_arg, TQ_SLEEP, &tqes[j]);
				id = tqes[j].tqent_id;
			} else {
				id = taskq_dispatch(tq, splat_taskq_test4_func,
						    &tq_arg, TQ_SLEEP);
			}

			if (id == 0) {
				splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
				        "Taskq '%s' function '%s' dispatch "
					"%d failed\n", tq_arg.name,
					sym2str(splat_taskq_test4_func), j);
					rc = -EINVAL;
					goto out;
			}
		}

		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
			     "waiting for %d dispatches\n", tq_arg.name, i);
		taskq_wait(tq);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
			     "%d/%d dispatches finished\n", tq_arg.name,
			     atomic_read(&tq_arg.count), i);
		if (atomic_read(&tq_arg.count) != i) {
			rc = -ERANGE;
			goto out;

		}
	}
out:
	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' destroying\n",
	           tq_arg.name);
	taskq_destroy(tq);

out_free:
	kfree(tqes);

	return rc;
}
Code example #29
File: splat-taskq.c Project: BjoKaSH/spl-osx
static int
splat_taskq_test5_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order1[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,0,0,0 };
	int order2[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,8,6,7 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
		     SPLAT_TASKQ_TEST5_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST5_NAME, 3, maxclsyspri,
		               50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST5_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST5_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test5_func,
			               &tq_id[i], TQ_SLEEP, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test5_func,
					    &tq_id[i], TQ_SLEEP);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			        "Taskq '%s' function '%s' dispatch failed\n",
				tq_arg.name, sym2str(splat_taskq_test5_func));
				rc = -EINVAL;
				goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			        "Taskq '%s' expected taskqid %d got %d\n",
				tq_arg.name, (int)tq_id[i].id, (int)id);
				rc = -EINVAL;
				goto out;
		}
	}

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name, 3);
	taskq_wait_id(tq, 3);
	if ((rc = splat_taskq_test_order(&tq_arg, order1)))
		goto out;

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name, 8);
	taskq_wait_id(tq, 8);
	rc = splat_taskq_test_order(&tq_arg, order2);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		     "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}
Code example #30
File: splat-taskq.c Project: BjoKaSH/spl-osx
static int
splat_taskq_test6_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order[SPLAT_TASKQ_ORDER_MAX] = { 1,2,3,6,7,8,4,5 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;
	uint_t tflags;

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
		     SPLAT_TASKQ_TEST6_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST6_NAME, 3, maxclsyspri,
		               50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST6_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST6_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;
		tflags = TQ_SLEEP;
		if (i > 4)
			tflags |= TQ_FRONT;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test6_func,
			                   &tq_id[i], tflags, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test6_func,
					    &tq_id[i], tflags);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			        "Taskq '%s' function '%s' dispatch failed\n",
				tq_arg.name, sym2str(splat_taskq_test6_func));
				rc = -EINVAL;
				goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			        "Taskq '%s' expected taskqid %d got %d\n",
				tq_arg.name, (int)tq_id[i].id, (int)id);
				rc = -EINVAL;
				goto out;
		}
		/* Sleep to let tasks 1-3 start executing. */
		if ( i == 2 )
			msleep(100);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name,
		     SPLAT_TASKQ_ORDER_MAX);
	taskq_wait_id(tq, SPLAT_TASKQ_ORDER_MAX);
	rc = splat_taskq_test_order(&tq_arg, order);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		     "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}