Example #1
/*
 * Called from kcf.c:_init()
 */
void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using two unpredictable time values:
	 * the high-resolution time since boot, and the current time of
	 * day.  These are used only to make the timeout value in the
	 * timer unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;

	rnd_alloc_magazines();

	(void) taskq_dispatch(system_taskq, rnd_init2, NULL, TQ_SLEEP);
}
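/*
 * A minimal sketch of the fire-and-forget pattern that ends
 * kcf_rnd_init(): dispatch deferred work onto the pre-created
 * system_taskq and return.  my_deferred_init() and my_module_init()
 * are hypothetical names; the (void) cast follows the examples in
 * this collection, which assume a TQ_SLEEP dispatch to an existing
 * queue succeeds.
 */
static void
my_deferred_init(void *arg)
{
	/* ... slow second-stage initialization runs off the taskq ... */
}

void
my_module_init(void)
{
	/* Defer the slow work so module init returns quickly. */
	(void) taskq_dispatch(system_taskq, my_deferred_init, NULL, TQ_SLEEP);
}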
Example #2
/*
 * Schedule any callouts that are due on or before this tick.
 */
static void
callout_schedule_1(callout_table_t *ct)
{
	callout_t *cp;
	clock_t curtime, runtime;

	mutex_enter(&ct->ct_lock);
	ct->ct_curtime = curtime = lbolt;
	while (((runtime = ct->ct_runtime) - curtime) <= 0) {
		for (cp = ct->ct_lbhash[CALLOUT_LBHASH(runtime)];
		    cp != NULL; cp = cp->c_lbnext) {
			if (cp->c_runtime != runtime ||
			    (cp->c_xid & CALLOUT_EXECUTING))
				continue;
			mutex_exit(&ct->ct_lock);
			if (ct->ct_taskq == NULL)
				softcall((void (*)(void *))callout_execute, ct);
			else
				(void) taskq_dispatch(ct->ct_taskq,
				    (task_func_t *)callout_execute, ct,
				    KM_NOSLEEP);
			return;
		}
		ct->ct_runtime++;
	}
	mutex_exit(&ct->ct_lock);
}
Example #3
/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/* No need to lock tx_cpu_t at this point */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    100, minclsyspri, max_ncpus, INT_MAX,
			    TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_PUSHPAGE);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
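/*
 * A sketch of the lazy-creation idiom above, made safe for arbitrary
 * callers by serializing the test-and-create under a mutex.  The ZFS
 * code can go lockless here presumably because this path runs in a
 * single txg-sync context; my_tq, my_tq_lock and my_tq_get() are
 * hypothetical names, and the taskq_create() parameters simply mirror
 * the ones used above.
 */
static kmutex_t my_tq_lock;
static taskq_t *my_tq;

static taskq_t *
my_tq_get(void)
{
	mutex_enter(&my_tq_lock);
	if (my_tq == NULL)
		my_tq = taskq_create("my_tq", 100, minclsyspri,
		    max_ncpus, INT_MAX,
		    TASKQ_THREADS_CPU_PCT | TASKQ_PREPOPULATE);
	mutex_exit(&my_tq_lock);
	return (my_tq);
}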
Example #4
/*
 * smb_handle_write_raw
 *
 * Called from smb_session_daemon() when the SMB command is SMB_COM_WRITE_RAW.
 * Dispatches the command to the worker thread and waits until the worker
 * has completed processing the command.
 *
 * Returns 0 for success, non-zero for failure
 */
int
smb_handle_write_raw(smb_session_t *session, smb_request_t *sr)
{
	int	drop_reason = 0;

	/*
	 * Set flag to indicate that we are waiting for raw data.  The
	 * worker thread will actually retrieve the raw data directly
	 * from the socket.  This should be the only case when a worker
	 * thread reads from the session socket.  When the data is read
	 * the worker will clear the flag.
	 */
	smb_rwx_rwenter(&session->s_lock, RW_WRITER);
	switch (session->s_state) {
	case SMB_SESSION_STATE_NEGOTIATED:
	case SMB_SESSION_STATE_OPLOCK_BREAKING:
		session->s_state = SMB_SESSION_STATE_WRITE_RAW_ACTIVE;
		smb_rwx_rwexit(&session->s_lock);
		smb_srqueue_waitq_enter(session->s_srqueue);
		sr->sr_state = SMB_REQ_STATE_SUBMITTED;
		(void) taskq_dispatch(session->s_server->sv_worker_pool,
		    smb_session_worker, sr, TQ_SLEEP);
		smb_rwx_rwenter(&session->s_lock, RW_READER);
		while (session->s_state == SMB_SESSION_STATE_WRITE_RAW_ACTIVE) {
			(void) smb_rwx_rwwait(&session->s_lock, -1);
		}
		drop_reason = session->s_write_raw_status;
		break;
	default:
		drop_reason = 21;
		break;
	}
	smb_rwx_rwexit(&session->s_lock);
	return (drop_reason);
}
Example #5
/*ARGSUSED*/
static void
xen_shutdown_handler(struct xenbus_watch *watch, const char **vec,
	unsigned int len)
{
	char *str;
	xenbus_transaction_t xbt;
	int err, shutdown_code = SHUTDOWN_INVALID;
	unsigned int slen;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return;
	if (xenbus_read(xbt, "control", "shutdown", (void *)&str, &slen)) {
		(void) xenbus_transaction_end(xbt, 1);
		return;
	}

	SUSPEND_DEBUG("%d: xen_shutdown_handler: \"%s\"\n", CPU->cpu_id, str);

	/*
	 * If this is a watch fired from our write below, return early to
	 * avoid an infinite loop.
	 */
	if (strcmp(str, "") == 0) {
		(void) xenbus_transaction_end(xbt, 0);
		kmem_free(str, slen);
		return;
	} else if (strcmp(str, "poweroff") == 0) {
		shutdown_code = SHUTDOWN_POWEROFF;
	} else if (strcmp(str, "reboot") == 0) {
		shutdown_code = SHUTDOWN_REBOOT;
	} else if (strcmp(str, "suspend") == 0) {
		shutdown_code = SHUTDOWN_SUSPEND;
	} else if (strcmp(str, "halt") == 0) {
		shutdown_code = SHUTDOWN_HALT;
	} else {
		printf("Ignoring shutdown request: %s\n", str);
	}

	/*
	 * XXPV	Should we check the value of xenbus_write() too, or are all
	 *	errors automatically folded into xenbus_transaction_end() ??
	 */
	(void) xenbus_write(xbt, "control", "shutdown", "");
	err = xenbus_transaction_end(xbt, 0);
	if (err == EAGAIN) {
		SUSPEND_DEBUG("%d: trying again\n", CPU->cpu_id);
		kmem_free(str, slen);
		goto again;
	}

	kmem_free(str, slen);
	if (shutdown_code != SHUTDOWN_INVALID) {
		(void) taskq_dispatch(xen_shutdown_tq, xen_shutdown,
		    (void *)(intptr_t)shutdown_code, 0);
	}
}
Example #6
static int
splat_rwlock_test2(struct file *file, void *arg)
{
	rw_priv_t *rwp;
	taskq_t *tq;
	int i, rc = 0, tq_count = 256;

	rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL);
	if (rwp == NULL)
		return -ENOMEM;

	splat_init_rw_priv(rwp, file);

	/* Create several threads allowing tasks to race with each other */
	tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(),
			  maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
	if (tq == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Schedule N work items to the work queue each of which enters the
	 * writer rwlock, sleeps briefly, then exits the writer rwlock.  On a
	 * multiprocessor box these work items will be handled by all available
	 * CPUs.  The task function checks to ensure the tracked shared variable
	 * is always only incremented by one.  Additionally, the rwlock itself
	 * is instrumented such that if any two processors are in the
	 * critical region at the same time the system will panic.  If the
	 * rwlock is implemented correctly this will never happen; that's a pass.
	 */
	for (i = 0; i < tq_count; i++) {
		if (!taskq_dispatch(tq, splat_rwlock_test2_func, rwp, TQ_SLEEP)) {
			splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME,
				     "Failed to queue task %d\n", i);
			rc = -EINVAL;
		}
	}

	taskq_wait(tq);

	if (rwp->rw_rc == tq_count) {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
			     "correctly entered/exited the rwlock %d times\n",
			     num_online_cpus(), rwp->rw_rc);
	} else {
		splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads "
			     "only processed %d/%d w rwlock work items\n",
			     num_online_cpus(), rwp->rw_rc, tq_count);
		rc = -EINVAL;
	}

	taskq_destroy(tq);
	rw_destroy(&(rwp->rw_rwlock));
out:
	kfree(rwp);
	return rc;
}
Example #7
static int
splat_taskq_test1_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t tqe;

	taskq_init_ent(&tqe);

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
	             SPLAT_TASKQ_TEST1_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST1_NAME, 1, maxclsyspri,
			       50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		           "Taskq '%s' create failed\n",
		           SPLAT_TASKQ_TEST1_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	tq_arg.id   = 0;
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST1_NAME;

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
	           "Taskq '%s' function '%s' dispatching\n",
	           tq_arg.name, sym2str(splat_taskq_test13_func));
	if (prealloc) {
		taskq_dispatch_ent(tq, splat_taskq_test13_func,
		                   &tq_arg, TQ_SLEEP, &tqe);
		id = tqe.tqent_id;
	} else {
		id = taskq_dispatch(tq, splat_taskq_test13_func,
				    &tq_arg, TQ_SLEEP);
	}

	if (id == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST1_NAME,
		             "Taskq '%s' function '%s' dispatch failed\n",
		             tq_arg.name, sym2str(splat_taskq_test13_func));
		taskq_destroy(tq);
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' waiting\n",
	           tq_arg.name);
	taskq_wait(tq);
	splat_vprint(file, SPLAT_TASKQ_TEST1_NAME, "Taskq '%s' destroying\n",
	           tq_arg.name);

	taskq_destroy(tq);

	return (tq_arg.flag) ? 0 : -EINVAL;
}
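/*
 * The two dispatch styles from the test above, side by side without
 * the scaffolding.  With a preallocated taskq_ent_t the entry is
 * caller-supplied, so taskq_dispatch_ent() has no allocation to fail;
 * the dynamic form returns an id of 0 on failure, which must be
 * checked.  dispatch_either(), my_func and my_arg are hypothetical.
 */
static int
dispatch_either(taskq_t *tq, boolean_t prealloc, taskq_ent_t *tqe,
    task_func_t *my_func, void *my_arg)
{
	taskqid_t id;

	if (prealloc) {
		taskq_dispatch_ent(tq, my_func, my_arg, TQ_SLEEP, tqe);
		id = tqe->tqent_id;
	} else {
		id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
	}

	return ((id == 0) ? -EINVAL : 0);
}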
Example #8
/*
 * Simple algorithm for now, grab the global lock and let all
 * the clients update themselves in parallel. There is a lot of
 * room for improvement here. We could eliminate some scans of
 * the DAG by incrementally scanning at lower levels of the DAG
 * rather than having each client start its own scan from the root.
 */
void
mdeg_notify_clients(void)
{
	md_t		*md_new;
	mdeg_clnt_t	*clnt;
	int		idx;
	int		nclnt;

	rw_enter(&mdeg.rwlock, RW_READER);
	mutex_enter(&mdeg.lock);

	/*
	 * Rotate the MDs
	 */
	if ((md_new = md_get_handle()) == NULL) {
		cmn_err(CE_WARN, "unable to retrieve new MD");
		goto done;
	}

	if (mdeg.md_prev) {
		(void) md_fini_handle(mdeg.md_prev);
	}

	mdeg.md_prev = mdeg.md_curr;
	mdeg.md_curr = md_new;

	if (mdeg.nclnts == 0) {
		MDEG_DBG("mdeg_notify_clients: no clients registered\n");
		goto done;
	}

	/* dispatch the update notification to all clients */
	for (idx = 0, nclnt = 0; idx < mdeg.maxclnts; idx++) {
		clnt = &mdeg.tbl[idx];

		if (!clnt->valid)
			continue;

		MDEG_DBG("notifying client 0x%lx (%d/%d)\n", clnt->hdl,
		    ++nclnt, mdeg.nclnts);

		(void) taskq_dispatch(mdeg.taskq, mdeg_notify_client,
		    (void *)clnt, TQ_SLEEP);
	}

	/*
	 * Wait for all mdeg_notify_client notifications to
	 * finish while we are still holding mdeg.rwlock.
	 */
	taskq_wait(mdeg.taskq);

done:
	mutex_exit(&mdeg.lock);
	rw_exit(&mdeg.rwlock);
}
Example #9
/*
 * This is the SMB2 handler for new smb requests, called from
 * smb_session_reader after SMB negotiate is done.  For most SMB2
 * requests, we just enqueue them for the smb_session_worker to
 * execute via the task queue, so they can block for resources
 * without stopping the reader thread.  A few protocol messages
 * are special cases and are handled directly here in the reader
 * thread so they don't wait for taskq scheduling.
 *
 * This function must either enqueue the new request for
 * execution via the task queue, or execute it directly
 * and then free it.  If this returns non-zero, the caller
 * will drop the session.
 */
int
smb2sr_newrq(smb_request_t *sr)
{
	uint32_t magic;
	uint16_t command;
	int rc;

	magic = LE_IN32(sr->sr_request_buf);
	if (magic != SMB2_PROTOCOL_MAGIC) {
		smb_request_free(sr);
		/* will drop the connection */
		return (EPROTO);
	}

	/*
	 * Execute Cancel requests immediately, (here in the
	 * reader thread) so they won't wait for any other
	 * commands we might already have in the task queue.
	 * Cancel also skips signature verification and
	 * does not consume a sequence number.
	 * [MS-SMB2] 3.2.4.24 Cancellation...
	 */
	command = LE_IN16((uint8_t *)sr->sr_request_buf + 12);
	if (command == SMB2_CANCEL) {
		rc = smb2sr_newrq_cancel(sr);
		smb_request_free(sr);
		return (rc);
	}

	/*
	 * XXX With SMB3 this is supposed to increment based on
	 * the number of credits consumed by a request.  Todo
	 */
	if (sr->session->signing.flags & SMB_SIGNING_ENABLED) {
		/* XXX MS-SMB2 is unclear on this. todo */
		sr->session->signing.seqnum++;
		sr->sr_seqnum = sr->session->signing.seqnum;
		sr->reply_seqnum = sr->sr_seqnum;
	}

	/*
	 * Submit the request to the task queue, which calls
	 * smb2_dispatch_request when the workload permits.
	 */
	sr->sr_time_submitted = gethrtime();
	sr->sr_state = SMB_REQ_STATE_SUBMITTED;
	sr->work_func = smb2sr_work;
	smb_srqueue_waitq_enter(sr->session->s_srqueue);
	(void) taskq_dispatch(sr->session->s_server->sv_worker_pool,
	    smb_session_worker, sr, TQ_SLEEP);

	return (0);
}
Example #10
void
idm_conn_event_locked(idm_conn_t *ic, idm_conn_event_t event,
    uintptr_t event_info, idm_pdu_event_type_t pdu_event_type)
{
	idm_conn_event_ctx_t	*event_ctx;

	ASSERT(mutex_owned(&ic->ic_state_mutex));

	idm_sm_audit_event(&ic->ic_state_audit, SAS_IDM_CONN,
	    (int)ic->ic_state, (int)event, event_info);

	/*
	 * It's very difficult to prevent a few straggling events
	 * at the end.  For example idm_sorx_thread will generate
	 * a CE_TRANSPORT_FAIL event when it exits.  Rather than
	 * push complicated restrictions all over the code to
	 * prevent this we will simply drop the events (and in
	 * the case of PDU events release them appropriately)
	 * since they are irrelevant once we are in a terminal state.
	 * Of course those threads need to have appropriate holds on
	 * the connection otherwise it might disappear.
	 */
	if ((ic->ic_state == CS_S9_INIT_ERROR) ||
	    (ic->ic_state == CS_S9A_REJECTED) ||
	    (ic->ic_state == CS_S11_COMPLETE)) {
		if ((pdu_event_type == CT_TX_PDU) ||
		    (pdu_event_type == CT_RX_PDU)) {
			ic->ic_pdu_events--;
			idm_pdu_complete((idm_pdu_t *)event_info,
			    IDM_STATUS_SUCCESS);
		}
		IDM_SM_LOG(CE_NOTE, "*** Dropping event %s (%d) because of "
		    "state %s (%d)",
		    idm_ce_name[event], event,
		    idm_cs_name[ic->ic_state], ic->ic_state);
		return;
	}

	/*
	 * Normal event handling
	 */
	idm_conn_hold(ic);

	event_ctx = kmem_zalloc(sizeof (*event_ctx), KM_SLEEP);
	event_ctx->iec_ic = ic;
	event_ctx->iec_event = event;
	event_ctx->iec_info = event_info;
	event_ctx->iec_pdu_event_type = pdu_event_type;

	(void) taskq_dispatch(ic->ic_state_taskq, &idm_conn_event_handler,
	    event_ctx, TQ_SLEEP);
}
Example #11
void vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
    VERIFY(vp->v_count > 0);
    mutex_enter(&vp->v_lock);
    if (vp->v_count == 1) {
        mutex_exit(&vp->v_lock);
        VERIFY(taskq_dispatch(taskq, (task_func_t *)vn_rele_inactive,
                              vp, UMEM_NOFAIL) != 0);
        return;
    }
    vp->v_count--;
    mutex_exit(&vp->v_lock);
}
Example #12
/*
 * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
 * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
 * the file system as a result of releasing the vnode. Note, file systems
 * already have to handle the race where the vnode is incremented before the
 * inactive routine is called and does its locking.
 *
 * Warning: Excessive use of this routine can lead to performance problems.
 * This is because taskqs throttle back allocation if too many are created.
 */
void
vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
	VERIFY(vp->v_count > 0);
	VI_LOCK(vp);
	if (vp->v_count == 1 && !(vp->v_iflag & VI_DOINGINACT)) {
		VI_UNLOCK(vp);
		VERIFY(taskq_dispatch((taskq_t *)taskq,
		    (task_func_t *)vn_rele_inactive, vp, TQ_SLEEP) != 0);
		return;
	}
	vp->v_usecount--;
	vdropl(vp);
}
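/*
 * A hypothetical caller of vn_rele_async(): releasing a batch of
 * vnodes from a context that must not re-enter the file system.
 * vn_reclaim_taskq is assumed to have been created elsewhere; a real
 * consumer would size and create its own queue.
 */
static taskq_t *vn_reclaim_taskq;

static void
release_vnodes(vnode_t **vpp, int cnt)
{
	int i;

	/* Any VOP_INACTIVE() work happens on the taskq, not here. */
	for (i = 0; i < cnt; i++)
		vn_rele_async(vpp[i], vn_reclaim_taskq);
}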
Example #13
File: zil.c Project: harshada/zfs
/*
 * If there are any in-memory intent log transactions which have now been
 * synced then start up a taskq to free them.
 */
void
zil_clean(zilog_t *zilog)
{
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	itx = list_head(&zilog->zl_itx_list);
	if ((itx != NULL) &&
	    (itx->itx_lr.lrc_txg <= spa_last_synced_txg(zilog->zl_spa))) {
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (task_func_t *)zil_itx_clean, zilog, TQ_SLEEP);
	}
	mutex_exit(&zilog->zl_lock);
}
Example #14
File: zvol.c Project: alek-p/zfs
void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
	zvol_task_t *task;
	taskqid_t id;

	task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
	if (task == NULL)
		return;

	id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
	if ((async == B_FALSE) && (id != 0))
		taskq_wait_id(spa->spa_zvol_taskq, id);
}
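/*
 * The synchronous/asynchronous switch above in isolation: dispatch
 * once, and block on the returned id only when the caller wants to
 * wait for completion.  dispatch_maybe_wait() and its parameters are
 * hypothetical names.
 */
static void
dispatch_maybe_wait(taskq_t *tq, task_func_t *func, void *arg,
    boolean_t async)
{
	taskqid_t id;

	id = taskq_dispatch(tq, func, arg, TQ_SLEEP);
	if ((async == B_FALSE) && (id != 0))
		taskq_wait_id(tq, id);
}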
Example #15
/*
 * Use the global system task queue with a single task, wait until task
 * completes, ensure task ran properly.
 */
static int
splat_taskq_test3_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskqid_t id;
	splat_taskq_arg_t *tq_arg;
	taskq_ent_t *tqe;
	int error;

	tq_arg = kmem_alloc(sizeof (splat_taskq_arg_t), KM_SLEEP);
	tqe = kmem_alloc(sizeof (taskq_ent_t), KM_SLEEP);
	taskq_init_ent(tqe);

	tq_arg->flag = 0;
	tq_arg->id   = 0;
	tq_arg->file = file;
	tq_arg->name = SPLAT_TASKQ_TEST3_NAME;

	splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
	           "Taskq '%s' function '%s' %s dispatch\n",
	           tq_arg->name, sym2str(splat_taskq_test13_func),
		   prealloc ? "prealloc" : "dynamic");
	if (prealloc) {
		taskq_dispatch_ent(system_taskq, splat_taskq_test13_func,
		                   tq_arg, TQ_SLEEP, tqe);
		id = tqe->tqent_id;
	} else {
		id = taskq_dispatch(system_taskq, splat_taskq_test13_func,
				    tq_arg, TQ_SLEEP);
	}

	if (id == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
		           "Taskq '%s' function '%s' dispatch failed\n",
		           tq_arg->name, sym2str(splat_taskq_test13_func));
		kmem_free(tqe, sizeof (taskq_ent_t));
		kmem_free(tq_arg, sizeof (splat_taskq_arg_t));
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_TASKQ_TEST3_NAME, "Taskq '%s' waiting\n",
	           tq_arg->name);
	taskq_wait(system_taskq);

	error = (tq_arg->flag) ? 0 : -EINVAL;

	kmem_free(tqe, sizeof (taskq_ent_t));
	kmem_free(tq_arg, sizeof (splat_taskq_arg_t));

	return (error);
}
Example #16
static int do_test(struct taskq *tq, char *desc)
{
	mutex_lock(&tq_mutex);
	tq_done = false;
	mutex_unlock(&tq_mutex);

	if (taskq_dispatch(tq, tq_test_func, desc, 0) == 0)
		return 1;

	mutex_lock(&tq_mutex);
	while (!tq_done)
		cv_wait(&tq_wait, &tq_mutex);
	mutex_unlock(&tq_mutex);
	return 0;
}
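/*
 * The snippet above leaves tq_test_func() and its synchronization
 * variables undefined; the completion below is a guess reconstructed
 * from how do_test() uses them, not the project's actual code, and
 * the mutex/cv types are assumed from the shim calls in do_test().
 */
static struct mutex tq_mutex;
static struct cv tq_wait;
static bool tq_done;

static void
tq_test_func(void *arg)
{
	char *desc = arg;

	/* ... run the test described by desc ... */

	/* Signal do_test() that the task has completed. */
	mutex_lock(&tq_mutex);
	tq_done = true;
	cv_broadcast(&tq_wait);
	mutex_unlock(&tq_mutex);
}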
Example #17
/*ARGSUSED*/
static void
vcpu_config_event(struct xenbus_watch *watch, const char **vec, uint_t len)
{
	const char *path = vec[XS_WATCH_PATH];
	processorid_t id;
	char *s;

	if ((s = strstr(path, "cpu/")) != NULL &&
	    sscanf(s, "cpu/%d", &id) == 1) {
		/*
		 * Run the virtual CPU configuration on a separate thread to
		 * avoid blocking on this event for too long (and for now,
		 * to ensure configuration requests are serialized.)
		 */
		(void) taskq_dispatch(cpu_config_tq,
		    vcpu_config, (void *)(uintptr_t)id, 0);
	}
}
Example #18
/*
 * void task_rele(task_t *)
 *
 * Overview
 *   task_rele() relinquishes a reference on the given task, which was acquired
 *   via task_hold() or task_hold_by_id().  If this is the last member or
 *   observer of the task, dispatch it for commitment via the accounting
 *   subsystem.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   Caller must not be holding the task_hash_lock.
 */
void
task_rele(task_t *tk)
{
	mutex_enter(&task_hash_lock);
	if (atomic_add_32_nv(&tk->tk_hold_count, -1) > 0) {
		mutex_exit(&task_hash_lock);
		return;
	}

	ASSERT(tk->tk_nprocs == 0);

	mutex_enter(&tk->tk_zone->zone_nlwps_lock);
	tk->tk_proj->kpj_ntasks--;
	mutex_exit(&tk->tk_zone->zone_nlwps_lock);

	task_kstat_delete(tk);

	if (mod_hash_destroy(task_hash,
	    (mod_hash_key_t)(uintptr_t)tk->tk_tkid) != 0)
		panic("unable to delete task %d", tk->tk_tkid);
	mutex_exit(&task_hash_lock);

	/*
	 * At this point, there are no members or observers of the task, so we
	 * can safely send it on for commitment to the accounting subsystem.
	 * The task will be destroyed in task_end() subsequent to commitment.
	 * Since we may be called with pidlock held, taskq_dispatch() cannot
	 * sleep. Commitment is handled by a backup thread in case dispatching
	 * the task fails.
	 */
	if (taskq_dispatch(exacct_queue, exacct_commit_task, tk,
	    TQ_NOSLEEP | TQ_NOQUEUE) == NULL) {
		mutex_enter(&task_commit_lock);
		if (task_commit_head == NULL) {
			task_commit_head = task_commit_tail = tk;
		} else {
			task_commit_tail->tk_commit_next = tk;
			task_commit_tail = tk;
		}
		cv_signal(&task_commit_cv);
		mutex_exit(&task_commit_lock);
	}
}
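/*
 * The failure fallback from task_rele() in isolation: try a dispatch
 * that neither sleeps nor queues, and hand the work to a backup
 * mechanism when the taskq cannot take it immediately.
 * my_backup_enqueue() is a hypothetical stand-in for the commit list
 * and backup thread used above.
 */
static void my_backup_enqueue(void *arg);	/* hypothetical backup path */

static void
dispatch_or_fallback(taskq_t *tq, task_func_t *func, void *arg)
{
	if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP | TQ_NOQUEUE) == NULL)
		my_backup_enqueue(arg);
}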
Example #19
File: zvol.c Project: alek-p/zfs
static int
zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	zvol_set_snapdev_arg_t *zsda = arg;
	char dsname[MAXNAMELEN];
	zvol_task_t *task;

	dsl_dataset_name(ds, dsname);
	dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
	    zsda->zsda_source, sizeof (zsda->zsda_value), 1,
	    &zsda->zsda_value, zsda->zsda_tx);

	task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname,
	    NULL, zsda->zsda_value);
	if (task == NULL)
		return (0);

	(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
		task, TQ_SLEEP);
	return (0);
}
Example #20
/*
 * Use the global system task queue with a single task, wait until task
 * completes, ensure task ran properly.
 */
static int
splat_taskq_test3_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t tqe;

	taskq_init_ent(&tqe);

	tq_arg.flag = 0;
	tq_arg.id   = 0;
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST3_NAME;

	splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
	           "Taskq '%s' function '%s' %s dispatch\n",
	           tq_arg.name, sym2str(splat_taskq_test13_func),
		   prealloc ? "prealloc" : "dynamic");
	if (prealloc) {
		taskq_dispatch_ent(system_taskq, splat_taskq_test13_func,
		                   &tq_arg, TQ_SLEEP, &tqe);
		id = tqe.tqent_id;
	} else {
		id = taskq_dispatch(system_taskq, splat_taskq_test13_func,
				    &tq_arg, TQ_SLEEP);
	}

	if (id == 0) {
		splat_vprint(file, SPLAT_TASKQ_TEST3_NAME,
		           "Taskq '%s' function '%s' dispatch failed\n",
		           tq_arg.name, sym2str(splat_taskq_test13_func));
		return -EINVAL;
	}

	splat_vprint(file, SPLAT_TASKQ_TEST3_NAME, "Taskq '%s' waiting\n",
	           tq_arg.name);
	taskq_wait(system_taskq);

	return (tq_arg.flag) ? 0 : -EINVAL;
}
Example #21
static int
splat_rwlock_test4_type(taskq_t *tq, rw_priv_t *rwp, int expected_rc,
			krw_t holder_type, krw_t try_type)
{
	int id, rc = 0;

	/* Schedule a task function which will try to acquire the rwlock
	 * using type try_type while the rwlock is being held as holder_type.
	 * The result must match expected_rc for the test to pass. */
	rwp->rw_rc = -EINVAL;
	rwp->rw_type = try_type;

	if (holder_type == RW_WRITER || holder_type == RW_READER)
		rw_enter(&rwp->rw_rwlock, holder_type);

	id = taskq_dispatch(tq, splat_rwlock_test4_func, rwp, TQ_SLEEP);
	if (id == 0) {
		splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME, "%s",
			     "taskq_dispatch() failed\n");
		rc = -EINVAL;
		goto out;
	}

	taskq_wait_id(tq, id);

	if (rwp->rw_rc != expected_rc)
		rc = -EINVAL;

	splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME,
		     "%srw_tryenter(%s) returned %d (expected %d) when %s\n",
		     rc ? "Fail " : "", splat_rwlock_test4_name(try_type),
		     rwp->rw_rc, expected_rc,
		     splat_rwlock_test4_name(holder_type));
out:
	if (holder_type == RW_WRITER || holder_type == RW_READER)
		rw_exit(&rwp->rw_rwlock);

	return rc;
}
Example #22
void
pppt_sess_close_locked(pppt_sess_t *ps)
{
	pppt_tgt_t	*tgt = ps->ps_target;
	pppt_task_t	*ptask;

	stmf_trace("pppt", "Session close %p", (void *)ps);

	ASSERT(mutex_owned(&pppt_global.global_lock));
	ASSERT(mutex_owned(&tgt->target_mutex));
	ASSERT(mutex_owned(&ps->ps_mutex));
	ASSERT(!ps->ps_closed); /* Caller should ensure session is not closed */

	ps->ps_closed = B_TRUE;
	for (ptask = avl_first(&ps->ps_task_list); ptask != NULL;
	    ptask = AVL_NEXT(&ps->ps_task_list, ptask)) {
		mutex_enter(&ptask->pt_mutex);
		if (ptask->pt_state == PTS_ACTIVE) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, ptask->pt_stmf_task,
			    STMF_ABORTED, NULL);
		}
		mutex_exit(&ptask->pt_mutex);
	}

	/*
	 * Now that all the tasks are aborting, the session refcnt should
	 * go to 0.
	 */
	while (ps->ps_refcnt != 0) {
		cv_wait(&ps->ps_cv, &ps->ps_mutex);
	}

	avl_remove(&tgt->target_sess_list, ps);
	avl_remove(&pppt_global.global_sess_list, ps);
	(void) taskq_dispatch(pppt_global.global_sess_taskq,
	    &pppt_sess_destroy_task, ps, KM_SLEEP);

	stmf_trace("pppt", "Session close complete %p", (void *)ps);
}
Example #23
static void
splat_taskq_test7_func(void *arg)
{
	splat_taskq_arg_t *tq_arg = (splat_taskq_arg_t *)arg;
	taskqid_t id;

	ASSERT(tq_arg);

	if (tq_arg->depth >= SPLAT_TASKQ_DEPTH_MAX)
		return;

	tq_arg->depth++;

	splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
	             "Taskq '%s' function '%s' dispatching (depth = %u)\n",
	             tq_arg->name, sym2str(splat_taskq_test7_func),
	             tq_arg->depth);

	if (tq_arg->tqe) {
		VERIFY(taskq_empty_ent(tq_arg->tqe));
		taskq_dispatch_ent(tq_arg->tq, splat_taskq_test7_func,
		                   tq_arg, TQ_SLEEP, tq_arg->tqe);
		id = tq_arg->tqe->tqent_id;
	} else {
		id = taskq_dispatch(tq_arg->tq, splat_taskq_test7_func,
		                    tq_arg, TQ_SLEEP);
	}

	if (id == 0) {
		splat_vprint(tq_arg->file, SPLAT_TASKQ_TEST7_NAME,
		             "Taskq '%s' function '%s' dispatch failed "
		             "(depth = %u)\n", tq_arg->name,
		             sym2str(splat_taskq_test7_func), tq_arg->depth);
		tq_arg->flag = -EINVAL;
		return;
	}
}
Example #24
/*
 * sckm_mbox_callback
 *
 * Callback routine registered with the IOSRAM mailbox protocol driver.
 * Invoked when a message is received on the mailbox.
 */
static void
sckm_mbox_callback(void)
{
    SCKM_DEBUG0(D_CALLBACK, "in sckm_mbox_callback()");

    mutex_enter(&sckm_taskq_ptr_mutex);

    if (sckm_taskq == NULL) {
        mutex_exit(&sckm_taskq_ptr_mutex);
        return;
    }

    if (!taskq_dispatch(sckm_taskq, sckm_mbox_task, NULL, KM_NOSLEEP)) {
        /*
         * Too many tasks already pending. Do not queue a new
         * request.
         */
        SCKM_DEBUG0(D_CALLBACK, "failed dispatching task");
    }

    mutex_exit(&sckm_taskq_ptr_mutex);

    SCKM_DEBUG0(D_CALLBACK, "out sckm_mbox_callback()");
}
Example #25
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no search
 * directories are given (iarg->paths is 0), the default import paths are
 * searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		taskq_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}

		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = taskq_create("z_import", 2 * max_ncpus, defclsyspri,
		    2 * max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) taskq_dispatch(t, zpool_open_func, slice,
			    TQ_SLEEP);
		taskq_wait(t);
		taskq_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path, i+1,
					    slice->rn_num_labels, config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}
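/*
 * The parallel fan-out idiom from the directory scan above, reduced to
 * its core: create a pool, dispatch one task per item, wait for all of
 * them, then tear the pool down.  scan_parallel(), process_item() and
 * items[] are hypothetical; the taskq_create() parameters mirror the
 * ones used above.
 */
static void process_item(void *arg);	/* hypothetical per-item worker */

static void
scan_parallel(void *items[], int n)
{
	taskq_t *t;
	int i;

	t = taskq_create("scan", 2 * max_ncpus, defclsyspri,
	    2 * max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	for (i = 0; i < n; i++)
		(void) taskq_dispatch(t, process_item, items[i], TQ_SLEEP);
	taskq_wait(t);
	taskq_destroy(t);
}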
Example #26
static int
splat_taskq_test4_common(struct file *file, void *arg, int minalloc,
                         int maxalloc, int nr_tasks, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_arg_t tq_arg;
	taskq_ent_t *tqes;
	int i, j, rc = 0;

	tqes = kmalloc(sizeof(*tqes) * nr_tasks, GFP_KERNEL);
	if (tqes == NULL)
		return -ENOMEM;

	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		     "Taskq '%s' creating (%s dispatch) (%d/%d/%d)\n",
		     SPLAT_TASKQ_TEST4_NAME,
		     prealloc ? "prealloc" : "dynamic",
		     minalloc, maxalloc, nr_tasks);
	if ((tq = taskq_create(SPLAT_TASKQ_TEST4_NAME, 1, maxclsyspri,
		               minalloc, maxalloc, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST4_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST4_NAME;

	for (i = 1; i <= nr_tasks; i *= 2) {
		atomic_set(&tq_arg.count, 0);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
		             "Taskq '%s' function '%s' dispatched %d times\n",
		             tq_arg.name, sym2str(splat_taskq_test4_func), i);

		for (j = 0; j < i; j++) {
			taskq_init_ent(&tqes[j]);

			if (prealloc) {
				taskq_dispatch_ent(tq, splat_taskq_test4_func,
				                   &tq_arg, TQ_SLEEP, &tqes[j]);
				id = tqes[j].tqent_id;
			} else {
				id = taskq_dispatch(tq, splat_taskq_test4_func,
						    &tq_arg, TQ_SLEEP);
			}

			if (id == 0) {
				splat_vprint(file, SPLAT_TASKQ_TEST4_NAME,
				        "Taskq '%s' function '%s' dispatch "
					"%d failed\n", tq_arg.name,
					sym2str(splat_taskq_test4_func), j);
					rc = -EINVAL;
					goto out;
			}
		}

		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
			     "waiting for %d dispatches\n", tq_arg.name, i);
		taskq_wait(tq);
		splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' "
			     "%d/%d dispatches finished\n", tq_arg.name,
			     atomic_read(&tq_arg.count), i);
		if (atomic_read(&tq_arg.count) != i) {
			rc = -ERANGE;
			goto out;

		}
	}
out:
	splat_vprint(file, SPLAT_TASKQ_TEST4_NAME, "Taskq '%s' destroying\n",
	           tq_arg.name);
	taskq_destroy(tq);

out_free:
	kfree(tqes);

	return rc;
}
Example #27
static int
splat_taskq_test5_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order1[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,0,0,0 };
	int order2[SPLAT_TASKQ_ORDER_MAX] = { 1,2,4,5,3,8,6,7 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
		     SPLAT_TASKQ_TEST5_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST5_NAME, 3, maxclsyspri,
		               50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST5_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST5_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test5_func,
			               &tq_id[i], TQ_SLEEP, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test5_func,
					    &tq_id[i], TQ_SLEEP);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			        "Taskq '%s' function '%s' dispatch failed\n",
				tq_arg.name, sym2str(splat_taskq_test5_func));
				rc = -EINVAL;
				goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
			        "Taskq '%s' expected taskqid %d got %d\n",
				tq_arg.name, (int)tq_id[i].id, (int)id);
				rc = -EINVAL;
				goto out;
		}
	}

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name, 3);
	taskq_wait_id(tq, 3);
	if ((rc = splat_taskq_test_order(&tq_arg, order1)))
		goto out;

	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name, 8);
	taskq_wait_id(tq, 8);
	rc = splat_taskq_test_order(&tq_arg, order2);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST5_NAME,
		     "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}
Example #28
/*
 * Request will be added back to the request queue and retried if
 * it cannot be immediately dispatched to the taskq for handling
 */
static inline void
zvol_dispatch(task_func_t func, struct request *req)
{
	if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
		blk_requeue_request(req->q, req);
}
Example #29
/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- emul64 has been shutdown
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to emul64
 *
 * Description	 : init pkt, start the request
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	int			rval	= TRAN_ACCEPT;
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	clock_t			cur_lbolt;
	taskqid_t		dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * Calculate the deadline from pkt_time.
	 * Instead of multiplying by 100 (i.e. HZ), we multiply by 128 so
	 * we can shift and at the same time have a 28% grace period.
	 * We ignore the rare case of pkt_time == 0 and deal with it
	 * in emul64_i_watch().
	 */
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);

	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = NULL;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns NULL), the task will be dispatched in
			 * sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
			if (dispatched == NULL) {
				/* Queue was full.  dispatch failed. */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == NULL) {
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

done:
	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}
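/*
 * The two-step dispatch from emul64_scsi_start() in isolation: try
 * TQ_NOSLEEP first so a full queue can be detected and counted, then
 * fall back to a blocking TQ_SLEEP dispatch.  record_overflow() is a
 * hypothetical stand-in for the stats counting above.
 */
static void record_overflow(void);	/* hypothetical stats counter */

static void
dispatch_with_stats(taskq_t *tq, task_func_t *func, void *arg)
{
	if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP) == NULL) {
		record_overflow();	/* queue was full */
		(void) taskq_dispatch(tq, func, arg, TQ_SLEEP);
	}
}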
Example #30
static int
splat_taskq_test6_impl(struct file *file, void *arg, boolean_t prealloc)
{
	taskq_t *tq;
	taskqid_t id;
	splat_taskq_id_t tq_id[SPLAT_TASKQ_ORDER_MAX];
	splat_taskq_arg_t tq_arg;
	int order[SPLAT_TASKQ_ORDER_MAX] = { 1,2,3,6,7,8,4,5 };
	taskq_ent_t tqes[SPLAT_TASKQ_ORDER_MAX];
	int i, rc = 0;
	uint_t tflags;

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		     "Taskq '%s' creating (%s dispatch)\n",
		     SPLAT_TASKQ_TEST6_NAME,
		     prealloc ? "prealloc" : "dynamic");
	if ((tq = taskq_create(SPLAT_TASKQ_TEST6_NAME, 3, maxclsyspri,
		               50, INT_MAX, TASKQ_PREPOPULATE)) == NULL) {
		splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		             "Taskq '%s' create failed\n",
		             SPLAT_TASKQ_TEST6_NAME);
		return -EINVAL;
	}

	tq_arg.flag = 0;
	memset(&tq_arg.order, 0, sizeof(int) * SPLAT_TASKQ_ORDER_MAX);
	spin_lock_init(&tq_arg.lock);
	tq_arg.file = file;
	tq_arg.name = SPLAT_TASKQ_TEST6_NAME;

	for (i = 0; i < SPLAT_TASKQ_ORDER_MAX; i++) {
		taskq_init_ent(&tqes[i]);

		tq_id[i].id = i + 1;
		tq_id[i].arg = &tq_arg;
		tflags = TQ_SLEEP;
		if (i > 4)
			tflags |= TQ_FRONT;

		if (prealloc) {
			taskq_dispatch_ent(tq, splat_taskq_test6_func,
			                   &tq_id[i], tflags, &tqes[i]);
			id = tqes[i].tqent_id;
		} else {
			id = taskq_dispatch(tq, splat_taskq_test6_func,
					    &tq_id[i], tflags);
		}

		if (id == 0) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			        "Taskq '%s' function '%s' dispatch failed\n",
				tq_arg.name, sym2str(splat_taskq_test6_func));
				rc = -EINVAL;
				goto out;
		}

		if (tq_id[i].id != id) {
			splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
			        "Taskq '%s' expected taskqid %d got %d\n",
				tq_arg.name, (int)tq_id[i].id, (int)id);
				rc = -EINVAL;
				goto out;
		}
		/* Sleep to let tasks 1-3 start executing. */
		if (i == 2)
			msleep(100);
	}

	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME, "Taskq '%s' "
		     "waiting for taskqid %d completion\n", tq_arg.name,
		     SPLAT_TASKQ_ORDER_MAX);
	taskq_wait_id(tq, SPLAT_TASKQ_ORDER_MAX);
	rc = splat_taskq_test_order(&tq_arg, order);

out:
	splat_vprint(file, SPLAT_TASKQ_TEST6_NAME,
		     "Taskq '%s' destroying\n", tq_arg.name);
	taskq_destroy(tq);

	return rc;
}
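/*
 * A sketch of TQ_FRONT on its own: a later dispatch that should run
 * before work already sitting in the queue.  normal_func() and
 * urgent_func() are hypothetical.
 */
static void normal_func(void *arg);
static void urgent_func(void *arg);

void
queue_with_priority(taskq_t *tq)
{
	(void) taskq_dispatch(tq, normal_func, NULL, TQ_SLEEP);

	/* Jumps ahead of normal_func if it has not started yet. */
	(void) taskq_dispatch(tq, urgent_func, NULL, TQ_SLEEP | TQ_FRONT);
}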