Example #1
/*
 * Thread to set up the switching mode. This thread is initially created
 * during vsw_attach(). It invokes vsw_setup_switching() and keeps retrying
 * while the returned value is EAGAIN. The thread exits when the switching
 * mode setup succeeds or when the error returned is not EAGAIN. This thread
 * may also get created from vsw_update_md_prop() if the switching mode needs
 * to be updated.
 */
void
vsw_setup_switching_thread(void *arg)
{
	callb_cpr_t	cprinfo;
	vsw_t		*vswp = (vsw_t *)arg;
	clock_t		wait_time;
	clock_t		xwait;
	clock_t		wait_rv;
	int		rv;

	/* wait time used on successive retries */
	xwait = drv_usectohz(vsw_setup_switching_delay * MICROSEC);

	CALLB_CPR_INIT(&cprinfo, &vswp->sw_thr_lock, callb_generic_cpr,
	    "vsw_setup_sw_thread");

	mutex_enter(&vswp->sw_thr_lock);

	while ((vswp->sw_thr_flags & VSW_SWTHR_STOP) == 0) {

		CALLB_CPR_SAFE_BEGIN(&cprinfo);

		/* Wait for some time before (re)trying vsw_setup_switching() */
		wait_time = ddi_get_lbolt() + xwait;
		while ((vswp->sw_thr_flags & VSW_SWTHR_STOP) == 0) {
			wait_rv = cv_timedwait(&vswp->sw_thr_cv,
			    &vswp->sw_thr_lock, wait_time);
			if (wait_rv == -1) {	/* timed out */
				break;
			}
		}

		CALLB_CPR_SAFE_END(&cprinfo, &vswp->sw_thr_lock);

		if ((vswp->sw_thr_flags & VSW_SWTHR_STOP) != 0) {
			/*
			 * If there is a stop request, process that first and
			 * exit the loop. Continue to hold the mutex which gets
			 * released in CALLB_CPR_EXIT().
			 */
			break;
		}

		mutex_exit(&vswp->sw_thr_lock);
		rv = vsw_setup_switching(vswp);
		if (rv == 0) {
			vsw_setup_switching_post_process(vswp);
		}
		mutex_enter(&vswp->sw_thr_lock);
		if (rv != EAGAIN) {
			break;
		}

	}

	vswp->sw_thr_flags &= ~VSW_SWTHR_STOP;
	vswp->sw_thread = NULL;
	CALLB_CPR_EXIT(&cprinfo);
	thread_exit();
}
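
All of the examples in this listing follow the same CPR (checkpoint/resume) protocol: register with CALLB_CPR_INIT() under a designated mutex, bracket every sleep with CALLB_CPR_SAFE_BEGIN()/CALLB_CPR_SAFE_END(), and finish with CALLB_CPR_EXIT(), which releases that mutex. Below is a minimal sketch of the bare pattern; my_state_t, my_lock, my_cv and MY_STOP are hypothetical names, not taken from any of the drivers shown here.

/* Hypothetical state shared between the worker and its controller. */
typedef struct my_state {
	kmutex_t	my_lock;
	kcondvar_t	my_cv;
	uint_t		my_flags;
} my_state_t;

#define	MY_STOP	0x1		/* hypothetical stop request flag */

static void
my_worker_thread(void *arg)
{
	my_state_t	*sp = arg;
	callb_cpr_t	cprinfo;

	/* Register with the CPR framework; my_lock protects the CPR state. */
	CALLB_CPR_INIT(&cprinfo, &sp->my_lock, callb_generic_cpr, "my_worker");

	mutex_enter(&sp->my_lock);
	while ((sp->my_flags & MY_STOP) == 0) {
		/* Mark the thread safe to suspend while it sleeps. */
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		cv_wait(&sp->my_cv, &sp->my_lock);
		CALLB_CPR_SAFE_END(&cprinfo, &sp->my_lock);

		/* ... process whatever work the wakeup announced ... */
	}

	/* CALLB_CPR_EXIT() drops my_lock; do not call mutex_exit() after it. */
	CALLB_CPR_EXIT(&cprinfo);
	thread_exit();
}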
Example #2
File: mmp.c Project: LLNL/zfs
static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
	ASSERT(*mpp != NULL);
	*mpp = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &mmp->mmp_thread_lock */
	thread_exit();
}
Example #3
File: txg.c Project: Zak-Adelman/zfs
static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}
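
Examples #2 and #3 show the exit-helper idiom: the worker thread still holds the CPR mutex when it calls the helper, and the CALLB_CPR_EXIT() inside the helper is what releases that mutex before thread_exit(). The following is a hedged sketch of the caller side, reusing the field names visible in Example #2 (mmp_thread_lock, mmp_thread_cv); the function body and the mmp->mmp_thread field are assumptions for illustration, not the actual LLNL/zfs code.

static void
caller_of_mmp_thread_exit(void *arg)	/* hypothetical; not the real mmp_thread() */
{
	mmp_thread_t	*mmp = arg;
	callb_cpr_t	cpr;

	CALLB_CPR_INIT(&cpr, &mmp->mmp_thread_lock, callb_generic_cpr,
	    "mmp_thread");
	mutex_enter(&mmp->mmp_thread_lock);

	/* ... main loop; sleeps bracketed with CALLB_CPR_SAFE_BEGIN/END ... */

	/*
	 * Exit while still holding mmp_thread_lock: the helper clears the
	 * thread pointer, wakes waiters, and lets CALLB_CPR_EXIT() drop the
	 * lock before thread_exit().  (mmp->mmp_thread is assumed to be the
	 * kthread_t * that mmp_thread_exit() NULLs out.)
	 */
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}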
Example #4
/*
 * Worker thread for processing task queue.
 */
static void
taskq_thread(void *arg)
{
    taskq_t *tq = arg;
    taskq_ent_t *tqe;
    callb_cpr_t cprinfo;
    hrtime_t start, end;

    CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr, tq->tq_name);

    mutex_enter(&tq->tq_lock);
    while (tq->tq_flags & TASKQ_ACTIVE) {
        if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
            if (--tq->tq_active == 0)
                cv_broadcast(&tq->tq_wait_cv);
            if (tq->tq_flags & TASKQ_CPR_SAFE) {
                cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
            } else {
                CALLB_CPR_SAFE_BEGIN(&cprinfo);
                cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
                CALLB_CPR_SAFE_END(&cprinfo, &tq->tq_lock);
            }
            tq->tq_active++;
            continue;
        }
        tqe->tqent_prev->tqent_next = tqe->tqent_next;
        tqe->tqent_next->tqent_prev = tqe->tqent_prev;
        mutex_exit(&tq->tq_lock);

        rw_enter(&tq->tq_threadlock, RW_READER);
        start = gethrtime();
        DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
                      taskq_ent_t *, tqe);
        tqe->tqent_func(tqe->tqent_arg);
        DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
                      taskq_ent_t *, tqe);
        end = gethrtime();
        rw_exit(&tq->tq_threadlock);

        mutex_enter(&tq->tq_lock);
        tq->tq_totaltime += end - start;
        tq->tq_executed++;

        taskq_ent_free(tq, tqe);
    }
    tq->tq_nthreads--;
    cv_broadcast(&tq->tq_wait_cv);
    ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
    CALLB_CPR_EXIT(&cprinfo);
    thread_exit();
}
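
For context, here is a hedged sketch of how a client might drive the worker in Example #4 through the standard kernel taskq interfaces (taskq_create, taskq_dispatch, taskq_wait, taskq_destroy); my_softc_t, sc_tq, my_job, my_start and my_stop are hypothetical names introduced only for this sketch.

/* Hypothetical per-driver soft state holding the task queue handle. */
typedef struct my_softc {
	taskq_t	*sc_tq;
} my_softc_t;

static void
my_job(void *arg)
{
	/* Runs inside taskq_thread() above, between the DTrace exec probes. */
}

static int
my_start(my_softc_t *sc)
{
	/* One worker thread, pre-populated entry cache. */
	sc->sc_tq = taskq_create("my_tq", 1, minclsyspri, 1, INT_MAX,
	    TASKQ_PREPOPULATE);
	(void) taskq_dispatch(sc->sc_tq, my_job, sc, TQ_SLEEP);
	return (0);
}

static void
my_stop(my_softc_t *sc)
{
	taskq_wait(sc->sc_tq);		/* drain pending and running jobs */
	taskq_destroy(sc->sc_tq);
}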
Example #5
File: zfs_dir.c Project: andreiw/polaris
void
zfs_delete_thread(void *arg)
{
	zfsvfs_t	*zfsvfs = arg;
	zfs_delete_t 	*zd = &zfsvfs->z_delete_head;
	znode_t		*zp;
	callb_cpr_t	cprinfo;
	int		drained;

	CALLB_CPR_INIT(&cprinfo, &zd->z_mutex, callb_generic_cpr, "zfs_delete");

	mutex_enter(&zd->z_mutex);

	if (!zd->z_drained && !zd->z_draining) {
		zd->z_draining = B_TRUE;
		mutex_exit(&zd->z_mutex);
		drained = zfs_drain_dq(zfsvfs);
		mutex_enter(&zd->z_mutex);
		zd->z_draining = B_FALSE;
		zd->z_drained = drained;
		cv_broadcast(&zd->z_quiesce_cv);
	}

	while (zd->z_thread_count <= zd->z_thread_target) {
		zp = list_head(&zd->z_znodes);
		if (zp == NULL) {
			ASSERT(zd->z_znode_count == 0);
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&zd->z_cv, &zd->z_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &zd->z_mutex);
			continue;
		}
		ASSERT(zd->z_znode_count != 0);
		list_remove(&zd->z_znodes, zp);
		if (--zd->z_znode_count == 0)
			cv_broadcast(&zd->z_quiesce_cv);
		mutex_exit(&zd->z_mutex);
		zfs_rmnode(zp);
		(void) zfs_delete_thread_target(zfsvfs, -1);
		mutex_enter(&zd->z_mutex);
	}

	ASSERT(zd->z_thread_count != 0);
	if (--zd->z_thread_count == 0)
		cv_broadcast(&zd->z_cv);

	CALLB_CPR_EXIT(&cprinfo);	/* NB: drops z_mutex */
	thread_exit();
}
Example #6
static void
do_recall(struct recall_arg *arg)
{
	rfs4_deleg_state_t *dsp = arg->dsp;
	rfs4_file_t *fp = dsp->rds_finfo;
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr, "nfsv4Recall");

	/*
	 * It is possible that before this thread starts
	 * the client has sent us a return_delegation, and
	 * if that is the case we do not need to send the
	 * recall callback.
	 */
	if (dsp->rds_dtype != OPEN_DELEGATE_NONE) {
		DTRACE_PROBE3(nfss__i__recall,
		    struct recall_arg *, arg,
		    struct rfs4_deleg_state_t *, dsp,
		    struct rfs4_file_t *, fp);

		if (arg->recall)
			(void) (*arg->recall)(dsp, arg->trunc);
	}

	mutex_enter(fp->rf_dinfo.rd_recall_lock);
	/*
	 * Recall count may go negative if the parent thread that is
	 * creating the individual callback threads does not modify
	 * the recall_count field before the callback thread actually
	 * gets a response from the CB_RECALL.
	 */
	fp->rf_dinfo.rd_recall_count--;
	if (fp->rf_dinfo.rd_recall_count == 0)
		cv_signal(fp->rf_dinfo.rd_recall_cv);
	mutex_exit(fp->rf_dinfo.rd_recall_lock);

	mutex_enter(&cpr_lock);
	CALLB_CPR_EXIT(&cpr_info);
	mutex_destroy(&cpr_lock);

	rfs4_deleg_state_rele(dsp); /* release the hold for this thread */

	kmem_free(arg, sizeof (struct recall_arg));
}
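
Examples #6 and #8 run in short-lived threads that have no long-lived subsystem mutex to hand to the CPR framework, so each one creates a private mutex purely to satisfy the CPR protocol and destroys it once CALLB_CPR_EXIT() has released it. A minimal, hypothetical sketch of just that idiom:

static void
one_shot_worker(void *arg)
{
	callb_cpr_t	cpr_info;
	kmutex_t	cpr_lock;

	/* Private lock whose only purpose is to satisfy the CPR protocol. */
	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr, "one_shot");

	/* ... do the one-shot work; no cv_wait, so no SAFE_BEGIN/END needed ... */

	mutex_enter(&cpr_lock);
	CALLB_CPR_EXIT(&cpr_info);	/* drops cpr_lock */
	mutex_destroy(&cpr_lock);
	thread_exit();
}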
Example #7
File: devctl.c Project: andreiw/polaris
static void
nvpflush_daemon(void)
{
	callb_cpr_t cprinfo;
	clock_t clk;
	int rval;
	int i;

	ASSERT(modrootloaded);

	nvpflush_thread = curthread;
	NVPDAEMON_DEBUG((CE_CONT, "nvpdaemon: init\n"));

	CALLB_CPR_INIT(&cprinfo, &nvpflush_lock, callb_generic_cpr, "nvp");
	mutex_enter(&nvpflush_lock);
	for (;;) {

		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		while (do_nvpflush == 0) {
			clk = cv_timedwait(&nvpflush_cv, &nvpflush_lock,
			    ddi_get_lbolt() +
				(nvpdaemon_idle_time * TICKS_PER_SECOND));
			if (clk == -1 &&
			    do_nvpflush == 0 && nvpflush_timer_busy == 0) {
				/*
				 * Note that CALLB_CPR_EXIT calls mutex_exit()
				 * on the lock passed in to CALLB_CPR_INIT,
				 * so the lock must be held when invoking it.
				 */
				CALLB_CPR_SAFE_END(&cprinfo, &nvpflush_lock);
				NVPDAEMON_DEBUG((CE_CONT, "nvpdaemon: exit\n"));
				ASSERT(mutex_owned(&nvpflush_lock));
				nvpflush_thr_id = NULL;
				nvpflush_daemon_active = 0;
				CALLB_CPR_EXIT(&cprinfo);
				thread_exit();
			}
		}
		CALLB_CPR_SAFE_END(&cprinfo, &nvpflush_lock);

		nvpbusy = 1;
		do_nvpflush = 0;
		mutex_exit(&nvpflush_lock);

		/*
		 * Try flushing what's dirty, reschedule if there's
		 * a failure or data gets marked as dirty again.
		 */
		for (i = 0; i < NCACHEFDS; i++) {
			rw_enter(&cachefds[i]->nvf_lock, RW_READER);
			if (NVF_IS_DIRTY(cachefds[i])) {
				NVPDAEMON_DEBUG((CE_CONT,
				    "nvpdaemon: flush %s\n",
				    cachefds[i]->nvf_name));
				rw_exit(&cachefds[i]->nvf_lock);
				rval = nvpflush_one(cachefds[i]);
				rw_enter(&cachefds[i]->nvf_lock, RW_READER);
				if (rval != DDI_SUCCESS ||
				    NVF_IS_DIRTY(cachefds[i])) {
					rw_exit(&cachefds[i]->nvf_lock);
					NVPDAEMON_DEBUG((CE_CONT,
					    "nvpdaemon: %s dirty again\n",
					    cachefds[i]->nvf_name));
					wake_nvpflush_daemon();
				} else {
					rw_exit(&cachefds[i]->nvf_lock);
					nvf_write_complete(cachefds[i]);
				}
			} else {
				NVPDAEMON_DEBUG((CE_CONT,
				    "nvpdaemon: not dirty %s\n",
				    cachefds[i]->nvf_name));
				rw_exit(&cachefds[i]->nvf_lock);
			}
		}

		mutex_enter(&nvpflush_lock);
		nvpbusy = 0;
	}
}
Example #8
static void
do_recall_file(struct master_recall_args *map)
{
	rfs4_file_t *fp = map->fp;
	rfs4_deleg_state_t *dsp;
	struct recall_arg *arg;
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;
	int32_t recall_count;

	rfs4_dbe_lock(fp->rf_dbe);

	/* Recall already in progress ? */
	mutex_enter(fp->rf_dinfo.rd_recall_lock);
	if (fp->rf_dinfo.rd_recall_count != 0) {
		mutex_exit(fp->rf_dinfo.rd_recall_lock);
		rfs4_dbe_rele_nolock(fp->rf_dbe);
		rfs4_dbe_unlock(fp->rf_dbe);
		kmem_free(map, sizeof (struct master_recall_args));
		return;
	}

	mutex_exit(fp->rf_dinfo.rd_recall_lock);

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr,	"v4RecallFile");

	recall_count = 0;
	for (dsp = list_head(&fp->rf_delegstatelist); dsp != NULL;
	    dsp = list_next(&fp->rf_delegstatelist, dsp)) {

		rfs4_dbe_lock(dsp->rds_dbe);
		/* If this delegation state is being reaped, skip it. */
		if (rfs4_dbe_is_invalid(dsp->rds_dbe)) {
			rfs4_dbe_unlock(dsp->rds_dbe);
			continue;
		}

		/* hold for receiving thread */
		rfs4_dbe_hold(dsp->rds_dbe);
		rfs4_dbe_unlock(dsp->rds_dbe);

		arg = kmem_alloc(sizeof (struct recall_arg), KM_SLEEP);
		arg->recall = map->recall;
		arg->trunc = map->trunc;
		arg->dsp = dsp;

		recall_count++;

		(void) thread_create(NULL, 0, do_recall, arg, 0, &p0, TS_RUN,
		    minclsyspri);
	}

	rfs4_dbe_unlock(fp->rf_dbe);

	mutex_enter(fp->rf_dinfo.rd_recall_lock);
	/*
	 * Recall count may go negative if the parent thread that is
	 * creating the individual callback threads does not modify
	 * the recall_count field before the callback thread actually
	 * gets a response from the CB_RECALL.
	 */
	fp->rf_dinfo.rd_recall_count += recall_count;
	while (fp->rf_dinfo.rd_recall_count)
		cv_wait(fp->rf_dinfo.rd_recall_cv, fp->rf_dinfo.rd_recall_lock);

	mutex_exit(fp->rf_dinfo.rd_recall_lock);

	DTRACE_PROBE1(nfss__i__recall_done, rfs4_file_t *, fp);
	rfs4_file_rele(fp);
	kmem_free(map, sizeof (struct master_recall_args));
	mutex_enter(&cpr_lock);
	CALLB_CPR_EXIT(&cpr_info);
	mutex_destroy(&cpr_lock);
}
Example #9
File: kctl_wr.c Project: andreiw/polaris
/*ARGSUSED*/
static void
kctl_wr_thread(void *arg)
{
	callb_cpr_t cprinfo;
	kmutex_t cprlock;

	mutex_init(&cprlock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cprinfo, &cprlock, callb_generic_cpr, "kmdb work");

	for (;;) {
		/*
		 * XXX what should I do here for panic?  It'll spin unless I
		 * can figure out a way to park it.  Presumably I don't want to
		 * let it exit.
		 */
		mutex_enter(&cprlock);
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		mutex_exit(&cprlock);

		sema_p(&kctl.kctl_wr_avail_sem);

		mutex_enter(&cprlock);
		CALLB_CPR_SAFE_END(&cprinfo, &cprlock);
		mutex_exit(&cprlock);

		kctl_dprintf("kctl worker thread - waking up");

		if (kmdb_kdi_get_unload_request() ||
		    kctl.kctl_wr_state != KCTL_WR_ST_RUN) {
			/*
			 * We've either got a debugger-initiated unload (if
			 * unload_request returned true), or we're stopping due
			 * to an error discovered by the driver (if
			 * kctl_wr_state is no longer KCTL_WR_ST_RUN).  Start
			 * cleaning up.
			 */

			/*
			 * The debugger has already deactivated itself, and will
			 * have dumped a bunch of stuff on the queue.  We need
			 * to process it before exiting.
			 */
			(void) kmdb_wr_driver_process(kctl_wr_process_cb,
			    KCTL_WR_PROCESS_UNLOADING);
			break;
		}

		/*
		 * A non-zero return means we've passed messages back to the
		 * debugger for processing, so we need to wake the debugger up.
		 */
		if (kctl_wr_process() > 0)
			kdi_dvec_enter();
	}

	/*
	 * NULL out the dmod search path, so we can send the current one back
	 * to the debugger.  XXX this should probably be somewhere else.
	 */
	kctl_dmod_path_reset();

	/*
	 * The debugger will send us unload notifications for each dmod that it
	 * noticed.  If, for example, the debugger is unloaded before the first
	 * start, it won't have noticed any of the dmods we loaded.  We'll need
	 * to initiate the unloads ourselves.
	 */
	kctl_dmod_unload_all();

	kctl.kctl_wr_state = KCTL_WR_ST_STOPPED;

	/*
	 * Must be last, as it concludes by setting state to INACTIVE.  The
	 * kctl data structure must not be accessed by this thread after that
	 * point.
	 */
	kctl_cleanup();

	mutex_enter(&cprlock);
	CALLB_CPR_EXIT(&cprinfo);
	mutex_destroy(&cprlock);
}
Example #10
/*
 * Main routine for the callbacks notifications thread
 */
static void
i_mac_notify_thread(void *arg)
{
	mac_impl_t	*mip = arg;
	callb_cpr_t	cprinfo;
	mac_cb_t	*mcb;
	mac_cb_info_t	*mcbi;
	mac_notify_cb_t	*mncb;

	mcbi = &mip->mi_notify_cb_info;
	CALLB_CPR_INIT(&cprinfo, mcbi->mcbi_lockp, callb_generic_cpr,
	    "i_mac_notify_thread");

	mutex_enter(mcbi->mcbi_lockp);

	for (;;) {
		uint32_t	bits;
		uint32_t	type;

		bits = mip->mi_notify_bits;
		if (bits == 0) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&mcbi->mcbi_cv, mcbi->mcbi_lockp);
			CALLB_CPR_SAFE_END(&cprinfo, mcbi->mcbi_lockp);
			continue;
		}
		mip->mi_notify_bits = 0;
		if ((bits & (1 << MAC_NNOTE)) != 0) {
			/* request to quit */
			ASSERT(mip->mi_state_flags & MIS_DISABLED);
			break;
		}

		mutex_exit(mcbi->mcbi_lockp);

		/*
		 * Log link changes on the actual link, but then do reports on
		 * synthetic state (if part of a bridge).
		 */
		if ((bits & (1 << MAC_NOTE_LOWLINK)) != 0) {
			link_state_t newstate;
			mac_handle_t mh;

			i_mac_log_link_state(mip);
			newstate = mip->mi_lowlinkstate;
			if (mip->mi_bridge_link != NULL) {
				mutex_enter(&mip->mi_bridge_lock);
				if ((mh = mip->mi_bridge_link) != NULL) {
					newstate = mac_bridge_ls_cb(mh,
					    newstate);
				}
				mutex_exit(&mip->mi_bridge_lock);
			}
			if (newstate != mip->mi_linkstate) {
				mip->mi_linkstate = newstate;
				bits |= 1 << MAC_NOTE_LINK;
			}
		}

		/*
		 * Do notification callbacks for each notification type.
		 */
		for (type = 0; type < MAC_NNOTE; type++) {
			if ((bits & (1 << type)) == 0) {
				continue;
			}

			if (mac_notify_cb_list[type] != NULL)
				(*mac_notify_cb_list[type])(mip);

			/*
			 * Walk the list of notifications.
			 */
			MAC_CALLBACK_WALKER_INC(&mip->mi_notify_cb_info);
			for (mcb = mip->mi_notify_cb_list; mcb != NULL;
			    mcb = mcb->mcb_nextp) {
				mncb = (mac_notify_cb_t *)mcb->mcb_objp;
				mncb->mncb_fn(mncb->mncb_arg, type);
			}
			MAC_CALLBACK_WALKER_DCR(&mip->mi_notify_cb_info,
			    &mip->mi_notify_cb_list);
		}

		mutex_enter(mcbi->mcbi_lockp);
	}

	mip->mi_state_flags |= MIS_NOTIFY_DONE;
	cv_broadcast(&mcbi->mcbi_cv);

	/* CALLB_CPR_EXIT drops the lock */
	CALLB_CPR_EXIT(&cprinfo);
	thread_exit();
}
/*
 * mac_soft_ring_worker
 *
 * The soft ring worker routine to process any queued packets. In
 * the normal case, the worker thread is bound to a CPU. If the soft
 * ring is dealing with TCP packets, the worker thread will be bound
 * to the same CPU as the TCP squeue.
 */
static void
mac_soft_ring_worker(mac_soft_ring_t *ringp)
{
	kmutex_t *lock = &ringp->s_ring_lock;
	kcondvar_t *async = &ringp->s_ring_async;
	mac_soft_ring_set_t *srs = ringp->s_ring_set;
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "mac_soft_ring");
	mutex_enter(lock);
start:
	for (;;) {
		while (((ringp->s_ring_first == NULL ||
		    (ringp->s_ring_state & (S_RING_BLOCK|S_RING_BLANK))) &&
		    !(ringp->s_ring_state & S_RING_PAUSE)) ||
		    (ringp->s_ring_state & S_RING_PROC)) {

			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(async, lock);
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

		/*
		 * Either we have work to do, or we have been asked to
		 * shutdown temporarily or permanently
		 */
		if (ringp->s_ring_state & S_RING_PAUSE)
			goto done;

		ringp->s_ring_drain_func(ringp);
	}
done:
	mutex_exit(lock);
	mutex_enter(&srs->srs_lock);
	mutex_enter(lock);

	ringp->s_ring_state |= S_RING_QUIESCE_DONE;
	if (!(ringp->s_ring_state & S_RING_CONDEMNED)) {
		srs->srs_soft_ring_quiesced_count++;
		cv_broadcast(&srs->srs_async);
		mutex_exit(&srs->srs_lock);
		while (!(ringp->s_ring_state &
		    (S_RING_RESTART | S_RING_CONDEMNED)))
			cv_wait(&ringp->s_ring_async, &ringp->s_ring_lock);
		mutex_exit(lock);
		mutex_enter(&srs->srs_lock);
		mutex_enter(lock);
		srs->srs_soft_ring_quiesced_count--;
		if (ringp->s_ring_state & S_RING_RESTART) {
			ASSERT(!(ringp->s_ring_state & S_RING_CONDEMNED));
			ringp->s_ring_state &= ~(S_RING_RESTART |
			    S_RING_QUIESCE | S_RING_QUIESCE_DONE);
			cv_broadcast(&srs->srs_async);
			mutex_exit(&srs->srs_lock);
			goto start;
		}
	}
	ASSERT(ringp->s_ring_state & S_RING_CONDEMNED);
	ringp->s_ring_state |= S_RING_CONDEMNED_DONE;
	CALLB_CPR_EXIT(&cprinfo);
	srs->srs_soft_ring_condemned_count++;
	cv_broadcast(&srs->srs_async);
	mutex_exit(&srs->srs_lock);
	thread_exit();
}