Example #1
/*
 * The page_retire_thread loops forever, looking to see if there are
 * pages still waiting to be retired.
 */
static void
page_retire_thread(void)
{
	callb_cpr_t c;

	CALLB_CPR_INIT(&c, &pr_thread_mutex, callb_generic_cpr, "page_retire");

	mutex_enter(&pr_thread_mutex);
	for (;;) {
		if (pr_enable && PR_KSTAT_PENDING) {
			/*
			 * Sigh. It's SO broken how we have to try to shake
			 * loose the holder of the page. Since we have no
			 * idea who or what has it locked, we go bang on
			 * every door in the city to try to locate it.
			 */
			kmem_reap();
			seg_preap();
			page_retire_hunt(page_retire_thread_cb);
			CALLB_CPR_SAFE_BEGIN(&c);
			(void) cv_timedwait(&pr_cv, &pr_thread_mutex,
			    lbolt + pr_thread_shortwait);
			CALLB_CPR_SAFE_END(&c, &pr_thread_mutex);
		} else {
			CALLB_CPR_SAFE_BEGIN(&c);
			(void) cv_timedwait(&pr_cv, &pr_thread_mutex,
			    lbolt + pr_thread_longwait);
			CALLB_CPR_SAFE_END(&c, &pr_thread_mutex);
		}
	}
	/*NOTREACHED*/
}
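
All of these examples follow the same CALLB_CPR discipline: initialize the callb_cpr_t against the mutex that will be held across waits, bracket every cv_wait()/cv_timedwait() with CALLB_CPR_SAFE_BEGIN()/CALLB_CPR_SAFE_END() while holding that mutex, and finish with CALLB_CPR_EXIT(), which drops it. The following is a minimal sketch of that skeleton, not taken from any of the examples; my_lock, my_cv, my_work_pending and my_worker are hypothetical names, and the lock and cv are assumed to have been set up elsewhere with mutex_init()/cv_init().

#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/callb.h>
#include <sys/thread.h>

static kmutex_t		my_lock;	/* initialized elsewhere */
static kcondvar_t	my_cv;		/* initialized elsewhere */
static boolean_t	my_work_pending;
static boolean_t	my_thread_exiting;

static void
my_worker(void)
{
	callb_cpr_t cprinfo;

	/* Register with the CPR framework against the lock held across waits. */
	CALLB_CPR_INIT(&cprinfo, &my_lock, callb_generic_cpr, "my_worker");

	mutex_enter(&my_lock);
	while (!my_thread_exiting) {
		while (!my_work_pending && !my_thread_exiting) {
			/* Declare the thread CPR-safe only while it sleeps. */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&my_cv, &my_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &my_lock);
		}
		if (my_work_pending) {
			my_work_pending = B_FALSE;
			mutex_exit(&my_lock);
			/* ... do the actual work without holding my_lock ... */
			mutex_enter(&my_lock);
		}
	}

	/* CALLB_CPR_EXIT() drops my_lock; do not call mutex_exit() after it. */
	CALLB_CPR_EXIT(&cprinfo);
	thread_exit();
}
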
Example #2
/*
 * Backup thread to commit task resource usage when taskq_dispatch() fails.
 */
static void
task_commit()
{
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, &task_commit_lock, callb_generic_cpr,
	    "task_commit_thread");

	mutex_enter(&task_commit_lock);

	for (;;) {
		while (task_commit_head == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&task_commit_cv, &task_commit_lock);
			CALLB_CPR_SAFE_END(&cprinfo, &task_commit_lock);
		}
		while (task_commit_head != NULL) {
			task_t *tk;

			tk = task_commit_head;
			task_commit_head = task_commit_head->tk_commit_next;
			if (task_commit_head == NULL)
				task_commit_tail = NULL;
			mutex_exit(&task_commit_lock);
			exacct_commit_task(tk);
			mutex_enter(&task_commit_lock);
		}
	}
}
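
The producer side of this backup path is not shown above. A hypothetical sketch of it, assuming only the list fields visible in the example (task_commit_head, task_commit_tail, tk_commit_next) and not the actual illumos implementation, would append under task_commit_lock and wake the thread:

/* Hypothetical enqueue helper; the real producer may differ. */
static void
my_task_commit_enqueue(task_t *tk)
{
	mutex_enter(&task_commit_lock);
	tk->tk_commit_next = NULL;
	if (task_commit_tail == NULL)
		task_commit_head = tk;
	else
		task_commit_tail->tk_commit_next = tk;
	task_commit_tail = tk;
	cv_signal(&task_commit_cv);
	mutex_exit(&task_commit_lock);
}
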
Example #3
/*
 * This is the main balloon thread.  Wait on the cv.  When woken, if our
 * reservation has changed, call the appropriate function to adjust the
 * reservation.
 */
static void
balloon_worker_thread(void)
{
	uint_t		bln_wait;
	callb_cpr_t	cprinfo;
	spgcnt_t	rv;

	bln_wait = bln_wait_sec;

	CALLB_CPR_INIT(&cprinfo, &bln_mutex, callb_generic_cpr, "balloon");
	for (;;) {
		rv = 0;

		mutex_enter(&bln_mutex);
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		if (bln_stats.bln_new_target != bln_stats.bln_current_pages) {
			/*
			 * We weren't able to fully complete the request
			 * last time through, so try again.
			 */
			(void) cv_reltimedwait(&bln_cv, &bln_mutex,
			    (bln_wait * hz), TR_CLOCK_TICK);
		} else {
			cv_wait(&bln_cv, &bln_mutex);
		}
		CALLB_CPR_SAFE_END(&cprinfo, &bln_mutex);

		if (bln_stats.bln_new_target != bln_stats.bln_current_pages) {
			if (bln_stats.bln_new_target <
			    bln_stats.bln_current_pages) {
				/* reservation shrunk */
				rv = -balloon_dec_reservation(
				    bln_stats.bln_current_pages -
				    bln_stats.bln_new_target);
			} else if (bln_stats.bln_new_target >
			    bln_stats.bln_current_pages) {
				/* reservation grew */
				rv = balloon_inc_reservation(
				    bln_stats.bln_new_target -
				    bln_stats.bln_current_pages);
			}
		}
		if (rv == 0) {
			if (bln_wait == 0) {
				bln_wait = 1;
			} else {
				bln_wait <<= bln_wait_shift;
			}
		} else {
			bln_stats.bln_current_pages += rv;
			bln_wait = bln_wait_sec;
		}
		if (bln_stats.bln_current_pages < bln_stats.bln_low)
			bln_stats.bln_low = bln_stats.bln_current_pages;
		else if (bln_stats.bln_current_pages > bln_stats.bln_high)
			bln_stats.bln_high = bln_stats.bln_current_pages;
		mutex_exit(&bln_mutex);
	}
}
Example #4
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock, time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
Example #5
File: txg.c Project: awesome/zfs
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait_interruptible(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait_interruptible(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
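
Both variants of txg_thread_wait() assume the caller already holds tx->tx_sync_lock and owns the callb_cpr_t. A simplified, hypothetical caller following the Example #5 variant is sketched below; tx_sync_cv and the one-second timeout are assumptions, and this is not the actual ZFS sync thread.

static void
my_sync_loop(tx_state_t *tx)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &tx->tx_sync_lock, callb_generic_cpr, "my_sync");

	mutex_enter(&tx->tx_sync_lock);
	for (;;) {
		/* Sleep CPR-safe until signaled or until hz ticks elapse. */
		txg_thread_wait(tx, &cpr, &tx->tx_sync_cv, hz);
		/* ... sync work would go here, still under tx_sync_lock ... */
	}
	/* NOTREACHED: a real thread would CALLB_CPR_EXIT() on shutdown. */
}
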
Example #6
/*
 * Worker thread for processing task queue.
 */
static void
taskq_thread(void *arg)
{
    taskq_t *tq = arg;
    taskq_ent_t *tqe;
    callb_cpr_t cprinfo;
    hrtime_t start, end;

    CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr, tq->tq_name);

    mutex_enter(&tq->tq_lock);
    while (tq->tq_flags & TASKQ_ACTIVE) {
        if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
            if (--tq->tq_active == 0)
                cv_broadcast(&tq->tq_wait_cv);
            if (tq->tq_flags & TASKQ_CPR_SAFE) {
                cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
            } else {
                CALLB_CPR_SAFE_BEGIN(&cprinfo);
                cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
                CALLB_CPR_SAFE_END(&cprinfo, &tq->tq_lock);
            }
            tq->tq_active++;
            continue;
        }
        tqe->tqent_prev->tqent_next = tqe->tqent_next;
        tqe->tqent_next->tqent_prev = tqe->tqent_prev;
        mutex_exit(&tq->tq_lock);

        rw_enter(&tq->tq_threadlock, RW_READER);
        start = gethrtime();
        DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
                      taskq_ent_t *, tqe);
        tqe->tqent_func(tqe->tqent_arg);
        DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
                      taskq_ent_t *, tqe);
        end = gethrtime();
        rw_exit(&tq->tq_threadlock);

        mutex_enter(&tq->tq_lock);
        tq->tq_totaltime += end - start;
        tq->tq_executed++;

        taskq_ent_free(tq, tqe);
    }
    tq->tq_nthreads--;
    cv_broadcast(&tq->tq_wait_cv);
    ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
    CALLB_CPR_EXIT(&cprinfo);
    thread_exit();
}
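
The loop above drains entries that arrive through the standard taskq(9F) interfaces. A brief usage sketch of how work reaches taskq_thread(); my_callback and my_dispatch_example are invented names, while taskq_create()/taskq_dispatch()/taskq_wait()/taskq_destroy() are the documented entry points:

#include <sys/taskq.h>
#include <sys/disp.h>		/* minclsyspri */

static void
my_callback(void *arg)
{
	/* Runs in taskq_thread() context, between the DTrace probes above. */
}

static void
my_dispatch_example(void)
{
	taskq_t *tq;

	tq = taskq_create("my_taskq", 1, minclsyspri, 1, 8, TASKQ_PREPOPULATE);
	if (taskq_dispatch(tq, my_callback, NULL, TQ_SLEEP) == 0) {
		/* Dispatch failed; the caller must fall back or retry. */
	}
	taskq_wait(tq);			/* wait for my_callback to finish */
	taskq_destroy(tq);
}
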
Example #7
void
zfs_delete_thread(void *arg)
{
	zfsvfs_t	*zfsvfs = arg;
	zfs_delete_t 	*zd = &zfsvfs->z_delete_head;
	znode_t		*zp;
	callb_cpr_t	cprinfo;
	int		drained;

	CALLB_CPR_INIT(&cprinfo, &zd->z_mutex, callb_generic_cpr, "zfs_delete");

	mutex_enter(&zd->z_mutex);

	if (!zd->z_drained && !zd->z_draining) {
		zd->z_draining = B_TRUE;
		mutex_exit(&zd->z_mutex);
		drained = zfs_drain_dq(zfsvfs);
		mutex_enter(&zd->z_mutex);
		zd->z_draining = B_FALSE;
		zd->z_drained = drained;
		cv_broadcast(&zd->z_quiesce_cv);
	}

	while (zd->z_thread_count <= zd->z_thread_target) {
		zp = list_head(&zd->z_znodes);
		if (zp == NULL) {
			ASSERT(zd->z_znode_count == 0);
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&zd->z_cv, &zd->z_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &zd->z_mutex);
			continue;
		}
		ASSERT(zd->z_znode_count != 0);
		list_remove(&zd->z_znodes, zp);
		if (--zd->z_znode_count == 0)
			cv_broadcast(&zd->z_quiesce_cv);
		mutex_exit(&zd->z_mutex);
		zfs_rmnode(zp);
		(void) zfs_delete_thread_target(zfsvfs, -1);
		mutex_enter(&zd->z_mutex);
	}

	ASSERT(zd->z_thread_count != 0);
	if (--zd->z_thread_count == 0)
		cv_broadcast(&zd->z_cv);

	CALLB_CPR_EXIT(&cprinfo);	/* NB: drops z_mutex */
	thread_exit();
}
Example #8
static void
trans_roll_wait(mt_map_t *logmap, callb_cpr_t *cprinfop)
{
	mutex_enter(&logmap->mtm_mutex);
	logmap->mtm_ref = 0;
	if (logmap->mtm_flags & MTM_FORCE_ROLL) {
		cv_broadcast(&logmap->mtm_from_roll_cv);
	}
	logmap->mtm_flags &= ~(MTM_FORCE_ROLL | MTM_ROLLING);
	CALLB_CPR_SAFE_BEGIN(cprinfop);
	(void) cv_reltimedwait(&logmap->mtm_to_roll_cv, &logmap->mtm_mutex,
	    trans_roll_tics, TR_CLOCK_TICK);
	CALLB_CPR_SAFE_END(cprinfop, &logmap->mtm_mutex);
	logmap->mtm_flags |= MTM_ROLLING;
	mutex_exit(&logmap->mtm_mutex);
}
Example #9
static void
nvpflush_daemon(void)
{
	callb_cpr_t cprinfo;
	clock_t clk;
	int rval;
	int i;

	ASSERT(modrootloaded);

	nvpflush_thread = curthread;
	NVPDAEMON_DEBUG((CE_CONT, "nvpdaemon: init\n"));

	CALLB_CPR_INIT(&cprinfo, &nvpflush_lock, callb_generic_cpr, "nvp");
	mutex_enter(&nvpflush_lock);
	for (;;) {

		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		while (do_nvpflush == 0) {
			clk = cv_timedwait(&nvpflush_cv, &nvpflush_lock,
			    ddi_get_lbolt() +
				(nvpdaemon_idle_time * TICKS_PER_SECOND));
			if (clk == -1 &&
			    do_nvpflush == 0 && nvpflush_timer_busy == 0) {
				/*
				 * Note that CALLB_CPR_EXIT calls mutex_exit()
				 * on the lock passed in to CALLB_CPR_INIT,
				 * so the lock must be held when invoking it.
				 */
				CALLB_CPR_SAFE_END(&cprinfo, &nvpflush_lock);
				NVPDAEMON_DEBUG((CE_CONT, "nvpdaemon: exit\n"));
				ASSERT(mutex_owned(&nvpflush_lock));
				nvpflush_thr_id = NULL;
				nvpflush_daemon_active = 0;
				CALLB_CPR_EXIT(&cprinfo);
				thread_exit();
			}
		}
		CALLB_CPR_SAFE_END(&cprinfo, &nvpflush_lock);

		nvpbusy = 1;
		do_nvpflush = 0;
		mutex_exit(&nvpflush_lock);

		/*
		 * Try flushing what's dirty, reschedule if there's
		 * a failure or data gets marked as dirty again.
		 */
		for (i = 0; i < NCACHEFDS; i++) {
			rw_enter(&cachefds[i]->nvf_lock, RW_READER);
			if (NVF_IS_DIRTY(cachefds[i])) {
				NVPDAEMON_DEBUG((CE_CONT,
				    "nvpdaemon: flush %s\n",
				    cachefds[i]->nvf_name));
				rw_exit(&cachefds[i]->nvf_lock);
				rval = nvpflush_one(cachefds[i]);
				rw_enter(&cachefds[i]->nvf_lock, RW_READER);
				if (rval != DDI_SUCCESS ||
				    NVF_IS_DIRTY(cachefds[i])) {
					rw_exit(&cachefds[i]->nvf_lock);
					NVPDAEMON_DEBUG((CE_CONT,
					    "nvpdaemon: %s dirty again\n",
					    cachefds[i]->nvf_name));
					wake_nvpflush_daemon();
				} else {
					rw_exit(&cachefds[i]->nvf_lock);
					nvf_write_complete(cachefds[i]);
				}
			} else {
				NVPDAEMON_DEBUG((CE_CONT,
				    "nvpdaemon: not dirty %s\n",
				    cachefds[i]->nvf_name));
				rw_exit(&cachefds[i]->nvf_lock);
			}
		}

		mutex_enter(&nvpflush_lock);
		nvpbusy = 0;
	}
}
Example #10
File: mmp.c Project: LLNL/zfs
static void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t last_spa_suspended = spa_suspended(spa);
	boolean_t last_spa_multihost = spa_multihost(spa);
	callb_cpr_t cpr;
	hrtime_t max_fail_ns = zfs_multihost_fail_intervals *
	    MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));

	mmp_thread_enter(mmp, &cpr);

	/*
	 * The mmp_write_done() function calculates mmp_delay based on the
	 * prior value of mmp_delay and the elapsed time since the last write.
	 * For the first mmp write, there is no "last write", so we start
	 * with fake, but reasonable, default non-zero values.
	 */
	mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval,
	    MMP_MIN_INTERVAL)) / MAX(vdev_count_leaves(spa), 1);
	mmp->mmp_last_write = gethrtime() - mmp->mmp_delay;

	while (!mmp->mmp_thread_exiting) {
		uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals;
		uint64_t mmp_interval = MSEC2NSEC(
		    MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
		boolean_t suspended = spa_suspended(spa);
		boolean_t multihost = spa_multihost(spa);
		hrtime_t start, next_time;

		start = gethrtime();
		if (multihost) {
			next_time = start + mmp_interval /
			    MAX(vdev_count_leaves(spa), 1);
		} else {
			next_time = start + MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		}

		/*
		 * When MMP goes off => on, or spa goes suspended =>
		 * !suspended, we know no writes occurred recently.  We
		 * update mmp_last_write to give us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mutex_exit(&mmp->mmp_io_lock);
		} else if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}
		last_spa_multihost = multihost;
		last_spa_suspended = suspended;

		/*
		 * Smooth max_fail_ns when its factors are decreased, because
		 * making (max_fail_ns < mmp_interval) results in the pool being
		 * immediately suspended before writes can occur at the new
		 * higher frequency.
		 */
		if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) {
			max_fail_ns = ((31 * max_fail_ns) + (mmp_interval *
			    mmp_fail_intervals)) / 32;
		} else {
			max_fail_ns = mmp_interval * mmp_fail_intervals;
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (!suspended && mmp_fail_intervals && multihost &&
		    (start - mmp->mmp_last_write) > max_fail_ns) {
			zio_suspend(spa, NULL);
		}

		if (multihost)
			mmp_write_uberblock(spa);

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_sig(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, ddi_get_lbolt() +
		    ((next_time - gethrtime()) / (NANOSEC / hz)));
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	if (mmp->mmp_zio_root)
		zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}
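
Example #10 relies on mmp_thread_enter() and mmp_thread_exit() wrappers that are not shown. Judging from how the CPR macros are used in the other examples, they presumably amount to something like the sketch below; this is a reconstruction, not the verbatim source.

static void
mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, "mmp");
	mutex_enter(&mmp->mmp_thread_lock);
}

static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
	ASSERT(*mpp != NULL);
	*mpp = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	CALLB_CPR_EXIT(cpr);		/* drops mmp_thread_lock */
	thread_exit();
}
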
Example #11
/*
 * log_event_deliver - event delivery thread
 *			Deliver all events on the event queue to syseventd.
 *			If the daemon cannot process events, stop event
 *			delivery and wait for an indication from the
 *			daemon to resume delivery.
 *
 *			Once all event buffers have been delivered, wait
 *			until there are more to deliver.
 */
static void
log_event_deliver()
{
	log_eventq_t *q;
	int upcall_err;
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, &eventq_head_mutex, callb_generic_cpr,
	    "logevent");

	/*
	 * eventq_head_mutex is released inside cv_wait() when there are no
	 * more events to process from the eventq.
	 */
	mutex_enter(&eventq_head_mutex);

	for (;;) {
		LOG_DEBUG1((CE_CONT, "log_event_deliver: head = %p\n",
		    (void *)log_eventq_head));

		upcall_err = 0;
		q = log_eventq_head;

		while (q) {
			log_eventq_t *next;

			/*
			 * Release event queue lock during upcall to
			 * syseventd
			 */
			if (log_event_delivery == LOGEVENT_DELIVERY_HOLD) {
				upcall_err = EAGAIN;
				break;
			}

			mutex_exit(&eventq_head_mutex);
			if ((upcall_err = log_event_upcall(&q->arg)) != 0) {
				mutex_enter(&eventq_head_mutex);
				break;
			}

			/*
			 * We may be able to add entries to
			 * the queue now.
			 */
			if (event_qfull_blocked > 0 &&
			    log_eventq_cnt < logevent_max_q_sz) {
				mutex_enter(&event_qfull_mutex);
				if (event_qfull_blocked > 0) {
					cv_signal(&event_qfull_cv);
				}
				mutex_exit(&event_qfull_mutex);
			}

			mutex_enter(&eventq_head_mutex);

			/*
			 * Daemon restart can cause entries to be moved from
			 * the sent queue and put back on the event queue.
			 * If this has occurred, replay event queue
			 * processing from the new queue head.
			 */
			if (q != log_eventq_head) {
				q = log_eventq_head;
				LOG_DEBUG((CE_CONT, "log_event_deliver: "
				    "door upcall/daemon restart race\n"));
			} else {
				/*
				 * Move the event to the sent queue when a
				 * successful delivery has been made.
				 */
				mutex_enter(&eventq_sent_mutex);
				next = q->next;
				q->next = log_eventq_sent;
				log_eventq_sent = q;
				q = next;
				log_eventq_head = q;
				log_eventq_cnt--;
				if (q == NULL) {
					ASSERT(log_eventq_cnt == 0);
					log_eventq_tail = NULL;
				}
				mutex_exit(&eventq_sent_mutex);
			}
		}

		switch (upcall_err) {
		case 0:
			/*
			 * Success. The queue is empty.
			 */
			sysevent_upcall_status = 0;
			break;
		case EAGAIN:
			/*
			 * Delivery is on hold (but functional).
			 */
			sysevent_upcall_status = 0;
			/*
			 * If the user has already signaled for delivery
			 * resumption, continue.  Otherwise, we wait until
			 * we are signaled to continue.
			 */
			if (log_event_delivery == LOGEVENT_DELIVERY_CONT) {
				log_event_delivery = LOGEVENT_DELIVERY_OK;
				continue;
			} else {
				log_event_delivery = LOGEVENT_DELIVERY_HOLD;
			}

			LOG_DEBUG1((CE_CONT, "log_event_deliver: EAGAIN\n"));
			break;
		default:
			LOG_DEBUG((CE_CONT, "log_event_deliver: "
			    "upcall err %d\n", upcall_err));
			sysevent_upcall_status = upcall_err;
			/*
			 * Signal everyone waiting that transport is down
			 */
			if (event_qfull_blocked > 0) {
				mutex_enter(&event_qfull_mutex);
				if (event_qfull_blocked > 0) {
					cv_broadcast(&event_qfull_cv);
				}
				mutex_exit(&event_qfull_mutex);
			}
			break;
		}

		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		cv_wait(&log_event_cv, &eventq_head_mutex);
		CALLB_CPR_SAFE_END(&cprinfo, &eventq_head_mutex);
	}
	/* NOTREACHED */
}
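
The header comment describes a hold/resume handshake, but the resume side is not shown. A hypothetical sketch of it, using only the names visible above (log_event_delivery, LOGEVENT_DELIVERY_CONT, log_event_cv, eventq_head_mutex); the real kernel routine may differ:

static void
my_log_event_resume(void)
{
	mutex_enter(&eventq_head_mutex);
	log_event_delivery = LOGEVENT_DELIVERY_CONT;
	cv_signal(&log_event_cv);
	mutex_exit(&eventq_head_mutex);
}
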
Example #12
/*
 * As part of file system hardening, this daemon is awakened
 * every second to flush cached data which includes the
 * buffer cache, the inode cache and mapped pages.
 */
void
fsflush()
{
	struct buf *bp, *dwp;
	struct hbuf *hp;
	int autoup;
	unsigned int ix, icount, count = 0;
	callb_cpr_t cprinfo;
	uint_t		bcount;
	kmutex_t	*hmp;
	struct vfssw *vswp;

	proc_fsflush = ttoproc(curthread);
	proc_fsflush->p_cstime = 0;
	proc_fsflush->p_stime =  0;
	proc_fsflush->p_cutime =  0;
	proc_fsflush->p_utime = 0;
	bcopy("fsflush", curproc->p_user.u_psargs, 8);
	bcopy("fsflush", curproc->p_user.u_comm, 7);

	mutex_init(&fsflush_lock, NULL, MUTEX_DEFAULT, NULL);
	sema_init(&fsflush_sema, 0, NULL, SEMA_DEFAULT, NULL);

	/*
	 * Setup page coalescing.
	 */
	fsf_npgsz = page_num_pagesizes();
	ASSERT(fsf_npgsz < MAX_PAGESIZES);
	for (ix = 0; ix < fsf_npgsz - 1; ++ix) {
		fsf_pgcnt[ix] =
		    page_get_pagesize(ix + 1) / page_get_pagesize(ix);
		fsf_mask[ix] = page_get_pagecnt(ix + 1) - 1;
	}

	autoup = v.v_autoup * hz;
	icount = v.v_autoup / tune.t_fsflushr;
	CALLB_CPR_INIT(&cprinfo, &fsflush_lock, callb_generic_cpr, "fsflush");
loop:
	sema_v(&fsflush_sema);
	mutex_enter(&fsflush_lock);
	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	cv_wait(&fsflush_cv, &fsflush_lock);		/* wait for clock */
	CALLB_CPR_SAFE_END(&cprinfo, &fsflush_lock);
	mutex_exit(&fsflush_lock);
	sema_p(&fsflush_sema);

	/*
	 * Write back all old B_DELWRI buffers on the freelist.
	 */
	bcount = 0;
	for (ix = 0; ix < v.v_hbuf; ix++) {

		hp = &hbuf[ix];
		dwp = (struct buf *)&dwbuf[ix];

		bcount += (hp->b_length);

		if (dwp->av_forw == dwp) {
			continue;
		}

		hmp = &hbuf[ix].b_lock;
		mutex_enter(hmp);
		bp = dwp->av_forw;

		/*
		 * Go down only on the delayed write lists.
		 */
		while (bp != dwp) {

			ASSERT(bp->b_flags & B_DELWRI);

			if ((bp->b_flags & B_DELWRI) &&
			    (ddi_get_lbolt() - bp->b_start >= autoup) &&
			    sema_tryp(&bp->b_sem)) {
				bp->b_flags |= B_ASYNC;
				hp->b_length--;
				notavail(bp);
				mutex_exit(hmp);
				if (bp->b_vp == NULL) {
					BWRITE(bp);
				} else {
					UFS_BWRITE(VTOI(bp->b_vp)->i_ufsvfs,
					    bp);
				}
				mutex_enter(hmp);
				bp = dwp->av_forw;
			} else {
				bp = bp->av_forw;
			}
		}
		mutex_exit(hmp);
	}

	/*
	 * There is no need to wake up any thread waiting on bio_mem_cv
	 * since brelse will wake them up as soon as IO is complete.
	 */
	bfreelist.b_bcount = bcount;

	if (dopageflush)
		fsflush_do_pages();

	if (!doiflush)
		goto loop;

	/*
	 * If the system was not booted to single user mode, skip the
	 * inode flushing until after fsflush_iflush_delay secs have elapsed.
	 */
	if ((boothowto & RB_SINGLE) == 0 &&
	    (ddi_get_lbolt64() / hz) < fsflush_iflush_delay)
		goto loop;

	/*
	 * Flush cached attribute information (e.g. inodes).
	 */
	if (++count >= icount) {
		count = 0;

		/*
		 * Sync back cached data.
		 */
		RLOCK_VFSSW();
		for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
			if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) {
				vfs_refvfssw(vswp);
				RUNLOCK_VFSSW();
				(void) fsop_sync_by_kind(vswp - vfssw,
				    SYNC_ATTR, kcred);
				vfs_unrefvfssw(vswp);
				RLOCK_VFSSW();
			}
		}
		RUNLOCK_VFSSW();
	}
	goto loop;
}
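
fsflush() sleeps on fsflush_cv and is kicked periodically from the clock path (the "wait for clock" annotation above). A hedged sketch of such a kicker, assuming only the names visible here plus the tune.t_fsflushr tunable; the actual clock() code may differ.

/* Hypothetical kicker, called once per second from the clock path. */
static int my_fsflushcnt;

static void
my_fsflush_second(void)
{
	if (--my_fsflushcnt <= 0) {
		my_fsflushcnt = tune.t_fsflushr;	/* seconds between runs */
		cv_signal(&fsflush_cv);
	}
}
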
Example #13
/*ARGSUSED*/
static void
kctl_wr_thread(void *arg)
{
	callb_cpr_t cprinfo;
	kmutex_t cprlock;

	mutex_init(&cprlock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cprinfo, &cprlock, callb_generic_cpr, "kmdb work");

	for (;;) {
		/*
		 * XXX what should I do here for panic?  It'll spin unless I
		 * can figure out a way to park it.  Presumably I don't want to
		 * let it exit.
		 */
		mutex_enter(&cprlock);
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		mutex_exit(&cprlock);

		sema_p(&kctl.kctl_wr_avail_sem);

		mutex_enter(&cprlock);
		CALLB_CPR_SAFE_END(&cprinfo, &cprlock);
		mutex_exit(&cprlock);

		kctl_dprintf("kctl worker thread - waking up");

		if (kmdb_kdi_get_unload_request() ||
		    kctl.kctl_wr_state != KCTL_WR_ST_RUN) {
			/*
			 * We've either got a debugger-initiated unload (if
			 * unload_request returned true), or we're stopping due
			 * to an error discovered by the driver (if
			 * kctl_wr_state is no longer KCTL_WR_ST_RUN).  Start
			 * cleaning up.
			 */

			/*
			 * The debugger has already deactivated itself, and will
			 * have dumped a bunch of stuff on the queue.  We need
			 * to process it before exiting.
			 */
			(void) kmdb_wr_driver_process(kctl_wr_process_cb,
			    KCTL_WR_PROCESS_UNLOADING);
			break;
		}

		/*
		 * A non-zero return means we've passed messages back to the
		 * debugger for processing, so we need to wake the debugger up.
		 */
		if (kctl_wr_process() > 0)
			kdi_dvec_enter();
	}

	/*
	 * NULL out the dmod search path, so we can send the current one back
	 * to the debugger.  XXX this should probably be somewhere else.
	 */
	kctl_dmod_path_reset();

	/*
	 * The debugger will send us unload notifications for each dmod that it
	 * noticed.  If, for example, the debugger is unloaded before the first
	 * start, it won't have noticed any of the dmods we loaded.  We'll need
	 * to initiate the unloads ourselves.
	 */
	kctl_dmod_unload_all();

	kctl.kctl_wr_state = KCTL_WR_ST_STOPPED;

	/*
	 * Must be last, as it concludes by setting state to INACTIVE.  The
	 * kctl data structure must not be accessed by this thread after that
	 * point.
	 */
	kctl_cleanup();

	mutex_enter(&cprlock);
	CALLB_CPR_EXIT(&cprinfo);
	mutex_destroy(&cprlock);
}
Example #14
/*
 * Main routine for the callbacks notifications thread
 */
static void
i_mac_notify_thread(void *arg)
{
	mac_impl_t	*mip = arg;
	callb_cpr_t	cprinfo;
	mac_cb_t	*mcb;
	mac_cb_info_t	*mcbi;
	mac_notify_cb_t	*mncb;

	mcbi = &mip->mi_notify_cb_info;
	CALLB_CPR_INIT(&cprinfo, mcbi->mcbi_lockp, callb_generic_cpr,
	    "i_mac_notify_thread");

	mutex_enter(mcbi->mcbi_lockp);

	for (;;) {
		uint32_t	bits;
		uint32_t	type;

		bits = mip->mi_notify_bits;
		if (bits == 0) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&mcbi->mcbi_cv, mcbi->mcbi_lockp);
			CALLB_CPR_SAFE_END(&cprinfo, mcbi->mcbi_lockp);
			continue;
		}
		mip->mi_notify_bits = 0;
		if ((bits & (1 << MAC_NNOTE)) != 0) {
			/* request to quit */
			ASSERT(mip->mi_state_flags & MIS_DISABLED);
			break;
		}

		mutex_exit(mcbi->mcbi_lockp);

		/*
		 * Log link changes on the actual link, but then do reports on
		 * synthetic state (if part of a bridge).
		 */
		if ((bits & (1 << MAC_NOTE_LOWLINK)) != 0) {
			link_state_t newstate;
			mac_handle_t mh;

			i_mac_log_link_state(mip);
			newstate = mip->mi_lowlinkstate;
			if (mip->mi_bridge_link != NULL) {
				mutex_enter(&mip->mi_bridge_lock);
				if ((mh = mip->mi_bridge_link) != NULL) {
					newstate = mac_bridge_ls_cb(mh,
					    newstate);
				}
				mutex_exit(&mip->mi_bridge_lock);
			}
			if (newstate != mip->mi_linkstate) {
				mip->mi_linkstate = newstate;
				bits |= 1 << MAC_NOTE_LINK;
			}
		}

		/*
		 * Do notification callbacks for each notification type.
		 */
		for (type = 0; type < MAC_NNOTE; type++) {
			if ((bits & (1 << type)) == 0) {
				continue;
			}

			if (mac_notify_cb_list[type] != NULL)
				(*mac_notify_cb_list[type])(mip);

			/*
			 * Walk the list of notifications.
			 */
			MAC_CALLBACK_WALKER_INC(&mip->mi_notify_cb_info);
			for (mcb = mip->mi_notify_cb_list; mcb != NULL;
			    mcb = mcb->mcb_nextp) {
				mncb = (mac_notify_cb_t *)mcb->mcb_objp;
				mncb->mncb_fn(mncb->mncb_arg, type);
			}
			MAC_CALLBACK_WALKER_DCR(&mip->mi_notify_cb_info,
			    &mip->mi_notify_cb_list);
		}

		mutex_enter(mcbi->mcbi_lockp);
	}

	mip->mi_state_flags |= MIS_NOTIFY_DONE;
	cv_broadcast(&mcbi->mcbi_cv);

	/* CALLB_CPR_EXIT drops the lock */
	CALLB_CPR_EXIT(&cprinfo);
	thread_exit();
}
/*
 * mac_soft_ring_worker
 *
 * The soft ring worker routine to process any queued packets. In the
 * normal case, the worker thread is bound to a CPU. If the soft ring
 * is dealing with TCP packets, the worker thread is bound to the same
 * CPU as the TCP squeue.
 */
static void
mac_soft_ring_worker(mac_soft_ring_t *ringp)
{
	kmutex_t *lock = &ringp->s_ring_lock;
	kcondvar_t *async = &ringp->s_ring_async;
	mac_soft_ring_set_t *srs = ringp->s_ring_set;
	callb_cpr_t cprinfo;

	CALLB_CPR_INIT(&cprinfo, lock, callb_generic_cpr, "mac_soft_ring");
	mutex_enter(lock);
start:
	for (;;) {
		while (((ringp->s_ring_first == NULL ||
		    (ringp->s_ring_state & (S_RING_BLOCK|S_RING_BLANK))) &&
		    !(ringp->s_ring_state & S_RING_PAUSE)) ||
		    (ringp->s_ring_state & S_RING_PROC)) {

			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(async, lock);
			CALLB_CPR_SAFE_END(&cprinfo, lock);
		}

		/*
		 * Either we have work to do, or we have been asked to
		 * shut down, temporarily or permanently.
		 */
		if (ringp->s_ring_state & S_RING_PAUSE)
			goto done;

		ringp->s_ring_drain_func(ringp);
	}
done:
	mutex_exit(lock);
	mutex_enter(&srs->srs_lock);
	mutex_enter(lock);

	ringp->s_ring_state |= S_RING_QUIESCE_DONE;
	if (!(ringp->s_ring_state & S_RING_CONDEMNED)) {
		srs->srs_soft_ring_quiesced_count++;
		cv_broadcast(&srs->srs_async);
		mutex_exit(&srs->srs_lock);
		while (!(ringp->s_ring_state &
		    (S_RING_RESTART | S_RING_CONDEMNED)))
			cv_wait(&ringp->s_ring_async, &ringp->s_ring_lock);
		mutex_exit(lock);
		mutex_enter(&srs->srs_lock);
		mutex_enter(lock);
		srs->srs_soft_ring_quiesced_count--;
		if (ringp->s_ring_state & S_RING_RESTART) {
			ASSERT(!(ringp->s_ring_state & S_RING_CONDEMNED));
			ringp->s_ring_state &= ~(S_RING_RESTART |
			    S_RING_QUIESCE | S_RING_QUIESCE_DONE);
			cv_broadcast(&srs->srs_async);
			mutex_exit(&srs->srs_lock);
			goto start;
		}
	}
	ASSERT(ringp->s_ring_state & S_RING_CONDEMNED);
	ringp->s_ring_state |= S_RING_CONDEMNED_DONE;
	CALLB_CPR_EXIT(&cprinfo);
	srs->srs_soft_ring_condemned_count++;
	cv_broadcast(&srs->srs_async);
	mutex_exit(&srs->srs_lock);
	thread_exit();
}
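
Finally, every example above is the body of a kernel thread; none shows its creation. Such threads are normally started with thread_create(9F). A hedged sketch, reusing the hypothetical my_worker() from the note after Example #1:

#include <sys/thread.h>
#include <sys/proc.h>		/* p0 */
#include <sys/disp.h>		/* minclsyspri */

static void
my_worker_start(void)
{
	/* System thread owned by p0, runnable immediately, default kernel pri. */
	(void) thread_create(NULL, 0, my_worker, NULL, 0, &p0, TS_RUN,
	    minclsyspri);
}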