Example no. 1
int
pmclog_close(struct pmc_owner *po)
{

	PMCDBG1(LOG,CLO,1, "po=%p", po);

	mtx_lock(&pmc_kthread_mtx);

	/*
	 * Schedule the current buffer.
	 */
	mtx_lock_spin(&po->po_mtx);
	if (po->po_curbuf)
		pmclog_schedule_io(po);
	else
		wakeup_one(po);
	mtx_unlock_spin(&po->po_mtx);

	/*
	 * Initiate shutdown: no new data queued,
	 * thread will close file on last block.
	 */
	po->po_flags |= PMC_PO_SHUTDOWN;

	mtx_unlock(&pmc_kthread_mtx);

	return (0);
}
Example no. 2
u_int32_t
ath_hal_reg_read(struct ath_hal *ah, u_int32_t reg)
{
	bus_space_tag_t tag = BUSTAG(ah);
	bus_space_handle_t h = ah->ah_sh;
	u_int32_t val;

	if (ah->ah_config.ah_serialise_reg_war)
		mtx_lock_spin(&ah_regser_mtx);
	val = bus_space_read_4(tag, h, reg);
	if (ah->ah_config.ah_serialise_reg_war)
		mtx_unlock_spin(&ah_regser_mtx);
	if (ath_hal_alq) {
		struct ale *ale = ath_hal_alq_get(ah);
		if (ale) {
			struct athregrec *r = (struct athregrec *) ale->ae_data;
			r->threadid = curthread->td_tid;
			r->op = OP_READ;
			r->reg = reg;
			r->val = val;
			alq_post(ath_hal_alq, ale);
		}
	}
	return val;
}
Example no. 3
static int
pmclog_get_buffer(struct pmc_owner *po)
{
	struct pmclog_buffer *plb;

	mtx_assert(&po->po_mtx, MA_OWNED);

	KASSERT(po->po_curbuf == NULL,
	    ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po));

	mtx_lock_spin(&pmc_bufferlist_mtx);
	if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
		TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
	mtx_unlock_spin(&pmc_bufferlist_mtx);

	PMCDBG2(LOG,GTB,1, "po=%p plb=%p", po, plb);

#ifdef	HWPMC_DEBUG
	if (plb)
		KASSERT(plb->plb_ptr == plb->plb_base &&
		    plb->plb_base < plb->plb_fence,
		    ("[pmclog,%d] po=%p buffer invariants: ptr=%p "
		    "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
		    plb->plb_base, plb->plb_fence));
#endif

	po->po_curbuf = plb;

	/* update stats */
	atomic_add_int(&pmc_stats.pm_buffer_requests, 1);
	if (plb == NULL)
		atomic_add_int(&pmc_stats.pm_buffer_requests_failed, 1);

	return (plb ? 0 : ENOMEM);
}
Example no. 4
/*
 * Everything done, now reset
 */
static void
shutdown_reset(void *junk, int howto)
{

	printf("Rebooting...\n");
	DELAY(1000000);	/* wait 1 sec for printf's to complete and be read */

	/*
	 * Acquiring smp_ipi_mtx here has a double effect:
	 * - it disables interrupts, preventing CPU0 from being preempted
	 *   by fast handlers (which would deadlock against other CPUs)
	 * - it avoids deadlocks against smp_rendezvous() or, more
	 *   generally, against threads busy-waiting with this spinlock
	 *   held while waiting for responses from threads on other CPUs
	 *   (e.g. smp_tlb_shootdown()).
	 *
	 * For the !SMP case it only needs to handle the former problem.
	 */
#ifdef SMP
	mtx_lock_spin(&smp_ipi_mtx);
#else
	spinlock_enter();
#endif

	/* cpu_boot(howto); */ /* doesn't do anything at the moment */
	cpu_reset();
	/* NOTREACHED */ /* assuming reset worked */
}
Example no. 5
void
mod_timer(struct timer_list *t, unsigned long expires)
{
	mtx_lock_spin(&t->mtx);
	callout_reset(&t->callout, expires - jiffies, run_timer, t);
	mtx_unlock_spin(&t->mtx);
}
Example no. 6
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}
Example no. 7
static int
quicc_bfe_ipend(struct scc_softc *sc)
{
	struct scc_bas *bas;
	struct scc_chan *ch;
	int c, ipend;
	uint16_t scce;

	bas = &sc->sc_bas;
	ipend = 0;
	for (c = 0; c < 4; c++) {
		ch = &sc->sc_chan[c];
		if (!ch->ch_enabled)
			continue;
		ch->ch_ipend = 0;
		mtx_lock_spin(&sc->sc_hwmtx);
		scce = quicc_read2(bas, QUICC_REG_SCC_SCCE(c));
		quicc_write2(bas, QUICC_REG_SCC_SCCE(c), ~0);
		mtx_unlock_spin(&sc->sc_hwmtx);
		if (scce & 0x0001)
			ch->ch_ipend |= SER_INT_RXREADY;
		if (scce & 0x0002)
			ch->ch_ipend |= SER_INT_TXIDLE;
		if (scce & 0x0004)
			ch->ch_ipend |= SER_INT_OVERRUN;
		if (scce & 0x0020)
			ch->ch_ipend |= SER_INT_BREAK;
		/* XXX SIGNALS */
		ipend |= ch->ch_ipend;
	}
	return (ipend);
}
Example no. 8
/*
 * Write scattered channel packet to TX bufring.
 *
 * The offset of this channel packet is written as a 64-bit value
 * immediately after the packet itself.
 */
int
vmbus_txbr_write(struct vmbus_txbr *tbr, const struct iovec iov[], int iovlen,
    boolean_t *need_sig)
{
	uint32_t old_windex, windex, total;
	uint64_t save_windex;
	int i;

	total = 0;
	for (i = 0; i < iovlen; i++)
		total += iov[i].iov_len;
	total += sizeof(save_windex);

	mtx_lock_spin(&tbr->txbr_lock);

	/*
	 * NOTE:
	 * If this write is going to make br_windex same as br_rindex,
	 * i.e. the available space for write is same as the write size,
	 * we can't do it then, since br_windex == br_rindex means that
	 * the bufring is empty.
	 */
	if (vmbus_txbr_avail(tbr) <= total) {
		mtx_unlock_spin(&tbr->txbr_lock);
		return (EAGAIN);
	}

	/* Save br_windex for later use */
	old_windex = tbr->txbr_windex;

	/*
	 * Copy the scattered channel packet to the TX bufring.
	 */
	windex = old_windex;
	for (i = 0; i < iovlen; i++) {
		windex = vmbus_txbr_copyto(tbr, windex,
		    iov[i].iov_base, iov[i].iov_len);
	}

	/*
	 * Set the offset of the current channel packet.
	 */
	save_windex = ((uint64_t)old_windex) << 32;
	windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
	    sizeof(save_windex));

	/*
	 * Update the write index _after_ the channel packet
	 * is copied.
	 */
	__compiler_membar();
	tbr->txbr_windex = windex;

	mtx_unlock_spin(&tbr->txbr_lock);

	*need_sig = vmbus_txbr_need_signal(tbr, old_windex);

	return (0);
}
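
A minimal caller sketch for the routine above, assuming a two-part packet (header plus payload). The helper name send_packet() and the notification hook my_signal_host() are illustrative assumptions; only vmbus_txbr_write() itself comes from the example.

/*
 * Hypothetical caller of vmbus_txbr_write(): gather a header and a
 * payload into an iovec, let the caller retry later on EAGAIN, and
 * notify the peer when the write reports that a signal is needed.
 */
static int
send_packet(struct vmbus_txbr *tbr, const void *hdr, size_t hlen,
    const void *data, size_t dlen)
{
	struct iovec iov[2];
	boolean_t need_sig = FALSE;
	int error;

	iov[0].iov_base = __DECONST(void *, hdr);
	iov[0].iov_len = hlen;
	iov[1].iov_base = __DECONST(void *, data);
	iov[1].iov_len = dlen;

	error = vmbus_txbr_write(tbr, iov, 2, &need_sig);
	if (error == 0 && need_sig)
		my_signal_host(tbr);	/* assumed notification hook */
	return (error);
}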
Example no. 9
static void
cyclic_add_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_handler_t *hdlr = arg->cyx_hdlr;
	cyc_time_t *when = arg->cyx_when;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t ndx, nelems;
	cyb_arg_t bar = be->cyb_arg;
	cyclic_t *cyclic;

	ASSERT(cpu->cyp_nelems < cpu->cyp_size);

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);
	nelems = cpu->cyp_nelems++;

	if (nelems == 0) {
		/*
		 * If this is the first element, we need to enable the
		 * backend on this CPU.
		 */
		be->cyb_enable(bar);
	}

	ndx = cpu->cyp_heap[nelems];
	cyclic = &cpu->cyp_cyclics[ndx];

	ASSERT(cyclic->cy_flags == CYF_FREE);
	cyclic->cy_interval = when->cyt_interval;

	if (when->cyt_when == 0) {
		/*
		 * If a start time hasn't been explicitly specified, we'll
		 * start on the next interval boundary.
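		 * For example, with a current time of 250 and an
		 * interval of 100, the expression below yields
		 * (250/100 + 1) * 100 == 300, the next boundary.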
		 */
		cyclic->cy_expire = (cyc_gethrtime() / cyclic->cy_interval + 1) *
		    cyclic->cy_interval;
	} else {
		cyclic->cy_expire = when->cyt_when;
	}

	cyclic->cy_handler = hdlr->cyh_func;
	cyclic->cy_arg = hdlr->cyh_arg;
	cyclic->cy_flags = arg->cyx_flags;

	if (cyclic_upheap(cpu, nelems)) {
		hrtime_t exp = cyclic->cy_expire;

		/*
		 * If our upheap propagated to the root, we need to
		 * reprogram the interrupt source.
		 */
		be->cyb_reprogram(bar, exp);
	}
	mtx_unlock_spin(&cpu->cyp_mtx);

	arg->cyx_ndx = ndx;
}
Example no. 10
void
writertc(int reg, u_char val)
{

	mtx_lock_spin(&atrtc_lock);
	rtcout_locked(reg, val);
	mtx_unlock_spin(&atrtc_lock);
}
Example no. 11
int
pmclog_deconfigure_log(struct pmc_owner *po)
{
	int error;
	struct pmclog_buffer *lb;

	PMCDBG(LOG,CFG,1, "de-config po=%p", po);

	if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
		return (EINVAL);

	KASSERT(po->po_sscount == 0,
	    ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
	KASSERT(po->po_file != NULL,
	    ("[pmclog,%d] po=%p no log file", __LINE__, po));

	/* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
	pmclog_stop_kthread(po);

	KASSERT(po->po_kthread == NULL,
	    ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));

	/* return all queued log buffers to the global pool */
	while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
		TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/* return the 'current' buffer to the global pool */
	if ((lb = po->po_curbuf) != NULL) {
		PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
		mtx_lock_spin(&pmc_bufferlist_mtx);
		TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
		mtx_unlock_spin(&pmc_bufferlist_mtx);
	}

	/* drop a reference to the fd */
	error = fdrop(po->po_file, curthread);
	po->po_file  = NULL;
	po->po_error = 0;

	return (error);
}
Example no. 12
/**
 * ntfs_usnjrnl_stamp - stamp the transaction log ($UsnJrnl) on an ntfs volume
 * @vol:	ntfs volume on which to stamp the transaction log
 *
 * Stamp the transaction log ($UsnJrnl) on the ntfs volume @vol and return 0
 * on success and errno on error.
 *
 * This function assumes that the transaction log has already been loaded and
 * consistency checked by a call to ntfs_vfsops.c::ntfs_usnjrnl_load().
 */
errno_t ntfs_usnjrnl_stamp(ntfs_volume *vol)
{
	ntfs_debug("Entering.");
	if (!NVolUsnJrnlStamped(vol)) {
		sle64 j_size, stamp;
		upl_t upl;
		upl_page_info_array_t pl;
		USN_HEADER *uh;
		ntfs_inode *max_ni;
		errno_t err;

		mtx_lock_spin(&vol->usnjrnl_j_ni->size_lock);
		j_size = vol->usnjrnl_j_ni->data_size;
		mtx_unlock_spin(&vol->usnjrnl_j_ni->size_lock);
		max_ni = vol->usnjrnl_max_ni;
		/*
		 * FIXME: The following if statement is always false
		 * because vnode_get() was replaced with vhold().
		 */
		vhold(max_ni->vn);
		if (0) {
			ntfs_error(vol->mp, "Failed to get vnode for "
					"$UsnJrnl/$DATA/$Max.");
			return err;
		}
		sx_slock(&max_ni->lock);
		err = ntfs_page_map(max_ni, 0, &upl, &pl, (u8**)&uh, TRUE);
		if (err) {
			ntfs_error(vol->mp, "Failed to read from "
					"$UsnJrnl/$DATA/$Max attribute.");
			vdrop(max_ni->vn);
			return err;
		}
		stamp = ntfs_current_time();
		ntfs_debug("Stamping transaction log ($UsnJrnl): old "
				"journal_id 0x%llx, old lowest_valid_usn "
				"0x%llx, new journal_id 0x%llx, new "
				"lowest_valid_usn 0x%llx.",
				(unsigned long long)
				sle64_to_cpu(uh->journal_id),
				(unsigned long long)
				sle64_to_cpu(uh->lowest_valid_usn),
				(unsigned long long)sle64_to_cpu(stamp),
				(unsigned long long)j_size);
		uh->lowest_valid_usn = cpu_to_sle64(j_size);
		uh->journal_id = stamp;
		ntfs_page_unmap(max_ni, upl, pl, TRUE);
		sx_sunlock(&max_ni->lock);
		vdrop(max_ni->vn);
		/* Set the flag so we do not have to do it again on remount. */
		NVolSetUsnJrnlStamped(vol);
		// TODO: Should we mark any times on the base inode $UsnJrnl
		// for update here?
	}
	ntfs_debug("Done.");
	return 0;
}
Example no. 13
static void
ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list)
{

	mtx_lock_spin(lock);
	STAILQ_INSERT_TAIL(list, entry, entry);
	mtx_unlock_spin(lock);
}
Example no. 14
/*
 * Append a character to a message buffer.
 */
void
msgbuf_addchar(struct msgbuf *mbp, int c)
{
	mtx_lock_spin(&mbp->msg_lock);

	msgbuf_do_addchar(mbp, &mbp->msg_wseq, c);

	mtx_unlock_spin(&mbp->msg_lock);
}
Example no. 15
static void
atrtc_start(void)
{

	mtx_lock_spin(&atrtc_lock);
	rtcout_locked(RTC_STATUSA, rtc_statusa);
	rtcout_locked(RTC_STATUSB, RTCSB_24HR);
	mtx_unlock_spin(&atrtc_lock);
}
Example no. 16
int
del_timer_sync(struct timer_list *t)
{
	mtx_lock_spin(&t->mtx);
	callout_stop(&t->callout);
	mtx_unlock_spin(&t->mtx);

	mtx_destroy(&t->mtx);
	return 0;
}
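
Read together with Example no. 5, these shims suggest the following usage sketch. The initializer timer_setup_compat() and the handler my_timeout() are assumed helpers not shown above; jiffies and HZ are taken to come from the surrounding Linux-compatibility layer.

/*
 * Sketch: arm a timer_list roughly one second out, then cancel it.
 * Note that del_timer_sync() in Example no. 16 also destroys t->mtx,
 * so the timer must not be rearmed after stop_watchdog() returns.
 */
static struct timer_list my_timer;

static void
start_watchdog(void)
{
	timer_setup_compat(&my_timer, my_timeout);	/* assumed init */
	mod_timer(&my_timer, jiffies + HZ);
}

static void
stop_watchdog(void)
{
	del_timer_sync(&my_timer);
}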
Example no. 17
static void
atrtc_disable_intr(void)
{

	rtc_statusb &= ~RTCSB_PINTR;
	mtx_lock_spin(&atrtc_lock);
	rtcout_locked(RTC_STATUSB, rtc_statusb);
	rtcin_locked(RTC_INTR);
	mtx_unlock_spin(&atrtc_lock);
}
Example no. 18
void
timer_spkr_setfreq(int freq)
{

	freq = i8254_freq / freq;
	mtx_lock_spin(&clock_lock);
	outb(TIMER_CNTR1, (freq) & 0xff);
	outb(TIMER_CNTR1, (freq) >> 8);
	mtx_unlock_spin(&clock_lock);
}
Example no. 19
static void
isa_enable_intr(int vector)
{
	int irq;

	irq = (vector - 0x800) >> 4;
	mtx_lock_spin(&icu_lock);
	isa_intr_enable(irq);
	mtx_unlock_spin(&icu_lock);
}
Example no. 20
int
rtcin(int reg)
{
	u_char val;

	mtx_lock_spin(&atrtc_lock);
	val = rtcin_locked(reg);
	mtx_unlock_spin(&atrtc_lock);
	return (val);
}
Example no. 21
static __inline void
a10_intr_eoi(struct a10_aintc_softc *sc, u_int irq)
{

	if (irq != SW_INT_IRQNO_ENMI)
		return;
	mtx_lock_spin(&sc->mtx);
	aintc_write_4(sc, SW_INT_IRQ_PENDING_REG(0),
	    (1 << SW_INT_IRQNO_ENMI));
	mtx_unlock_spin(&sc->mtx);
}
Example no. 22
/* Call an RTAS method by token */
int
rtas_call_method(cell_t token, int nargs, int nreturns, ...)
{
	vm_offset_t argsptr;
	faultbuf env, *oldfaultbuf;
	va_list ap;
	struct {
		cell_t token;
		cell_t nargs;
		cell_t nreturns;
		cell_t args_n_results[12];
	} args;
	int n, result;

	if (!rtas_exists() || nargs + nreturns > 12)
		return (-1);

	args.token = token;
	va_start(ap, nreturns);

	mtx_lock_spin(&rtas_mtx);
	rtas_bounce_offset = 0;

	args.nargs = nargs;
	args.nreturns = nreturns;

	for (n = 0; n < nargs; n++)
		args.args_n_results[n] = va_arg(ap, cell_t);

	argsptr = rtas_real_map(&args, sizeof(args));

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");
	oldfaultbuf = curthread->td_pcb->pcb_onfault;
	if (!setfault(env)) {
		__asm __volatile ("sync");
		result = rtascall(argsptr, rtas_private_data);
		__asm __volatile ("sync; isync");
	} else {
		result = RTAS_HW_ERROR;
	}
	curthread->td_pcb->pcb_onfault = oldfaultbuf;
	__asm __volatile ("sync");

	rtas_real_unmap(argsptr, &args, sizeof(args));
	mtx_unlock_spin(&rtas_mtx);

	if (result < 0) {
		va_end(ap);
		return (result);
	}

	for (n = nargs; n < nargs + nreturns; n++)
		*va_arg(ap, cell_t *) = args.args_n_results[n];
	va_end(ap);
	return (result);
}
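
A hedged usage sketch for the routine above: look up a token and make a call with one input and one output cell. rtas_token_lookup() exists alongside rtas_call_method() in FreeBSD's RTAS support, but "some-method", its argument layout, and the failure check here are placeholders rather than a real RTAS binding.

/*
 * Sketch: call an RTAS method with nargs = 1 and nreturns = 1; the
 * output cells are collected through pointers passed after the inputs.
 */
static int
query_rtas(cell_t in, cell_t *out)
{
	cell_t token;

	token = rtas_token_lookup("some-method");
	if (token == (cell_t)-1)	/* assumed "missing token" value */
		return (ENXIO);
	return (rtas_call_method(token, 1, 1, in, out) < 0 ? EIO : 0);
}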
Example no. 23
void
ath_hal_reg_write(struct ath_hal *ah, u_int32_t reg, u_int32_t val)
{
	bus_space_tag_t tag = BUSTAG(ah);
	bus_space_handle_t h = ah->ah_sh;

	if (ah->ah_config.ah_serialise_reg_war)
		mtx_lock_spin(&ah_regser_mtx);
	bus_space_write_4(tag, h, reg, val);
	if (ah->ah_config.ah_serialise_reg_war)
		mtx_unlock_spin(&ah_regser_mtx);
}
Example no. 24
void
smp_rendezvous_cpus(cpumask_t map,
	void (* setup_func)(void *), 
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int i, ncpus = 0;

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	CPU_FOREACH(i) {
		if (((1 << i) & map) != 0)
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with map=0x%x", map);

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if ((map & (1 << curcpu)) != 0)
		smp_rendezvous_action();

	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
			cpu_spinwait();

	/* release lock */
	mtx_unlock_spin(&smp_ipi_mtx);
}
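
A usage sketch, assuming the all_cpus mask from this kernel era: run one action on every CPU with no setup step. count_cpu() is a made-up handler; smp_no_rendevous_barrier (spelled as in this code base) is passed as the teardown function, which per the code above makes the caller spin until every CPU has run the action.

/*
 * Sketch: count how many CPUs answered the rendezvous.
 */
static void
count_cpu(void *arg)
{
	atomic_add_int((volatile u_int *)arg, 1);
}

static void
poke_all_cpus(void)
{
	u_int count = 0;

	smp_rendezvous_cpus(all_cpus, NULL, count_cpu,
	    smp_no_rendevous_barrier, &count);
}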
Example no. 25
static void
isa_disable_intr(int vector)
{
	int irq = (vector - 0x800) >> 4;

	mtx_lock_spin(&icu_lock);
	if (irq > 7)
		outb(IO_ICU2, 0x20 | (irq & 7));
	outb(IO_ICU1, 0x20 | (irq > 7 ? 2 : irq));

	isa_intr_disable(irq);
	mtx_unlock_spin(&icu_lock);
}
Example no. 26
/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}
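
A sketch of the second half of the deferred-release pattern described in the comment above: once back in a context where freeing is safe, drain the list that cpuset_rel_defer() populated. The name cpuset_rel_complete_all() is made up for this sketch, and cpuset_zone is assumed to be the UMA zone the sets were allocated from.

/*
 * Free every set parked on 'head' by cpuset_rel_defer().
 */
static void
cpuset_rel_complete_all(struct setlist *head)
{
	struct cpuset *set;

	while ((set = LIST_FIRST(head)) != NULL) {
		LIST_REMOVE(set, cs_link);
		uma_zfree(cpuset_zone, set);	/* assumed backing zone */
	}
}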
Example no. 27
void
atrtc_restore(void)
{

	/* Restore all of the RTC's "status" (actually, control) registers. */
	mtx_lock_spin(&atrtc_lock);
	rtcin_locked(RTC_STATUSA);	/* dummy to get rtc_reg set */
	rtcout_locked(RTC_STATUSB, RTCSB_24HR);
	rtcout_locked(RTC_STATUSA, rtc_statusa);
	rtcout_locked(RTC_STATUSB, rtc_statusb);
	rtcin_locked(RTC_INTR);
	mtx_unlock_spin(&atrtc_lock);
}
Example no. 28
u_int32_t
ath_hal_reg_read(struct ath_hal *ah, u_int32_t reg)
{
	bus_space_tag_t tag = BUSTAG(ah);
	bus_space_handle_t h = ah->ah_sh;
	u_int32_t val;

	if (ah->ah_config.ah_serialise_reg_war)
		mtx_lock_spin(&ah_regser_mtx);
	val = bus_space_read_4(tag, h, reg);
	if (ah->ah_config.ah_serialise_reg_war)
		mtx_unlock_spin(&ah_regser_mtx);
	return val;
}
Example no. 29
static void
isa_handle_fast_intr(void *arg)
{
	struct isa_intr *ii = arg;
	int irq = ii->irq;

	ii->intr(ii->arg);

	mtx_lock_spin(&icu_lock);
	if (irq > 7)
		outb(IO_ICU2, 0x20 | (irq & 7));
	outb(IO_ICU1, 0x20 | (irq > 7 ? 2 : irq));
	mtx_unlock_spin(&icu_lock);
}
Example no. 30
static void
vm_pagezero(void __unused *arg)
{
	struct rtprio rtp;
	struct thread *td;
	int pages, pri;

	td = curthread;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	pages = 0;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	idlezero_enable = idlezero_enable_default;

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
#ifndef PREEMPTION
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				mi_switch(SW_VOL, NULL);
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
#endif
		} else {
			vm_page_lock_queues();
			wakeup_needed = TRUE;
			msleep(&zero_state, &vm_page_queue_mtx, PDROP | pri,
			    "pgzero", hz * 300);
			pages = 0;
		}
	}
}