Example No. 1
unsigned long
gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	unsigned long frame;
	uint16_t      flags;

	/*
	 * If a transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
		if (synch_cmpxchg(&shared[ref].flags, flags, 0) == flags)
			return (0);
		cpu_spinwait();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = shared[ref].flags;
		cpu_spinwait();
	}

	/* Read the frame number /after/ reading completion status. */
	rmb();
	frame = shared[ref].frame;
	KASSERT(frame != 0, ("grant table inconsistent"));

	return (frame);
}
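
For context on how this primitive is consumed: in the FreeBSD/Xen grant-table code the _ref variant is normally wrapped by a helper that also returns the grant reference to the free list. A minimal sketch of such a wrapper follows; the put_free_entry() helper name is an assumption standing in for whatever the real allocator uses.

/*
 * Sketch of a caller-side wrapper (assumed helper name put_free_entry()):
 * reclaim or wait out the transfer, then recycle the grant reference.
 */
unsigned long
gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame;

	frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return (frame);
}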
Example No. 2
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_VAR();
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        for (;;)
        {
            uint32_t fIntSaved = ASMIntDisableFlags();
            critical_enter();

            int c = 50;
            for (;;)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
                    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
                    pThis->fIntSaved = fIntSaved;
                    return;
                }
                if (--c <= 0)
                    break;
                cpu_spinwait();
            }

            /* Enable interrupts while we sleep. */
            ASMSetFlags(fIntSaved);
            critical_exit();
            DELAY(1);
        }
    }
    else
    {
        for (;;)
        {
            critical_enter();

            int c = 50;
            for (;;)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
                    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
                    return;
                }
                if (--c <= 0)
                    break;
                cpu_spinwait();
            }

            critical_exit();
            DELAY(1);
        }
    }
}
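
The acquire path above has a matching release that must undo the same steps: clear fLocked, restore the saved interrupt flags for interrupt-safe spinlocks, and leave the FreeBSD critical section. The sketch below is an approximation built from the fields used above and the standard IPRT atomic store ASMAtomicWriteU32, not the verbatim VirtualBox source.

/* Approximate sketch of the matching release path. */
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
        ASMAtomicWriteU32(&pThis->fLocked, 0);
        ASMSetFlags(fIntSaved);     /* re-enable interrupts */
    }
    else
        ASMAtomicWriteU32(&pThis->fLocked, 0);
    critical_exit();                /* leave the critical section */
}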
Example No. 3
/*
 * When called, the executing CPU will send an IPI to all other CPUs
 *  requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

	stopping_cpu = NOCPU;
	return (1);
}
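
generic_stop_cpus() is static; external callers reach it through thin wrappers that pick the IPI type. A sketch of those wrappers, reproduced from memory of subr_smp.c (treat the exact bodies as an assumption):

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}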
Example No. 4
void
execute_the_co_test(struct callout_run *rn)
{
	int i, ret, cpu;
	uint32_t tk_s, tk_e, tk_d;

	mtx_lock(&rn->lock);
	rn->callout_waiting = 0;
	for (i = 0; i < rn->co_number_callouts; i++) {
		if (rn->co_test == 1) {
			/* Start all on spread-out CPUs. */
			cpu = i % mp_ncpus;
			callout_reset_sbt_on(&rn->co_array[i], 3, 0,
			    test_callout, rn, cpu, 0);
		} else {
			/* Start all on the same CPU. */
			callout_reset_sbt_on(&rn->co_array[i], 3, 0,
			    test_callout, rn, rn->index, 0);
		}
	}
	tk_s = ticks;
	while (rn->callout_waiting != rn->co_number_callouts) {
		cpu_spinwait();
		tk_e = ticks;
		tk_d = tk_e - tk_s;
		if (tk_d > 100) {
			break;
		}
	}
	/* OK everyone is waiting and we have the lock */
	for (i = 0; i < rn->co_number_callouts; i++) {
		ret = callout_async_drain(&rn->co_array[i], drainit);
		if (ret) {
			rn->cnt_one++;
		} else {
			rn->cnt_zero++;
		}
	}
	rn->callout_waiting -= rn->cnt_one;
	mtx_unlock(&rn->lock);
	/* Now wait until all are done */
	tk_s = ticks;
	while (rn->callout_waiting > 0) {
		cpu_spinwait();
		tk_e = ticks;
		tk_d = tk_e - tk_s;
		if (tk_d > 100) {
			break;
		}
	}
	co_saydone((void *)rn);
}
Example No. 5
/*
 * When called, the executing CPU will send an IPI to all other CPUs
 *  requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to suspend.
 *  - Waits for each to suspend.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
int
suspend_cpus(cpumask_t map)
{
	int i;

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "suspend_cpus(%x)", map);

	/* send the suspend IPI to all CPUs in map */
	ipi_selected(map, IPI_SUSPEND);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout suspending cpus\n");
			break;
		}
#endif
	}

	return (1);
}
Example No. 6
/*
 * When called, the executing CPU will send an IPI to all other CPUs
 *  requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
static int
generic_stop_cpus(cpumask_t map, u_int type)
{
	int i;

	KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return 0;

	CTR2(KTR_SMP, "stop_cpus(%x) with %u type", map, type);

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout stopping cpus\n");
			break;
		}
#endif
	}

	return 1;
}
Example No. 7
static void
dmar_qi_ensure(struct dmar_unit *unit, int descr_count)
{
	uint32_t head;
	int bytes;

	DMAR_ASSERT_LOCKED(unit);
	bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT;
	for (;;) {
		if (bytes <= unit->inv_queue_avail)
			break;
		/* refill */
		head = dmar_read4(unit, DMAR_IQH_REG);
		head &= DMAR_IQH_MASK;
		unit->inv_queue_avail = head - unit->inv_queue_tail -
		    DMAR_IQ_DESCR_SZ;
		if (head <= unit->inv_queue_tail)
			unit->inv_queue_avail += unit->inv_queue_size;
		if (bytes <= unit->inv_queue_avail)
			break;

		/*
		 * No space in the queue, do a busy wait.  Hardware must
		 * make progress.  But first advance the tail to
		 * inform the descriptor streamer about entries we
		 * might have already filled, otherwise they could
		 * clog the whole queue.
		 */
		dmar_qi_advance_tail(unit);
		unit->inv_queue_full++;
		cpu_spinwait();
	}
	unit->inv_queue_avail -= bytes;
}
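
The free-space computation above is easy to misread because the head pointer may have wrapped around behind the tail. A worked illustration with hypothetical numbers (a 4 KB queue and 16-byte descriptors, so DMAR_IQ_DESCR_SZ == 16): with inv_queue_tail == 3968 and a wrapped head == 128, the first expression gives 128 - 3968 - 16 = -3856; since head <= tail, adding the queue size of 4096 leaves 240 bytes, i.e. room for 15 more descriptors. The extra DMAR_IQ_DESCR_SZ subtracted presumably keeps one slot permanently unused so that head == tail always means "empty" rather than "full".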
Example No. 8
static void
dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct dmar_qi_genseq *pseq,
    bool emit_wait)
{
	struct dmar_qi_genseq gsec;
	uint32_t seq;

	KASSERT(pseq != NULL, ("wait descriptor with no place for seq"));
	DMAR_ASSERT_LOCKED(unit);
	if (unit->inv_waitd_seq == 0xffffffff) {
		gsec.gen = unit->inv_waitd_gen;
		gsec.seq = unit->inv_waitd_seq;
		dmar_qi_ensure(unit, 1);
		dmar_qi_emit_wait_descr(unit, gsec.seq, false, true, false);
		dmar_qi_advance_tail(unit);
		while (!dmar_qi_seq_processed(unit, &gsec))
			cpu_spinwait();
		unit->inv_waitd_gen++;
		unit->inv_waitd_seq = 1;
	}
	seq = unit->inv_waitd_seq++;
	pseq->gen = unit->inv_waitd_gen;
	pseq->seq = seq;
	if (emit_wait) {
		dmar_qi_ensure(unit, 1);
		dmar_qi_emit_wait_descr(unit, seq, true, true, false);
	}
}
Example No. 9
/*
 * Set the time of day clock based on the value of the struct timespec arg.
 * Return 0 on success, an error number otherwise.
 */
static int
at91_rtc_settime(device_t dev, struct timespec *ts)
{
	struct at91_rtc_softc *sc;
	struct clocktime ct;
	int rv;

	sc = device_get_softc(dev);
	clock_ts_to_ct(ts, &ct);

	/*
	 * Can't set the clock unless a second has elapsed since we last did so.
	 */
	while ((RD4(sc, RTC_SR) & RTC_SR_SECEV) == 0)
		cpu_spinwait();

	/*
	 * Stop the clocks for an update; wait until hardware is ready.
	 * Clear the update-ready status after it gets asserted (the manual says
	 * to do this before updating the value registers).
	 */
	WR4(sc, RTC_CR, RTC_CR_UPDCAL | RTC_CR_UPDTIM);
	while ((RD4(sc, RTC_SR) & RTC_SR_ACKUPD) == 0)
		cpu_spinwait();
	WR4(sc, RTC_SCCR, RTC_SR_ACKUPD);

	/*
	 * Set the values in the hardware, then check whether the hardware was
	 * happy with them so we can return the correct status.
	 */
	WR4(sc, RTC_TIMR, RTC_TIMR_MK(ct.hour, ct.min, ct.sec));
	WR4(sc, RTC_CALR, RTC_CALR_MK(ct.year, ct.mon, ct.day, ct.dow+1));

	if (RD4(sc, RTC_VER) & (RTC_VER_NVTIM | RTC_VER_NVCAL))
		rv = EINVAL;
	else
		rv = 0;

	/*
	 * Restart the clocks (turn off the update bits).
	 * Clear the second-event bit (because the manual says to).
	 */
	WR4(sc, RTC_CR, RD4(sc, RTC_CR) & ~(RTC_CR_UPDCAL | RTC_CR_UPDTIM));
	WR4(sc, RTC_SCCR, RTC_SR_SECEV);

	return (rv);
}
Example No. 10
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		cpu_spinwait();

	return (cookie);
}
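
A hedged usage sketch: virtqueue_poll() is what lets a driver exchange a request and response synchronously, for example during attach before its interrupt handler is wired up. The function and buffer names below are hypothetical; only the enqueue / notify / poll sequence is taken from the virtio API.

/* Hypothetical synchronous exchange on a virtqueue. */
static int
example_sync_request(struct virtqueue *vq, struct sglist *sg, void *req)
{
	uint32_t len;
	void *cookie;
	int error;

	/*
	 * 'sg' is assumed to already describe one readable (request) and
	 * one writable (response) segment of 'req'.
	 */
	error = virtqueue_enqueue(vq, req, sg, 1, 1);
	if (error != 0)
		return (error);

	virtqueue_notify(vq);			/* kick the host */
	cookie = virtqueue_poll(vq, &len);	/* busy-wait for completion */
	KASSERT(cookie == req, ("unexpected virtqueue cookie"));
	return (0);
}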
Example No. 11
static int
dmar_disable_qi(struct dmar_unit *unit)
{

	DMAR_ASSERT_LOCKED(unit);
	unit->hw_gcmd &= ~DMAR_GCMD_QIE;
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
	/* XXXKIB should have a timeout */
	while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) != 0)
		cpu_spinwait();
	return (0);
}
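
For symmetry, the enable side of the same register dance looks roughly like the sketch below (modeled on dmar_enable_qi() in the same driver, written from memory): set DMAR_GCMD_QIE and spin until the status register reports the queued-invalidation engine is on.

static int
dmar_enable_qi(struct dmar_unit *unit)
{

	DMAR_ASSERT_LOCKED(unit);
	unit->hw_gcmd |= DMAR_GCMD_QIE;
	dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
	/* Same caveat as the disable path: a timeout would be prudent. */
	while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) == 0)
		cpu_spinwait();
	return (0);
}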
Example No. 12
void
delay_boot(int usec)
{
	u_long end;

	if (usec < 0)
		return;

	end = rd(tick) + (u_long)usec * clock_boot / 1000000;
	while (rd(tick) < end)
		cpu_spinwait();
}
Example No. 13
void
smp_rendezvous_cpus(cpumask_t map,
	void (* setup_func)(void *), 
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int i, ncpus = 0;

	if (!smp_started) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	CPU_FOREACH(i) {
		if (((1 << i) & map) != 0)
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with map=0x%x", map);

	/* obtain rendezvous lock */
	mtx_lock_spin(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/* signal other processors, which will enter the IPI with interrupts off */
	ipi_selected(map & ~(1 << curcpu), IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if ((map & (1 << curcpu)) != 0)
		smp_rendezvous_action();

	if (teardown_func == smp_no_rendevous_barrier)
		while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
			cpu_spinwait();

	/* release lock */
	mtx_unlock_spin(&smp_ipi_mtx);
}
Example No. 14
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock. Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime);
	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
Example No. 15
/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function 
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	void* local_func_arg = smp_rv_func_arg;
	void (*local_setup_func)(void*)   = smp_rv_setup_func;
	void (*local_action_func)(void*)   = smp_rv_action_func;
	void (*local_teardown_func)(void*) = smp_rv_teardown_func;

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* setup function */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (smp_rv_setup_func != NULL)
			smp_rv_setup_func(smp_rv_func_arg);

		/* spin on entry rendezvous */
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	/* action function */
	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[2], 1);
	if (local_teardown_func == smp_no_rendevous_barrier)
		return;
	while (smp_rv_waiters[2] < smp_rv_ncpus)
		cpu_spinwait();

	/* teardown function */
	if (local_teardown_func != NULL)
		local_teardown_func(local_func_arg);
}
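
Callers never invoke smp_rendezvous_action() directly; they go through smp_rendezvous() (or smp_rendezvous_cpus(), shown in Example No. 13), which fills in the smp_rv_* state and sends the IPI. A small illustrative caller, with a made-up action function:

/* Made-up action: runs on every CPU, with interrupts disabled. */
static void
example_rendezvous_action(void *arg)
{
	u_int *counter = arg;

	atomic_add_int(counter, 1);
}

static void
example_run_on_all_cpus(void)
{
	static u_int counter;

	/* NULL setup/teardown: each CPU just runs the action once. */
	smp_rendezvous(NULL, example_rendezvous_action, NULL, &counter);
}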
Example No. 16
/*
 * Take a local copy of a policy.
 *
 * The destination policy isn't write-barriered; this is used
 * for doing local copies into something that isn't shared.
 */
void
vm_domain_policy_localcopy(struct vm_domain_policy *dst,
    const struct vm_domain_policy *src)
{
	seq_t seq;

	for (;;) {
		seq = seq_read(&src->seq);
		*dst = *src;
		if (seq_consistent(&src->seq, seq))
			return;
		cpu_spinwait();
	}
}
Example No. 17
static void
dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq)
{

	DMAR_ASSERT_LOCKED(unit);
	unit->inv_seq_waiters++;
	while (!dmar_qi_seq_processed(unit, gseq)) {
		if (cold) {
			cpu_spinwait();
		} else {
			msleep(&unit->inv_seq_waiters, &unit->lock, 0,
			    "dmarse", hz);
		}
	}
	unit->inv_seq_waiters--;
}
Example No. 18
void
vm_domain_iterator_set_policy(struct vm_domain_iterator *vi,
    const struct vm_domain_policy *vt)
{
	seq_t seq;
	struct vm_domain_policy vt_lcl;

	for (;;) {
		seq = seq_read(&vt->seq);
		vt_lcl = *vt;
		if (seq_consistent(&vt->seq, seq)) {
			_vm_domain_iterator_set_policy(vi, &vt_lcl);
			return;
		}
		cpu_spinwait();
	}
}
Example No. 19
static void
delay_tsc(int n)
{
	uint64_t end, now;

	/*
	 * Pin the current thread to ensure correct behavior if the TSCs
	 * on different CPUs are not in sync.
	 */
	sched_pin();
	now = rdtsc();
	end = now + tsc_freq * n / 1000000;
	do {
		cpu_spinwait();
		now = rdtsc();
	} while (now < end);
	sched_unpin();
}
Example No. 20
/*
 * Called by a CPU to restart stopped CPUs. 
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/* wait for each to clear its bit */
	while ((stopped_cpus & map) != 0)
		cpu_spinwait();

	return 1;
}
Example No. 21
/*
 * Take a write-barrier copy of a policy.
 *
 * The destination policy is write-barriered; this is used
 * for doing copies into policies that may be read by other
 * threads.
 */
void
vm_domain_policy_copy(struct vm_domain_policy *dst,
    const struct vm_domain_policy *src)
{
	seq_t seq;
	struct vm_domain_policy d;

	for (;;) {
		seq = seq_read(&src->seq);
		d = *src;
		if (seq_consistent(&src->seq, seq)) {
			seq_write_begin(&dst->seq);
			dst->p.domain = d.p.domain;
			dst->p.policy = d.p.policy;
			seq_write_end(&dst->seq);
			return;
		}
		cpu_spinwait();
	}
}
Example No. 22
void
delay_tick(int usec)
{
	u_long end;

	if (usec < 0)
		return;

	/*
	 * We avoid being migrated to another CPU with a possibly
	 * unsynchronized TICK timer while spinning.
	 */
	sched_pin();

	end = rd(tick) + (u_long)usec * PCPU_GET(clock) / 1000000;
	while (rd(tick) < end)
		cpu_spinwait();

	sched_unpin();
}
Example No. 23
/*
 * Called by a CPU to restart stopped CPUs. 
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpuset_t map)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(&stopped_cpus, &map))
		cpu_spinwait();

	return 1;
}
Example No. 24
/*
 * Reconfigure specified timer.
 * For per-CPU timers, use an IPI to make the other CPUs reconfigure.
 */
static void
configtimer(int i)
{
#ifdef SMP
	tc *conf;
	int cpu;

	critical_enter();
#endif
	/* Start/stop global timer or per-CPU timer of this CPU. */
	if (i == 0 ? timer1hz : timer2hz)
		et_start(timer[i], NULL, &timerperiod[i]);
	else
		et_stop(timer[i]);
#ifdef SMP
	if ((timer[i]->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		conf = DPCPU_ID_PTR(cpu, configtimer);
		atomic_store_rel_int(*conf + i, (cpu == curcpu) ? 0 : 1);
	}
	/* Send reconfigure IPI. */
	ipi_all_but_self(i == 0 ? IPI_HARDCLOCK : IPI_STATCLOCK);
	/* Wait for the reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		conf = DPCPU_ID_PTR(cpu, configtimer);
		if (atomic_load_acq_int(*conf + i))
			goto restart;
	}
	critical_exit();
#endif
}
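
The acquire-load loop above terminates only because each target CPU clears its slot of the per-CPU configtimer flag with a release store after reprogramming its own event timer. The handler sketch below is hypothetical (the real receiving code lives elsewhere in kern_clocksource.c); it only illustrates that half of the handshake.

/* Hypothetical receiver-side half of the flag handshake. */
static void
example_reconfigure_ipi_handler(int i)
{
	tc *conf;

	conf = DPCPU_PTR(configtimer);
	if (atomic_load_acq_int(*conf + i) == 0)
		return;
	/* Reprogram this CPU's private event timer. */
	if (i == 0 ? timer1hz : timer2hz)
		et_start(timer[i], NULL, &timerperiod[i]);
	else
		et_stop(timer[i]);
	/* Release store pairs with the initiator's acquire loads. */
	atomic_store_rel_int(*conf + i, 0);
}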
Example No. 25
static int
delay_tc(int n)
{
	struct timecounter *tc;
	timecounter_get_t *func;
	uint64_t end, freq, now;
	u_int last, mask, u;

	/*
	 * Only use the TSC if it is P-state invariant.  If the TSC is
	 * not P-state invariant and the CPU is not running at the
	 * "full" P-state, then the TSC will increment at some rate
	 * less than tsc_freq and delay_tsc() will wait too long.
	 */
	if (tsc_is_invariant && tsc_freq != 0) {
		delay_tsc(n);
		return (1);
	}
	tc = timecounter;
	if (tc->tc_quality <= 0)
		return (0);
	func = tc->tc_get_timecount;
	mask = tc->tc_counter_mask;
	freq = tc->tc_frequency;
	now = 0;
	end = freq * n / 1000000;
	last = func(tc) & mask;
	do {
		cpu_spinwait();
		u = func(tc) & mask;
		if (u < last)
			now += mask - last + u + 1;
		else
			now += u - last;
		last = u;
	} while (now < end);
	return (1);
}
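
The accumulation of now handles timecounters that wrap at tc_counter_mask. A worked illustration with a hypothetical 16-bit counter (mask == 0xffff): if last == 0xfff0 and the new reading u == 0x0010, then u < last, and the code adds mask - last + u + 1 = 0x000f + 0x0010 + 1 = 32 ticks, which is exactly the forward distance traveled through the wrap; without the wrap branch, the unsigned subtraction u - last would produce a huge bogus delta and end the delay immediately.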
Example No. 26
static int
delay_tc(int n)
{
	struct timecounter *tc;
	timecounter_get_t *func;
	uint64_t end, freq, now;
	u_int last, mask, u;

	tc = timecounter;
	freq = atomic_load_acq_64(&tsc_freq);
	if (tsc_is_invariant && freq != 0) {
		func = get_tsc;
		mask = ~0u;
	} else {
		if (tc->tc_quality <= 0)
			return (0);
		func = tc->tc_get_timecount;
		mask = tc->tc_counter_mask;
		freq = tc->tc_frequency;
	}
	now = 0;
	end = freq * n / 1000000;
	if (func == get_tsc)
		sched_pin();
	last = func(tc) & mask;
	do {
		cpu_spinwait();
		u = func(tc) & mask;
		if (u < last)
			now += mask - last + u + 1;
		else
			now += u - last;
		last = u;
	} while (now < end);
	if (func == get_tsc)
		sched_unpin();
	return (1);
}
Example No. 27
static void
mpc85xx_jog_set_int(void *arg)
{
	struct jog_rv_args *args = arg;
	uint32_t reg;

	if (PCPU_GET(cpuid) == args->cpu) {
		reg = ccsr_read4(GUTS_PMJCR);
		reg &= ~PMJCR_CORE_MULT(PMJCR_RATIO_M, args->cpu);
		reg |= PMJCR_CORE_MULT(args->mult, args->cpu);
		if (args->slow)
			reg &= ~(1 << (12 + args->cpu));
		else
			reg |= (1 << (12 + args->cpu));

		ccsr_write4(GUTS_PMJCR, reg);

		reg = ccsr_read4(GUTS_POWMGTCSR);
		reg |= POWMGTCSR_JOG | POWMGTCSR_INT_MASK;
		ccsr_write4(GUTS_POWMGTCSR, reg);

		/* Wait for completion */
		do {
			DELAY(100);
			reg = ccsr_read4(GUTS_POWMGTCSR);
		} while (reg & POWMGTCSR_JOG);

		reg = ccsr_read4(GUTS_POWMGTCSR);
		ccsr_write4(GUTS_POWMGTCSR, reg & ~POWMGTCSR_INT_MASK);
		ccsr_read4(GUTS_POWMGTCSR);

		args->inprogress = 0;
	} else {
		while (args->inprogress)
			cpu_spinwait();
	}
}
Example No. 28
/*
 * Called by a CPU to restart stopped CPUs. 
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(cpus, &map))
		cpu_spinwait();

	return 1;
}
Example No. 29
/*
 * Enqueue n items and maybe drain the ring for some time.
 *
 * Returns an errno.
 */
int
mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i;

	MPASS(items != NULL);
	MPASS(n > 0);

	/*
	 * Reserve room for the new items.  Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	for (;;) {
		os.state = r->state;
		if (n >= space_available(r, os)) {
			counter_u64_add(r->drops, n);
			MPASS(os.flags != IDLE);
			if (os.flags == STALLED)
				mp_ring_check_drainage(r, 0);
			return (ENOBUFS);
		}
		ns.state = os.state;
		ns.pidx_head = increment_idx(r, os.pidx_head, n);
		critical_enter();
		if (atomic_cmpset_64(&r->state, os.state, ns.state))
			break;
		critical_exit();
		cpu_spinwait();
	}
	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time.  It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = r->state;
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail.  The release style atomic guarantees
	 * that the items are visible to any thread that sees the updated pidx.
	 */
	do {
		os.state = ns.state = r->state;
		ns.pidx_tail = pidx_stop;
		ns.flags = BUSY;
	} while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
	critical_exit();
	counter_u64_add(r->enqueues, n);

	/*
	 * Turn into a consumer if some other thread isn't active as a consumer
	 * already.
	 */
	if (os.flags != BUSY)
		drain_ring(r, ns, os.flags, budget);

	return (0);
}
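
The code above manipulates the ring's producer/consumer state as a single 64-bit word so that it can be updated with one compare-and-swap. The layout implied by the field accesses (pidx_head, pidx_tail, cidx, flags) is something like the union below; this is a sketch inferred from this function, not necessarily the exact definition used by mp_ring.

/*
 * Sketch of the packed ring state: four 16-bit fields overlaid on one
 * 64-bit word, so a single atomic_cmpset_64() can move pidx_head
 * (reservation) or pidx_tail (publication) without a lock.
 */
union ring_state {
	struct {
		uint16_t pidx_head;	/* producer index: next reservation */
		uint16_t pidx_tail;	/* producer index: published items */
		uint16_t cidx;		/* consumer index */
		uint16_t flags;		/* IDLE, BUSY, STALLED, ... */
	};
	uint64_t state;
};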
Example No. 30
void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_rel_spin_lock(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}