/*
 * rw_oncpu:
 *
 *	Return true if an rwlock owner is running on a CPU in the system.
 *	If the target is waiting on the kernel big lock, then we must
 *	release it.  This is necessary to avoid deadlock.
 */
static bool
rw_oncpu(uintptr_t owner)
{
#ifdef MULTIPROCESSOR
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if ((owner & (RW_WRITE_LOCKED|RW_HAS_WAITERS)) != RW_WRITE_LOCKED) {
		return false;
	}

	/*
	 * See lwp_dtor() for why it is safe to dereference the LWP
	 * pointer.  We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)(owner & RW_THREAD);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}
#endif
	/* Not running.  It may be safe to block now. */
	return false;
}
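For context, a minimal sketch of how a caller can use rw_oncpu(): the slow-path acquire loop in kern_rwlock.c spins only while the current owner is running on a CPU and otherwise falls back to blocking. The function below is illustrative only; the rw_owner field read and the SPINLOCK_BACKOFF()/SPINLOCK_BACKOFF_MIN back-off macros are assumptions modelled on the NetBSD sources, not the exact slow path.

static void
rw_spin_sketch(krwlock_t *rw)
{
	uintptr_t owner;
	u_int count = SPINLOCK_BACKOFF_MIN;

	kpreempt_disable();			/* required by rw_oncpu() */
	owner = rw->rw_owner;
	while (rw_oncpu(owner)) {
		SPINLOCK_BACKOFF(count);	/* brief exponential back-off */
		owner = rw->rw_owner;		/* re-sample the owner word */
	}
	kpreempt_enable();
	/* Owner is not on any CPU: it may now be safe to block. */
}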
Example #2
/*
 * mutex_oncpu:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 */
static bool
mutex_oncpu(uintptr_t owner)
{
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if (!MUTEX_OWNED(owner)) {
		return false;
	}

	/*
	 * See lwp_dtor() for why it is safe to dereference the LWP
	 * pointer.  We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)MUTEX_OWNER(owner);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}

	/* Not running.  It may be safe to block now. */
	return false;
}
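The adaptive-mutex slow path uses mutex_oncpu() in the same shape. Again this is a hedged sketch rather than the literal kern_mutex.c code: the mtx_owner field name and the back-off macros are assumptions based on the NetBSD sources.

static void
mutex_spin_sketch(kmutex_t *mtx)
{
	uintptr_t owner;
	u_int count = SPINLOCK_BACKOFF_MIN;

	kpreempt_disable();			/* required by mutex_oncpu() */
	owner = mtx->mtx_owner;
	while (mutex_oncpu(owner)) {
		SPINLOCK_BACKOFF(count);	/* brief pause, then retry */
		owner = mtx->mtx_owner;		/* re-read the owner word */
	}
	kpreempt_enable();
	/* Owner is off-CPU or wants the big lock: block on the turnstile. */
}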
Example #3
File: cpu_subr.c  Project: ryo/netbsd-src
bool
cpu_kpreempt_enter(uintptr_t where, int s)
{
	KASSERT(kpreempt_disabled());

#if 0
	if (where == (intptr_t)-2) {
		KASSERT(curcpu()->ci_mtx_count == 0);
		/*
		 * We must be called via kern_intr (which already checks for
		 * IPL_NONE, so of course we can be preempted).
		 */
		return true;
	}
	/*
	 * We are called from KPREEMPT_ENABLE().  If we are at IPL_NONE,
	 * of course we can be preempted.  If we aren't, ask for a
	 * softint so that kern_intr can call kpreempt.
	 */
	if (s == IPL_NONE) {
		KASSERT(curcpu()->ci_mtx_count == 0);
		return true;
	}
	softint_trigger(SOFTINT_KPREEMPT);
#endif
	return false;
}
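A rough, hedged sketch of how the machine-independent kpreempt() path consumes this hook: when cpu_kpreempt_enter() refuses, preemption is deferred and the MD code is expected to have arranged a SOFTINT_KPREEMPT to retry. kpreempt_sketch() and the splsched()/splx() framing below are illustrative assumptions, not the literal kern_synch.c code.

static void
kpreempt_sketch(uintptr_t where)
{
	int s;

	s = splsched();
	if (!cpu_kpreempt_enter(where, s)) {
		/* Cannot preempt here; a soft interrupt will retry later. */
		splx(s);
		return;
	}
	/* ... switch away via mi_switch() ... */
	cpu_kpreempt_exit(where);
	splx(s);
}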
Example #4
File: kern_cctr.c  Project: ryo/netbsd-src
/*
 * Called once per clock tick via the PPS callback to calibrate the
 * TSC counters.  It is called only for the PRIMARY CPU; all other
 * CPUs are reached via a broadcast IPI.  The calibration interval is
 * 1 second, so we run the calibration code only every hz calls.
 */
static void
cc_calibrate(struct timecounter *tc)
{
	static int calls;
	struct cpu_info *ci;

	KASSERT(kpreempt_disabled());

	 /*
	  * XXX: for high interrupt frequency
	  * support: ++calls < hz / tc_tick
	  */
	if (++calls < hz)
		return;

	calls = 0;
	ci = curcpu();
	/* pick up reference ticks */
	cc_cal_val = cpu_counter32();

#if defined(MULTIPROCESSOR)
	cc_calibrate_mp(ci);
#endif
	cc_calibrate_cpu(ci);
}
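For context, a hedged sketch of how cc_calibrate() gets invoked: assuming the standard NetBSD timecounter(9) interface, it is wired up as the tc_poll_pps hook of a cycle-counter timecounter, so the framework calls it once per clock tick on the primary CPU. The field values and the cc_get_timecount() reference below are placeholders, not the actual kern_cctr.c definitions.

static struct timecounter cc_sketch_timecounter = {
	.tc_get_timecount = cc_get_timecount,	/* MD counter read routine */
	.tc_poll_pps	  = cc_calibrate,	/* per-tick calibration hook */
	.tc_counter_mask  = ~0u,
	.tc_frequency	  = 0,			/* filled in at attach time */
	.tc_name	  = "cc-sketch",
	.tc_quality	  = -100,		/* placeholder quality value */
};

/* tc_init(&cc_sketch_timecounter) would then register it with the framework. */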
Example #5
/*
 * uvm_emap_switch: if the CPU is 'behind' the LWP in emap visibility,
 * perform a TLB flush and thus update the local view.  Its main
 * purpose is to handle kernel preemption while emap is in use.
 *
 * => called from mi_switch(), when the LWP returns after a block or
 *    preemption.
 */
void
uvm_emap_switch(lwp_t *l)
{
	struct uvm_cpu *ucpu;
	u_int curgen, gen;

	KASSERT(kpreempt_disabled());

	/* If LWP did not use emap, then nothing to do. */
	if (__predict_true(l->l_emap_gen == UVM_EMAP_INACTIVE)) {
		return;
	}

	/*
	 * No need to synchronise if the generation number of the current
	 * CPU is newer than that of this LWP.
	 *
	 * This test assumes two's complement arithmetic and allows
	 * ~2B missed updates before it will produce bad results.
	 */
	ucpu = curcpu()->ci_data.cpu_uvm;
	curgen = ucpu->emap_gen;
	gen = l->l_emap_gen;
	if (__predict_true((signed int)(curgen - gen) >= 0)) {
		return;
	}

	/*
	 * See comments in uvm_emap_consume() about memory
	 * barriers and race conditions.
	 */
	curgen = uvm_emap_gen_return();
	pmap_emap_sync(false);
	ucpu->emap_gen = curgen;
}
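A minimal standalone illustration of the wraparound-safe generation test used above: casting the unsigned difference to a signed integer keeps the comparison correct even after the 32-bit counter wraps, as long as the two values are within roughly two billion increments of each other. The helper name is hypothetical.

#include <stdbool.h>

static bool
emap_gen_is_current(unsigned int curgen, unsigned int gen)
{
	/* True when curgen is the same as, or newer than, gen. */
	return (int)(curgen - gen) >= 0;
}

/*
 * Example: gen = 0xfffffffe just before the counter wraps and
 * curgen = 0x00000002 just after; curgen - gen == 4, so the CPU's
 * view is correctly judged newer and no flush is needed.
 */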
Example #6
File: cpu_subr.c  Project: ryo/netbsd-src
void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	struct lwp * const l = ci->ci_data.cpu_onproc;
#ifdef MULTIPROCESSOR
	struct cpu_info * const cur_ci = curcpu();
#endif

	KASSERT(kpreempt_disabled());

	ci->ci_want_resched |= flags;

	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
		/*
		 * No point doing anything, it will switch soon.
		 * Also here to prevent an assertion failure in
		 * kpreempt() due to preemption being set on a
		 * soft interrupt LWP.
		 */
		return;
	}

	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
#ifdef MULTIPROCESSOR
		/*
		 * If the other CPU is idling, it must be waiting for an
		 * interrupt.  So give it one.
		 */
		if (__predict_false(ci != cur_ci))
			cpu_send_ipi(ci, IPI_NOP);
#endif
		return;
	}

#ifdef MULTIPROCESSOR
	atomic_or_uint(&ci->ci_want_resched, flags);
#else
	ci->ci_want_resched |= flags;
#endif

	if (flags & RESCHED_KPREEMPT) {
#ifdef __HAVE_PREEMPTION
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
		if (ci == cur_ci) {
			softint_trigger(SOFTINT_KPREEMPT);
		} else {
			cpu_send_ipi(ci, IPI_KPREEMPT);
		}
#endif
		return;
	}
	l->l_md.md_astpending = 1;		/* force call to ast() */
#ifdef MULTIPROCESSOR
	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
		cpu_send_ipi(ci, IPI_AST);
	}
#endif
}
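A hypothetical caller sketch, for orientation only: the MI scheduler, having decided that the LWP running on ci should yield, requests an immediate reschedule. Only the RESCHED_IMMED flag name and the kpreempt_disabled() requirement come from the code above; resched_sketch() itself is illustrative.

static void
resched_sketch(struct cpu_info *ci)
{
	kpreempt_disable();			/* cpu_need_resched() asserts this */
	cpu_need_resched(ci, RESCHED_IMMED);	/* sets astpending, IPIs if remote */
	kpreempt_enable();
}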
Example #7
File: cpu_subr.c  Project: ryo/netbsd-src
void
cpu_need_proftick(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
	KASSERT(l->l_cpu == curcpu());

	l->l_pflag |= LP_OWEUPC;
	l->l_md.md_astpending = 1;		/* force call to ast() */
}
Example #8
File: cpu_subr.c  Project: ryo/netbsd-src
void
cpu_signotify(struct lwp *l)
{
	KASSERT(kpreempt_disabled());
#ifdef __HAVE_FAST_SOFTINTS
	KASSERT(lwp_locked(l, NULL));
#endif
	KASSERT(l->l_stat == LSONPROC || l->l_stat == LSRUN || l->l_stat == LSSTOP);

	l->l_md.md_astpending = 1; 		/* force call to ast() */
}
Example #9
void
xc_send_ipi(struct cpu_info *ci)
{
	
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	if (ci) {
		/* Unicast: remote CPU. */
		hppa_ipi_send(ci, HPPA_IPI_XCALL);
	} else {
		/* Broadcast: all, but local CPU (caller will handle it). */
		hppa_ipi_broadcast(HPPA_IPI_XCALL);
	}
}
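Finally, a hedged usage sketch of the xcall(9) layer that sits above xc_send_ipi(): xc_broadcast() queues a function for every CPU, and the framework uses this routine to send HPPA_IPI_XCALL to the remote ones. xc_flush_sketch() and its arguments are hypothetical; xc_broadcast() and xc_wait() are the standard NetBSD interfaces.

#include <sys/xcall.h>

static void
xc_flush_sketch(void *arg1, void *arg2)
{
	/* Runs once on every CPU, including the caller's. */
}

static void
run_on_all_cpus(void)
{
	uint64_t where;

	where = xc_broadcast(0, xc_flush_sketch, NULL, NULL);
	xc_wait(where);		/* wait until every CPU has run it */
}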