Example #1
/*
 * uvm_emap_switch: if the CPU is 'behind' the LWP in emap visibility,
 * perform a TLB flush and thus update the local view.  Its main purpose
 * is to handle kernel preemption while an emap is in use.
 *
 * => called from mi_switch() when an LWP returns after blocking or preemption.
 */
void
uvm_emap_switch(lwp_t *l)
{
	struct uvm_cpu *ucpu;
	u_int curgen, gen;

	KASSERT(kpreempt_disabled());

	/* If LWP did not use emap, then nothing to do. */
	if (__predict_true(l->l_emap_gen == UVM_EMAP_INACTIVE)) {
		return;
	}

	/*
	 * No need to synchronise if generation number of current CPU is
	 * newer than the number of this LWP.
	 *
	 * This test assumes two's complement arithmetic and allows
	 * ~2B missed updates before it will produce bad results.
	 */
	ucpu = curcpu()->ci_data.cpu_uvm;
	curgen = ucpu->emap_gen;
	gen = l->l_emap_gen;
	if (__predict_true((signed int)(curgen - gen) >= 0)) {
		return;
	}

	/*
	 * See comments in uvm_emap_consume() about memory
	 * barriers and race conditions.
	 */
	curgen = uvm_emap_gen_return();
	pmap_emap_sync(false);
	ucpu->emap_gen = curgen;
}
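
The early-return test above compares generation numbers by their signed difference, so it stays correct when the 32-bit counter wraps (as the comment notes, it tolerates roughly 2 billion missed updates). A minimal standalone sketch of the same comparison, with illustrative names that are not part of the NetBSD sources:

#include <stdbool.h>
#include <stdio.h>

/*
 * gen_is_current: true when 'have' is at least as new as 'want'.
 * Relies on two's complement interpretation of the unsigned difference,
 * so it tolerates counter wrap while the values are within ~2G steps.
 */
static bool
gen_is_current(unsigned int have, unsigned int want)
{
	return (signed int)(have - want) >= 0;
}

int
main(void)
{
	/* Ordinary case: local view is newer than the requested generation. */
	printf("%d\n", gen_is_current(10, 7));			/* prints 1 */
	/* Local view is stale and a synchronisation is needed. */
	printf("%d\n", gen_is_current(7, 10));			/* prints 0 */
	/* Wrapped counter: 2 is "newer" than 0xfffffffe here. */
	printf("%d\n", gen_is_current(2, 0xfffffffeU));		/* prints 1 */
	return 0;
}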
Example #2
/*
 * uvm_emap_consume: update the current CPU and LWP to the given generation
 * of the emap.  If the LWP migrates to a different CPU after blocking or
 * being preempted, uvm_emap_switch() will synchronise.
 *
 * => may be called from both interrupt and thread context.
 */
void
uvm_emap_consume(u_int gen)
{
	struct cpu_info *ci;
	struct uvm_cpu *ucpu;
	lwp_t *l = curlwp;
	u_int curgen;

	if (gen == UVM_EMAP_INACTIVE) {
		return;
	}

	/*
	 * No need to synchronise if generation number of current CPU is
	 * newer than the number of this LWP.
	 *
	 * This test assumes two's complement arithmetic and allows
	 * ~2B missed updates before it will produce bad results.
	 */
	KPREEMPT_DISABLE(l);
	ci = l->l_cpu;
	ucpu = ci->ci_data.cpu_uvm;
	if (__predict_true((signed int)(ucpu->emap_gen - gen) >= 0)) {
		l->l_emap_gen = ucpu->emap_gen;
		KPREEMPT_ENABLE(l);
		return;
	}

	/*
	 * Record the current generation _before_ issuing the TLB flush.
	 * No need for a memory barrier before, as reading a stale value
	 * for uvm_emap_gen is not a problem.
	 *
	 * pmap_emap_sync() must implicitly perform a full memory barrier,
	 * which prevents us from fetching a value from after the TLB flush
	 * has occurred (which would be bad).
	 *
	 * We can race with an interrupt on the current CPU updating the
	 * counter to a newer value.  This could cause us to set a stale
	 * value into ucpu->emap_gen, overwriting a newer update from the
	 * interrupt.  However, it does not matter since:
	 *  (1) Interrupts always run to completion or block.
	 *  (2) Interrupts will only ever install a newer value, and
	 *  (3) we will roll the value forward later.
	 */
	curgen = uvm_emap_gen_return();
	pmap_emap_sync(true);
	ucpu->emap_gen = curgen;
	l->l_emap_gen = curgen;
	KASSERT((signed int)(curgen - gen) >= 0);
	KPREEMPT_ENABLE(l);
}
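
uvm_emap_gen_return() and uvm_emap_update() are called in all three examples but their bodies are not shown. Judging from the callers, the first reads the current global generation and the second records a consumed generation on the current CPU; the following is only a rough sketch under those assumptions, not the actual NetBSD implementation:

/*
 * Sketch only, inferred from the callers: a global generation counter
 * (advanced elsewhere whenever emap mappings change), a reader for it,
 * and a helper recording which generation this CPU's TLB reflects.
 */
static volatile u_int uvm_emap_global_gen;	/* illustrative name */

u_int
uvm_emap_gen_return(void)
{
	/* Read, but do not advance, the global generation. */
	return uvm_emap_global_gen;
}

void
uvm_emap_update(u_int gen)
{
	/* The current CPU's TLB now reflects generation 'gen'. */
	curcpu()->ci_data.cpu_uvm->emap_gen = gen;
}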
Example #3
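/*
 * pmap_tlb_invalidate: process a TLB shootdown packet on the current CPU.
 *
 * A tp_count of (uint16_t)-1 requests a full flush: user and kernel
 * entries when PG_G is set in tp_pte, user entries only otherwise.
 * Any other count invalidates the listed pages one by one.  For a full
 * flush, the emap generation is sampled before the flush and published
 * afterwards via uvm_emap_update(), following the ordering described
 * in uvm_emap_consume() above.
 */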
static inline void
pmap_tlb_invalidate(const pmap_tlb_packet_t *tp)
{
	int i;

	/* Find out what we need to invalidate. */
	if (tp->tp_count == (uint16_t)-1) {
		u_int egen = uvm_emap_gen_return();
		if (tp->tp_pte & PG_G) {
			/* Invalidating user and kernel TLB entries. */
			tlbflushg();
		} else {
			/* Invalidating user TLB entries only. */
			tlbflush();
		}
		uvm_emap_update(egen);
	} else {
		/* Invalidating a single page or a range of pages. */
		for (i = tp->tp_count - 1; i >= 0; i--) {
			pmap_update_pg(tp->tp_va[i]);
		}
	}
}