Code example #1
void
tlb_context_demap(struct pmap *pm)
{
	void *cookie;
	register_t s;

	/*
	 * It is important that we are not interrupted or preempted while
	 * doing the IPIs. The interrupted CPU may hold locks, and since
	 * it will wait for the CPU that sent the IPI, this can lead
	 * to a deadlock when an interrupt comes in on that CPU and its
	 * handler tries to grab one of those locks. This will only happen for
	 * spin locks, but these IPI types are delivered even if normal
	 * interrupts are disabled, so the lock critical section will not
	 * protect the target processor from entering the IPI handler with
	 * the lock held.
	 */
	PMAP_STATS_INC(tlb_ncontext_demap);
	cookie = ipi_tlb_context_demap(pm);
	if (pm->pm_active & PCPU_GET(cpumask)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_context_demap: inactive pmap?"));
		s = intr_disable();
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
		intr_restore(s);
	}
	ipi_wait(cookie);
}
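
The cookie returned by ipi_tlb_context_demap() and consumed by ipi_wait() implements a rendezvous with the other CPUs. Below is a minimal sketch of the idea, assuming the cookie points at a mask of CPUs that still have to run their demap handler, with each target clearing its own bit when done; the real mp_machdep.c implementation differs in detail:

void
ipi_wait_sketch(void *cookie)
{
	volatile u_int *mask;

	/* A NULL cookie means no remote CPUs had to be notified. */
	if ((mask = cookie) != NULL) {
		/* Spin until every target CPU has acknowledged. */
		while (*mask != 0)
			;
	}
}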
Code example #2
void
tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	u_long flags;
	void *cookie;
	register_t s;

	PMAP_STATS_INC(tlb_npage_demap);
	cookie = ipi_tlb_page_demap(pm, va);
	if (pm->pm_active & PCPU_GET(cpumask)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_page_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		s = intr_disable();
		stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
		intr_restore(s);
	}
	ipi_wait(cookie);
}
Code example #3
File: tlb.c Project: MarginC/kame
void
tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	u_long flags;
	void *cookie;
	u_long s;

	critical_enter();
	cookie = ipi_tlb_page_demap(pm, va);
	if (pm->pm_active & PCPU_GET(cpumask)) {
		KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
		    ("tlb_page_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;
	
		s = intr_disable();
		stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
		membar(Sync);
		intr_restore(s);
	}
	ipi_wait(cookie);
	critical_exit();
}
Code example #4
/*
 * Flush all lines from the level 1 caches.
 */
void
cheetah_cache_flush(void)
{
	u_long addr, lsu;
	register_t s;

	s = intr_disable();
	for (addr = 0; addr < PCPU_GET(cache.dc_size);
	    addr += PCPU_GET(cache.dc_linesize))
		/*
		 * Note that US-IV+ CPUs additionally require a membar #Sync
		 * before a load or store to ASI_DCACHE_TAG.
		 */
		__asm __volatile(
		    "membar #Sync;"
		    "stxa %%g0, [%0] %1;"
		    "membar #Sync"
		    : : "r" (addr), "n" (ASI_DCACHE_TAG));

	/* The I$ must be disabled when flushing it, so ensure it's off. */
	lsu = ldxa(0, ASI_LSU_CTL_REG);
	stxa(0, ASI_LSU_CTL_REG, lsu & ~(LSU_IC));
	flush(KERNBASE);
	for (addr = CHEETAH_ICACHE_TAG_LOWER;
	    addr < PCPU_GET(cache.ic_size) * 2;
	    addr += PCPU_GET(cache.ic_linesize) * 2)
		__asm __volatile(
		    "stxa %%g0, [%0] %1;"
		    "membar #Sync"
		    : : "r" (addr), "n" (ASI_ICACHE_TAG));
	stxa(0, ASI_LSU_CTL_REG, lsu);
	flush(KERNBASE);
	intr_restore(s);
}
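
All of these examples rest on a handful of ASI access primitives. The following is a sketch of how stxa(), ldxa(), flush() and membar() might be implemented with GCC-style inline assembly; the actual sparc64 machine headers may manage the %asi register differently:

static __inline void
stxa(u_long va, u_long asi, u_long val)
{

	/* Store the 64-bit val to va in the given alternate address space. */
	__asm __volatile("wr %2, 0, %%asi; stxa %0, [%1] %%asi"
	    : : "r" (val), "r" (va), "r" (asi));
}

static __inline u_long
ldxa(u_long va, u_long asi)
{
	u_long val;

	/* Load a 64-bit value from va in the given alternate address space. */
	__asm __volatile("wr %2, 0, %%asi; ldxa [%1] %%asi, %0"
	    : "=r" (val) : "r" (va), "r" (asi));
	return (val);
}

static __inline void
flush(u_long va)
{

	/* Synchronize the instruction pipeline with earlier stores. */
	__asm __volatile("flush %0" : : "r" (va));
}

/* membar(Sync) would expand to "membar #Sync". */
#define	membar(mtype)	__asm __volatile("membar #" #mtype : : : "memory")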
Code example #5
File: tlb.c Project: dcui/FreeBSD-9.3_kernel
void
tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	vm_offset_t va;
	void *cookie;
	u_long flags;
	register_t s;

	PMAP_STATS_INC(tlb_nrange_demap);
	cookie = ipi_tlb_range_demap(pm, start, end);
	s = intr_disable();
	if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_range_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		for (va = start; va < end; va += PAGE_SIZE) {
			stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
			stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
			flush(KERNBASE);
		}
	}
	intr_restore(s);
	ipi_wait(cookie);
}
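
For large ranges it can be cheaper to invalidate the whole context in one operation than to demap page by page. A hypothetical caller illustrating that trade-off; the threshold name and value below are illustrative, not taken from the source:

#define	DEMAP_THRESH	(64 * PAGE_SIZE)	/* illustrative cutoff */

static void
pmap_invalidate_range_sketch(struct pmap *pm, vm_offset_t start,
    vm_offset_t end)
{

	if (end - start > DEMAP_THRESH)
		tlb_context_demap(pm);		 /* one demap-all (example #1) */
	else
		tlb_range_demap(pm, start, end); /* per-page demaps */
}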
Code example #6
File: ipifuncs.c Project: ryo/netbsd-src
/*
 * Send an interprocessor interrupt - sun4u.
 */
void
sparc64_send_ipi_sun4u(int upaid, ipifunc_t func, uint64_t arg1, uint64_t arg2)
{
	int i, ik, shift = 0;
	uint64_t intr_func;

	KASSERT(upaid != curcpu()->ci_cpuid);

	/*
	 * UltraSPARC-IIIi CPUs select the BUSY/NACK pair based on the
	 * lower two bits of the ITID.
	 */
	if (CPU_IS_USIIIi())
		shift = (upaid & 0x3) * 2;

	if (ldxa(0, ASI_IDSR) & (IDSR_BUSY << shift))
		panic("recursive IPI?");

	intr_func = (uint64_t)(u_long)func;

	/* Schedule an interrupt. */
	for (i = 0; i < 10000; i++) {
		int s = intr_disable();

		stxa(IDDR_0H, ASI_INTERRUPT_DISPATCH, intr_func);
		stxa(IDDR_1H, ASI_INTERRUPT_DISPATCH, arg1);
		stxa(IDDR_2H, ASI_INTERRUPT_DISPATCH, arg2);
		stxa(IDCR(upaid), ASI_INTERRUPT_DISPATCH, 0);
		membar_Sync();
		/* Workaround for SpitFire erratum #54, from FreeBSD */
		if (CPU_IS_SPITFIRE()) {
			(void)ldxa(P_DCR_0, ASI_INTERRUPT_RECEIVE_DATA);
			membar_Sync();
		}

		for (ik = 0; ik < 1000000; ik++) {
			if (ldxa(0, ASI_IDSR) & (IDSR_BUSY << shift))
				continue;
			else
				break;
		}
		intr_restore(s);

		if (ik == 1000000)
			break;

		if ((ldxa(0, ASI_IDSR) & (IDSR_NACK << shift)) == 0)
			return;
		/*
		 * Wait for a while with interrupts enabled to avoid
		 * deadlocks.  XXX - a random backoff would be better.
		 */
		DELAY(1);
	}

	if (panicstr == NULL)
		panic("cpu%d: ipi_send: couldn't send ipi to UPAID %u"
			" (tried %d times)", cpu_number(), upaid, i);
}
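
The retry loop above is driven by the interrupt dispatch status register. A small sketch of how the IDSR bits are interpreted; dispatch_status() is hypothetical, while IDSR_BUSY, IDSR_NACK and the UltraSPARC-IIIi shift come straight from the example:

enum dispatch_result { DISPATCH_OK, DISPATCH_BUSY, DISPATCH_NACKED };

static enum dispatch_result
dispatch_status(int shift)
{
	uint64_t idsr;

	idsr = ldxa(0, ASI_IDSR);
	if (idsr & (IDSR_BUSY << shift))
		return (DISPATCH_BUSY);	  /* dispatch still in flight; poll */
	if (idsr & (IDSR_NACK << shift))
		return (DISPATCH_NACKED); /* target refused; resend */
	return (DISPATCH_OK);		  /* interrupt was accepted */
}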
Code example #7
File: zeus.c Project: dcui/FreeBSD-9.3_kernel
/*
 * Flush all non-locked mappings from the TLBs.
 */
void
zeus_tlb_flush_nonlocked(void)
{

	stxa(TLB_DEMAP_ALL, ASI_DMMU_DEMAP, 0);
	stxa(TLB_DEMAP_ALL, ASI_IMMU_DEMAP, 0);
	flush(KERNBASE);
}
Code example #8
File: main.c Project: UnitedMarsupials/kame
static int
watch_virt_set_mask(vm_offset_t va, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_VWPR, ASI_DMMU, va & (((2UL << 41) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_VW) & ~LSU_VM_MASK) |
	    (mask << LSU_VM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}
Code example #9
File: main.c Project: UnitedMarsupials/kame
static int
watch_phys_set_mask(vm_offset_t pa, u_long mask)
{
	u_long lsucr;

	stxa(AA_DMMU_PWPR, ASI_DMMU, pa & (((2UL << 38) - 1) << 3));
	lsucr = ldxa(0, ASI_LSU_CTL_REG);
	lsucr = ((lsucr | LSU_PW) & ~LSU_PM_MASK) |
	    (mask << LSU_PM_SHIFT);
	stxa(0, ASI_LSU_CTL_REG, lsucr);
	return (0);
}
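
A plausible caller for the two watchpoint helpers above. The mask selects which bytes of the aligned 8-byte region containing the address actually trigger the watch; watch_virt_set() here is hypothetical and merely shows how such a byte mask could be derived from an address and a size:

static int
watch_virt_set(vm_offset_t va, int sz)
{
	u_long off;

	off = (u_long)va & 7;
	/* Reject watch regions that straddle the 8-byte boundary. */
	if (off + sz > 8)
		return (-1);
	return (watch_virt_set_mask(va, ((1UL << sz) - 1) << off));
}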
Code example #10
File: zeus.c Project: dcui/FreeBSD-9.3_kernel
/*
 * Enable level 1 caches.
 */
void
zeus_cache_enable(u_int cpu_impl)
{
	u_long lsu;

	lsu = ldxa(0, ASI_LSU_CTL_REG);
	stxa(0, ASI_LSU_CTL_REG, lsu | LSU_IC | LSU_DC);
	flush(KERNBASE);
}
Code example #11
File: ipifuncs.c Project: ryo/netbsd-src
/*
 * Send an interprocessor interrupt - sun4v.
 */
void
sparc64_send_ipi_sun4v(int cpuid, ipifunc_t func, uint64_t arg1, uint64_t arg2)
{
	struct cpu_info *ci = curcpu();
	int err, i;
	
	stha(ci->ci_cpuset, ASI_PHYS_CACHED, cpuid);
	stxa(ci->ci_mondo, ASI_PHYS_CACHED, (vaddr_t)func);
	stxa(ci->ci_mondo + 8, ASI_PHYS_CACHED, arg1);
	stxa(ci->ci_mondo + 16, ASI_PHYS_CACHED, arg2);
	
	for (i = 0; i < SPARC64_IPI_RETRIES; i++) {
		err = hv_cpu_mondo_send(1, ci->ci_cpuset, ci->ci_mondo);
		if (err != H_EWOULDBLOCK)
			break;
		delay(10);
	}
	if (err != H_EOK)
		panic("Unable to send mondo %lx to cpu %d: %d",
		    (long unsigned int)func, cpuid, err);
}
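
The three stxa() calls above fill in a per-CPU mondo argument block in physical memory before handing it to the hypervisor. The layout they imply, expressed as a hypothetical struct (the offsets follow the stores at ci_mondo + 0, + 8 and + 16 in the example):

struct mondo_args {
	uint64_t func;	/* IPI handler, written at ci_mondo + 0 */
	uint64_t arg1;	/* first argument, written at ci_mondo + 8 */
	uint64_t arg2;	/* second argument, written at ci_mondo + 16 */
};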
Code example #12
File: zeus.c Project: dcui/FreeBSD-9.3_kernel
/*
 * CPU-specific initialization for Fujitsu Zeus CPUs.
 */
void
zeus_init(u_int cpu_impl)
{
	u_long val;

	/* Ensure the TSB Extension Registers hold 0 as TSB_Base. */

	stxa(AA_DMMU_TSB_PEXT_REG, ASI_DMMU, 0);
	stxa(AA_IMMU_TSB_PEXT_REG, ASI_IMMU, 0);
	membar(Sync);

	stxa(AA_DMMU_TSB_SEXT_REG, ASI_DMMU, 0);
	/*
	 * NB: the secondary context was removed from the iMMU.
	 */
	membar(Sync);

	stxa(AA_DMMU_TSB_NEXT_REG, ASI_DMMU, 0);
	stxa(AA_IMMU_TSB_NEXT_REG, ASI_IMMU, 0);
	membar(Sync);

	val = ldxa(AA_MCNTL, ASI_MCNTL);
	/* Ensure MCNTL_JPS1_TSBP is 0. */
	val &= ~MCNTL_JPS1_TSBP;
	/*
	 * Ensure 4-Mbyte page entries are stored in the 1024-entry, 2-way set
	 * associative TLB.
	 */
	val = (val & ~MCNTL_RMD_MASK) | MCNTL_RMD_1024;
	stxa(AA_MCNTL, ASI_MCNTL, val);
}
Code example #13
/*
 * Enable level 1 caches.
 */
void
cheetah_cache_enable(u_int cpu_impl)
{
	u_long lsu;

	lsu = ldxa(0, ASI_LSU_CTL_REG);
	if (cpu_impl == CPU_IMPL_ULTRASPARCIII) {
		/* Disable P$ due to US-III erratum #18. */
		lsu &= ~LSU_PE;
	}
	stxa(0, ASI_LSU_CTL_REG, lsu | LSU_IC | LSU_DC);
	flush(KERNBASE);
}
Code example #14
/*
 * CPU-specific initialization - this is used both for Sun Cheetah and
 * later CPUs and for Fujitsu Zeus and later CPUs.
 */
void
cheetah_init(u_int cpu_impl)
{
	u_long val;

	/* Ensure the TSB Extension Registers hold 0 as TSB_Base. */

	stxa(AA_DMMU_TSB_PEXT_REG, ASI_DMMU, 0);
	stxa(AA_IMMU_TSB_PEXT_REG, ASI_IMMU, 0);
	membar(Sync);

	stxa(AA_DMMU_TSB_SEXT_REG, ASI_DMMU, 0);
	/*
	 * NB: the secondary context was removed from the iMMU.
	 */
	membar(Sync);

	stxa(AA_DMMU_TSB_NEXT_REG, ASI_DMMU, 0);
	stxa(AA_IMMU_TSB_NEXT_REG, ASI_IMMU, 0);
	membar(Sync);

	if (cpu_impl == CPU_IMPL_SPARC64V) {
		/* Ensure MCNTL_JPS1_TSBP is 0. */
		val = ldxa(AA_MCNTL, ASI_MCNTL);
		val &= ~MCNTL_JPS1_TSBP;
		stxa(AA_MCNTL, ASI_MCNTL, val);
		return;
	}

	/*
	 * Configure the first large dTLB to hold 4MB pages (e.g. for direct
	 * mappings) for all three contexts and ensure the second one is set
	 * up to hold 8k pages for them.  Note that this is constrained by
	 * US-IV+, whose large dTLBs can only hold entries of certain page
	 * sizes each.
	 * For US-IV+, additionally ensure that the large iTLB is set up to
	 * hold 8k pages for nucleus and primary context (there is still no
	 * secondary iMMU context).
	 * NB: according to the documentation, changing the page size of a
	 * context requires a demap of that context beforehand, but we can
	 * hardly flush our locked pages here, so we use a demap all instead.
	 */
	stxa(TLB_DEMAP_ALL, ASI_DMMU_DEMAP, 0);
	membar(Sync);
	val = (TS_4M << TLB_PCXR_N_PGSZ0_SHIFT) |
	    (TS_8K << TLB_PCXR_N_PGSZ1_SHIFT) |
	    (TS_4M << TLB_PCXR_P_PGSZ0_SHIFT) |
	    (TS_8K << TLB_PCXR_P_PGSZ1_SHIFT);
	if (cpu_impl == CPU_IMPL_ULTRASPARCIVp)
		val |= (TS_8K << TLB_PCXR_N_PGSZ_I_SHIFT) |
		    (TS_8K << TLB_PCXR_P_PGSZ_I_SHIFT);
	stxa(AA_DMMU_PCXR, ASI_DMMU, val);
	val = (TS_4M << TLB_SCXR_S_PGSZ0_SHIFT) |
	    (TS_8K << TLB_SCXR_S_PGSZ1_SHIFT);
	stxa(AA_DMMU_SCXR, ASI_DMMU, val);
	flush(KERNBASE);

	/*
	 * Ensure DCR_IFPOE is disabled as long as we haven't implemented
	 * support for it (if ever) as most if not all firmware versions
	 * apparently turn it on.  Not making use of DCR_IFPOE should also
	 * avoid Cheetah erratum #109.
	 */
	val = rd(asr18) & ~DCR_IFPOE;
	if (cpu_impl == CPU_IMPL_ULTRASPARCIVp) {
		/*
		 * Ensure the branch prediction mode is set to PC indexing
		 * in order to work around US-IV+ erratum #2.
		 */
		val = (val & ~DCR_BPM_MASK) | DCR_BPM_PC;
		/*
		 * XXX disable dTLB parity error reporting as otherwise we
		 * get seemingly false positives when copying in the user
		 * window by simulating a fill trap on return to usermode in
		 * case single issue is disabled, which thus appears to be
		 * a CPU bug.
		 */
		val &= ~DCR_DTPE;
	}
	wr(asr18, val, 0);
}
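
The dispatch control register is reached through %asr18 via the rd()/wr() accessors used above. A sketch of how these macros might look, assuming GCC-style inline assembly; the actual machine header may differ:

/* rd(asr18) would expand to "rd %asr18, %0". */
#define	rd(name) __extension__ ({					\
	uint64_t __sr;							\
	__asm __volatile("rd %%" #name ", %0" : "=r" (__sr));		\
	__sr;								\
})

/* wr(asr18, val, 0) writes val ^ 0 to %asr18. */
#define	wr(name, val, xorval)						\
	__asm __volatile("wr %0, %1, %%" #name				\
	    : : "r" (val), "rI" (xorval))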