Example #1
void
mipsNN_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	const vaddr_t way_size = mci->mci_pdcache_way_size;
	const vaddr_t way_mask = way_size - 1;
	const u_int ways = mci->mci_pdcache_ways;
	vaddr_t eva;

	va &= way_mask;
	eva = round_line16(va + size);
	va = trunc_line16(va);

	/*
	 * If we are going to flush more than is in a way, we are flushing
	 * everything.
	 */
	if (eva - va >= way_size) {
		mipsNN_pdcache_wbinv_all_16();
		return;
	}

	/*
	 * Invalidate each way.  If the address range wraps past the end of
	 * the way, we will be invalidating in two ways but eventually things
	 * work out since the last way will wrap into the first way.
	 */
	for (u_int way = 0; way < ways; way++) {
		mipsNN_pdcache_wbinv_range_index_16_intern(va, eva);
		va += way_size;
		eva += way_size;
	}
}
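
All five examples lean on the round_line16/trunc_line16 helpers, which are defined elsewhere in the cache code. A minimal sketch of what they plausibly expand to for a 16-byte line size (the definitions below are an assumption, not copied from the source):

/*
 * Assumed helpers (sketch): align a [va, va + size) range outward to
 * 16-byte cache-line boundaries. round_line16() rounds up past the
 * last byte and trunc_line16() truncates down to the line start, so
 * eva - va is always a whole number of lines, which the per-line
 * loops in the examples depend on.
 */
#define	round_line16(x)		(((x) + 15) & ~15)
#define	trunc_line16(x)		((x) & ~15)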
Example #2
void
mipsNN_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_picache_way_mask);

	eva = round_line16(va + size);
	va = trunc_line16(va);

	/*
	 * If we are going to flush more than is in a way, we are flushing
	 * everything.
	 */
	if (eva - va >= mci->mci_picache_way_size) {
		mipsNN_icache_sync_all_16();
		return;
	}

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 16)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride) {
			cache_r4k_op_8lines_16(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		}
		va += 8 * 16;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride) {
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		}
		va += 16;
	}
}
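
The trick in Example #2 is that Index cache ops select a line by its index bits rather than by tag match, so any dereferenceable address with the right low bits will do. A sketch of the KSEG0 conversion it relies on, assuming the classic 32-bit MIPS memory map (the segment base is standard MIPS, but treat the exact macro shape as an assumption):

/*
 * Sketch: KSEG0 is the unmapped, cached 512 MB window starting at
 * 0x80000000, so bare index bits (or a physical address) become a
 * usable virtual address with a single OR.
 */
#define	MIPS_KSEG0_START	0x80000000UL
#define	MIPS_PHYS_TO_KSEG0(pa)	((vaddr_t)(pa) | MIPS_KSEG0_START)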
Example #3
void
mipsNN_icache_sync_range_index_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);

	eva = round_line16(va + size);
	va = trunc_line16(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 16)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_16(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 8 * 16;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}
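
Example #3 is the FreeBSD counterpart of Example #2: vm_offset_t instead of vaddr_t, and file-scope picache_* globals instead of the mips_cache_info structure. Note that this version has no way-size short-circuit, so an oversized range simply walks the whole range. The stride/loopcount pair is what lets the inner for loops cover a set-associative cache; a hypothetical initialization, purely an assumption about how these globals relate:

/*
 * Hypothetical setup (assumption, not from the source): one pass per
 * way, stepping by the way size, so the inner loops
 * "for (i = 0; i < loopcount; i++, tmpva += stride)" above apply the
 * index op to the same index in every way.
 */
static int picache_stride;	/* bytes between ways at a given index */
static int picache_loopcount;	/* number of ways to visit */

static void
picache_params_init(int picache_size, int picache_ways)
{
	picache_stride = picache_size / picache_ways;	/* way size */
	picache_loopcount = picache_ways;		/* one pass per way */
}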
Example #4
void
mipsNN_pdcache_wb_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	/* Write back 32 lines (512 bytes) per iteration while the range allows. */
	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	/* Write back any remaining lines one at a time. */
	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}

	SYNC;
}
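
Every loop in these examples bottoms out in the MIPS cache instruction. A sketch of the single-line primitive, assuming GCC-style inline asm (the real headers also define unrolled 8- and 32-line variants built from the same instruction):

/*
 * Sketch: issue one "cache" instruction for the line containing va.
 * The op field must be a compile-time constant, hence the "i"
 * constraint; e.g. CACHE_R4K_D|CACHEOP_R4K_HIT_WB writes back a dirty
 * D-cache line without invalidating it.
 */
#define	cache_op_r4k_line(va, op)					\
	__asm volatile("cache %1, 0(%0)" : : "r" (va), "i" (op) : "memory")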
Example #5
void
mipsNN_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	/* Write back the D-cache first so instruction fetch sees fresh data. */
	mips_intern_dcache_wb_range(va, (eva - va));

	/* Invalidate 32 I-cache lines per iteration while the range allows. */
	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	/* Invalidate any remaining lines one at a time. */
	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}

	SYNC;
}
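
A typical caller of Example #5 is code that has just written instructions to memory, such as a debugger planting a breakpoint. A hypothetical usage sketch in kernel context (patch_and_sync and its arguments are illustrative, not from the source):

/*
 * Hypothetical caller: store the new instruction (it lands in the
 * D-cache), then sync the range so instruction fetch sees it instead
 * of a stale I-cache line. The routine above performs both the
 * D-cache writeback and the I-cache invalidate.
 */
static void
patch_and_sync(uint32_t *insn, uint32_t new_insn)
{
	*insn = new_insn;
	mipsNN_icache_sync_range_16((vaddr_t)insn, sizeof(*insn));
}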