Example #1
void
r10k_icache_sync_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
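		/*
		 * The primary I-cache is two-way set-associative; the index
		 * op encodes the way in the low bit of the address, so va+0
		 * hits way 0 and va+1 hits way 1 of each 64-byte line.
		 */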
		cache_op_r4k_line(va+0, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 64;
	}
}
Example #2
void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
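		/* Write back and invalidate 32 lines (4 KB) per pass. */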
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}
Example #3
void
tx3920_icache_sync_range_16wt(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);
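	/*
	 * The "16wt" variant is used with a write-through D-cache, so
	 * memory is already current and only the I-cache lines below
	 * need to be invalidated.
	 */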

	tx3920_icache_do_inv_16(va, eva);
}
Example #4
void
r10k_pdcache_inv_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}
Example #5
void
r10k_pdcache_wb_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		/* R10000 does not support HitWriteBack operation */
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}
Example #6
void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_cache_info.mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += line_size;
	}
}
Example #7
void
r10k_sdcache_inv_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}
Example #8
void
tx3920_icache_sync_range_16wb(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile(".set push; .set mips2; sync; .set pop");

	tx3920_icache_do_inv_16(va, eva);
}
Example #9
void
r10k_sdcache_inv_range(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}
Example #10
void
r3k_pdcache_inv_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	if ((eva - va) >= mips_cache_info.mci_pdcache_size) {
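		/* The range covers the whole D-cache; just flush it all. */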
		r3k_pdcache_wbinv_all();
		return;
	}

	r3k_pdcache_do_inv(va, eva);
}
Example #11
void
tx3900_icache_sync_range_16(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	if ((eva - va) >= mips_cache_info.mci_picache_size) {
		/* Just hit the whole thing. */
		va = MIPS_PHYS_TO_KSEG0(0);
		eva = MIPS_PHYS_TO_KSEG0(mips_cache_info.mci_picache_size);
	}

	tx3900_icache_do_inv_index_16(va, eva);
}
Example #12
void
r10k_sdcache_wb_range(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		/* R10000 does not support HitWriteBack operation */
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}
Example #13
void
r10k_icache_sync_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while (va < eva) {
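		/*
		 * The D-cache write-back and sync above made memory current;
		 * now invalidate the stale 64-byte I-cache lines.
		 */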
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 64;
	}
}
Example #14
void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
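		/* Finish the remainder one 16-byte line at a time. */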
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}
Example #15
void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}
Example #16
void
r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 128;
	}
}
Example #17
void
tx3920_pdcache_wb_range_16wb(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_tx39_op_32lines_16(va,
		    CACHE_TX39_D|CACHEOP_TX3920_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_tx39_line(va, CACHE_TX39_D|CACHEOP_TX3920_HIT_WB);
		va += 16;
	}
}
Example #18
void
tx3900_pdcache_inv_range_4(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 4)) {
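		/* 4-byte TX3900 D-cache lines: invalidate 32 of them per pass. */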
		cache_tx39_op_32lines_4(va,
		    CACHE_TX39_D|CACHEOP_TX3900_HIT_INV);
		va += (32 * 4);
	}

	while (va < eva) {
		cache_op_tx39_line(va, CACHE_TX39_D|CACHEOP_TX3900_HIT_INV);
		va += 4;
	}
}
Example #19
void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while ((eva - va) >= (32 * 16)) {
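		/* Invalidate 32 lines (512 bytes) per pass, then mop up below. */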
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}
Example #20
void
r10k_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_pdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
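		/*
		 * Two-way set-associative D-cache: the index op takes the way
		 * from the low address bit, so both ways of each 32-byte line
		 * are covered.
		 */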
		cache_op_r4k_line(va+0, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}