void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mips_cache_info.mci_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}
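/*
 * The round_line()/trunc_line() helpers used throughout are not shown in
 * this section; a minimal sketch of the assumed definitions, specialized
 * for the 16-byte line size these "_16" variants target (the
 * FreeBSD-derived copies below spell them round_line16()/trunc_line16()):
 */
#define	round_line(x)	(((x) + 15) & ~(vaddr_t)15)	/* round up to a line boundary */
#define	trunc_line(x)	((x) & ~(vaddr_t)15)		/* round down to a line boundary */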
void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}
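/*
 * Worked example of the index-aliasing trick above, with hypothetical
 * numbers (assuming a direct-mapped 8 KB primary D-cache): masking with
 * (size - 1) keeps only the bits that select a cache line, and OR-ing in
 * the KSEG0 base yields an always-accessible alias that indexes the same
 * line.  This is a standalone illustration, not kernel code:
 */
#include <stdio.h>

int
main(void)
{
	unsigned long pdcache_size = 8 * 1024;		/* assumed cache size */
	unsigned long va = 0x12345670ul;		/* arbitrary input VA */
	unsigned long index = va & (pdcache_size - 1);	/* -> 0x1670 */
	unsigned long kseg0 = 0x80000000ul | index;	/* MIPS_PHYS_TO_KSEG0() */

	printf("index bits 0x%lx -> KSEG0 alias 0x%lx\n", index, kseg0);
	return 0;
}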
void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}
void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_picache_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}
void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}
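/*
 * A sketch of the two primitives every loop above leans on.  The real
 * definitions live in a header (NetBSD's <mips/cache_r4k.h>) and unroll
 * cache_r4k_op_32lines_16() into 32 consecutive "cache" instructions
 * rather than looping; treat this loop form as illustrative only:
 */
#define	cache_op_r4k_line(va, op)					\
	__asm volatile("cache %1, 0(%0)"				\
	    :: "r" (va), "i" (op) : "memory")

#define	cache_r4k_op_32lines_16(va, op)					\
do {									\
	for (int _i = 0; _i < 32; _i++)					\
		cache_op_r4k_line((va) + 16 * _i, (op));		\
} while (0)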
void
mipsNN_pdcache_wb_range_16(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}

	SYNC;
}
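/*
 * SYNC in the mipsNN_* (FreeBSD-derived) functions plays the same role as
 * the literal __asm volatile("sync") in the r4k_* functions: it orders the
 * cache ops against later memory traffic.  A minimal sketch of the assumed
 * definition:
 */
#define	SYNC	__asm volatile("sync" ::: "memory")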
void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}
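/*
 * The flag values OR-ed together above form the 5-bit op field of the MIPS
 * "cache" instruction: the low two bits select the target cache, the upper
 * bits the operation.  Hit ops touch a line only if the given address is
 * actually resident; Index ops treat the address as a raw index and always
 * touch a line, which is why the *_index_* variants build KSEG0 aliases
 * first.  A sketch of the assumed encodings (values as in NetBSD's
 * <mips/cache_r4k.h>; treat as illustrative):
 */
#define	CACHE_R4K_I			0		/* primary I-cache */
#define	CACHE_R4K_D			1		/* primary D-cache */
#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I: invalidate by index */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D: wb-invalidate by index */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* invalidate if VA hits */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* write back if VA hits */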
void
mipsNN_pdcache_wbinv_all_16(void)
{
	vm_offset_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + pdcache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */
	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	SYNC;
}
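/*
 * Why the whole-cache index loop can ignore associativity: for index ops
 * the way number comes from the address bits just above the per-way line
 * index, so sweeping va over the *total* cache size visits every line in
 * every way.  A standalone illustration with hypothetical parameters
 * (2 ways of 8 KB each):
 */
#include <stdio.h>

int
main(void)
{
	unsigned way_size = 8 * 1024;		/* assumed per-way size */
	unsigned nways = 2;			/* assumed associativity */

	for (unsigned va = 0; va < nways * way_size; va += 16) {
		unsigned way = va / way_size;		/* high bits pick the way */
		unsigned set = (va % way_size) / 16;	/* low bits pick the set */
		if (set == 0)				/* print one line per way */
			printf("va 0x%05x -> way %u, set %u\n", va, way, set);
	}
	return 0;
}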
void
mipsNN_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line16(va + size);
	va = trunc_line16(va);

	mips_intern_dcache_wb_range(va, (eva - va));

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}

	SYNC;
}
void
mipsNN_icache_sync_all_16(void)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va, eva;

	va = MIPS_PHYS_TO_KSEG0(0);
	eva = va + mci->mci_picache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the N different "ways".
	 */
	mips_intern_dcache_wbinv_all();

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	SYNC;
}
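/*
 * Typical usage (a hypothetical caller, not part of this file): after the
 * kernel copies instructions into memory, the D-cache must be written back
 * and the stale I-cache lines invalidated before the new code runs.  The
 * icache_sync entry points above do both steps; a bounded region should
 * prefer the range variant over the whole-cache one:
 */
void
example_flush_new_code(vaddr_t text, vsize_t len)	/* hypothetical helper */
{
	/* write back D-cache, then invalidate I-cache, for [text, text+len) */
	mipsNN_icache_sync_range_16(text, len);
}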