void
mipsNN_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	const vaddr_t way_size = mci->mci_pdcache_way_size;
	const vaddr_t way_mask = way_size - 1;
	const u_int ways = mci->mci_pdcache_ways;
	vaddr_t eva;

	va &= way_mask;
	eva = round_line32(va + size);
	va = trunc_line32(va);

	/*
	 * If we are going to flush more than is in a way, we are flushing
	 * everything.
	 */
	if (eva - va >= way_size) {
		mipsNN_pdcache_wbinv_all_32();
		return;
	}

	/*
	 * Invalidate each way.  If the address range wraps past the end of
	 * the way, we will be invalidating in two ways but eventually things
	 * work out since the last way will wrap into the first way.
	 */
	for (u_int way = 0; way < ways; way++) {
		mipsNN_pdcache_wbinv_range_index_32_intern(va, eva);
		va += way_size;
		eva += way_size;
	}
}
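/*
 * The round_line32()/trunc_line32() helpers are not shown in this
 * excerpt (in the full source they live near the top of the file).
 * A minimal sketch, assuming the usual align-to-32-byte-cache-line
 * idiom implied by the calls above:
 */
#define	round_line32(x)		(((x) + 31L) & -32L)	/* round up */
#define	trunc_line32(x)		((x) & -32L)		/* round down */

/*
 * Worked example of the wrap-around noted in the comment above,
 * assuming (hypothetically) a 2-way cache with 4 KB ways: va = 0xFE0,
 * size = 0x40 gives eva = 0x1020.  The way-0 pass touches
 * 0xFE0-0x101F, i.e. the tail of way 0 plus the head of way 1; the
 * way-1 pass touches 0x1FE0-0x201F, and since index ops select lines
 * modulo the total cache size, its 0x2000-0x201F tail aliases back to
 * the head of way 0.  Every (index, way) pair in the range is thus
 * covered exactly once.
 */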
void
mipsNN_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_picache_way_mask);

	eva = round_line32(va + size);
	va = trunc_line32(va);

	/*
	 * If we are going to flush more than is in a way, we are flushing
	 * everything.
	 */
	if (eva - va >= mci->mci_picache_way_size) {
		mipsNN_icache_sync_all_32();
		return;
	}

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 32)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride) {
			cache_r4k_op_8lines_32(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		}
		va += 8 * 32;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride) {
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		}
		va += 32;
	}
}
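/*
 * The KSEG0 trick above works because KSEG0 is a fixed, cached,
 * unmapped window over low physical memory, so the constructed address
 * can never fault.  A minimal sketch of the conversion, assuming the
 * conventional 0x80000000 KSEG0 base:
 */
#define	MIPS_KSEG0_START	0x80000000UL
#define	MIPS_PHYS_TO_KSEG0(x)	((vaddr_t)(x) | MIPS_KSEG0_START)
/*
 * Any address with the right low (index) bits will do for an Index op:
 * the CACHE instruction selects the line by index rather than by tag
 * match, so only those bits matter.
 */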
void
mipsNN_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}

	SYNC;
}
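/*
 * The line primitives used throughout are inline-asm wrappers around
 * the MIPS CACHE instruction; cache_r4k_op_32lines_32() (and the
 * 8-line variant) simply unroll several of them to cut loop overhead.
 * A minimal single-line sketch, assuming the op field is a
 * compile-time constant, as it is at every call site here:
 */
#define	cache_op_r4k_line(va, op)					\
	__asm volatile(							\
		".set noreorder					\n\t"	\
		"cache %1, 0(%0)				\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory")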
void
mipsNN_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}

	SYNC;
}
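/*
 * Typical (hypothetical) DMA usage of the two routines above: write
 * dirty lines back before the device reads the buffer, and invalidate
 * stale lines before the CPU reads what the device wrote.  The helper
 * name below is illustrative only, and the sketch assumes the CPU does
 * not touch the buffer while the transfer is in flight:
 */
static void
dma_roundtrip_example(vaddr_t buf, vsize_t len)
{
	mipsNN_pdcache_wb_range_32(buf, len);	/* CPU -> device: flush */
	start_dma_and_wait(buf, len);		/* hypothetical helper */
	mipsNN_pdcache_inv_range_32(buf, len);	/* device -> CPU: discard */
}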
void
mipsNN_icache_sync_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line32(va + size);
	va = trunc_line32(va);

	mips_intern_dcache_wb_range(va, (eva - va));

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}

	SYNC;
}
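/*
 * The routine above is the classic split-cache sequence for making
 * freshly written instructions fetchable: push the new code out of the
 * write-back D-cache, then discard any stale lines the I-cache may
 * hold for the same range.  An illustrative (hypothetical) caller,
 * assuming the kernel's memcpy is available:
 */
static void
install_trampoline(vaddr_t dst, const void *src, vsize_t len)
{
	memcpy((void *)dst, src, len);		/* write the new code */
	mipsNN_icache_sync_range_32(dst, len);	/* make it fetchable */
}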