/*
 * h_protect() - change the user/supervisor protection bits of a guest
 * TLB entry.
 *
 * pcop:   per-cpu state (shadow TLB mirror lives in pcop->utlb[]).
 * flags:  H_EADDR selects lookup by effective address; H_U{X,W,R} and
 *         H_S{X,W,R} give the new user/supervisor execute/write/read bits.
 * tlb_id: effective address (with H_EADDR) or a raw TLB index.
 *
 * Returns H_Success, H_NOT_FOUND (no TLBE for the address), or
 * H_Parameter (bad index).
 */
uval
h_protect(struct cpu *pcop, uval flags, uval tlb_id)
{
	union tlbe tlbe;
	uval tlbx = tlb_id;

	if (flags & H_EADDR) {
		if (-1 == (tlbx = EADDR_TO_TLBX(tlb_id)))
			return H_NOT_FOUND;
	} else if (CHECK_INDEX(tlb_id)) {
		return H_Parameter;
	}

	/* read the current entry back from the hardware TLB */
	tlbre(tlbx, &tlbe.words.epnWord, &tlbe.words.rpnWord,
	      &tlbe.words.attribWord);

	/* rebuild the user protection field from the flags: X|W|R */
	tlbe.bits.up = 0;
	tlbe.bits.up |= !!(flags & H_UX) << 2;
	tlbe.bits.up |= !!(flags & H_UW) << 1;
	tlbe.bits.up |= !!(flags & H_UR);

	/* rebuild the supervisor protection field: X|W|R */
	tlbe.bits.sp = 0;
	tlbe.bits.sp |= !!(flags & H_SX) << 2;
	tlbe.bits.sp |= !!(flags & H_SW) << 1;
	tlbe.bits.sp |= !!(flags & H_SR);

	tlbwe(tlbx, tlbe.words.epnWord, tlbe.words.rpnWord,
	      tlbe.words.attribWord);

	/* Keep the per-cpu TLB mirror in sync -- h_enter and h_remove both
	 * maintain pcop->utlb[], and leaving the stale attribute word here
	 * could restore the old protection on a later context switch. */
	pcop->utlb[tlbx] = tlbe;

	return H_Success;
}
/*
 * h_remove() - invalidate one guest TLB entry, or all non-bolted ones.
 *
 * pcop:   per-cpu state (shadow TLB mirror lives in pcop->utlb[]).
 * flags:  H_ALL wipes every non-bolted OS slot; H_EADDR selects a single
 *         entry by effective address, otherwise tlb_id is a raw index.
 * tlb_id: effective address (with H_EADDR) or a raw TLB index.
 *
 * On single-entry removal the old TLBE words are handed back through
 * pcop->reg_gprs[4..6].  Returns H_Success, H_NOT_FOUND, or H_Parameter.
 */
uval
h_remove(struct cpu *pcop, uval flags, uval tlb_id)
{
	uval tlbx;

	/* XXX flags not yet handled: H_AVPN, H_ANDCOND */

	if (flags & H_ALL) {
		/* sweep every valid entry in the non-bolted OS region */
		for (tlbx = MIN_OS_TLBX;
		     tlbx < pcop->tlb_lowest_bolted; ++tlbx) {
			if (!pcop->utlb[tlbx].bits.v)
				continue;
			tlbwe(tlbx, 0, 0, 0);
			pcop->utlb[tlbx].words.epnWord = 0;
		}
		return H_Success;
	}

	/* single-entry removal: locate the slot */
	if (flags & H_EADDR) {
		tlbx = EADDR_TO_TLBX(tlb_id);
		if (-1 == tlbx)
			return H_NOT_FOUND;
	} else {
		if (CHECK_INDEX(tlb_id))
			return H_Parameter;
		tlbx = tlb_id;
	}

	/* return the old TLBE's three words via saved gprs 4-6 */
	tlbre(tlbx, &pcop->reg_gprs[4], &pcop->reg_gprs[5],
	      &pcop->reg_gprs[6]);

	/* invalidate the entry; MMUCR:STID is irrelevant once V=0 */
	tlbwe(tlbx, 0, 0, 0);
	pcop->utlb[tlbx].words.epnWord = 0;

	return H_Success;
}
void MM_TlbSetup( const struct TlbEntry *tblTable ) { int32_t i = 0; /* Setup the TLBs */ while( tblTable[i].entry != (-1UL) ) { set_spr(SPR_MAS0, tblTable[i].mas0); set_spr(SPR_MAS1, tblTable[i].mas1); set_spr(SPR_MAS2, tblTable[i].mas2); set_spr(SPR_MAS3, tblTable[i].mas3); msync(); isync(); tlbwe(); i++; } }
/*
 * h_enter() - install a guest TLB entry.
 *
 * pcop:       per-cpu state (shadow TLB mirror lives in pcop->utlb[]).
 * flags:      H_BOLTED allocates from the bolted region (growing it
 *             downward); H_EADDR selects/replaces by effective address,
 *             otherwise tlb_id is a raw index.
 * tlb_id:     effective address (with H_EADDR) or a raw TLB index.
 * epnWord, rpnWord, attribWord: the three words of the new TLBE; the RPN
 *             is translated from the OS's Real space to the hypervisor's
 *             Logical space before being written.
 *
 * The chosen index is returned to the caller via pcop->reg_gprs[4].
 * Returns H_Success or H_Parameter.
 */
uval
h_enter(struct cpu *pcop, uval flags, uval tlb_id, uval epnWord,
	uval rpnWord, uval attribWord)
{
	union tlbe localTlbe;
	uval tlbx = tlb_id;

	if (flags & H_BOLTED) {
		tlbx = -1;
		if (flags & H_EADDR) {
			tlbx = EADDR_TO_TLBX(tlb_id);
			if (tlbx != -1 && tlbx < pcop->tlb_lowest_bolted) {
				/* We're adding a bolted TLBE, replacing a
				 * previous non-bolted one.  Invalidate the
				 * old non-bolted entry AND clear its mirror
				 * slot, exactly as h_remove does -- leaving
				 * the mirror valid here would make a dead
				 * entry look live to later h_remove(H_ALL)
				 * sweeps. */
				tlbwe(tlbx, 0, 0, 0);
				pcop->utlb[tlbx].words.epnWord = 0;
				tlbx = -1;
			}
		}
		if (tlbx == -1) {
			/* grow the bolted region downward by one slot */
			tlbx = --pcop->tlb_lowest_bolted;
			assert(pcop->tlb_lowest_bolted > 4,
			       "too many bolted TLBEs!");
		}
	} else if (flags & H_EADDR) {
		if (-1 == (tlbx = EADDR_TO_TLBX(tlb_id))) {
			/* select new index with wraparound */
			tlbx = pcop->tlb_last_used + 1;
			if (tlbx >= pcop->tlb_lowest_bolted) {
				tlbx = MIN_OS_TLBX;
			}
			pcop->tlb_last_used = tlbx;
		}
	} else if (CHECK_INDEX(tlb_id)) {
		return H_Parameter;
	}

	localTlbe.words.epnWord = epnWord;
	localTlbe.words.rpnWord = rpnWord;
	localTlbe.words.attribWord = attribWord;

	/* translate OS's Real to hypervisor's Logical */
	localTlbe.bits.rpn = RPN_R2L(pcop, localTlbe.bits.rpn);

	/* XXX
	 * validate RPN (including page size)
	 * validate attribute bits (IO vs memory)
	 * clear reserved bits
	 * handle flags:
	 *   H_ZERO_PAGE
	 *   H_ICACHE_INVALIDATE
	 *   H_ICACHE_SYNCHRONIZE
	 *   H_EXACT
	 *   H_LARGE_PAGE
	 */

	/* Record the TID so we can get later context switches right. */
	localTlbe.bits.tid = get_mmucr() & MMUCR_STID_MASK;

	/* store TLBE in struct cpu's TLB mirror */
	pcop->utlb[tlbx] = localTlbe;

	/* enter TLBE into the UTLB */
	tlbwe(tlbx, localTlbe.words.epnWord, localTlbe.words.rpnWord,
	      localTlbe.words.attribWord);

	/* hand the chosen index back to the caller in saved gpr 4 */
	pcop->reg_gprs[4] = tlbx;

	return H_Success;
}