/*
 * Free a wired DTLB slot previously handed out by the wired-entry
 * allocator.  Returns 0 on success, -EINVAL if 'entry' does not
 * refer to the most recently allocated wired slot.
 */
int sh64_put_wired_dtlb_entry(unsigned long long entry)
{
	/*
	 * Invalidate the slot's contents up front.
	 * NOTE(review): this runs before the validity checks below, so a
	 * bad 'entry' still gets flushed — presumably harmless, but
	 * confirm against the allocator's expectations.
	 */
	__flush_tlb_slot(entry);

	/*
	 * We don't do any particularly useful tracking of wired entries,
	 * so this approach works like a stack .. last one to be allocated
	 * has to be the first one to be freed.
	 *
	 * We could potentially load wired entries into a list and work on
	 * rebalancing the list periodically (which also entails moving the
	 * contents of a TLB entry) .. though I have a feeling that this is
	 * more trouble than it's worth.
	 */

	/*
	 * Entry must be valid .. we don't want any ITLB addresses!
	 * (slots at or below DTLB_FIXED are reserved)
	 */
	if (entry <= DTLB_FIXED)
		return -EINVAL;

	/*
	 * Next, check if we're within range to be freed (ie, must be the
	 * entry beneath the first 'free' entry — the top of the stack).
	 */
	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
		return -EINVAL;

	/* If we are, then bring this entry back into the list: pop the
	 * stack and make the freed slot the next one handed out. */
	cpu_data->dtlb.first -= cpu_data->dtlb.step;
	cpu_data->dtlb.next = entry;

	return 0;
}
int sh64_put_wired_dtlb_entry(unsigned long long entry) { __flush_tlb_slot(entry); if (entry <= DTLB_FIXED) return -EINVAL; if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step)) return -EINVAL; cpu_data->dtlb.first -= cpu_data->dtlb.step; cpu_data->dtlb.next = entry; return 0; }
/* Callable from fault.c, so not static */
/*
 * Load a new translation into the next round-robin slot of the
 * appropriate TLB.
 *
 * 'address' is the faulting virtual address, 'is_text_not_data'
 * selects the ITLB (non-zero) or DTLB (zero), and 'pte' points to the
 * page-table entry describing the translation to install.
 */
inline void __do_tlb_refill(unsigned long address,
			    unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh = 0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register: virtual page number of the faulting address.
	 */
	pteh = address & MMU_VPN_MASK;

	/* Sign extend based on neff. */
#if (NEFF == 32)
	/* Faster sign extension via a narrowing/widening cast pair. */
	pteh = (unsigned long long)(signed long long)(signed long)pteh;
#else
	/* General case: OR in the sign-extension mask when the sign bit is set. */
	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
#endif

	/* Set the ASID, then mark the entry valid. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK;	/* drop software flags */

	/* Pick the TLB this refill targets. */
	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);

	/* Invalidate the victim slot before reprogramming it. */
	next = tlbp->next;
	__flush_tlb_slot(next);

	/*
	 * Program the slot: PTEL (config index 1) before PTEH (config
	 * index 0).  NOTE(review): ordering looks deliberate — PTEH
	 * carries PTEH_VALID, so writing it last should arm the entry
	 * atomically; confirm against the SH-5 MMU manual.
	 */
	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
		      : : "r" (next), "r" (pteh), "r" (ptel) );

	/* Advance the round-robin pointer, wrapping within this TLB's range. */
	next += TLB_STEP;
	if (next > tlbp->last)
		next = tlbp->first;
	tlbp->next = next;
}