/*
 * update_mmu_cache - keep the TLB and D-cache coherent after a PTE update.
 * @vma:  the VMA the faulting address belongs to
 * @addr: user virtual address whose PTE was just set
 * @pte:  pointer to the newly installed PTE
 *
 * Called by generic MM code after a PTE has been set. Pre-loads the new
 * translation into the TLB (when the PTE belongs to the mm currently
 * active on this CPU) and writes back/invalidates the kernel mapping of
 * the page when needed for D-cache / I-cache coherency.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t * pte)
{
	struct page *page;
	unsigned long flags;
	unsigned long pfn = pte_pfn(*pte);

	/* Nothing to do for PTEs that do not map a valid RAM page. */
	if (!pfn_valid(pfn))
		return;

	/*
	 * Warm the TLB with the new translation, but only when the PTE's mm
	 * is the one active on this CPU — otherwise the VPN register write
	 * would target the wrong address space. IRQs are disabled so the
	 * VPN-write / random-write / serialize sequence is not interleaved
	 * with another TLB operation.
	 */
	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}

	page = pfn_to_page(pfn);

	/*
	 * Flush (write back + invalidate) the kernel alias of the page when
	 * it was marked dirty in the D-cache by an earlier kernel write, or
	 * when the mapping is executable (the I-cache must not see stale
	 * data still sitting in the D-cache).
	 */
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
	    (vma->vm_flags & VM_EXEC)) {
		local_irq_save(flags);
		cpu_dcache_wbinval_page((unsigned long)page_address(page));
		local_irq_restore(flags);
	}
}
static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa) { unsigned long kaddr, pte; #define BASE_ADDR1 0xffff8000 kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask); pte = (pa | PAGE_KERNEL); __nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN); __nds32__tlbop_rwlk(pte); __nds32__isb(); return kaddr; }
/*
 * _nds32_init_mem - use on-chip local memory (ILM/DLM) as system memory.
 *
 * For systems without SDRAM: enables the instruction and data local
 * memories and moves the stack pointer to the end of DLM.
 *
 * NOTE: this function is weak (optional); boards that have real RAM are
 * expected to override or omit it.
 */
void _nds32_init_mem(void)
{
	/* System without SDRAM. Use data local memory as system memory. */
	extern char __data_start;
	register unsigned int dlmsize;

	/*
	 * Fixed build-time DLM size. Alternatively it could be probed from
	 * hardware: 0x1000 << ((__nds32__mfsr(NDS32_SR_DLMB) >> 1) & 0xf).
	 */
	dlmsize = DLM_SIZE;
#ifndef DLM_8K
	/* Enable the instruction local memory (bit 0 = valid). */
	__nds32__mtsr(ILM_BASE|1, NDS32_SR_ILMB);
	__nds32__isb();
#endif
	/*
	 * Set the DLM base and enable it (bit 0 = valid). An alternative is
	 * basing it at the .data start address:
	 * __nds32__mtsr((unsigned)&__data_start|1, NDS32_SR_DLMB);
	 */
	__nds32__mtsr(DLM_BASE|1, NDS32_SR_DLMB);
	__nds32__dsb();
	/*
	 * Update stack pointer to end of DLM.
	 * We suppose the .data + .bss + stack is less than the DLM size.
	 */
	__nds32__set_current_sp((unsigned)&__data_start + dlmsize);
}
/*
 * kunmap01 - tear down a temporary locked kernel mapping.
 * @kaddr: kernel virtual address previously returned by a kremap helper
 *
 * Releases the TLB lock on the entry, invalidates it, and serializes the
 * pipeline so no stale translation can be used afterwards.
 */
static inline void kunmap01(unsigned long kaddr)
{
	unsigned long va = kaddr;

	/* Unlock first, then invalidate — a locked entry cannot be dropped. */
	__nds32__tlbop_unlk(va);
	__nds32__tlbop_inv(va);
	__nds32__isb();
}