/*
 * Flush the mmu and reset associated register to default values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensure that valid values exist in the register.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array. For nonexistent PGSZID<w> fields,
	 * zero is the best value to write. Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
	/* NOTE(review): init_kio() presumably sets up the KIO region
	 * mapping — confirm against its definition elsewhere. */
	init_kio();
	/* Flush after reconfiguring TLBCFG (required when PGSZID<w>
	 * fields change, per the comment above). */
	local_flush_tlb_all();

	/* Set rasid register to a known value. */

	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped). This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { int cpu = smp_processor_id(); struct mm_struct *mm = vma->vm_mm; unsigned long flags; if (mm->context.asid[cpu] == NO_CONTEXT) return; #if 0 printk("[tlbrange<%02lx,%08lx,%08lx>]\n", (unsigned long)mm->context.asid[cpu], start, end); #endif local_irq_save(flags); if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { int oldpid = get_rasid_register(); set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); start &= PAGE_MASK; if (vma->vm_flags & VM_EXEC) while(start < end) { invalidate_itlb_mapping(start); invalidate_dtlb_mapping(start); start += PAGE_SIZE; } else while(start < end) { invalidate_dtlb_mapping(start); start += PAGE_SIZE; } set_rasid_register(oldpid); } else { local_flush_tlb_mm(mm); } local_irq_restore(flags); }
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { int cpu = smp_processor_id(); struct mm_struct* mm = vma->vm_mm; unsigned long flags; int oldpid; if (mm->context.asid[cpu] == NO_CONTEXT) return; local_irq_save(flags); oldpid = get_rasid_register(); set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); if (vma->vm_flags & VM_EXEC) invalidate_itlb_mapping(page); invalidate_dtlb_mapping(page); set_rasid_register(oldpid); local_irq_restore(flags); }
/*
 * Flush a single page of @vma's address space from the TLB.
 *
 * Fixes two defects in the previous version:
 *  - local_save_flags() only recorded the interrupt flags without
 *    disabling interrupts, yet the function ended with
 *    local_irq_restore(); an interrupt arriving between
 *    get_rasid_register() and set_rasid_register() could observe or
 *    clobber the temporary RASID state.  Use local_irq_save() so the
 *    save/invalidate/restore sequence is atomic.
 *  - the RASID register was read and restored but never set to the
 *    target mm's ASID, so the invalidations hit whatever address
 *    space was currently active instead of vma->vm_mm.  Select the
 *    owning mm's ASID before invalidating (matching
 *    local_flush_tlb_page()).
 */
void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct* mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if(mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context));

	/* ITLB entries exist only for executable mappings. */
	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}
/*
 * Flush the mmu and reset associated register to default values.
 */
void __init init_mmu(void)
{
	/* Writing zeros to the <t>TLBCFG special registers ensure
	 * that valid values exist in the register.  For existing
	 * PGSZID<w> fields, zero selects the first element of the
	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
	 * the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
	/* Flush after reconfiguring TLBCFG (required when PGSZID<w>
	 * fields change, per the comment above). */
	flush_tlb_all();

	/* Set rasid register to a known value. */

	set_rasid_register(ASID_USER_FIRST);

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped). This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(PGTABLE_START);
}