/*
 * Invalidate a range of kernel virtual addresses.  If the range is
 * small enough to fit in one TLB way, drop it page by page; otherwise
 * a full flush is cheaper.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
            end - start < _TLB_ENTRIES << PAGE_SHIFT) {
                start &= PAGE_MASK;
                while (start < end) {
                        invalidate_itlb_mapping(start);
                        invalidate_dtlb_mapping(start);
                        start += PAGE_SIZE;
                }
        } else {
                local_flush_tlb_all();
        }
}
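/*
 * A worked example of the size check above (the numbers are
 * illustrative assumptions, not a statement about any particular
 * configuration): with _TLB_ENTRIES == 8 and PAGE_SHIFT == 12
 * (4 KiB pages), _TLB_ENTRIES << PAGE_SHIFT == 32768, so a range
 * shorter than eight pages is invalidated one page at a time.  A
 * larger range cannot fit in one TLB way anyway, so
 * local_flush_tlb_all() is the cheaper option there.
 */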
/*
 * Called by the generic memory-management code once a page-table entry
 * has been installed.  Kick the stale TLB entries and keep the data
 * and instruction caches coherent for this page.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);

        /* Invalidate the old TLB entries for this address. */

        invalidate_itlb_mapping(addr);
        invalidate_dtlb_mapping(addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

        if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {

                unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
                unsigned long paddr = (unsigned long)page_address(page);
                unsigned long phys = page_to_phys(page);

                /*
                 * Write back the kernel view of the page, then flush
                 * and invalidate it through an alias of the same cache
                 * color as the user address.
                 */
                __flush_invalidate_dcache_page(paddr);
                __flush_invalidate_dcache_page_alias(vaddr, phys);
                __invalidate_icache_page_alias(vaddr, phys);

                clear_bit(PG_arch_1, &page->flags);
        }
#else
        if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags) &&
            (vma->vm_flags & VM_EXEC) != 0) {
                unsigned long paddr = (unsigned long)page_address(page);

                __flush_dcache_page(paddr);
                __invalidate_icache_page(paddr);

                set_bit(PG_arch_1, &page->flags);
        }
#endif
}
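/*
 * PG_arch_1 doubles as a per-page cache-state bit, with opposite
 * polarity in the two configurations above: on an aliasing write-back
 * D-cache (way size larger than a page), a set bit means the kernel
 * may hold dirty lines under a different alias, so the page is written
 * back and invalidated through a temporary mapping at TLBTEMP_BASE_1.
 * On a non-aliasing cache, a clear bit on an executable page means the
 * I-cache has not yet been synchronized with the page's contents.
 */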
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;
        int oldpid;

        if (mm->context == NO_CONTEXT)
                return;

        local_irq_save(flags);

        oldpid = get_rasid_register();
        set_rasid_register(ASID_INSERT(mm->context));

        if (vma->vm_flags & VM_EXEC)
                invalidate_itlb_mapping(page);
        invalidate_dtlb_mapping(page);

        set_rasid_register(oldpid);

        local_irq_restore(flags);
}
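/*
 * Note the RASID dance shared by the flush routines above and below:
 * invalidate_itlb_mapping()/invalidate_dtlb_mapping() probe the TLB
 * against the currently installed address-space identifier, so the
 * target mm's ASID is temporarily installed with
 * set_rasid_register(ASID_INSERT(...)) and the previous value restored
 * afterwards.  Interrupts stay disabled for the whole window so that
 * nothing else runs with the borrowed ASID.
 */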
void local_flush_tlb_range(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end)
{
        int cpu = smp_processor_id();
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;

        if (mm->context.asid[cpu] == NO_CONTEXT)
                return;

#if 0
        printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
               (unsigned long)mm->context.asid[cpu], start, end);
#endif
        local_irq_save(flags);

        if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
                int oldpid = get_rasid_register();

                set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
                start &= PAGE_MASK;

                if (vma->vm_flags & VM_EXEC)
                        while (start < end) {
                                invalidate_itlb_mapping(start);
                                invalidate_dtlb_mapping(start);
                                start += PAGE_SIZE;
                        }
                else
                        while (start < end) {
                                invalidate_dtlb_mapping(start);
                                start += PAGE_SIZE;
                        }

                set_rasid_register(oldpid);
        } else {
                local_flush_tlb_mm(mm);
        }
        local_irq_restore(flags);
}
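/*
 * The + (PAGE_SIZE - 1) in the size check above rounds the byte length
 * up before comparing it with the capacity of one TLB way
 * (_TLB_ENTRIES << PAGE_SHIFT), so even a sub-page range counts as at
 * least one full page.  Ranges beyond that capacity fall back to
 * flushing the whole context via local_flush_tlb_mm().
 */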
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();
        struct mm_struct *mm = vma->vm_mm;
        unsigned long flags;
        int oldpid;

        if (mm->context.asid[cpu] == NO_CONTEXT)
                return;

        local_irq_save(flags);

        oldpid = get_rasid_register();
        set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

        if (vma->vm_flags & VM_EXEC)
                invalidate_itlb_mapping(page);
        invalidate_dtlb_mapping(page);

        set_rasid_register(oldpid);

        local_irq_restore(flags);
}
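/*
 * In an SMP build the local_flush_* routines above only touch the
 * executing CPU's TLB.  Below is a hedged sketch (disabled, for
 * illustration only) of how a cross-CPU flush_tlb_page() wrapper might
 * drive them via the generic on_each_cpu() helper; struct flush_data
 * and ipi_flush_tlb_page() are assumed names, not part of this file.
 */
#if 0
struct flush_data {
        struct vm_area_struct *vma;
        unsigned long addr;
};

/* Runs on each CPU with interrupts disabled by the IPI machinery. */
static void ipi_flush_tlb_page(void *arg)
{
        struct flush_data *fd = arg;

        local_flush_tlb_page(fd->vma, fd->addr);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_data fd = {
                .vma = vma,
                .addr = addr,
        };

        /* Run the local flush on every CPU and wait for completion. */
        on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}
#endif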