/* In skas mode a VMA with no mm covers kernel vm, so the kernel
 * mappings are updated directly; otherwise the process address space
 * is brought up to date over the range.
 */
void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	if(vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
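
/* In tt mode the kernel vm area (start_vm ... end_vm) is shared by
 * all processes, so changes to its mappings are published by bumping
 * the atomic counter vmchange_seq.  Each process records the value it
 * last acted on in thread.mode.tt.vm_seq and catches up lazily: after
 * fixing its own address space, it reflushes the kernel vm area only
 * if the counter has moved since it last looked.
 */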
void flush_tlb_mm_tt(struct mm_struct *mm)
{
	unsigned long seq;

	if(mm != current->mm) return;

	fix_range(mm, 0, STACK_TOP, 0);

	seq = atomic_read(&vmchange_seq);
	if(current->thread.mode.tt.vm_seq == seq)
		return;
	current->thread.mode.tt.vm_seq = seq;
	flush_tlb_kernel_range_common(start_vm, end_vm);
}
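
/* Only the current process's mappings can be fixed in tt mode.  A
 * range inside the kernel vm area goes through the common kernel
 * flush, and the change is published if anything was actually
 * remapped; a process range is fixed in place.
 */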
void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	if(vma->vm_mm != current->mm) return;

	/* Assumes that the range start ... end is entirely within
	 * either process memory or kernel vm
	 */
	if((start >= start_vm) && (start < end_vm)){
		if(flush_tlb_kernel_range_common(start, end))
			atomic_inc(&vmchange_seq);
	}
	else fix_range(vma->vm_mm, start, end, 0);
}
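
/* A single-page flush is just a one-page kernel range flush. */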
void __flush_tlb_one_skas(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
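
/* flush_tlb_kernel_range_common returns nonzero if it changed any
 * mappings, in which case the other processes sharing the kernel vm
 * area are told about it through vmchange_seq.
 */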
void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
	if(flush_tlb_kernel_range_common(start, end))
		atomic_inc(&vmchange_seq);
}
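
/* Redo every mapping in the current process - the nonzero last
 * argument to fix_range forces remapping rather than updating only
 * what the page tables mark as changed - and refresh the kernel vm
 * area unconditionally.
 */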
void force_flush_all_tt(void)
{
	fix_range(current->mm, 0, STACK_TOP, 1);
	flush_tlb_kernel_range_common(start_vm, end_vm);
}
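
/* For reference, a minimal sketch of how a generic entry point can
 * dispatch to these mode-specific variants.  This assumes UML's
 * CHOOSE_MODE_PROC macro from choose-mode.h; it is illustrative, not
 * a verbatim copy of the dispatch code.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas,
			 vma, start, end);
}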