/*
 * Clamp every channel of a color into the displayable 0..255 range.
 * Returns the corrected color by value; the caller's copy is untouched.
 */
color_t color_fix(color_t col)
{
	color_t out = col;

	out.r = fix_range(out.r, 0, 255);
	out.g = fix_range(out.g, 0, 255);
	out.b = fix_range(out.b, 0, 255);

	return out;
}
/*
 * Flush the TLB for an address range in SKAS mode.  A vma with a NULL
 * vm_mm denotes a kernel-space mapping, which goes through the common
 * kernel-range flush; otherwise the process address space is fixed up.
 */
void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm != NULL)
		fix_range(mm, start, end, 0);
	else
		flush_tlb_kernel_range_common(start, end);
}
/*
 * Flush the TLB for an address range in TT mode.  Only the current
 * process's address space can be flushed here; anything else is ignored.
 *
 * Assumes the range start ... end lies entirely within either process
 * memory or kernel vm, so testing the start address alone is enough to
 * pick the right flush path.
 */
void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	if (vma->vm_mm != current->mm)
		return;

	if (start < start_vm || start >= end_vm)
		fix_range(vma->vm_mm, start, end, 0);
	else
		flush_kernel_vm_range(start, end, 1);
}
/*
 * Flush the whole TLB for an address space in SKAS mode.
 *
 * Skips the work entirely when the mm has no remaining users, since an
 * address space about to be destroyed does not need flushing.  The top
 * of the flushed region depends on whether /proc/mm is in use.
 */
void flush_tlb_mm_skas(struct mm_struct *mm)
{
	unsigned long top;

	if (atomic_read(&mm->mm_users) == 0)
		return;

	top = proc_mm ? task_size : CONFIG_STUB_START;
	fix_range(mm, 0, top, 0);
}
/*
 * Flush the whole TLB for an address space in TT mode.  Only the current
 * process can be flushed.  After fixing up the process range, the kernel
 * vm area is flushed too, but only if the vmchange sequence number shows
 * the kernel mappings changed since this thread last looked.
 */
void flush_tlb_mm_tt(struct mm_struct *mm)
{
	unsigned long latest;

	if (mm != current->mm)
		return;

	fix_range(mm, 0, STACK_TOP, 0);

	latest = atomic_read(&vmchange_seq);
	if (latest != current->thread.mode.tt.vm_seq) {
		current->thread.mode.tt.vm_seq = latest;
		flush_kernel_vm_range(start_vm, end_vm, 0);
	}
}
/*
 * Forcibly flush the current process's entire address space in SKAS
 * mode.  The upper bound depends on whether /proc/mm is available;
 * the final argument of 1 requests a forced fixup.
 */
void force_flush_all_skas(void)
{
	unsigned long top = proc_mm ? task_size : CONFIG_STUB_START;

	fix_range(current->mm, 0, top, 1);
}
/*
 * Forcibly flush the current process's entire address space in SKAS
 * mode, from 0 up to the host task size; the final argument of 1
 * requests a forced fixup.
 */
void force_flush_all_skas(void)
{
	struct mm_struct *mm = current->mm;

	fix_range(mm, 0, host_task_size, 1);
}
/*
 * Flush the whole TLB for an address space in SKAS mode: first the
 * kernel vm area, then the process range up to the host task size.
 */
void flush_tlb_mm_skas(struct mm_struct *mm)
{
	flush_tlb_kernel_vm_skas();

	fix_range(mm, 0, host_task_size, 0);
}
/*
 * Forcibly flush everything in TT mode: the current process's address
 * space up to STACK_TOP (forced fixup), followed by the common flush of
 * the kernel vm range.
 */
void force_flush_all_tt(void)
{
	struct mm_struct *mm = current->mm;

	fix_range(mm, 0, STACK_TOP, 1);
	flush_tlb_kernel_range_common(start_vm, end_vm);
}
/*
 * Forcibly flush everything in TT mode: the current process's address
 * space up to STACK_TOP (forced fixup), followed by the kernel vm range.
 */
void force_flush_all_tt(void)
{
	struct mm_struct *mm = current->mm;

	fix_range(mm, 0, STACK_TOP, 1);
	flush_kernel_vm_range(start_vm, end_vm, 0);
}