void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
}
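/*
 * For reference, a minimal sketch of the helpers assumed above, modeled on
 * mainline arch/arm/include/asm/smp_plat.h and arch/arm/kernel/smp_tlb.c
 * (not part of this listing; exact definitions vary by kernel version).
 * Software only needs to broadcast TLB maintenance when the CPU does not
 * broadcast it in hardware, which is reported in ID_MMFR3[15:12].
 */
static inline int tlb_ops_need_broadcast(void)
{
        if (!is_smp())
                return 0;

        /* Maintenance-broadcast field < 2: no hardware TLB broadcast */
        return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}

/* IPI handler: each targeted CPU simply performs the local flush. */
static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}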
void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
        else
                local_flush_tlb_mm(mm);
}
void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                local_flush_tlb_mm(mm);
        broadcast_tlb_mm_a15_erratum(mm);
}
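/*
 * The second flush_tlb_mm() variant above adds the Cortex-A15 erratum
 * broadcast and uses the generic on_each_cpu_mask(mask, func, info, wait)
 * helper, while the first predates it and passes the cpumask as the last
 * argument to an older ARM-local helper. The IPI handler both rely on is
 * assumed to look roughly like this (modeled on mainline smp_tlb.c):
 */
static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}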
void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
}
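/*
 * The per-CPU handlers receive their parameters through a small argument
 * block. A sketch of that structure and of the kernel-page handler, modeled
 * on mainline arch/arm/kernel/smp_tlb.c (assumed, not shown in the listing):
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}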
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_page(vma, uaddr);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
}
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_range(vma, start, end);
}
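/*
 * The remaining per-CPU handlers used above unpack the same tlb_args block
 * and call the corresponding local_* primitive (again a sketch modeled on
 * mainline smp_tlb.c, assumed rather than taken from this listing):
 */
static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}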
/*
 * The following variants add L4Linux (CONFIG_L4) hooks, which hand the
 * flush off to the L4 layer's unmap-sync calls, plus the Cortex-A15
 * erratum broadcast.
 */
void flush_tlb_all(void)
{
        if (IS_ENABLED(CONFIG_L4)) {
                l4x_unmap_sync_all();
                return;
        }
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
        broadcast_tlb_a15_erratum();
}
void flush_tlb_mm(struct mm_struct *mm)
{
        if (IS_ENABLED(CONFIG_L4)) {
                l4x_unmap_sync_mm(mm);
                l4x_del_task(mm);
                return;
        }
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                local_flush_tlb_mm(mm);
        broadcast_tlb_mm_a15_erratum(mm);
}
void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (IS_ENABLED(CONFIG_L4)) {
                l4x_unmap_sync_all();
                return;
        }
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
        broadcast_tlb_a15_erratum();
}
/**
 * 20131026
 * With CONFIG_SMP, flush the TLB for the kernel address space
 * between start and end.
 **/
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /** 20131026
         * Case where the TLB operation must be broadcast to the other CPUs
         * (i.e. the operation is only reflected in local structures).
         **/
        if (tlb_ops_need_broadcast()) {
                /** 20131026
                 * Fill in a tlb_args structure.
                 * This is a kernel range, so the vm_area_struct field is left unset.
                 **/
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (IS_ENABLED(CONFIG_L4)) {
                l4x_unmap_sync_mm(vma->vm_mm);
                l4x_unmap_page(vma->vm_mm, uaddr);
                return;
        }
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
        } else
                local_flush_tlb_page(vma, uaddr);
        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
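/*
 * broadcast_tlb_a15_erratum() / broadcast_tlb_mm_a15_erratum() refer to the
 * workaround for Cortex-A15 erratum 798181, where TLBI/DSB operations are
 * not correctly broadcast to other CPUs. A simplified sketch of the
 * all-CPUs case, loosely modeled on mainline; erratum_a15_798181() and
 * dummy_flush_tlb_a15_erratum() are assumed to be provided elsewhere, and
 * the mm-scoped variant is analogous but targets only the CPUs the mm has
 * run on.
 */
static void ipi_flush_tlb_a15_erratum(void *arg)
{
        /* The remote CPUs only need to execute a barrier. */
        dmb();
}

static void broadcast_tlb_a15_erratum(void)
{
        if (!erratum_a15_798181())
                return;

        /* Dummy TLBI plus DSB on this CPU, then kick the other CPUs. */
        dummy_flush_tlb_a15_erratum();
        smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}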