void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        if (mm->cpu_vm_mask == (1 << smp_processor_id()))
                local_flush_sig_insns(mm, insn_addr);
        else
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns),
                    (unsigned long) mm, insn_addr);
}
void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                if (mm->cpu_vm_mask != (1 << smp_processor_id()))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm),
                            (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        cpumask_t cpu_mask = mm->cpu_vm_mask;

        cpu_clear(smp_processor_id(), cpu_mask);
        if (!cpus_empty(cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns),
                    (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        /* trigger one IPI per CPU in the mask */
        for_each_cpu(cpu, mask)
                BTFIXUP_CALL(smp_ipi_mask_one)(cpu);
}
void smp_send_reschedule(int cpu)
{
        /*
         * CPU model dependent way of implementing IPI generation targeting
         * a single CPU. The trap handler needs only to do trap entry/return
         * to call schedule.
         */
        BTFIXUP_CALL(smp_ipi_resched)(cpu);
}
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        cpumask_t cpu_mask;

        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
        if (!cpumask_empty(&cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns),
                    (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}
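/*
 * The smp_flush_sig_insns() variants above perform the same operation
 * against successive cpumask APIs: the raw cpu_vm_mask word, a cpumask_t
 * copy handled with cpu_clear()/cpus_empty(), and finally
 * cpumask_copy()/cpumask_clear_cpu()/cpumask_empty() on mm_cpumask(mm).
 * In every case the point is the same: skip the cross-call when no other
 * CPU has this mm loaded.
 */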
void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                         unsigned long end)
{
        if (mm->context != NO_CONTEXT) {
                if (mm->cpu_vm_mask != (1 << smp_processor_id()))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range),
                            (unsigned long) mm, start, end);
                local_flush_tlb_range(mm, start, end);
        }
}
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                if (mm->cpu_vm_mask != (1 << smp_processor_id()))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page),
                            (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
}
void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;

                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm),
                            (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}
void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask;

                cpumask_copy(&cpu_mask, mm_cpumask(mm));
                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
                if (!cpumask_empty(&cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm),
                            (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                if (mm->cpu_vm_mask != (1 << smp_processor_id())) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm),
                            (unsigned long) mm);
                        if (atomic_read(&mm->mm_users) == 1 &&
                            current->active_mm == mm)
                                mm->cpu_vm_mask = (1 << smp_processor_id());
                }
                local_flush_tlb_mm(mm);
        }
}
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;

                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page),
                            (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
}
void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask;

                cpumask_copy(&cpu_mask, mm_cpumask(mm));
                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
                if (!cpumask_empty(&cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page),
                            (unsigned long) vma, page);
                local_flush_cache_page(vma, page);
        }
}
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = *mm_cpumask(mm);

                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range),
                            (unsigned long) vma, start, end);
                local_flush_tlb_range(vma, start, end);
        }
}
void smp_flush_page_to_ram(unsigned long page)
{
        /* Current theory is that those who call this are the ones
         * who have just dirtied their cache with the page's contents
         * in kernel space, therefore we only run this on the local cpu.
         *
         * XXX This experiment failed, research further... -DaveM
         */
#if 1
        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
        local_flush_page_to_ram(page);
}
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;

                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask)) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm),
                            (unsigned long) mm);
                        if (atomic_read(&mm->mm_users) == 1 &&
                            current->active_mm == mm)
                                mm->cpu_vm_mask =
                                        cpumask_of_cpu(smp_processor_id());
                }
                local_flush_tlb_mm(mm);
        }
}
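/*
 * Note on the mm_users == 1 case in smp_flush_tlb_mm() above: once the
 * flush has been broadcast, an mm with a single user that is running on
 * this CPU can shrink cpu_vm_mask back to just the local CPU, so later
 * flushes for it skip the cross-call entirely until another CPU switches
 * the mm in again and re-sets its bit.
 */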
void smp_flush_tlb_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
        local_flush_tlb_all();
}
void arch_send_call_function_single_ipi(int cpu)
{
        /* trigger one IPI single call on one CPU */
        BTFIXUP_CALL(smp_ipi_single)(cpu);
}
void smp_flush_cache_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
        local_flush_cache_all();
}
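/*
 * A minimal standalone sketch (not kernel code) of the mask handling the
 * flush routines above share: copy the set of CPUs that use the mm, drop
 * the local CPU, cross-call only if any remain, then flush locally.
 * flush_mm_sketch(), remote_flush(), local_flush() and my_cpu are
 * hypothetical names used only for this illustration; a plain
 * unsigned long stands in for cpumask_t.
 */
#include <stdio.h>

static void remote_flush(unsigned long mask)
{
        printf("cross-call flush to CPUs 0x%lx\n", mask);
}

static void local_flush(void)
{
        printf("flush on the local CPU\n");
}

static void flush_mm_sketch(unsigned long cpu_vm_mask, int my_cpu)
{
        unsigned long others = cpu_vm_mask & ~(1UL << my_cpu);

        if (others)                     /* some other CPU has this mm */
                remote_flush(others);
        local_flush();                  /* the local CPU always flushes */
}

int main(void)
{
        flush_mm_sketch(0x5UL, 0);      /* CPUs 0 and 2: cross-call CPU 2 */
        flush_mm_sketch(0x1UL, 0);      /* only CPU 0: no cross-call */
        return 0;
}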