/*
 * TLB-flush IPI handler: invalidate this CPU's TLB on behalf of the
 * CPU that initiated the flush, then clear our bit in flush_cpumask
 * to signal completion to the initiator (which presumably waits on
 * that mask — the wait side is not visible in this file).
 */
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	/* Spurious delivery: we are not one of the targeted CPUs. */
	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * its staying as a return
	 *
	 * BUG();
	 */

	/*
	 * Only flush if the mm being invalidated is the one this CPU
	 * currently has active; otherwise our TLB holds no entries for
	 * it that matter (lazy mode is handled by leave_mm below).
	 */
	if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
		if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
			/* Full flush vs. single-page flush, per flush_va. */
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			/* Lazy TLB state: detach from the mm instead. */
			leave_mm(cpu);
	}
	ack_APIC_irq();
	/*
	 * The barriers bracket the bit clear so the flush work above is
	 * visible before the initiator sees our bit drop out of
	 * flush_cpumask, and the clear itself is not reordered.
	 */
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
	inc_irq_stat(irq_tlb_count);
}
/*
 * Leave the given paravirt lazy mode on this CPU, returning the
 * per-CPU state to PARAVIRT_LAZY_NONE.
 *
 * It is a bug to leave a mode other than the one currently active,
 * or to call this with preemption enabled (the per-CPU access must
 * stay on one CPU).
 */
void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(preemptible());
	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);

	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}
/*
 * Enter the given paravirt lazy mode on this CPU.
 *
 * Lazy modes do not nest: entering while a mode is already active is
 * a bug, as is calling this with preemption enabled.
 */
static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(preemptible());
	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	x86_write_percpu(paravirt_lazy_mode, mode);
}
/*
 * Per-CPU callback for a global TLB flush: flush everything locally,
 * and if this CPU is in lazy TLB state, detach from the borrowed mm
 * as well.  @info is unused (cross-call callback signature).
 */
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();

	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
void leave_mm(int cpu)
{
	/* Must only be called while in lazy TLB state. */
	BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
	/* Drop this CPU from the mm's CPU mask... */
	cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
	/* ...then switch to the kernel page tables, which always exist. */
	load_cr3(swapper_pg_dir);
}
/* Report which paravirt lazy mode, if any, this CPU is currently in. */
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	return x86_read_percpu(paravirt_lazy_mode);
}