/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(init_mm.pgd);
}
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
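/*
 * Context for the leave_mm() variants above: mmu_state flips between
 * TLBSTATE_OK and TLBSTATE_LAZY when a CPU parks on a kernel thread without
 * switching page tables, which is what lets a remote flush fall back to
 * leave_mm(). The helper below is a minimal illustrative sketch of that
 * transition, modelled on the x86-64 enter_lazy_tlb() of this era; it is not
 * part of the excerpt, and write_pda() is assumed to exist alongside the
 * read_pda() used above.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	/* Mark this CPU lazy so a remote flush can choose leave_mm() instead. */
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}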
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
	__flush_tlb();
}
asmlinkage void smp_invalidate_interrupt(void)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */
	if (flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	cpu_clear(cpu, flush_cpumask);

out:
	put_cpu_no_resched();
}
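/*
 * Sender-side sketch for the handler above: before raising the IPI, the
 * initiating CPU publishes the mm/va pair in the global flush_mm, flush_va
 * and flush_cpumask that smp_invalidate_interrupt() consumes, then spins
 * until every target has cleared itself from the mask. This is a hedged
 * reconstruction rather than the exact source: tlbstate_lock, send_IPI_mask()
 * and INVALIDATE_TLB_VECTOR are assumed names for the serialising lock, the
 * IPI primitive and the single flush vector used by this older scheme.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	/* Only one initiator may use the global slots at a time. */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);

	/* Kick only the CPUs that actually have this mm loaded. */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	/* Each target clears its bit in the handler; wait for all of them. */
	while (!cpus_empty(flush_cpumask))
		cpu_relax();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}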
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}
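/*
 * do_flush_tlb_all() above is written as an SMP callback: a full flush is
 * requested by running it on every CPU. A minimal sketch of that caller,
 * assuming the four-argument on_each_cpu(func, info, retry, wait) signature
 * used by kernels of this vintage (later kernels dropped the retry flag):
 */
void flush_tlb_all(void)
{
	/* Run do_flush_tlb_all() on every online CPU and wait for completion. */
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}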
void __kprobes nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum;
	int touched = 0;

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer not to. */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				local_set(&__get_cpu_var(alert_counter), 0);
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
		}
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
			/*
			 * For Intel based architectural perfmon
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	}
}
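/*
 * The per-CPU nmi_touch flag tested above is how other code asks the
 * watchdog to hold off, e.g. around long but legitimate stalls. Below is a
 * sketch of such a helper, assuming the per_cpu() accessor and a companion
 * softlockup touch; the exact body varied between kernel versions.
 */
void touch_nmi_watchdog(void)
{
	int i;

	/*
	 * Tell every CPU to ignore its next missing-timer-interrupt sample;
	 * we cannot reset remote alert counters directly because the
	 * increment in nmi_watchdog_tick() is not atomic.
	 */
	for (i = 0; i < NR_CPUS; i++)
		per_cpu(nmi_touch, i) = 1;

	touch_softlockup_watchdog();
}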
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */
	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
	add_pda(irq_tlb_count, 1);
}
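/*
 * Sender-side sketch for the per-sender handler above: each initiator picks
 * a flush_state slot keyed by its own CPU number, fills flush_mm, flush_va
 * and flush_cpumask there, and raises INVALIDATE_TLB_VECTOR_START + sender
 * so the receiving CPU can locate the right slot from the vector number.
 * This is a reconstruction under assumed names: NUM_INVALIDATE_TLB_VECTORS,
 * the tlbstate_lock member inside union smp_flush_state and send_IPI_mask()
 * do not appear in the excerpt above.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller is expected to have disabled preemption. */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/* Serialise against other initiators hashed to the same slot. */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/* The vector offset tells the receiver which flush_state to read. */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	/* Targets clear their bits in smp_invalidate_interrupt(). */
	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}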
void nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum, cpu;

	cpu = safe_smp_processor_id();
	sum = read_pda(apic_timer_irqs);

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_BAD) {
				alert_counter[cpu] = 0;
				return;
			}
			spin_lock(&nmi_print_lock);
			/*
			 * We are in trouble anyway, let's at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu);
			show_registers(regs);
			if (panic_on_timeout || panic_on_oops)
				panic("nmi watchdog");
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&nmi_print_lock);
			bust_spinlocks(0);
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr)
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}
void nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum, cpu;

	cpu = safe_smp_processor_id();
	sum = read_pda(apic_timer_irqs);

	if (nmi_show_regs[cpu]) {
		nmi_show_regs[cpu] = 0;
		spin_lock(&nmi_print_lock);
		show_regs(regs);
		spin_unlock(&nmi_print_lock);
	}

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			int i;

			for (i = 0; i < NR_CPUS; i++)
				nmi_show_regs[i] = 1;
		}
		if (alert_counter[cpu] == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				alert_counter[cpu] = 0;
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU%d", regs);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr)
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}