/*
 * Arm the per-CPU NMI watchdog so the performance counter overflows
 * (and raises an NMI) after 'delay' TSC ticks.
 *
 * @param delay  Watchdog timeout, expressed in TSC ticks.
 *
 * Ordering matters throughout: 'armed' is cleared/set around the MSR
 * writes with wmb() barriers so the NMI handler never sees a stale
 * combination of 'armed' and 'tick_date'.
 */
void rthal_nmi_arm(unsigned long delay)
{
	rthal_nmi_wd_t *wd = &rthal_nmi_wds[rthal_processor_id()];

	/* Watchdog not set up on this CPU: nothing to arm. */
	if (!wd->perfctr_msr)
		return;

	/* If linux watchdog could tick now, make it tick now. */
	if ((long long) (rthal_rdtsc() - wd->next_linux_check) >= 0) {
		unsigned long flags;

		/* Protect from an interrupt handler calling rthal_nmi_arm. */
		rthal_local_irq_save(flags);
		/* Disarm first so the NMI triggered below is treated as a
		   Linux tick, not as a watchdog expiry. */
		wd->armed = 0;
		wmb();
		/* Writing -1 presumably overflows the counter at once,
		   firing the NMI immediately — TODO confirm against the
		   perfctr programming model. */
		wrmsrl(wd->perfctr_msr, -1);
		asm("nop");
		rthal_local_irq_restore(flags);
	}
	/* Earliest date at which an NMI is considered a genuine expiry;
	   shots arriving before tick_date + rthal_maxlat_tsc are "early". */
	wd->tick_date = rthal_rdtsc() + (delay - rthal_maxlat_tsc);
	wmb();
	/* Program the counter to overflow after 'delay' ticks. */
	wrmsrl(wd->perfctr_msr, 0 - delay);
	wmb();
	wd->armed = 1;
}
/*
 * Estimate the cost of programming a one-shot timer event.
 *
 * Measures, with interrupts globally locked out, the TSC time taken by
 * 100 consecutive calls to rthal_timer_program_shot(), then returns the
 * per-call average (the divisor keeps the original +5 fudge).
 *
 * @return Average programming cost in TSC ticks.
 */
unsigned long rthal_timer_calibrate(void)
{
	unsigned long period, flags;
	rthal_time_t start, elapsed;
	int n;

	period = RTHAL_COMPAT_TIMERFREQ / HZ;

	flags = rthal_critical_enter(NULL);

	/* Warm up with one shot before timing the loop. */
	rthal_timer_program_shot(period);

	start = rthal_rdtsc();
	for (n = 0; n < 100; n++)
		rthal_timer_program_shot(period);
	elapsed = rthal_rdtsc() - start;

	rthal_critical_exit(flags);

#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
	/* Reset the max trace, since it contains the calibration time now. */
	rthal_trace_max_reset();
#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */

	return rthal_ulldiv(elapsed, n + 5, NULL);
}
/*
 * NMI handler for the watchdog performance counter.
 *
 * Decides whether this NMI is an early shot (rearm and keep waiting),
 * a genuine watchdog expiry (panic path via rthal_nmi_emergency()), or
 * a periodic Linux-side check, then reprograms the counter for the
 * next deadline.
 *
 * @param regs    Interrupted register state, forwarded on expiry.
 * @param reason  NMI reason code (unused here).
 *
 * NOTE(review): the matching #if for the #endif below selects an
 * alternate signature on older kernels — it lies outside this chunk.
 */
static int rthal_nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
#endif /* Linux >= 2.6.19 */
{
	int cpu = rthal_processor_id();
	rthal_nmi_wd_t *wd = &rthal_nmi_wds[cpu];
	unsigned long long now;

	if (wd->armed) {
		if (rthal_rdtsc() - wd->tick_date < rthal_maxlat_tsc) {
			/* Counter overflowed before the programmed deadline:
			   count it and push the next Linux check past the
			   real deadline. */
			++wd->early_shots;
			wd->next_linux_check = wd->tick_date + rthal_maxlat_tsc;
		} else {
			/* Genuine expiry: report and take the emergency
			   path (does not return normally — presumably). */
			printk("NMI early shots: %d\n", wd->early_shots);
			rthal_nmi_emergency(regs);
		}
	}

	now = rthal_rdtsc();

	/* Wrap-safe signed comparison: has the Linux check date passed? */
	if ((long long)(now - wd->next_linux_check) >= 0) {
		CALL_LINUX_NMI;
		/* Catch up in whole RTHAL_CPU_FREQ periods so the next
		   check lands in the future. */
		do {
			wd->next_linux_check += RTHAL_CPU_FREQ;
		} while ((long long)(now - wd->next_linux_check) >= 0);
	}

	if (wd->perfctr_msr == MSR_P4_IQ_COUNTER0) {
		/*
		 * P4 quirks:
		 * - An overflown perfctr will assert its interrupt
		 * until the OVF flag in its CCCR is cleared.
		 * - LVTPC is masked on interrupt and must be
		 * unmasked by the LVTPC handler.
		 */
		wrmsr(MSR_P4_IQ_CCCR0, wd->p4_cccr_val, 0);
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	} else if (rthal_nmi_perfctr_msr == MSR_P6_PERFCTR0) {
		/* Only P6 based Pentium M need to re-unmask
		 * the apic vector but it doesn't hurt
		 * other P6 variant */
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	}
	/* Rearm: negative count so the counter overflows exactly at
	   next_linux_check. */
	wrmsrl(wd->perfctr_msr, now - wd->next_linux_check);
	NMI_RETURN;
}
unsigned long rthal_timer_calibrate(void) { unsigned long flags; rthal_time_t t, dt; int i, count; rthal_local_irq_save_hw(flags); /* Read the current latch value, whatever the current mode is. */ outb_p(0x00, PIT_MODE); count = inb_p(PIT_CH0); count |= inb_p(PIT_CH0) << 8; if (count > LATCH) /* For broken VIA686a hardware. */ count = LATCH - 1; /* * We only want to measure the average time needed to program * the next shot, so we basically don't care about the current * PIT mode. We just rewrite the original latch value at each * iteration. */ t = rthal_rdtsc(); for (i = 0; i < 20; i++) { outb(count & 0xff, PIT_CH0); outb(count >> 8, PIT_CH0); } dt = rthal_rdtsc() - t; rthal_local_irq_restore_hw(flags); #ifdef CONFIG_IPIPE_TRACE_IRQSOFF /* Reset the max trace, since it contains the calibration time now. */ rthal_trace_max_reset(); #endif /* CONFIG_IPIPE_TRACE_IRQSOFF */ return rthal_ulldiv(dt, i, NULL); }
/*
 * Estimate the cost of programming a one-shot timer event.
 *
 * Times 10000 consecutive calls to rthal_timer_program_shot() inside a
 * critical section and scales the elapsed TSC time by 100000/CPU_FREQ,
 * exactly as the original implementation did.
 *
 * @return Scaled calibration value.
 */
unsigned long rthal_timer_calibrate(void)
{
	unsigned long flags, shot_delay;
	rthal_time_t start, elapsed;
	int n;

	shot_delay = RTHAL_CPU_FREQ;	/* 1s */

	flags = rthal_critical_enter(NULL);

	start = rthal_rdtsc();
	for (n = 0; n < 10000; n++)
		rthal_timer_program_shot(shot_delay);
	elapsed = rthal_rdtsc() - start;

	rthal_critical_exit(flags);

	return rthal_imuldiv(elapsed, 100000, RTHAL_CPU_FREQ);
}
/*
 * Refresh every CPU's watchdog slot: copy the global perfctr MSR and
 * P4 CCCR settings in, disarm the slot, and push the next Linux-side
 * check one full second (RTHAL_CPU_FREQ TSC ticks) into the future.
 */
static void rthal_touch_nmi_watchdog(void)
{
	unsigned long long deadline;
	int cpu;

	/* Same deadline for all CPUs, computed once. */
	deadline = rthal_rdtsc() + RTHAL_CPU_FREQ;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		rthal_nmi_wd_t *wd = &rthal_nmi_wds[cpu];

		wd->perfctr_msr = rthal_nmi_perfctr_msr;
		wd->p4_cccr_val = rthal_nmi_p4_cccr_val;
		wd->armed = 0;
		wd->next_linux_check = deadline;
	}
}