void rthal_nmi_arm(unsigned long delay)
{
	rthal_nmi_wd_t *wd = &rthal_nmi_wds[rthal_processor_id()];

	if (!wd->perfctr_msr)
		return;

	/* If the Linux watchdog could tick now, make it tick now. */
	if ((long long)(rthal_rdtsc() - wd->next_linux_check) >= 0) {
		unsigned long flags;

		/* Protect against an interrupt handler calling
		   rthal_nmi_arm concurrently. */
		rthal_local_irq_save(flags);
		wd->armed = 0;
		wmb();
		wrmsrl(wd->perfctr_msr, -1);
		asm("nop");
		rthal_local_irq_restore(flags);
	}

	wd->tick_date = rthal_rdtsc() + (delay - rthal_maxlat_tsc);
	wmb();
	wrmsrl(wd->perfctr_msr, 0 - delay);
	wmb();
	wd->armed = 1;
}
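/*
 * Hypothetical usage sketch, not part of the original source: a one-shot
 * timer path arms the NMI watchdog just before programming the shot, and
 * the timer handler disarms it once the shot has arrived in time.  The
 * function names and the delay_tsc parameter are illustrative only;
 * rthal_nmi_arm() and rthal_nmi_disarm() are the calls shown in this code.
 */
static void example_program_oneshot(unsigned long delay_tsc)
{
	rthal_nmi_arm(delay_tsc);	/* NMI fires if the shot is lost */
	/* ...program the hardware timer for delay_tsc cycles... */
}

static void example_timer_handler(void)
{
	rthal_nmi_disarm();		/* the shot arrived in time */
	/* ...service the tick... */
}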
static inline int do_exception_event(unsigned event, unsigned domid, void *data)
{
	if (domid == RTHAL_DOMAIN_ID) {
		rthal_realtime_faults[rthal_processor_id()][event]++;

		if (rthal_trap_handler != NULL &&
		    rthal_trap_handler(event, domid, data) != 0)
			return RTHAL_EVENT_STOP;
	}

	return RTHAL_EVENT_PROPAGATE;
}
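/*
 * Hedged sketch of a handler compatible with the rthal_trap_handler call
 * above: returning nonzero makes do_exception_event() stop the event,
 * returning zero lets it propagate to Linux.  The handler name and its
 * installation (assignment to the rthal_trap_handler pointer) are
 * assumptions for illustration; this section only shows the pointer being
 * tested and invoked.
 */
static int example_trap_handler(unsigned event, unsigned domid, void *data)
{
	/* ...recover from or log the real-time domain fault... */
	return 1;	/* nonzero -> RTHAL_EVENT_STOP */
}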
void rthal_timer_notify_switch(enum clock_event_mode mode,
			       struct clock_event_device *cdev)
{
	if (rthal_processor_id() > 0)
		/*
		 * We assume all CPUs switch the same way, so we only
		 * track mode switches from the boot CPU.
		 */
		return;

	rthal_ktimer_saved_mode = mode;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static int rthal_nmi_watchdog_tick(struct pt_regs *regs)
#else /* Linux >= 2.6.19 */
static int rthal_nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
#endif /* Linux >= 2.6.19 */
{
	int cpu = rthal_processor_id();
	rthal_nmi_wd_t *wd = &rthal_nmi_wds[cpu];
	unsigned long long now;

	if (wd->armed) {
		if (rthal_rdtsc() - wd->tick_date < rthal_maxlat_tsc) {
			++wd->early_shots;
			wd->next_linux_check = wd->tick_date + rthal_maxlat_tsc;
		} else {
			printk("NMI early shots: %d\n", wd->early_shots);
			rthal_nmi_emergency(regs);
		}
	}

	now = rthal_rdtsc();

	if ((long long)(now - wd->next_linux_check) >= 0) {
		CALL_LINUX_NMI;
		do {
			wd->next_linux_check += RTHAL_CPU_FREQ;
		} while ((long long)(now - wd->next_linux_check) >= 0);
	}

	if (wd->perfctr_msr == MSR_P4_IQ_COUNTER0) {
		/*
		 * P4 quirks:
		 * - An overflown perfctr will assert its interrupt
		 *   until the OVF flag in its CCCR is cleared.
		 * - LVTPC is masked on interrupt and must be
		 *   unmasked by the LVTPC handler.
		 */
		wrmsr(MSR_P4_IQ_CCCR0, wd->p4_cccr_val, 0);
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	} else if (rthal_nmi_perfctr_msr == MSR_P6_PERFCTR0) {
		/*
		 * Only the P6-based Pentium M needs to re-unmask the
		 * APIC vector, but doing so does not hurt other P6
		 * variants.
		 */
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	}

	wrmsrl(wd->perfctr_msr, now - wd->next_linux_check);

	NMI_RETURN;
}
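/*
 * Note on the rearm arithmetic above (added for illustration): the
 * performance counter counts CPU cycles upward and raises an NMI when it
 * overflows past zero.  Writing `now - wd->next_linux_check`, a negative
 * value since the next check lies in the future, makes it overflow exactly
 * at next_linux_check.  Likewise, rthal_nmi_arm() writes `0 - delay` to
 * fire the NMI `delay` cycles later, and writes -1 when it wants an
 * immediate tick.
 */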
void pse51_schedule_lostage(int request, void *arg, size_t size)
{
	int cpuid = rthal_processor_id(), reqnum;
	struct pse51_lostageq_t *rq = &pse51_lostageq[cpuid];
	spl_t s;

	/* Signal the APC, to have it delegate signals to Linux. */
	splhigh(s);
	reqnum = rq->in;
	rq->req[reqnum].type = request;
	rq->req[reqnum].arg = arg;
	rq->req[reqnum].size = size;
	rq->in = (reqnum + 1) & (PSE51_LO_MAX_REQUESTS - 1);
	splexit(s);

	rthal_apc_schedule(pse51_lostage_apc);
}
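/*
 * Hedged sketch of the matching consumer, which this section does not
 * show: the APC handler presumably drains the per-CPU ring from an `out`
 * index up to `in`, using the same power-of-two mask as the producer
 * above.  The `out` field, the handler name and its signature are
 * assumptions made for illustration.
 */
static void example_lostage_handler(void *cookie)
{
	int cpuid = rthal_processor_id(), reqnum;
	struct pse51_lostageq_t *rq = &pse51_lostageq[cpuid];

	while ((reqnum = rq->out) != rq->in) {
		rq->out = (reqnum + 1) & (PSE51_LO_MAX_REQUESTS - 1);
		/* ...delegate rq->req[reqnum] (type/arg/size) to Linux... */
	}
}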
void rthal_nmi_disarm(void)
{
	rthal_nmi_wds[rthal_processor_id()].armed = 0;
}
static inline void rthal_disarm_decr(int disarmed)
{
	per_cpu(disarm_decr, rthal_processor_id()) = disarmed;
}
static void rthal_set_itv(void)
{
	rthal_itm_next[rthal_processor_id()] = ia64_get_itc();
	ia64_set_itv(irq_to_vector(rthal_tick_irq));
}
static void rthal_adjust_before_relay(unsigned irq, void *cookie)
{
	rthal_itm_next[rthal_processor_id()] = ia64_get_itc();
	rthal_propagate_irq(irq);
}