void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_kernel();
}
void __exit cleanup_one_kcs(struct kcs_info *to_clean)
{
	int           rv;
	unsigned long flags;

	if (!to_clean)
		return;

	/* Tell the timer and interrupt handlers that we are shutting
	   down. */
	spin_lock_irqsave(&(to_clean->kcs_lock), flags);
	spin_lock(&(to_clean->msg_lock));

	to_clean->stop_operation = 1;

	if (to_clean->irq != 0)
		free_irq(to_clean->irq, to_clean);

	if (to_clean->port) {
		printk(KERN_INFO
		       "ipmi_kcs: Releasing BMC @ port=0x%x\n",
		       to_clean->port);
		release_region(to_clean->port, 2);
	}
	if (to_clean->addr) {
		printk(KERN_INFO
		       "ipmi_kcs: Releasing BMC @ addr=0x%lx\n",
		       to_clean->physaddr);
		iounmap(to_clean->addr);
		release_mem_region(to_clean->physaddr, 2);
	}

	spin_unlock(&(to_clean->msg_lock));
	spin_unlock_irqrestore(&(to_clean->kcs_lock), flags);

	/* Wait until we know that we are out of any interrupt handlers
	   that might have been running before we freed the
	   interrupt. */
	synchronize_kernel();

	/* Wait for the timer to stop.  This avoids problems with race
	   conditions removing the timer here. */
	while (!to_clean->timer_stopped) {
		schedule_timeout(1);
	}

	rv = ipmi_unregister_smi(to_clean->intf);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_kcs: Unable to unregister device: errno=%d\n",
		       rv);
	}

	initialized = 0;

	kfree(to_clean->kcs_sm);
	kfree(to_clean);
}
static void timer_stop(void)
{
	enable_timer_nmi_watchdog();
	unset_nmi_callback();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	synchronize_sched();  /* Allow already-started NMIs to complete. */
#else
	synchronize_kernel();
#endif
#ifdef RRPROFILE
	disable_poll_idle();
#endif // RRPROFILE
}
static void timer_stop(void)
{
	enable_timer_nmi_watchdog();
	unset_nmi_callback();
	synchronize_kernel();
}