Example #1
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of crashing cpu. Will be used in NMI callback. */
	crashing_cpu = safe_smp_processor_id();
	nmi_shootdown_cpus();
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
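The comment above notes that crashing_cpu "will be used in NMI callback", but the callback itself is not part of this example. For context, a minimal sketch of what that callback looked like on x86_64 kernels of this vintage follows; it is modeled on the historical kdump path, and exact names and details vary between kernel versions.
/* Sketch only: the NMI callback referenced by the comment above, modeled on
 * the historical x86_64 kdump path; details differ between kernel versions. */
static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
	/*
	 * Do nothing if this handler fires on the crashing cpu itself,
	 * otherwise the system would hang here.
	 */
	if (cpu == crashing_cpu)
		return 1;
	local_irq_disable();

	crash_save_cpu(regs, cpu);	/* record this cpu's register state */
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	for (;;)
		asm("hlt");

	return 1;
}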
Example #2
static rx_handler_result_t drop_packet(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct netif_info *pt_netif = NULL;
	int this_cpu = safe_smp_processor_id();
	unsigned long pkt_cnt = 0, byte_cnt = 0;

	rcu_read_lock();
	/* netif_info on a net_device may be freed dynamically by ourselves */
	pt_netif = netif_info_get_rcu(skb->dev);
	if (pt_netif) {
		pt_netif->pkt_drop_cnt[this_cpu]++;
		pt_netif->byte_drop_cnt[this_cpu] += skb->len;
		pkt_cnt = pt_netif->pkt_drop_cnt[this_cpu];
		byte_cnt = pt_netif->byte_drop_cnt[this_cpu];
	}

	rcu_read_unlock();
	
	kfree_skb(*pskb);
	*pskb = NULL;

	if (pt_netif)
		DBG_PRINT("==cpu=%d, drop_pkt_cnt=%lu, drop_byte_cnt=%lu", this_cpu, pkt_cnt, byte_cnt);
	
	return RX_HANDLER_CONSUMED;
}
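drop_packet() is an rx_handler, so it does nothing until it is attached to a device. A minimal registration sketch follows, assuming the standard netdev_rx_handler_register() API; the wrapper function name is hypothetical and error handling is reduced to the bare minimum.
/* Hypothetical helper showing how the rx_handler above could be attached.
 * netdev_rx_handler_register() must be called under the RTNL lock. */
static int attach_drop_handler(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, drop_packet, NULL);
	rtnl_unlock();

	return err;
}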
Example #3
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
Example #4
/*
 * Halt all other CPUs, calling the specified function on each of them
 *
 * This function can be used to halt all other CPUs on crash
 * or emergency reboot time. The function passed as parameter
 * will be called inside a NMI handler on all CPUs.
 */
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
	unsigned long msecs;
	local_irq_disable();

	/* Make a note of crashing cpu. Will be used in NMI callback. */
	crashing_cpu = safe_smp_processor_id();

	shootdown_callback = callback;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
				 NMI_FLAG_FIRST, "crash"))
		return;		/* Return what? */
	/*
	 * Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
}
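In the kernel's kdump path this function is handed a callback that saves each CPU's register state before halting it. A rough sketch, modeled on arch/x86/kernel/crash.c of the same era; shown for context, not as the exact source.
/* Context sketch: how the kdump code drives nmi_shootdown_cpus(); modeled
 * on the historical arch/x86/kernel/crash.c, details vary by version. */
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);	/* runs on each shot-down cpu, in NMI context */
	disable_local_APIC();
}

static void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();	/* finally, the crashing cpu's own APIC */
}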
Example #5
static void smp_halt(void)
{
	int cpuid = safe_smp_processor_id(); 
	static int first_entry = 1;

	if (reboot_force)
		return;

	if (first_entry) {
		first_entry = 0;
		smp_call_function((void *)machine_restart, NULL, 1, 0);
	}
			
	smp_stop_cpu(); 

	/* AP calling this. Just halt */
	if (cpuid != boot_cpu_id) { 
		for (;;) 
			asm("hlt");
	}

	/* Wait for all other CPUs to have run smp_stop_cpu */
	while (!cpus_empty(cpu_online_map))
		rep_nop(); 
}
Example #6
static void smp_send_nmi_allbutself(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(safe_smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(mask, NMI_VECTOR);
}
Example #7
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC(1);
#endif
	if (mcp55_rewrite) {
		u32 cfg;
		printk(KERN_CRIT "REWRITING MCP55 CFG REG\n");
		/*
		 * We have a mcp55 chip on board which has been
		 * flagged as only sending legacy interrupts
		 * to the BSP, and we are crashing on an AP
		 * This is obviously bad, and we need to
		 * fix it up.  To do this we write to the
		 * flagged device, to the register at offset 0x74
		 * and we make sure that bit 2 and bit 15 are clear
		 * This forces legacy interrupts to be broadcast
		 * to all cpus
		 */
		pci_read_config_dword(mcp55_rewrite, 0x74, &cfg);
		cfg &= ~((1 << 2) | (1 << 15));
		printk(KERN_CRIT "CFG = %x\n", cfg);
		pci_write_config_dword(mcp55_rewrite, 0x74, cfg);
	}

#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif

	crash_save_cpu(regs, safe_smp_processor_id());
}
Example #8
asmlinkage void do_nmi(struct pt_regs *regs, long error_code)
{
	int cpu = safe_smp_processor_id();

	nmi_enter();
	add_pda(__nmi_count, 1);
	if (!nmi_callback(regs, cpu))
		default_do_nmi(regs);
	nmi_exit();
}
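The nmi_callback consulted here is a runtime hook: code elsewhere can override it to claim NMIs before default_do_nmi() runs. On x86_64 kernels of this era that was done with set_nmi_callback(); a minimal sketch, where the handler name is hypothetical.
/* Sketch of overriding the NMI hook that do_nmi() consults; assumes the
 * historical x86_64 set_nmi_callback()/unset_nmi_callback() API. */
static int my_nmi_handler(struct pt_regs *regs, int cpu)
{
	/* Return nonzero to claim the NMI and skip default_do_nmi(). */
	return 0;
}

static void install_nmi_hook(void)
{
	set_nmi_callback(my_nmi_handler);
}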
Example #9
void die_nmi(char *str, struct pt_regs *regs)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(str, safe_smp_processor_id());
	show_registers(regs);
	if (panic_on_timeout || panic_on_oops)
		panic("nmi watchdog");
	printk("console shuts up ...\n");
	oops_end(flags);
	do_exit(SIGSEGV);
}
Example #10
unsigned long oops_begin(void)
{
	int cpu = safe_smp_processor_id();
	unsigned long flags;

	/* racy, but better than risking deadlock. */
	local_irq_save(flags);
	if (!spin_trylock(&die_lock)) { 
		if (cpu == die_owner) 
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
	}
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
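oops_begin() is paired with oops_end(), which undoes the state set up here and optionally panics. Its historical counterpart looked approximately like the sketch below; treat it as illustrative rather than the exact source.
/* Approximate counterpart to oops_begin() above, for context only;
 * based on the historical x86_64 implementation. */
void oops_end(unsigned long flags)
{
	die_owner = -1;
	bust_spinlocks(0);
	/* Release die_lock and restore the irq state saved in oops_begin(). */
	spin_unlock_irqrestore(&die_lock, flags);
	if (panic_on_oops)
		panic("Oops");
}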
Example #11
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}
Example #12
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = !user_mode(regs);
	unsigned long rsp;
	const int cpu = safe_smp_processor_id(); 
	struct task_struct *cur = cpu_pda[cpu].pcurrent; 

	rsp = regs->rsp;

	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, cur->thread_info, cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {

		printk("Stack: ");
		show_stack(NULL, (unsigned long*)rsp);

		printk("\nCode: ");
		if(regs->rip < PAGE_OFFSET)
			goto bad;

		for(i=0;i<20;i++)
		{
			unsigned char c;
			if(__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
Example #13
void nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum, cpu;

	cpu = safe_smp_processor_id();
	sum = read_pda(apic_timer_irqs);
	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_BAD) { 
				alert_counter[cpu] = 0; 
				return;
			} 
			spin_lock(&nmi_print_lock);
			/*
			 * We are in trouble anyway, let's at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu);
			show_registers(regs);
			if (panic_on_timeout || panic_on_oops)
				panic("nmi watchdog");
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&nmi_print_lock);
			bust_spinlocks(0);
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr)
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}
Example #14
static void native_nmi_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			return;

		/* sync above data before sending NMI */
		wmb();

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
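The smp_stop_nmi_callback() registered above is not shown in this example. For context, the mainline version from the same period looks roughly like this; note the guard that keeps the stopping CPU from shooting itself down, since the handler is registered there too.
/* Context sketch of the NMI callback registered above, modeled on the
 * mainline smp_stop_nmi_callback() in arch/x86/kernel/smp.c. */
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* The handler is registered on the stopping cpu as well; ignore
	 * the NMI there so we do not stop ourselves. */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	stop_this_cpu(NULL);	/* mark this cpu offline and halt it */

	return NMI_HANDLED;
}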
Example #15
void nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum, cpu;

	cpu = safe_smp_processor_id();
	sum = read_pda(apic_timer_irqs);
	if (nmi_show_regs[cpu]) {
		nmi_show_regs[cpu] = 0;
		spin_lock(&nmi_print_lock);
		show_regs(regs);
		spin_unlock(&nmi_print_lock);
	}
	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			int i;

			for (i = 0; i < NR_CPUS; i++)
				nmi_show_regs[i] = 1;
		}
		if (alert_counter[cpu] == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				alert_counter[cpu] = 0; 
				return;
			} 
			die_nmi("NMI Watchdog detected LOCKUP on CPU%d", regs);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr)
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}
Example #16
void show_stack(struct task_struct *tsk, unsigned long *rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = safe_smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);    

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;
	}

	stack = rsp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n       ");
		printk("%016lx ", *stack++);
		touch_nmi_watchdog();
	}
	show_trace((unsigned long *)rsp);
}
Example #17
void __init time_init_hook(void)
{
	irq0.mask = cpumask_of_cpu(safe_smp_processor_id());
	setup_irq(0, &irq0);
}
Example #18
void show_trace(unsigned long *stack)
{
	unsigned long addr;
	const unsigned cpu = safe_smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda[cpu].irqstackptr;
	int i;
	unsigned used = 0;

	printk("\nCall Trace:");

#define HANDLE_STACK(cond) \
	do while (cond) { \
		addr = *stack++; \
		if (kernel_text_address(addr)) { \
			/* \
			 * If the address is either in the text segment of the \
			 * kernel, or in the region which contains vmalloc'ed \
			 * memory, it *may* be the address of a calling \
			 * routine; if so, print it so that someone tracing \
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
			i += printk_address(addr); \
			if (i > 50) { \
				printk("\n       "); \
				i = 0; \
			} \
			else \
				i += printk(" "); \
		} \
	} while (0)

	for (i = 0; ; ) {
		const char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			i += printk(" <%s> ", id);
			HANDLE_STACK (stack < estack_end);
			i += printk(" <EOE> ");
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				i += printk(" <IRQ> ");
				HANDLE_STACK (stack < irqstack_end);
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				i += printk(" <EOI> ");
				continue;
			}
		}
		break;
	}

	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
#undef HANDLE_STACK
	printk("\n");
}
Example #19
static void generic_machine_check(struct pt_regs *regs, long error_code)
{
	int recover = 1;
	u32 alow, ahigh, high, low;
	u32 mcgstl, mcgsth;
	int i;
	struct notifier_mc_err mc_err;

	rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
	if (mcgstl & (1<<0))	/* Recoverable ? */
		recover = 0;

	/* Make sure unrecoverable MCEs reach the console */
	if (recover & 3)
		oops_in_progress++;

	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
	       smp_processor_id(), mcgsth, mcgstl);

	if (regs && (mcgstl & 2))
		printk(KERN_EMERG "RIP <%02lx>:%016lx RSP %016lx\n",
		       regs->cs, regs->rip, regs->rsp);

	for (i = 0; i < banks; i++) {
		if ((1UL<<i) & ignored_banks)
			continue;

		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
		if (high & (1<<31)) {
			memset(&mc_err, 0x00, sizeof(mc_err));
			mc_err.cpunum = safe_smp_processor_id();
			mc_err.banknum = i;
			mc_err.mci_status = ((u64)high << 32) | low;
			if (high & (1<<29))
				recover |= 1;
			if (high & (1<<25))
				recover |= 2;
			printk(KERN_EMERG "Bank %d: %08x%08x", i, high, low);
			high &= ~(1<<31);
			if (high & (1<<27)) {
				rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
				mc_err.mci_misc = ((u64)ahigh << 32) | alow;
				printk("[%08x%08x]", alow, ahigh);
			}
			if (high & (1<<26)) {
				rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
				mc_err.mci_addr = ((u64)ahigh << 32) | alow;
				printk(" at %08x%08x", ahigh, alow);
			}
			rdmsr(MSR_IA32_MC0_CTL+i*4, alow, ahigh);
			mc_err.mci_ctl = ((u64)ahigh << 32) | alow;

			printk("\n");

			/* Clear it */
			wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
			/* Serialize */
			wmb();
			notifier_call_chain(&mc_notifier_list, X86_VENDOR_INTEL, &mc_err);
		}
	}

	if (recover & 2)
		panic("CPU context corrupt");
	if (recover & 1)
		panic("Unable to continue");
	printk(KERN_EMERG "Attempting to continue.\n");
	mcgstl &= ~(1<<2);
	wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
}
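Errors detected here are fanned out through mc_notifier_list, so other code can subscribe to machine-check events. A sketch of a consumer, assuming the old-style notifier_chain_register() API that matches the notifier_call_chain() call above; the handler and init names are hypothetical.
/* Hypothetical consumer of the machine-check notifier chain used above;
 * assumes the old notifier_chain_register() API of the same era. */
static int my_mc_notify(struct notifier_block *nb, unsigned long vendor, void *data)
{
	struct notifier_mc_err *err = data;

	printk(KERN_INFO "MCE seen on cpu %d, bank %d\n", err->cpunum, err->banknum);
	return NOTIFY_OK;
}

static struct notifier_block my_mc_nb = {
	.notifier_call = my_mc_notify,
};

static int __init my_mc_init(void)
{
	return notifier_chain_register(&mc_notifier_list, &my_mc_nb);
}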
Example #20
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs.  Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code.  By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}
	
	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			/* Hope the REBOOT_IRQ is good enough */
			goto finish;

		/* sync above data before sending IRQ */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than 10 ms if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
Example #21
void nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current-> is always on the stack, and we always switch
	 * the stack NMI-atomically, it's safe to use smp_processor_id().
	 */
	int sum, cpu = safe_smp_processor_id();

	if (nmi_watchdog_disabled)
		return;

	sum = apic_timer_irqs[cpu];

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_BAD) {
				alert_counter[cpu] = 0;
				return;
			}

			spin_lock(&nmi_print_lock);
			/*
			 * We are in trouble anyway, let's at least try
			 * to get a message out.
			 */
			bust_spinlocks(1);
			printk("NMI Watchdog detected LOCKUP on CPU%d, eip %16lx, registers:\n", cpu, regs->rip);
			show_registers(regs);
			if (panic_on_timeout)
				panic("NMI lockup");
			printk("console shuts up ...\n");
			console_silent();
			spin_unlock(&nmi_print_lock);
			bust_spinlocks(0);
			do_exit(SIGSEGV);
		}
	} else {
		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr) {
#ifndef CONFIG_MK8
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
#endif
		wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
	}
}