Example #1
static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
#ifdef CONFIG_HPWDT_NMI_DECODING
	int retval;
	/*
	 * Only one function can register for NMI_UNKNOWN
	 */
	retval = register_nmi_handler(NMI_UNKNOWN, hpwdt_pretimeout, 0, "hpwdt");
	if (retval)
		goto error;
	retval = register_nmi_handler(NMI_SERR, hpwdt_pretimeout, 0, "hpwdt");
	if (retval)
		goto error1;
	retval = register_nmi_handler(NMI_IO_CHECK, hpwdt_pretimeout, 0, "hpwdt");
	if (retval)
		goto error2;

	dev_info(&dev->dev,
		"HPE Watchdog Timer Driver: NMI decoding initialized\n");

	return 0;

error2:
	unregister_nmi_handler(NMI_SERR, "hpwdt");
error1:
	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
error:
	dev_warn(&dev->dev,
		"Unable to register a die notifier (err=%d).\n",
		retval);
	return retval;
#endif	/* CONFIG_HPWDT_NMI_DECODING */
	return 0;
}
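Every example below registers a callback with the same x86 contract: an nmi_handler_t receives the NMI type and the saved registers, and returns NMI_HANDLED to consume the NMI or NMI_DONE to let the next handler on the chain run. A minimal self-contained sketch of that pattern (the handler name and the my_hw_raised_nmi() check are hypothetical placeholders, not taken from any example here):

#include <linux/module.h>
#include <asm/nmi.h>	/* register_nmi_handler(), NMI_HANDLED/NMI_DONE */

/* Hypothetical handler: my_hw_raised_nmi() stands in for a
 * device-specific "was this NMI ours?" check. */
static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (!my_hw_raised_nmi())
		return NMI_DONE;	/* not ours; keep searching */

	/* ... service the event; NMI context, so no sleeping ... */
	return NMI_HANDLED;		/* consumed; stop the chain */
}

static int __init my_driver_init(void)
{
	/* The last argument is the name later passed to unregister. */
	return register_nmi_handler(NMI_LOCAL, my_nmi_handler, 0, "my_drv");
}

static void __exit my_driver_exit(void)
{
	unregister_nmi_handler(NMI_LOCAL, "my_drv");
}

module_init(my_driver_init);
module_exit(my_driver_exit);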
Example #2
static void __init test_nmi_ipi(struct cpumask *mask)
{
	unsigned long timeout;

	if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
				 NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
		nmi_fail = FAILURE;
		return;
	}

	/* sync above data before sending NMI */
	wmb();

	apic->send_IPI_mask(mask, NMI_VECTOR);

	/* Don't wait longer than a second */
	timeout = USEC_PER_SEC;
	while (!cpumask_empty(mask) && timeout--)
	        udelay(1);

	/* What happens if we timeout, do we still unregister?? */
	unregister_nmi_handler(NMI_LOCAL, "nmi_selftest");

	if (!timeout)
		nmi_fail = TIMEOUT;
	return;
}
Example #3
/*
 * Halt all other CPUs, calling the specified function on each of them
 *
 * This function can be used to halt all other CPUs on crash
 * or emergency reboot time. The function passed as parameter
 * will be called inside a NMI handler on all CPUs.
 */
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
	unsigned long msecs;
	local_irq_disable();

	/* Make a note of crashing cpu. Will be used in NMI callback. */
	crashing_cpu = safe_smp_processor_id();

	shootdown_callback = callback;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
				 NMI_FLAG_FIRST, "crash"))
		return;		/* Return what? */
	/*
	 * Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
}
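The nmi_shootdown_cb type is not shown above; on x86 it is void (*)(int cpu, struct pt_regs *regs), and as the comment says it runs inside the NMI handler on every CPU hit by the IPI. A minimal callback sketch, modeled loosely on the kdump path (the crash_save_cpu() call is an illustrative assumption, not code from this example):

#include <linux/kexec.h>	/* crash_save_cpu() */
#include <asm/reboot.h>		/* nmi_shootdown_cpus(), nmi_shootdown_cb */

/* Runs in NMI context on each CPU, so it must not sleep or take
 * ordinary locks; the caller halts the CPU once this returns. */
static void my_shootdown_cb(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);	/* snapshot this CPU's registers */
}

/* Typical call site is a crash or emergency-reboot path: */
static void my_crash_shutdown(struct pt_regs *regs)
{
	nmi_shootdown_cpus(my_shootdown_cb);
}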
Example #4
static void __init test_nmi_ipi(struct cpumask *mask)
{
	unsigned long timeout;

	if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
				 NMI_FLAG_FIRST, "nmi_selftest")) {
		nmi_fail = FAILURE;
		return;
	}

	/* sync above data before sending NMI */
	wmb();

	apic->send_IPI_mask(mask, NMI_VECTOR);

	/* Don't wait longer than a second */
	timeout = USEC_PER_SEC;
	while (!cpumask_empty(mask) && timeout--)
	        udelay(1);

	/* What happens if we timeout, do we still unregister?? */
	unregister_nmi_handler(NMI_LOCAL, "nmi_selftest");

	if (!timeout)
		nmi_fail = TIMEOUT;
	return;
}
Example #5
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' */
	get_online_cpus();
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	__register_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	return 0;
fail:
	free_msrs();
	return err;
}
Example #6
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
				nmi_cpu_online, nmi_cpu_down_prep);
	if (err < 0)
		goto fail_nmi;
	cpuhp_nmi_online = err;
	return 0;
fail_nmi:
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
	free_msrs();
	return err;
}
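The dynamic hotplug state stored in cpuhp_nmi_online has to be removed again on the shutdown path, which this example does not show. A sketch of the matching teardown, assuming the driver's nmi_shutdown() counterpart follows the usual cpuhp pattern (details beyond cpuhp_remove_state() and the unregister call are approximations):

static void nmi_shutdown(void)
{
	/* Runs nmi_cpu_down_prep on each online CPU, then releases the
	 * dynamic state allocated by cpuhp_setup_state() above. */
	cpuhp_remove_state(cpuhp_nmi_online);
	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	free_msrs();
}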
Example #7
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
Example #8
static void __init ms_hyperv_init_platform(void)
{
	/*
	 * Extract the features and hints
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	pr_info("HyperV: features 0x%x, hints 0x%x\n",
		ms_hyperv.features, ms_hyperv.hints);

#ifdef CONFIG_X86_LOCAL_APIC
	if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
		/*
		 * Get the APIC frequency.
		 */
		u64	hv_lapic_frequency;

		rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
		lapic_timer_frequency = hv_lapic_frequency;
		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
			lapic_timer_frequency);
	}

	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
			     "hv_nmi_unknown");
#endif

	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
	machine_ops.shutdown = hv_machine_shutdown;
	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
	mark_tsc_unstable("running on Hyper-V");

	/*
	 * Generation 2 instances don't support reading the NMI status from
	 * 0x61 port.
	 */
	if (efi_enabled(EFI_BOOT))
		x86_platform.get_nmi_reason = hv_get_nmi_reason;
}
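The hv_get_nmi_reason() override installed at the end is tiny: since Generation 2 instances have no port 0x61 to read, it can simply report that no reason bits are set, which routes the event into the unknown-NMI path that hv_nmi_unknown was registered for. Roughly:

/* No I/O port 0x61 on Gen2; report no reason bits so the NMI is
 * treated as unknown and reaches the hv_nmi_unknown handler. */
static unsigned char hv_get_nmi_reason(void)
{
	return 0;
}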
Example #9
static __init int perf_event_ibs_init(void)
{
	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}
Example #10
static __init int perf_event_ibs_init(void)
{
	struct attribute **attr = ibs_op_format_attrs;

	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;
	}
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}
Example #11
static void native_nmi_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			return;

		/* sync above data before sending NMI */
		wmb();

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
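The smp_stop_nmi_callback body is not shown; a sketch consistent with the stopping_cpu handshake above (an approximation of the mainline handler, with stop_this_cpu() being the kernel's mark-offline-and-halt helper):

static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* The initiating CPU receives the NMI too and must survive. */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	stop_this_cpu(NULL);	/* clears the online bit, then halts */
	return NMI_HANDLED;
}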
Example #12
static void __init init_nmi_testsuite(void)
{
	/* trap all the unknown NMIs we may generate */
	register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
			__initdata);
}
Example #13
File: smp.c Project: B-Rich/L4Reap
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs.  Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code.  By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}
	
	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi))  {
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			/* Hope the REBOOT_IRQ is good enough */
			goto finish;

		/* sync above data before sending IRQ */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than 10 ms if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}
Example #14
int SamplingThread(void *data)
{	
#ifdef USE_HPET
	
	struct hpet_info info;
#endif
	
	samplingExitedFlag = 0;
	inSampling = 1;
	sampleId = 0;

#ifdef DBG	
	printk("sampling thread has been initialized\n");
#endif	
	init_waitqueue_head(&samplingExited);
	on_each_cpu((void(*)(void*))StartSampling, NULL, 1);

#ifndef USE_HPET 
#ifdef USE_NMI
    register_nmi_handler(NMI_LOCAL, nmi_handler, 0, "sample_tool");
#else
	init_timer(&interval_timer);
	interval_timer.function = (void *)interval_handle;
	interval_timer.data = 0;
	interval_timer.expires = jiffies + HZ/100;
	add_timer(&interval_timer);
#endif
#else
	task.ht_func = (void *)interval_handle;
	task.ht_data = NULL;
	if (hpet_register(&task, 0) < 0)
	{
		printk("Can't register HPET timer.\n");
		samplingExitedFlag = 1;
		return 0;
	}
	if (hpet_control(&task, HPET_IRQFREQ, interval) < 0)
	{
		printk("Can't set HPET timer interval.\n");
		samplingExitedFlag = 1;
		return 0;
	}
	if (hpet_control(&task, HPET_INFO, (unsigned long)(&info)) < 0)
	{
		printk("Can't get HPET timer info.\n");
		samplingExitedFlag = 1;
		return 0;
	}
//	if (hpet_control(&task, HPET_EPI, 0) < 0)
//	{
//		printk("Set HPET timer periodic failed.\n");
//		samplingExitedFlag = 1;
//		return;
//	}
	if (hpet_control(&task, HPET_IE_ON, 0))
	{
		printk("Can't start HPET timer.\n");
		samplingExitedFlag = 1;
		return 0;
	}		
#endif

/*	while(inSampling)
	{
		//msleep(interval);
		interval_timer.expires += HZ * interval / 1000;
		add_timer(&interval_timer);
		if((curOffset[curBuffIdx] + sizeof(SAMPLE_RECORD)) > (PAGE_SIZE <<  UNIT_BUFF_SIZE))
		{
			curBuffIdx = (curBuffIdx + 1)  % 2;
			writeflag = 1;
			wake_up_interruptible(&writeQue);
		}
		printk("Begin collecting sampling data\n");
		on_each_cpu((void(*)(void*))CollectSampling, NULL, 0, 1);
		sampleId++;
		curOffset[curBuffIdx] += sizeof(SAMPLE_RECORD);		
	}
	printk("sampling thread begin to exit.\n");
	
	samplingExitedFlag = 1;
	wake_up_interruptible(&samplingExited);
*/
	return 0;
}
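Note that when USE_NMI is defined, SamplingThread registers the "sample_tool" handler but never removes it; the module's teardown path (not shown here) needs the matching call, e.g.:

#ifdef USE_NMI
	unregister_nmi_handler(NMI_LOCAL, "sample_tool");
#endif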
Example #15
static void __init init_nmi_testsuite(void)
{
	/* trap all the unknown NMIs we may generate */
	register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
}
Example #16
static int __init register_trigger_all_cpu_backtrace(void)
{
	register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
				0, "arch_bt");
	return 0;
}
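The handler registered here just feeds the generic backtrace machinery; in mainline it is roughly the following (nmi_cpu_backtrace() prints this CPU's backtrace if it was asked for one):

static int
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
	if (nmi_cpu_backtrace(regs))
		return NMI_HANDLED;	/* this CPU was in the request mask */

	return NMI_DONE;
}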