Example #1
void __init alternative_instructions(void)
{
    nmi_callback_t saved_nmi_callback;

    arch_init_ideal_nops();

    /*
     * The patching is not fully atomic, so try to avoid local
     * interruptions that might execute the to-be-patched code.
     * Other CPUs are not running.
     */
    saved_nmi_callback = set_nmi_callback(mask_nmi_callback);

    /*
     * Don't stop machine check exceptions while patching.
     * MCEs only happen when something got corrupted, and in that
     * case we must do something about the corruption.
     * Ignoring it is worse than an unlikely patching race.
     * Also, machine checks tend to be broadcast: if one CPU goes
     * into machine check the others follow quickly, so we don't
     * expect a machine check to cause undue problems during code
     * patching.
     */
    apply_alternatives(__alt_instructions, __alt_instructions_end);

    set_nmi_callback(saved_nmi_callback);
}
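For reference, the mask_nmi_callback installed above can be extremely small; a minimal sketch, assuming Xen's nmi_callback_t signature of int (*)(const struct cpu_user_regs *, int):

static int mask_nmi_callback(const struct cpu_user_regs *regs, int cpu)
{
    /*
     * Returning nonzero reports the NMI as handled, so nothing runs
     * the code that is being patched. The signature is an assumption.
     */
    return 1;
}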
Example #2
static void nmi_shootdown_cpus(void)
{
    unsigned long msecs;

    local_irq_disable();

    crashing_cpu = smp_processor_id();
    local_irq_count(crashing_cpu) = 0;

    atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
    /* Would it be better to replace the trap vector here? */
    set_nmi_callback(crash_nmi_callback);
    /* Ensure the new callback function is set before sending out the NMI. */
    wmb();

    smp_send_nmi_allbutself();

    msecs = 1000; /* Wait at most a second for the other cpus to stop */
    while ( (atomic_read(&waiting_for_crash_ipi) > 0) && msecs )
    {
        mdelay(1);
        msecs--;
    }

    __stop_this_cpu();
    disable_IO_APIC();

    local_irq_enable();
}
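The crash_nmi_callback installed above is not shown in this example; a hypothetical sketch of what each remote CPU might run, assuming the same Xen-style callback signature (the body is an illustration, not the actual implementation):

static int crash_nmi_callback(const struct cpu_user_regs *regs, int cpu)
{
    /* Let the crashing CPU's countdown loop observe our arrival. */
    atomic_dec(&waiting_for_crash_ipi);

    /* Park this CPU for good: spin in a low-power halt. */
    for ( ; ; )
        asm volatile ( "hlt" );

    return 1; /* unreachable */
}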
Example #3
static int timer_start(void)
{
#ifdef RRPROFILE
	if(poll_idle_enabled) {
		enable_poll_idle();
	}
#endif // RRPROFILE

	disable_timer_nmi_watchdog();
	set_nmi_callback(nmi_timer_callback);
	return 0;
}
Example #4
static void nmi_shootdown_cpus(void)
{
    unsigned long msecs;

    local_irq_disable();

    if ( hpet_broadcast_is_available() )
        hpet_disable_legacy_broadcast();

    crashing_cpu = smp_processor_id();
    local_irq_count(crashing_cpu) = 0;

    atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
    /* Would it be better to replace the trap vector here? */
    set_nmi_callback(crash_nmi_callback);
    /* Ensure the new callback function is set before sending out the NMI. */
    wmb();

    smp_send_nmi_allbutself();

    msecs = 1000; /* Wait at most a second for the other cpus to stop */
    while ( (atomic_read(&waiting_for_crash_ipi) > 0) && msecs )
    {
        mdelay(1);
        msecs--;
    }

    /* Shut down any IOMMU functionality: the crashdump kernel will not
     * boot happily if interrupt/DMA remapping is still enabled. */
    iommu_crash_shutdown();

    __stop_this_cpu();

    /* This is a bit of a hack due to the problems with the x2apic_enabled
     * variable, but we can't do any better without a significant refactoring
     * of the APIC code */
    x2apic_enabled = (current_local_apic_mode() == APIC_MODE_X2APIC);

    disable_IO_APIC();
    hpet_disable();
}
Example #5
/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;

	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		} else {
			set_nmi_callback(unknown_nmi_panic_callback);
		}
	} else {
		release_lapic_nmi();
		unset_nmi_callback();
	}
	return 0;
}
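The unknown_nmi_panic_callback referenced above is not part of this example; a hedged sketch, assuming the classic int (*)(struct pt_regs *, int cpu) callback signature of this kernel era (the body is an illustration only):

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	/* Installed only while unknown_nmi_panic is enabled. */
	panic("NMI received on CPU %d for unknown reason\n", cpu);
	return 1; /* unreachable */
}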
Example #6
static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	set_nmi_callback(crash_nmi_callback);

	/*
	 * Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
	/* Leave the nmi callback set */
	disable_local_APIC();
}
Example #7
static int nmi_setup(void)
{
	if (!allocate_msrs())
		return -ENOMEM;

	/* We must tread very carefully here.
	 * We need to install our NMI handler
	 * without actually triggering any NMIs, as doing so would
	 * break the core code horrifically.
	 */
	if (reserve_lapic_nmi() < 0) {
		free_msrs();
		return -EBUSY;
	}
	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */
	on_each_cpu(nmi_save_registers, NULL, 0, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
	set_nmi_callback(nmi_callback);
	nmi_enabled = 1;
	return 0;
}
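A teardown path would undo nmi_setup step by step; a sketch mirroring the helpers above (nmi_cpu_shutdown is assumed to restore the MSRs saved by nmi_save_registers; every other name appears in the examples):

static void nmi_shutdown(void)
{
	nmi_enabled = 0;
	/* Restore the per-CPU MSR state first, then drop the handler. */
	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
	unset_nmi_callback();
	release_lapic_nmi();
	free_msrs();
}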
Example #8
static int timer_start(void)
{
	disable_timer_nmi_watchdog();
	set_nmi_callback(nmi_timer_callback);
	return 0;
}
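The nmi_timer_callback itself is not shown; a plausible sketch, assuming it feeds each NMI tick to oprofile as one sample (oprofile_add_sample and the callback signature are assumptions):

static int nmi_timer_callback(struct pt_regs *regs, int cpu)
{
	/* One profiling sample per NMI tick. */
	oprofile_add_sample(regs, 0);
	return 1; /* NMI consumed */
}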
Example #9
int nmi_enable_virq(void)
{
	set_nmi_callback(nmi_callback);
	return 0;
}
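A matching disable path would simply drop the callback; a trivial sketch (nmi_disable_virq is a hypothetical counterpart, not taken from the source):

void nmi_disable_virq(void)
{
	/* Revert to the default NMI handling. */
	unset_nmi_callback();
}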