Example #1
File: p5.c Project: 03199618/linux
/* Set up machine check reporting for processors with Intel style MCE: */
void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/* Default P5 to off as it's often misconnected: */
	if (!mce_p5_enabled)
		return;

	/* Check for MCE support: */
	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	machine_check_vector = pentium_machine_check;
	/* Make sure the vector pointer is visible before we enable MCEs: */
	wmb();

	/* Read registers before enabling: */
	rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
	rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
	printk(KERN_INFO
	       "Intel old style machine check architecture supported.\n");

	/* Enable MCE: */
	set_in_cr4(X86_CR4_MCE);
	printk(KERN_INFO
	       "Intel old style machine check reporting enabled on CPU#%d.\n",
	       smp_processor_id());
}
Example #2
/* AMD K7 machine check is Intel like */
void amd_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;

	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	machine_check_vector = k7_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	/* Clear status for MC index 0 separately; we don't touch CTL,
	 * as some K7 Athlons cause spurious MCEs when it's enabled. */
	if (boot_cpu_data.x86 == 6) {
		wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0);
		i = 1;
	} else
		i = 0;
	for (; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());
}
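The MSR_IA32_MCG_CAP decode repeated in these listings packs the bank count into bits 7:0 and an "MCG_CTL present" flag into bit 8 (per the Intel SDM). A minimal standalone sketch of that decode, in plain C rather than kernel code and with hypothetical struct/function names, for illustration only:

#include <stdint.h>

/* Decode the two IA32_MCG_CAP fields the init routines rely on:
 * bit 8 (MCG_CTL_P) says whether IA32_MCG_CTL exists, and
 * bits 7:0 (Count) give the number of error-reporting banks. */
struct mcg_cap_fields {
	int		ctl_present;	/* MCG_CTL_P, bit 8 */
	unsigned int	bank_count;	/* Count, bits 7:0 */
};

static struct mcg_cap_fields decode_mcg_cap(uint64_t cap)
{
	struct mcg_cap_fields f = {
		.ctl_present = (int)((cap >> 8) & 1),
		.bank_count  = (unsigned int)(cap & 0xff),
	};
	return f;
}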
Example #3
/*
 * Enable the extended processor state save/restore feature
 */
static inline void xstate_enable(void)
{
#ifndef CONFIG_L4
	set_in_cr4(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
#endif /* CONFIG_L4 */
}
Example #4
static __init void xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MCE)  |  /* disable MCE */
		  (1 << X86_FEATURE_MCA)  |  /* disable MCA */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
			  (1 << X86_FEATURE_ACPI));  /* disable ACPI */

	ax = 1;
	cx = 0;
	xen_cpuid(&ax, &bx, &cx, &dx);

	/* cpuid claims we support xsave; try enabling it to see what happens */
	if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
		unsigned long cr4;

		set_in_cr4(X86_CR4_OSXSAVE);
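		/* Read CR4 back: if the hypervisor filtered the OSXSAVE
		 * write, the bit reads back clear. */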
		cr4 = read_cr4();

		if ((cr4 & X86_CR4_OSXSAVE) == 0)
			cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));

		clear_in_cr4(X86_CR4_OSXSAVE);
	}
}
Example #5
/* Set up machine check reporting for processors with Intel style MCE */
void __init intel_p6_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;
	
	/* Check for MCE support */
	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	/* Check for PPro style MCA */
	if (!cpu_has(c, X86_FEATURE_MCA))
		return;

	/* Ok machine check is available */
	machine_check_vector = intel_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	/* Don't enable bank 0 on intel P6 cores, it goes bang quickly. */
	for (i=1; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());
}
Example #6
/* AMD K7 machine check */
int amd_k7_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;

	/* Check for PPro style MCA; our caller has confirmed MCE support. */
	if (!cpu_has(c, X86_FEATURE_MCA))
		return 0;

	x86_mce_vector_register(k7_machine_check);

	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	/* Clear status for MC index 0 separately; we don't touch CTL,
	 * as some Athlons cause spurious MCEs when it's enabled. */
	wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0);
	for (i=1; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "CPU%d: AMD K7 machine check reporting enabled.\n",
		smp_processor_id());

	return 1;
}
Example #7
static void __init pagetable_init (void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
	int i;
	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

	/* Enable PSE if available */
	if (cpu_has_pse) {
		set_in_cr4(X86_CR4_PSE);
	}

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}
Example #8
/* Set up machine check reporting on the Winchip C6 series */
void winchip_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	machine_check_vector = winchip_machine_check;
	wmb();
	rdmsr(MSR_IDT_FCR1, lo, hi);
	lo |= (1<<2);	/* Enable EIERRINT (int 18 MCE) */
	lo &= ~(1<<4);	/* Enable MCE */
	wrmsr(MSR_IDT_FCR1, lo, hi);
	set_in_cr4(X86_CR4_MCE);
	printk(KERN_INFO "Winchip machine check reporting enabled on CPU#0.\n");
}
Example #9
/*
 * Enable the extended processor state save/restore feature
 */
void __cpuinit xsave_init(void)
{
	if (!cpu_has_xsave)
		return;

	set_in_cr4(X86_CR4_OSXSAVE);

	/*
	 * Enable all the features that the HW is capable of
	 * and the Linux kernel is aware of.
	 */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}
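For context, pcntxt_mask (the feature mask handed to xsetbv() above) is derived elsewhere from CPUID leaf 0xD, whose sub-leaf 0 reports the XCR0 bits the hardware supports in EDX:EAX. A minimal user-space probe of that leaf, using GCC's cpuid.h instead of kernel helpers and with an illustrative function name:

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>	/* GCC/Clang wrapper for the CPUID instruction */

/* CPUID leaf 0xD, sub-leaf 0: EDX:EAX is the mask of XSAVE-managed
 * feature bits the CPU supports (the valid bits of XCR0). */
static uint64_t probe_xfeature_mask(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x0d, 0, &eax, &ebx, &ecx, &edx))
		return 0;	/* leaf not supported */
	return ((uint64_t)edx << 32) | eax;
}

int main(void)
{
	printf("supported xfeatures: %#llx\n",
	       (unsigned long long)probe_xfeature_mask());
	return 0;
}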
Example #10
/* Set up machine check reporting for processors with Intel style MCE */
void intel_p6_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;
	
	/* Check for MCE support */
	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	/* Check for PPro style MCA */
	if (!cpu_has(c, X86_FEATURE_MCA))
		return;

	/* Ok machine check is available */
	machine_check_vector = intel_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	/*
	 * Following the example in IA-32 SDM Vol 3:
	 * - MC0_CTL should not be written
	 * - Status registers on all banks should be cleared on reset
	 */
	for (i=1; i<nr_mce_banks; i++)
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);

	for (i=0; i<nr_mce_banks; i++)
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());
}
Example #11
/* AMD K8 machine check */
void amd_k8_mcheck_init(struct cpuinfo_x86 *c)
{
	uint64_t value;
	uint32_t i;
	int cpu_nr;

	machine_check_vector = k8_machine_check;
	cpu_nr = smp_processor_id();
	wmb();

	rdmsrl(MSR_IA32_MCG_CAP, value);
	if (value & MCG_CTL_P)	/* Control register present ? */
		wrmsrl (MSR_IA32_MCG_CTL, 0xffffffffffffffffULL);
	nr_mce_banks = value & MCG_CAP_COUNT;

	for (i = 0; i < nr_mce_banks; i++) {
		switch (i) {
		case 4: /* Northbridge */
			/* Enable error reporting of all errors,
			 * enable error checking and
			 * disable sync flooding */
			wrmsrl(MSR_IA32_MC4_CTL, 0x02c3c008ffffffffULL);
			wrmsrl(MSR_IA32_MC4_STATUS, 0x0ULL);
			break;

		default:
			/* Enable error reporting of all errors */
			wrmsrl(MSR_IA32_MC0_CTL + 4 * i, 0xffffffffffffffffULL);
			wrmsrl(MSR_IA32_MC0_STATUS + 4 * i, 0x0ULL);
			break;
		}
	}

	set_in_cr4(X86_CR4_MCE);
	printk("CPU%i: AMD K8 machine check reporting enabled.\n", cpu_nr);
}
Example #12
File: p4.c Project: kzlin129/tt-gpl
void __devinit intel_p4_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;
	
	machine_check_vector = intel_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	for (i=0; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());

	/* Check for P4/Xeon extended MCE MSRs */
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<9))	{/* MCG_EXT_P */
		mce_num_extended_msrs = (l >> 16) & 0xff;
		printk (KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
				" available\n",
			smp_processor_id(), mce_num_extended_msrs);

#ifdef CONFIG_X86_MCE_P4THERMAL
		/* Check for P4/Xeon Thermal monitor */
		intel_init_thermal(c);
#endif
	}
}
Example #13
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}
Example #14
/*
 * Enable the extended processor state save/restore feature
 */
static inline void xstate_enable(void)
{
	set_in_cr4(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}
Example #15
File: mce_32.c Project: 274914765/C
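/* Re-enable machine checks on resume: old_cr4 is presumably captured by
 * a matching stop_mce() before CR4.MCE is cleared. */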
void __init restart_mce(void)
{
	if (old_cr4 & X86_CR4_MCE)
		set_in_cr4(X86_CR4_MCE);
}
Example #16
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(4, &overflow);
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

#ifdef CONFIG_X86_32
	if (cpu_has_fxsr) {
		printk(KERN_INFO "Enabling fast FPU save and restore... ");
		set_in_cr4(X86_CR4_OSFXSR);
		printk("done.\n");
	}
	if (cpu_has_xmm) {
		printk(KERN_INFO
			"Enabling unmasked SIMD FPU exception support... ");
		set_in_cr4(X86_CR4_OSXMMEXCPT);
		printk("done.\n");
	}

	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
#endif

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_X86_64
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#else
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif
	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

#ifdef CONFIG_X86_32
	trap_init_hook();
#endif
}
Example #17
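/* Set CR4.PCE so user space can execute RDPMC on this CPU; the void *arg
 * signature suggests an on_each_cpu()-style cross-CPU callback. */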
static void cr4_pce_enable(void *arg)
{
	set_in_cr4(X86_CR4_PCE);
}