Example #1
static int __init init_nonfatal_mce_checker(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	/* Check for MCE support */
	if (!cpu_has(c, X86_FEATURE_MCE))
		return -ENODEV;

	/* Check for PPro style MCA */
	if (!cpu_has(c, X86_FEATURE_MCA))
		return -ENODEV;

	/* Some Athlons misbehave when we frob bank 0 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		boot_cpu_data.x86 == 6)
			firstbank = 1;
	else
			firstbank = 0;

	/*
	 * Check for non-fatal errors every MCE_RATE s
	 */
	schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
	printk(KERN_INFO "Machine check exception polling timer started.\n");
	return 0;
}
Example #2
/* Set up machine check reporting for processors with Intel style MCE */
void __init intel_p6_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;
	
	/* Check for MCE support */
	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	/* Check for PPro style MCA */
	if (!cpu_has(c, X86_FEATURE_MCA))
		return;

	/* Ok machine check is available */
	machine_check_vector = intel_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	/* Don't enable bank 0 on intel P6 cores, it goes bang quickly. */
	for (i=1; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());
}
Example #3
int cpu_init(void)
{
	if (parse_cpuinfo_features(proc_cpuinfo_match))
		return -1;

	BUILD_BUG_ON(sizeof(struct xsave_struct) != XSAVE_SIZE);
	BUILD_BUG_ON(sizeof(struct i387_fxsave_struct) != FXSAVE_SIZE);

	/*
	 * Make sure that at least FPU is onboard
	 * and fxsave is supported.
	 */
	if (cpu_has(X86_FEATURE_FPU)) {
		if (!cpu_has(X86_FEATURE_FXSR)) {
			pr_err("missing support fxsave/restore insns\n");
			return -1;
		}
	}

	pr_debug("fpu:%d fxsr:%d xsave:%d\n",
		 !!cpu_has(X86_FEATURE_FPU),
		 !!cpu_has(X86_FEATURE_FXSR),
		 !!cpu_has(X86_FEATURE_XSAVE));

	return 0;
}
Example #4
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
    if (!cpu_has_apic)
        return 0;
    if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
        return 0;
    return 1;
}
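For context, here is a minimal usage sketch (a hypothetical caller, not part of the quoted sources) showing how a predicate like intel_thermal_supported() is typically consulted before any APIC/MSR programming; the intel_init_thermal() variants in Examples #5 and #6 perform the same cpu_has() checks inline:

/* Hypothetical caller: bail out early when the prerequisites
 * checked by intel_thermal_supported() above are missing. */
static void example_thermal_setup(struct cpuinfo_x86 *c)
{
	if (!intel_thermal_supported(c))
		return;	/* no local APIC, no ACPI (TM), or no ACC (clock modulation) */

	/* ... program APIC_LVTTHMR and MSR_IA32_MISC_ENABLE as in Examples #5/#6 ... */
}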
Example #5
/* P4/Xeon Thermal regulation detect and init */
static void intel_init_thermal(struct cpuinfo_x86 *c)
{
	u32 l, h;
	unsigned int cpu = smp_processor_id();

	/* Thermal monitoring */
	if (!cpu_has(c, X86_FEATURE_ACPI))
		return;	/* -ENODEV */

	/* Clock modulation */
	if (!cpu_has(c, X86_FEATURE_ACC))
		return;	/* -ENODEV */

	/* first check if it's enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already -zwanem.
	 */
	rdmsr (MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
				cpu);
		return; /* -EBUSY */
	}

	/* check whether a vector already exists, temporarily masked? */	
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already "
				"installed\n",
			cpu, (h & APIC_VECTOR_MASK));
		return; /* -EBUSY */
	}

	/* The temperature transition interrupt handler setup */
	h = THERMAL_APIC_VECTOR;		/* our delivery vector */
	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);	/* we'll mask till we're ready */
	apic_write_around(APIC_LVTTHMR, h);

	rdmsr (MSR_IA32_THERM_INTERRUPT, l, h);
	wrmsr (MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);

	/* ok we're good to go... */
	vendor_thermal_interrupt = intel_thermal_interrupt;
	
	rdmsr (MSR_IA32_MISC_ENABLE, l, h);
	wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h);

	l = apic_read (APIC_LVTTHMR);
	apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
	printk (KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
	return;
}
Example #6
static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int tm2 = 0;
	unsigned int cpu = smp_processor_id();

	if (!cpu_has(c, X86_FEATURE_ACPI))
		return;

	if (!cpu_has(c, X86_FEATURE_ACC))
		return;

	/* first check if TM1 is already enabled by the BIOS, in which
	 * case there might be some SMM goo which handles it, so we can't even
	 * put a handler since it might be delivered via SMI already.
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	if ((l & (1 << 3)) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13)))
		tm2 = 1;

	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already "
		       "installed\n", cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	h = THERMAL_APIC_VECTOR;
	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);
	apic_write(APIC_LVTTHMR, h);

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);

	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
		cpu, tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
	return;
}
Example #7
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
	if (c->x86 == 0x06) {
		if (cpu_has(c, X86_FEATURE_EST))
			pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
		switch (c->x86_model) {
		case 0x0E: /* Core */
		case 0x0F: /* Core Duo */
		case 0x16: /* Celeron Core */
		case 0x1C: /* Atom */
			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
			return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
		case 0x0D: /* Pentium M (Dothan) */
			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
			/* fall through */
		case 0x09: /* Pentium M (Banias) */
			return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
		}
	}

	if (c->x86 != 0xF)
		return 0;

	/* on P-4s, the TSC runs with constant frequency independent whether
	 * throttling is active or not. */
	p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;

	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
		pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
		return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
	}

	return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
}
Example #8
/* AMD K7 machine check is Intel like */
void amd_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;

	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	machine_check_vector = k7_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	/* Clear status for MC index 0 separately, we don't touch CTL,
	 * as some K7 Athlons cause spurious MCEs when it's enabled. */
	if (boot_cpu_data.x86 == 6) {
		wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0);
		i = 1;
	} else
		i = 0;
	for (; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());
}
Example #9
/* AMD K7 machine check */
int amd_k7_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;

	/* Check for PPro style MCA; our caller has confirmed MCE support. */
	if (!cpu_has(c, X86_FEATURE_MCA))
		return 0;

	x86_mce_vector_register(k7_machine_check);

	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	/* Clear status for MC index 0 separately, we don't touch CTL,
	 * as some Athlons cause spurious MCEs when it's enabled. */
	wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0);
	for (i=1; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "CPU%d: AMD K7 machine check reporting enabled.\n",
		smp_processor_id());

	return 1;
}
Example #10
void setup_apic_nmi_watchdog(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 != 15)
			return;
		if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
			return;
		setup_k7_watchdog();
		break;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			if (!setup_intel_arch_watchdog())
				return;
		} else if (boot_cpu_data.x86 == 15) {
			if (!setup_p4_watchdog())
				return;
		} else {
			return;
		}

		break;

	default:
		return;
	}
	lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
	nmi_active = 1;
}
Example #11
void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;

	if (!mce_p5_enabled)
		return;

	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	machine_check_vector = pentium_machine_check;
	wmb();

	rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
	rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
	printk(KERN_INFO
	       "Intel old style machine check architecture supported.\n");

	set_in_cr4(X86_CR4_MCE);
	printk(KERN_INFO
	       "Intel old style machine check reporting enabled on CPU#%d.\n",
	       smp_processor_id());
}
Example #12
static void disable_lapic_nmi_watchdog(void)
{
	if (nmi_active <= 0)
		return;
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
		break;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			disable_intel_arch_watchdog();
			break;
		}
		switch (boot_cpu_data.x86) {
		case 6:
			if (boot_cpu_data.x86_model > 0xd)
				break;

			wrmsr(MSR_P6_EVNTSEL0, 0, 0);
			break;
		case 15:
			if (boot_cpu_data.x86_model > 0x4)
				break;

			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
			break;
		}
		break;
	}
	nmi_active = -1;
	/* tell do_nmi() and others that we're not active any more */
	nmi_watchdog = 0;
}
Example #13
/* Set up machine check reporting for processors with Intel style MCE: */
void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/* Default P5 to off as its often misconnected: */
	if (!mce_p5_enabled)
		return;

	/* Check for MCE support: */
	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	machine_check_vector = pentium_machine_check;
	/* Make sure the vector pointer is visible before we enable MCEs: */
	wmb();

	/* Read registers before enabling: */
	rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
	rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
	printk(KERN_INFO
	       "Intel old style machine check architecture supported.\n");

	/* Enable MCE: */
	cr4_set_bits(X86_CR4_MCE);
	printk(KERN_INFO
	       "Intel old style machine check reporting enabled on CPU#%d.\n",
	       smp_processor_id());
}
Example #14
static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		printk(KERN_WARNING
		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
				x86_cap_flags[df->feature], df->level);
	}
}
Example #15
static int __init mtrr_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!(xen_start_info->flags & SIF_PRIVILEGED))
		return -ENODEV;

	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
		return -ENODEV;

	set_num_var_ranges();
	init_table();

	return 0;
}
Example #16
static int __init mtrr_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!is_initial_xendomain())
		return -ENODEV;

	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
		return -ENODEV;

	set_num_var_ranges();
	init_table();

	return 0;
}
Example #17
static int __init longrun_init(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
	    !cpu_has(c, X86_FEATURE_LONGRUN))
		return -ENODEV;

	return cpufreq_register_driver(&longrun_driver);
}
Example #18
/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	return 1;
}
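A minimal sketch (a hypothetical consumer; pm_idle and mwait_idle appear in Example #26, while default_idle is assumed here as the HALT-based fallback) of how such a predicate might drive the boot-time C1 idle selection:

/* Hypothetical consumer: pick the default C1 idle routine at boot. */
static void example_select_c1_idle(const struct cpuinfo_x86 *c)
{
	if (prefer_mwait_c1_over_halt(c))
		pm_idle = mwait_idle;	/* Intel with MWAIT: use MWAIT-C1 */
	else
		pm_idle = default_idle;	/* everyone else: plain HALT */
}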
Example #19
static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
    if (c->phys_proc_id == o->phys_proc_id) {
        if (cpu_has(c, X86_FEATURE_AMD_DCM))
            return true;

        return topology_sane(c, o, "mc");
    }
    return false;
}
Example #20
static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
	    !cpu_has(cpu, X86_FEATURE_EST))
		return 0;

	return 1;
}
Example #21
void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	if (forcempx)
		return;
	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046: "Branch Instructions
	 * May Initialize MPX Bound Registers Incorrectly".
	 *
	 * This might falsely disable MPX on systems without
	 * SMEP, like Atom processors without SMEP.  But there
	 * is no such hardware known at the moment.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}
Example #22
static int msr_open(struct inode *inode, struct file *file)
{
	unsigned int cpu = iminor(file->f_dentry->d_inode);
	struct cpuinfo_x86 *c = &(cpu_data)[cpu];

	if (cpu >= NR_CPUS || !cpu_online(cpu))
		return -ENXIO;	/* No such CPU */
	if (!cpu_has(c, X86_FEATURE_MSR))
		return -EIO;	/* MSR not supported */

	return 0;
}
Example #23
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}
Example #24
int compat_pat_wc_init(void)
{
  int rc;
  struct cpuinfo_x86* cpu_info = &boot_cpu_data;
  if( compat_pat_wc.inited ) {
    ++compat_pat_wc.inited;
    return 0;
  }

  if( !cpu_has(cpu_info, X86_FEATURE_MSR) || !cpu_has(cpu_info, X86_FEATURE_PAT) ) {
    EFRM_ERR("%s: ERROR: PAT not available on this processor", __func__);
    return -ENOSYS;
  }

  rc = setup_pat();
  switch (rc) {
    case -EIO:
      EFRM_ERR("%s: ERROR: failed accessing PAT register", __func__);
      return rc;
    case -EFAULT:
      EFRM_ERR("%s: ERROR: PAT registers inconsistent across CPUs", __func__);
      return rc;
    case -ENOSPC:
      EFRM_ERR("%s: ERROR: incompatible PAT modification detected %llx",
          __func__, compat_pat_wc.original_pat.u64);
      return rc;
    case -EALREADY:
      EFRM_WARN("%s: WARNING: compatible PAT modification detected %llx",
          __func__, compat_pat_wc.original_pat.u64);
      /* fall through */
    case 0:
      EFRM_WARN( "%s: PAT modified for WC", __func__);
      break;
    default:
      EFRM_ERR( "%s: unknown return code", __func__);
  }

  compat_pat_wc.inited = 1;
  return 0;
}
Example #25
/* Set up machine check reporting for processors with Intel style MCE */
void intel_p6_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;
	
	/* Check for MCE support */
	if (!cpu_has(c, X86_FEATURE_MCE))
		return;

	/* Check for PPro style MCA */
	if (!cpu_has(c, X86_FEATURE_MCA))
		return;

	/* Ok machine check is available */
	machine_check_vector = intel_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	/*
	 * Following the example in IA-32 SDM Vol 3:
	 * - MC0_CTL should not be written
	 * - Status registers on all banks should be cleared on reset
	 */
	for (i=1; i<nr_mce_banks; i++)
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);

	for (i=0; i<nr_mce_banks; i++)
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());
}
Example #26
void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
{
    if (cpu_has(c, X86_FEATURE_MWAIT)) {
        printk("monitor/mwait feature present.\n");
        /*
         * Skip, if setup has overridden idle.
         * One CPU supports mwait => All CPUs support mwait
         */
        if (!pm_idle) {
            printk("using mwait in idle threads.\n");
            pm_idle = mwait_idle;
        }
    }
}
Example #27
static void __cpuinit setup_APIC_timer(void)
{
    struct clock_event_device *levt = &__get_cpu_var(lapic_events);

    if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
        lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;

        lapic_clockevent.rating = 150;
    }

    memcpy(levt, &lapic_clockevent, sizeof(*levt));
    levt->cpumask = cpumask_of(smp_processor_id());

    clockevents_register_device(levt);
}
Example #28
int mlx4_enable_wc(void)
{
	struct cpuinfo_x86 *c = &(cpu_data)[0];
	int ret;

	if (wc_enabled)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MSR) ||
	    !cpu_has(c, X86_FEATURE_PAT)) {
		printk(KERN_INFO "ib_mlx4: WC not available"
		       " on this processor\n");
		return -ENOSYS;
	}

	if (have_wc_errata())
		return -ENOSYS;

	if (!(ret = read_and_modify_pat()))
		wc_enabled = 1;
	else
		printk(KERN_INFO "ib_mlx4: failed to enable WC\n");
	return ret ? -EIO  : 0;
}
Example #29
static int msr_open(struct inode *inode, struct file *file)
{
    unsigned int cpu;
    struct cpuinfo_x86 *c;

    cpu = iminor(file->f_path.dentry->d_inode);
    if (cpu >= nr_cpu_ids || !cpu_online(cpu))
        return -ENXIO;

    c = &cpu_data(cpu);
    if (!cpu_has(c, X86_FEATURE_MSR))
        return -EIO;

    return 0;
}
Example #30
static void init_cpu_flags(void *dummy)
{
	int cpu = smp_processor_id();
	struct cpu_flags *flags = &per_cpu(cpu_flags, cpu);
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned int tmp1, tmp2;
	int i;

	bitmap_zero((unsigned long *)flags, NCAPINTS);
	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i))
			set_bit(i, (unsigned long *)flags);

	cpuid(0x00000001, &tmp1, &tmp2, &flags->val[4], &flags->val[0]);
	cpuid(0x80000001, &tmp1, &tmp2, &flags->val[6], &flags->val[1]);
}
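Taken together, the examples above all follow the same gating pattern: obtain a struct cpuinfo_x86 (usually &boot_cpu_data or cpu_data(cpu)) and test the relevant feature bits with cpu_has() before touching any vendor-specific MSRs or registers. A distilled sketch of that pattern (a hypothetical init function; the feature bits are just illustrative, mirroring Examples #24 and #28):

/* Hypothetical module init: refuse to load unless the CPU advertises
 * the feature bits the driver depends on. */
static int __init example_feature_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!cpu_has(c, X86_FEATURE_MSR) || !cpu_has(c, X86_FEATURE_PAT))
		return -ENODEV;	/* required CPUID bits missing */

	/* safe to program the corresponding MSRs from here on */
	return 0;
}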