Code example #1
File: sleep.c Project: ARMWorks/FA_2451_Linux_Kernel
/**
 * acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int acpi_suspend_lowlevel(void)
{
	struct wakeup_header *header =
		(struct wakeup_header *) __va(real_mode_header->wakeup_header);

	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

	header->video_mode = saved_video_mode;

#ifndef CONFIG_64BIT
	store_gdt((struct desc_ptr *)&header->pmode_gdt);

	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
		       &header->pmode_efer_high))
		header->pmode_efer_low = header->pmode_efer_high = 0;
#endif /* !CONFIG_64BIT */

	header->pmode_cr0 = read_cr0();
	header->pmode_cr4 = read_cr4_safe();
	header->pmode_behavior = 0;
	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
			&header->pmode_misc_en_low,
			&header->pmode_misc_en_high))
		header->pmode_behavior |=
			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa(&initial_page_table);
	saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
	early_gdt_descr.address =
			(unsigned long)get_cpu_gdt_table(smp_processor_id());
	initial_gs = per_cpu_offset(smp_processor_id());
#endif
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

	do_suspend_lowlevel();
	return 0;
}
Code example #2
File: mce-severity.c Project: kishore1006/linux
static int mce_severity_amd_smca(struct mce *m, int err_ctx)
{
	u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
	u32 low, high;

	/*
	 * We need to look at the following bits:
	 * - "succor" bit (data poisoning support), and
	 * - TCC bit (Task Context Corrupt)
	 * in MCi_STATUS to determine error severity.
	 */
	if (!mce_flags.succor)
		return MCE_PANIC_SEVERITY;

	if (rdmsr_safe(addr, &low, &high))
		return MCE_PANIC_SEVERITY;

	/* TCC (Task context corrupt). If set and if IN_KERNEL, panic. */
	if ((low & MCI_CONFIG_MCAX) &&
	    (m->status & MCI_STATUS_TCC) &&
	    (err_ctx == IN_KERNEL))
		return MCE_PANIC_SEVERITY;

	 /* ...otherwise invoke hwpoison handler. */
	return MCE_AR_SEVERITY;
}
Code example #3
File: compat_pat_wc.c Project: majek/openonload
static int read_pat(efx_qword_t* pat)
{
  int r = rdmsr_safe(MSR_IA32_CR_PAT, &(pat->u32[0]), &(pat->u32[1]));
  if( r )
    return -EIO;
  return 0;
}
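A hypothetical caller, sketched only to show how the -EIO from read_pat() would typically be consumed; check_pat() and the printk details are illustrative assumptions, not part of the openonload source:

static int check_pat(void)
{
  efx_qword_t pat;      /* assumed from the surrounding openonload headers */
  int rc = read_pat(&pat);

  if( rc )
    return rc;          /* -EIO: this CPU exposes no PAT MSR */

  /* Log the boot-time PAT layout before deciding on a write-combining slot. */
  printk(KERN_INFO "PAT = %08x:%08x\n", pat.u32[1], pat.u32[0]);
  return 0;
}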
Code example #4
File: cpuctl.c Project: derekmarcotte/freebsd
/*
 * Actually perform MSR operations.
 */
static int
cpuctl_do_msr(int cpu, cpuctl_msr_args_t *data, u_long cmd, struct thread *td)
{
	uint64_t reg;
	int is_bound = 0;
	int oldcpu;
	int ret;

	KASSERT(cpu >= 0 && cpu <= mp_maxid,
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));

	/*
	 * Explicitly clear cpuid data to avoid returning stale
	 * info
	 */
	DPRINTF("[cpuctl,%d]: operating on MSR %#0x for %d cpu\n", __LINE__,
	    data->msr, cpu);
#ifdef __i386__
	if ((cpu_feature & CPUID_MSR) == 0)
		return (ENODEV);
#endif
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	if (cmd == CPUCTL_RDMSR) {
		data->data = 0;
		ret = rdmsr_safe(data->msr, &data->data);
	} else if (cmd == CPUCTL_WRMSR) {
		ret = wrmsr_safe(data->msr, data->data);
	} else if (cmd == CPUCTL_MSRSBIT) {
		critical_enter();
		ret = rdmsr_safe(data->msr, &reg);
		if (ret == 0)
			ret = wrmsr_safe(data->msr, reg | data->data);
		critical_exit();
	} else if (cmd == CPUCTL_MSRCBIT) {
		critical_enter();
		ret = rdmsr_safe(data->msr, &reg);
		if (ret == 0)
			ret = wrmsr_safe(data->msr, reg & ~data->data);
		critical_exit();
	} else
		panic("[cpuctl,%d]: unknown operation requested: %lu",
		    __LINE__, cmd);
	restore_cpu(oldcpu, is_bound, td);
	return (ret);
}
Code example #5
File: msr.c Project: ut-osa/syncchar
static inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
{
	int err;

	err = rdmsr_safe(reg, eax, edx);
	if (err)
		err = -EIO;
	return err;
}
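In this older msr.c, helpers like rdmsr_eio() back the /dev/cpu/N/msr read path, so the -EIO produced here is what userspace sees when the RDMSR faults. A minimal userspace counterpart, assuming the msr module is loaded and /dev/cpu/0/msr is readable (normally requires root); the default MSR choice is illustrative:

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* Default to MSR_IA32_MISC_ENABLE (0x1a0) if no MSR number is given. */
	uint32_t msr = argc > 1 ? (uint32_t)strtoul(argv[1], NULL, 0) : 0x1a0;
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr driver interprets the file offset as the MSR number. */
	if (pread(fd, &val, sizeof(val), msr) != sizeof(val)) {
		perror("pread");	/* EIO here maps back to rdmsr_eio() above */
		close(fd);
		return 1;
	}
	printf("MSR 0x%" PRIx32 " = 0x%016" PRIx64 "\n", msr, val);
	close(fd);
	return 0;
}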
Code example #6
int VBOXCALL    supdrvOSMsrProberRead(uint32_t uMsr, RTCPUID idCpu, uint64_t *puValue)
{
# ifdef SUPDRV_LINUX_HAS_SAFE_MSR_API
    uint32_t u32Low, u32High;
    int rc;

    if (idCpu == NIL_RTCPUID)
        rc = rdmsr_safe(uMsr, &u32Low, &u32High);
    else if (RTMpIsCpuOnline(idCpu))
        rc = rdmsr_safe_on_cpu(idCpu, uMsr, &u32Low, &u32High);
    else
        return VERR_CPU_OFFLINE;
    if (rc == 0)
    {
        *puValue = RT_MAKE_U64(u32Low, u32High);
        return VINF_SUCCESS;
    }
    return VERR_ACCESS_DENIED;
# else
    return VERR_NOT_SUPPORTED;
# endif
}
Code example #7
File: mce_amd.c Project: sheep/xen
int mcequirk_amd_apply(enum mcequirk_amd_flags flags)
{
    uint64_t val;

    switch ( flags )
    {
    case MCEQUIRK_K8_GART:
        /*
         * Enable error reporting for all errors except for GART
         * TBL walk error reporting, which trips off incorrectly
         * with AGP GART & 3ware & Cerberus.
         */
        wrmsrl(MSR_IA32_MCx_CTL(4), ~(1ULL << 10));
        wrmsrl(MSR_IA32_MCx_STATUS(4), 0ULL);
        break;
    case MCEQUIRK_F10_GART:
        if ( rdmsr_safe(MSR_AMD64_MCx_MASK(4), val) == 0 )
                wrmsr_safe(MSR_AMD64_MCx_MASK(4), val | (1 << 10));
        break;
    }

    return 0;
}
Code example #8
File: sleep.c Project: jaehong/imx23-audio
/**
 * x86_acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int x86_acpi_suspend_lowlevel(void)
{
    struct wakeup_header *header =
        (struct wakeup_header *) __va(real_mode_header->wakeup_header);

    if (header->signature != WAKEUP_HEADER_SIGNATURE) {
        printk(KERN_ERR "wakeup header does not match\n");
        return -EINVAL;
    }

    header->video_mode = saved_video_mode;

    header->pmode_behavior = 0;

#ifndef CONFIG_64BIT
    native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

    /*
     * We have to check that we can write back the value, and not
     * just read it.  At least on 90 nm Pentium M (Family 6, Model
     * 13), reading an invalid MSR is not guaranteed to trap, see
     * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
     * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
     * nm process with 512-KB L2 Cache Specification Update".
     */
    if (!rdmsr_safe(MSR_EFER,
                    &header->pmode_efer_low,
                    &header->pmode_efer_high) &&
            !wrmsr_safe(MSR_EFER,
                        header->pmode_efer_low,
                        header->pmode_efer_high))
        header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */

    header->pmode_cr0 = read_cr0();
    if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
        header->pmode_cr4 = read_cr4();
        header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
    }
    if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
                    &header->pmode_misc_en_low,
                    &header->pmode_misc_en_high) &&
            !wrmsr_safe(MSR_IA32_MISC_ENABLE,
                        header->pmode_misc_en_low,
                        header->pmode_misc_en_high))
        header->pmode_behavior |=
            (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
    header->realmode_flags = acpi_realmode_flags;
    header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
    header->pmode_entry = (u32)&wakeup_pmode_return;
    header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
    saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
    stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
    early_gdt_descr.address =
        (unsigned long)get_cpu_gdt_table(smp_processor_id());
    initial_gs = per_cpu_offset(smp_processor_id());
#endif
    initial_code = (unsigned long)wakeup_long64;
    saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

    do_suspend_lowlevel();
    return 0;
}
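The Pentium M erratum comment in this version explains why a bare rdmsr_safe() is not enough: the value must also be written back successfully before the wakeup path is allowed to restore it. A minimal sketch of that probe-and-write-back idiom in isolation; msr_is_restorable() is a hypothetical helper name, not something in the kernel:

/* Treat an MSR as safe to save/restore only if reading it and writing
 * back the value just read both succeed (rdmsr_safe()/wrmsr_safe()
 * return 0 on success). */
static bool msr_is_restorable(u32 msr, u32 *lo, u32 *hi)
{
	return !rdmsr_safe(msr, lo, hi) && !wrmsr_safe(msr, *lo, *hi);
}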
Code example #9
/**
 * acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int acpi_suspend_lowlevel(void)
{
	struct wakeup_header *header;
	/* address in low memory of the wakeup routine. */
	char *acpi_realmode;

	acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);

	header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

	header->video_mode = saved_video_mode;

	header->wakeup_jmp_seg = acpi_wakeup_address >> 4;

	/*
	 * Set up the wakeup GDT.  We set these up as Big Real Mode,
	 * that is, with limits set to 4 GB.  At least the Lenovo
	 * Thinkpad X61 is known to need this for the video BIOS
	 * initialization quirk to work; this is likely to also
	 * be the case for other laptops or integrated video devices.
	 */

	/* GDT[0]: GDT self-pointer */
	header->wakeup_gdt[0] =
		(u64)(sizeof(header->wakeup_gdt) - 1) +
		((u64)__pa(&header->wakeup_gdt) << 16);
	/* GDT[1]: big real mode-like code segment */
	header->wakeup_gdt[1] =
		GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
	/* GDT[2]: big real mode-like data segment */
	header->wakeup_gdt[2] =
		GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);

#ifndef CONFIG_64BIT
	store_gdt((struct desc_ptr *)&header->pmode_gdt);

	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
		       &header->pmode_efer_high))
		header->pmode_efer_low = header->pmode_efer_high = 0;
#endif /* !CONFIG_64BIT */

	header->pmode_cr0 = read_cr0();
	header->pmode_cr4 = read_cr4_safe();
	header->pmode_behavior = 0;
	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
			&header->pmode_misc_en_low,
			&header->pmode_misc_en_high))
		header->pmode_behavior |=
			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa(&initial_page_table);
	saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
	header->trampoline_segment = trampoline_address() >> 4;
#ifdef CONFIG_SMP
	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
	early_gdt_descr.address =
			(unsigned long)get_cpu_gdt_table(smp_processor_id());
	initial_gs = per_cpu_offset(smp_processor_id());
#endif
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

	do_suspend_lowlevel();
	return 0;
}
Code example #10
File: intel.c Project: changliwei/suse_xen
static unsigned int probe_intel_cpuid_faulting(void)
{
	uint64_t x;
	return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) && (x & (1u<<31));
}
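Note the two-argument form: unlike the Linux rdmsr_safe() used in the examples above, Xen's rdmsr_safe() is a macro that writes the full 64-bit value into the named lvalue and returns nonzero on a faulting access. A side-by-side sketch of the two call shapes, with the destination variables assumed for illustration:

	/* Xen form: pass the 64-bit destination lvalue directly. */
	uint64_t plat_info;
	if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, plat_info) == 0 )
		/* use plat_info */;

	/* Linux form: pass pointers to the low and high 32-bit halves. */
	u32 lo, hi;
	if (rdmsr_safe(MSR_IA32_MISC_ENABLE, &lo, &hi) == 0)
		/* use ((u64)hi << 32) | lo */;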
Code example #11
int acpi_suspend_lowlevel(void)
{
	struct wakeup_header *header;
	
	char *acpi_realmode;

	acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);

	header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

	header->video_mode = saved_video_mode;

	header->wakeup_jmp_seg = acpi_wakeup_address >> 4;


	
	header->wakeup_gdt[0] =
		(u64)(sizeof(header->wakeup_gdt) - 1) +
		((u64)__pa(&header->wakeup_gdt) << 16);
	
	header->wakeup_gdt[1] =
		GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
	
	header->wakeup_gdt[2] =
		GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);

#ifndef CONFIG_64BIT
	store_gdt((struct desc_ptr *)&header->pmode_gdt);

	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
		       &header->pmode_efer_high))
		header->pmode_efer_low = header->pmode_efer_high = 0;
#endif 

	header->pmode_cr0 = read_cr0();
	header->pmode_cr4 = read_cr4_safe();
	header->pmode_behavior = 0;
	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
			&header->pmode_misc_en_low,
			&header->pmode_misc_en_high))
		header->pmode_behavior |=
			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa(&initial_page_table);
	saved_magic = 0x12345678;
#else 
	header->trampoline_segment = trampoline_address() >> 4;
#ifdef CONFIG_SMP
	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
	early_gdt_descr.address =
			(unsigned long)get_cpu_gdt_table(smp_processor_id());
	initial_gs = per_cpu_offset(smp_processor_id());
#endif
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif 

	do_suspend_lowlevel();
	return 0;
}
Code example #12
static void rd_old_pat(void *err)
{
	*(int *)err |= rdmsr_safe(X86_MSR_PAT_OFFSET,
				  &old_pat_lo[smp_processor_id()],
				  &old_pat_hi[smp_processor_id()]);
}
Code example #13
File: msr-on-cpu.c Project: johnny/CobraDroidBeta
/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
}
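This callback is normally dispatched with smp_call_function_single() so that the RDMSR executes on the requested CPU. A sketch of the dispatching wrapper, modeled on the same msr-on-cpu.c (kernel-internal code of that era, shown for context; not guaranteed to match every kernel version):

int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info rv;
	int err;

	rv.msr_no = msr_no;
	/* Run __rdmsr_safe_on_cpu() on the target CPU and wait for it. */
	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
	*l = rv.l;
	*h = rv.h;

	return err ? err : rv.err;
}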
Code example #14
File: intel.c Project: Chong-Li/xen
static unsigned int probe_intel_cpuid_faulting(void)
{
	uint64_t x;
	return !rdmsr_safe(MSR_INTEL_PLATFORM_INFO, x) &&
		(x & MSR_PLATFORM_INFO_CPUID_FAULTING);
}
Code example #15
File: sleep.c Project: Adjustxx/Savaged-Zen
/**
 * acpi_save_state_mem - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 *
 * Note that this is too late to change acpi_wakeup_address.
 */
int acpi_save_state_mem(void)
{
	struct wakeup_header *header;

	if (!acpi_realmode) {
		printk(KERN_ERR "Could not allocate memory during boot, "
		       "S3 disabled\n");
		return -ENOMEM;
	}
	memcpy((void *)acpi_realmode, &wakeup_code_start, WAKEUP_SIZE);

	header = (struct wakeup_header *)(acpi_realmode + HEADER_OFFSET);
	if (header->signature != 0x51ee1111) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

	header->video_mode = saved_video_mode;

	header->wakeup_jmp_seg = acpi_wakeup_address >> 4;

	/*
	 * Set up the wakeup GDT.  We set these up as Big Real Mode,
	 * that is, with limits set to 4 GB.  At least the Lenovo
	 * Thinkpad X61 is known to need this for the video BIOS
	 * initialization quirk to work; this is likely to also
	 * be the case for other laptops or integrated video devices.
	 */

	/* GDT[0]: GDT self-pointer */
	header->wakeup_gdt[0] =
		(u64)(sizeof(header->wakeup_gdt) - 1) +
		((u64)(acpi_wakeup_address +
			((char *)&header->wakeup_gdt - (char *)acpi_realmode))
				<< 16);
	/* GDT[1]: big real mode-like code segment */
	header->wakeup_gdt[1] =
		GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
	/* GDT[2]: big real mode-like data segment */
	header->wakeup_gdt[2] =
		GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);

#ifndef CONFIG_64BIT
	store_gdt((struct desc_ptr *)&header->pmode_gdt);

	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
		       &header->pmode_efer_high))
		header->pmode_efer_low = header->pmode_efer_high = 0;
#endif /* !CONFIG_64BIT */

	header->pmode_cr0 = read_cr0();
	header->pmode_cr4 = read_cr4_safe();
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa(&initial_page_table);
	saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
	header->trampoline_segment = setup_trampoline() >> 4;
#ifdef CONFIG_SMP
	stack_start.sp = temp_stack + sizeof(temp_stack);
	early_gdt_descr.address =
			(unsigned long)get_cpu_gdt_table(smp_processor_id());
	initial_gs = per_cpu_offset(smp_processor_id());
#endif
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

	return 0;
}
Code example #16
File: intel.c Project: Chong-Li/xen
/*
 * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
 * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
 * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
 * 'rev down' to E8400, you can set these values in these Xen boot parameters.
 */
static void set_cpuidmask(const struct cpuinfo_x86 *c)
{
	static unsigned int msr_basic, msr_ext, msr_xsave;
	static enum { not_parsed, no_mask, set_mask } status;
	u64 msr_val;

	if (status == no_mask)
		return;

	if (status == set_mask)
		goto setmask;

	ASSERT((status == not_parsed) && (c == &boot_cpu_data));
	status = no_mask;

	if (!~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
	       opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
	       opt_cpuid_mask_xsave_eax))
		return;

	/* Only family 6 supports this feature. */
	if (c->x86 != 6) {
		printk("No CPUID feature masking support available\n");
		return;
	}

	switch (c->x86_model) {
	case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
	case 0x1d: /* Dunnington(MP) */
		msr_basic = MSR_INTEL_MASK_V1_CPUID1;
		break;

	case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
	case 0x1e: /* Clarksfield, Lynnfield, Jasper Forest */
	case 0x1f: /* Something Nehalem-based - perhaps Auburndale/Havendale? */
	case 0x25: /* Arrandale, Clarksdale */
	case 0x2c: /* Gulftown, Westmere-EP */
	case 0x2e: /* Nehalem-EX(Beckton) */
	case 0x2f: /* Westmere-EX */
		msr_basic = MSR_INTEL_MASK_V2_CPUID1;
		msr_ext   = MSR_INTEL_MASK_V2_CPUID80000001;
		break;

	case 0x2a: /* SandyBridge */
	case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
		msr_basic = MSR_INTEL_MASK_V3_CPUID1;
		msr_ext   = MSR_INTEL_MASK_V3_CPUID80000001;
		msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
		break;
	}

	status = set_mask;

	if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
		if (msr_basic)
			printk("Writing CPUID feature mask ecx:edx -> %08x:%08x\n",
			       opt_cpuid_mask_ecx, opt_cpuid_mask_edx);
		else
			printk("No CPUID feature mask available\n");
	}
	else
		msr_basic = 0;

	if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
		if (msr_ext)
			printk("Writing CPUID extended feature mask ecx:edx -> %08x:%08x\n",
			       opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx);
		else
			printk("No CPUID extended feature mask available\n");
	}
	else
		msr_ext = 0;

	if (~opt_cpuid_mask_xsave_eax) {
		if (msr_xsave)
			printk("Writing CPUID xsave feature mask eax -> %08x\n",
			       opt_cpuid_mask_xsave_eax);
		else
			printk("No CPUID xsave feature mask available\n");
	}
	else
		msr_xsave = 0;

 setmask:
	if (msr_basic &&
	    wrmsr_safe(msr_basic,
		       ((u64)opt_cpuid_mask_edx << 32) | opt_cpuid_mask_ecx)){
		msr_basic = 0;
		printk("Failed to set CPUID feature mask\n");
	}

	if (msr_ext &&
	    wrmsr_safe(msr_ext,
		       ((u64)opt_cpuid_mask_ext_edx << 32) | opt_cpuid_mask_ext_ecx)){
		msr_ext = 0;
		printk("Failed to set CPUID extended feature mask\n");
	}

	if (msr_xsave &&
	    (rdmsr_safe(msr_xsave, msr_val) ||
	     wrmsr_safe(msr_xsave,
			(msr_val & (~0ULL << 32)) | opt_cpuid_mask_xsave_eax))){
		msr_xsave = 0;
		printk("Failed to set CPUID xsave feature mask\n");
	}
}
Code example #17
uint32_t
acpicpu_md_flags(void)
{
	struct cpu_info *ci = curcpu();
	struct pci_attach_args pa;
	uint32_t family, val = 0;
	uint32_t regs[4];
	uint64_t msr;

	if (acpi_md_ncpus() == 1)
		val |= ACPICPU_FLAG_C_BM;

	if ((ci->ci_feat_val[1] & CPUID2_MONITOR) != 0)
		val |= ACPICPU_FLAG_C_FFH;

	/*
	 * By default, assume that the local APIC timer
	 * as well as TSC are stalled during C3 sleep.
	 */
	val |= ACPICPU_FLAG_C_APIC | ACPICPU_FLAG_C_TSC;

	/*
	 * Detect whether TSC is invariant. If it is not, we keep the flag to
	 * note that TSC will not run at constant rate. Depending on the CPU,
	 * this may affect P- and T-state changes, but especially relevant
	 * are C-states; with variant TSC, states larger than C1 may
	 * completely stop the counter.
	 */
	if (tsc_is_invariant())
		val &= ~ACPICPU_FLAG_C_TSC;

	switch (cpu_vendor) {

	case CPUVENDOR_IDT:

		if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
			val |= ACPICPU_FLAG_P_FFH;

		if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
			val |= ACPICPU_FLAG_T_FFH;

		break;

	case CPUVENDOR_INTEL:

		/*
		 * Bus master control and arbitration should be
		 * available on all supported Intel CPUs (to be
		 * sure, this is double-checked later from the
		 * firmware data). These flags imply that it is
		 * not necessary to flush caches before C3 state.
		 */
		val |= ACPICPU_FLAG_C_BM | ACPICPU_FLAG_C_ARB;

		/*
		 * Check if we can use "native", MSR-based,
		 * access. If not, we have to resort to I/O.
		 */
		if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
			val |= ACPICPU_FLAG_P_FFH;

		if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
			val |= ACPICPU_FLAG_T_FFH;

		/*
		 * Check whether MSR_APERF, MSR_MPERF, and Turbo
		 * Boost are available. Also see if we might have
		 * an invariant local APIC timer ("ARAT").
		 */
		if (cpuid_level >= 0x06) {

			x86_cpuid(0x00000006, regs);

			if ((regs[2] & CPUID_DSPM_HWF) != 0)
				val |= ACPICPU_FLAG_P_HWF;

			if ((regs[0] & CPUID_DSPM_IDA) != 0)
				val |= ACPICPU_FLAG_P_TURBO;

			if ((regs[0] & CPUID_DSPM_ARAT) != 0)
				val &= ~ACPICPU_FLAG_C_APIC;
		}

		break;

	case CPUVENDOR_AMD:

		x86_cpuid(0x80000000, regs);

		if (regs[0] < 0x80000007)
			break;

		x86_cpuid(0x80000007, regs);

		family = CPUID_TO_FAMILY(ci->ci_signature);

	switch (family) {

		case 0x0f:

			/*
			 * Disable C1E if present.
			 */
			if (rdmsr_safe(MSR_CMPHALT, &msr) != EFAULT)
				val |= ACPICPU_FLAG_C_C1E;

			/*
			 * Evaluate support for the "FID/VID
			 * algorithm" also used by powernow(4).
			 */
			if ((regs[3] & CPUID_APM_FID) == 0)
				break;

			if ((regs[3] & CPUID_APM_VID) == 0)
				break;

			val |= ACPICPU_FLAG_P_FFH | ACPICPU_FLAG_P_FIDVID;
			break;

		case 0x10:
		case 0x11:

			/*
			 * Disable C1E if present.
			 */
			if (rdmsr_safe(MSR_CMPHALT, &msr) != EFAULT)
				val |= ACPICPU_FLAG_C_C1E;

			/* FALLTHROUGH */

		case 0x12:
		case 0x14: /* AMD Fusion */
		case 0x15: /* AMD Bulldozer */

			/*
			 * Like with Intel, detect MSR-based P-states,
			 * and AMD's "turbo" (Core Performance Boost),
			 * respectively.
			 */
			if ((regs[3] & CPUID_APM_HWP) != 0)
				val |= ACPICPU_FLAG_P_FFH;

			if ((regs[3] & CPUID_APM_CPB) != 0)
				val |= ACPICPU_FLAG_P_TURBO;

			/*
			 * Also check for APERF and MPERF,
			 * first available in the family 10h.
			 */
			if (cpuid_level >= 0x06) {

				x86_cpuid(0x00000006, regs);

				if ((regs[2] & CPUID_DSPM_HWF) != 0)
					val |= ACPICPU_FLAG_P_HWF;
			}

			break;
		}

		break;
	}

	/*
	 * There are several errata for PIIX4.
	 */
	if (pci_find_device(&pa, acpicpu_md_quirk_piix4) != 0)
		val |= ACPICPU_FLAG_PIIX4;

	return val;
}