Example #1
static void __init ms_hyperv_init_platform(void)
{
	int hv_host_info_eax;
	int hv_host_info_ebx;
	int hv_host_info_ecx;
	int hv_host_info_edx;

	/*
	 * Extract the features and hints
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	pr_info("HyperV: features 0x%x, hints 0x%x\n",
		ms_hyperv.features, ms_hyperv.hints);

	/*
	 * Extract host information.
	 */
	if (cpuid_eax(HVCPUID_VENDOR_MAXFUNCTION) >= HVCPUID_VERSION) {
		hv_host_info_eax = cpuid_eax(HVCPUID_VERSION);
		hv_host_info_ebx = cpuid_ebx(HVCPUID_VERSION);
		hv_host_info_ecx = cpuid_ecx(HVCPUID_VERSION);
		hv_host_info_edx = cpuid_edx(HVCPUID_VERSION);

		pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d\n",
			hv_host_info_eax, hv_host_info_ebx >> 16,
			hv_host_info_ebx & 0xFFFF, hv_host_info_ecx,
			hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
	}
}
Example #2
static void init_c3(struct cpuinfo_x86 *c)
{
	u32  lo, hi;

	/* Test for Centaur Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr(MSR_VIA_FCR, lo, hi);
			lo |= ACE_FCR;		/* enable ACE unit */
			wrmsr(MSR_VIA_FCR, lo, hi);
			pr_info("CPU: Enabled ACE h/w crypto\n");
		}

		/* enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr(MSR_VIA_RNG, lo, hi);
			lo |= RNG_ENABLE;	/* enable RNG unit */
			wrmsr(MSR_VIA_RNG, lo, hi);
			pr_info("CPU: Enabled h/w RNG\n");
		}

		/* store Centaur Extended Feature Flags as
		 * word 5 of the CPU capability bit array
		 */
		c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
	}
#ifdef CONFIG_X86_32
	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
	if (c->x86_model >= 6 && c->x86_model <= 13) {
		rdmsr(MSR_VIA_FCR, lo, hi);
		lo |= (1<<1 | 1<<7);
		wrmsr(MSR_VIA_FCR, lo, hi);
		set_cpu_cap(c, X86_FEATURE_CX8);
	}

	/* Before Nehemiah, the C3's had 3dNOW! */
	if (c->x86_model >= 6 && c->x86_model < 9)
		set_cpu_cap(c, X86_FEATURE_3DNOW);
#endif
	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
		c->x86_cache_alignment = c->x86_clflush_size * 2;
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
	}

	cpu_detect_cache_sizes(c);
}
Example #3
bool vmmr0_boot_cpu_has(unsigned int bit)
{
	static u32 svm_features;
	static bool initialized;

	if (!initialized) {
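		/* SVM feature flags are reported in EDX of the SVM CPUID leaf
		 * (SVM_CPUID_FUNC); cache them the first time we are called. */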
		svm_features = cpuid_edx(SVM_CPUID_FUNC);
		initialized = true;
	}
	switch (bit) {
	case X86_FEATURE_NPT:
		return svm_features & SVM_FEATURE_NPT;
	case X86_FEATURE_LBRV:
		return svm_features & SVM_FEATURE_LBRV;
	case X86_FEATURE_NRIPS:
		return svm_features & SVM_FEATURE_NRIP;
	case X86_FEATURE_FLUSHBYASID:
		return svm_features & SVM_FEATURE_FLUSH_ASID;
	case X86_FEATURE_DECODEASSISTS:
		return svm_features & SVM_FEATURE_DECODE_ASSIST;
	case X86_FEATURE_PAUSEFILTER:
		return svm_features & SVM_FEATURE_PAUSE_FILTER;
	default:
		return boot_cpu_has(bit);
	}
}
Example #4
static void init_c3(struct cpuinfo_x86 *c)
{
	uint64_t msr_content;

	/* Test for Centaur Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsrl(MSR_VIA_FCR, msr_content);
			/* enable ACE unit */
			wrmsrl(MSR_VIA_FCR, msr_content | ACE_FCR);
			printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
		}

		/* enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsrl(MSR_VIA_RNG, msr_content);
			/* enable RNG unit */
			wrmsrl(MSR_VIA_RNG, msr_content | RNG_ENABLE);
			printk(KERN_INFO "CPU: Enabled h/w RNG\n");
		}
	}

	if (c->x86 == 0x6 && c->x86_model >= 0xf) {
		c->x86_cache_alignment = c->x86_clflush_size * 2;
		__set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
	}

	get_model_name(c);
	display_cacheinfo(c);
}
Example #5
File: amd.c  Project: mobilipia/iods
void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	if (cpuid_eax(0x80000000) >= 0x80000007) {
		c->x86_power = cpuid_edx(0x80000007);
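		/* CPUID 0x80000007 EDX bit 8: invariant (constant-rate) TSC */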
		if (c->x86_power & (1<<8))
			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
	}
}
Example #6
static void early_init_transmeta(struct cpuinfo_x86 *c)
{
	u32 xlvl;

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		if (xlvl >= 0x80860001)
			c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
	}
}
Example #7
static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
{
	u32 xlvl;

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}
}
Example #8
/* Copied from the upstream's ms_hyperv_init_platform() */
void init_ms_hyperv_ext(void)
{
	/*
	 * Extract the features and hints
	 */

	ms_hyperv_ext.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv_ext.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv_ext.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	pr_info("Hyper-V: detected features 0x%x, hints 0x%x\n",
		ms_hyperv_ext.features, ms_hyperv_ext.hints);
}
Example #9
static void __init ms_hyperv_init_platform(void)
{
	/*
	 * Extract the features and hints
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	pr_info("HyperV: features 0x%x, hints 0x%x\n",
		ms_hyperv.features, ms_hyperv.hints);

#ifdef CONFIG_X86_LOCAL_APIC
	if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
		/*
		 * Get the APIC frequency.
		 */
		u64	hv_lapic_frequency;

		rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
		lapic_timer_frequency = hv_lapic_frequency;
		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
			lapic_timer_frequency);
	}

	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
			     "hv_nmi_unknown");
#endif

	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
	machine_ops.shutdown = hv_machine_shutdown;
	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
	mark_tsc_unstable("running on Hyper-V");

	/*
	 * Generation 2 instances don't support reading the NMI status from
	 * 0x61 port.
	 */
	if (efi_enabled(EFI_BOOT))
		x86_platform.get_nmi_reason = hv_get_nmi_reason;
}
Example #10
  int X(have_simd_sse2)(void)
  {
       static int init = 0, res;

       if (!init) {
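	    /* CPUID leaf 1 EDX: bit 26 = SSE2, bit 25 = SSE; DS() selects
	       the bit for the double- or single-precision build. */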
	    res =   !is_386() 
		 && has_cpuid()
		 && (cpuid_edx(1) & (1 << DS(26,25)))
		 && sse2_works();
	    init = 1;
	    X(check_alignment_of_sse2_pm)();
       }
       return res;
  }
Example #11
  int RIGHT_CPU(void)
  {
       static int wav2midi_init = 0, res;

       if (!wav2midi_init) {
	    res =   !is_386() 
		 && has_cpuid()
		 && (cpuid_edx(1) & (1 << 26))
		 && sse2_works();
	    wav2midi_init = 1;
	    X(check_alignment_of_sse2_pm)();
       }
       return res;
  }
Example #12
  int RIGHT_CPU(void)
  {
       static int init = 0, res;
       extern void X(check_alignment_of_sse_pmpm)(void);

       if (!init) {
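	    /* CPUID leaf 1 EDX bit 25 indicates SSE support */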
	    res =   !is_386() 
		 && has_cpuid()
		 && (cpuid_edx(1) & (1 << 25)) 
		 && sse_works();
	    init = 1;
	    X(check_alignment_of_sse_pmpm)();
       }
       return res;
  }
Example #13
static void feature_detect(void *info)
{
    struct cpufreq_policy *policy = info;
    unsigned int ecx, edx;

    ecx = cpuid_ecx(6);
    if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY) {
        policy->aperf_mperf = 1;
        powernow_cpufreq_driver.getavg = get_measured_perf;
    }

    edx = cpuid_edx(CPUID_FREQ_VOLT_CAPABILITIES);
    if ((edx & CPB_CAPABLE) == CPB_CAPABLE) {
        policy->turbo = CPUFREQ_TURBO_ENABLED;
        if (cpufreq_verbose)
            printk(XENLOG_INFO
                   "CPU%u: Core Boost/Turbo detected and enabled\n",
                   smp_processor_id());
    }
}
Example #14
static void __init ms_hyperv_init_platform(void)
{
	/*
	 * Extract the features and hints
	 */
	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);

	pr_info("HyperV: features 0x%x, hints 0x%x\n",
		ms_hyperv.features, ms_hyperv.hints);

#ifdef CONFIG_X86_LOCAL_APIC
	if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
		/*
		 * Get the APIC frequency.
		 */
		u64	hv_lapic_frequency;

		rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
		hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
		lapic_timer_frequency = hv_lapic_frequency;
		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
			lapic_timer_frequency);
	}
#endif

	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif

#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
	machine_ops.shutdown = hv_machine_shutdown;
	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
#endif
	mark_tsc_unstable("running on Hyper-V");
}
Example #15
static int __init detect_init_APIC (void)
{
    uint64_t msr_content;
    u32 features;

    /* Disabled by kernel option? */
    if (enable_local_apic < 0)
        return -1;

    switch (boot_cpu_data.x86_vendor) {
    case X86_VENDOR_AMD:
        if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
            (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x17))
            break;
        goto no_apic;
    case X86_VENDOR_INTEL:
        if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
            (boot_cpu_data.x86 == 5 && cpu_has_apic))
            break;
        goto no_apic;
    default:
        goto no_apic;
    }

    if (!cpu_has_apic) {
        /*
         * Over-ride BIOS and try to enable the local
         * APIC only if "lapic" specified.
         */
        if (enable_local_apic <= 0) {
            printk("Local APIC disabled by BIOS -- "
                   "you can enable it with \"lapic\"\n");
            return -1;
        }
        /*
         * Some BIOSes disable the local APIC in the
         * APIC_BASE MSR. This can only be done in
         * software for Intel P6 or later and AMD K7
         * (Model > 1) or later.
         */
        rdmsrl(MSR_IA32_APICBASE, msr_content);
        if (!(msr_content & MSR_IA32_APICBASE_ENABLE)) {
            printk("Local APIC disabled by BIOS -- reenabling.\n");
            msr_content &= ~MSR_IA32_APICBASE_BASE;
            msr_content |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
            wrmsrl(MSR_IA32_APICBASE, msr_content);
            enabled_via_apicbase = 1;
        }
    }
    /*
     * The APIC feature bit should now be enabled
     * in `cpuid'
     */
    features = cpuid_edx(1);
    if (!(features & (1 << X86_FEATURE_APIC))) {
        printk("Could not enable APIC!\n");
        return -1;
    }

    set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
    mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

    /* The BIOS may have set up the APIC at some other address */
    rdmsrl(MSR_IA32_APICBASE, msr_content);
    if (msr_content & MSR_IA32_APICBASE_ENABLE)
        mp_lapic_addr = msr_content & MSR_IA32_APICBASE_BASE;

    if (nmi_watchdog != NMI_NONE)
        nmi_watchdog = NMI_LOCAL_APIC;

    printk("Found and enabled local APIC!\n");

    apic_pm_activate();

    return 0;

no_apic:
    printk("No local APIC present or hardware disabled\n");
    return -1;
}
Example #16
static u8 l3_cache(void)
{
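	/* CPUID 0x80000006 EDX[31:18] holds the L3 cache size in 512 KB
	 * units; a non-zero field means an L3 cache is present. */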
	return (cpuid_edx(0x80000006) & (0x3FFF << 18)) != 0;
}
Example #17
static int __init detect_init_APIC (void)
{
	u32 h, l, features;
	extern void get_cpu_vendor(struct cpuinfo_x86*);

	/* Workaround for us being called before identify_cpu(). */
	get_cpu_vendor(&boot_cpu_data);

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1)
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 ||
		    (boot_cpu_data.x86 == 15 && cpu_has_apic) ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Some BIOSes disable the local APIC in the
		 * APIC_BASE MSR. This can only be done in
		 * software for Intel P6 and AMD K7 (Model > 1).
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			printk("Local APIC disabled by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
		}
	}
	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk("Could not enable APIC!\n");
		return -1;
	}
	set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
	boot_cpu_physical_apicid = 0;
	if (nmi_watchdog != NMI_NONE)
		nmi_watchdog = NMI_LOCAL_APIC;

	printk("Found and enabled local APIC!\n");

	apic_pm_init1();

	return 0;

no_apic:
	printk("No local APIC present or hardware disabled\n");
	return -1;
}
Example #18
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_time_ops = xen_time_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_apic_ops = xen_apic_ops;

	x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */

	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (!xen_initial_domain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	__supported_pte_mask |= _PAGE_IOMAP;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

#ifdef CONFIG_X86_64
	/* Work out if we support NX */
	check_efer();
#endif

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_stackprotector();

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

	pgd = (pgd_t *)xen_start_info->pt_base;

	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_off();

	xen_raw_console_write("mapping kernel into physical memory\n");
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

	init_mm.pgd = pgd;

	/* keep using Xen gdt for now; no urgent need to change it */

	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;

	/* set the limit of our address space */
	xen_reserve_top();

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
	}

	xen_raw_console_write("about to get started...\n");

	xen_setup_runstate_info(0);

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}
Example #19
int get_cpu_info(unsigned int cpu, struct cpupower_cpu_info *cpu_info)
{
	FILE *fp;
	char value[64];
	unsigned int proc, x;
	unsigned int unknown = 0xffffff;
	unsigned int cpuid_level, ext_cpuid_level;

	int ret = -EINVAL;

	cpu_info->vendor		= X86_VENDOR_UNKNOWN;
	cpu_info->family		= unknown;
	cpu_info->model			= unknown;
	cpu_info->stepping		= unknown;
	cpu_info->caps			= 0;

	fp = fopen("/proc/cpuinfo", "r");
	if (!fp)
		return -EIO;

	while (!feof(fp)) {
		if (!fgets(value, 64, fp))
			continue;
		value[63 - 1] = '\0';

		if (!strncmp(value, "processor\t: ", 12))
			sscanf(value, "processor\t: %u", &proc);

		if (proc != cpu)
			continue;

		/* Get the CPU vendor */
		if (!strncmp(value, "vendor_id", 9)) {
			for (x = 1; x < X86_VENDOR_MAX; x++) {
				if (strstr(value, cpu_vendor_table[x]))
					cpu_info->vendor = x;
			}
		/* Get family, model and stepping */
		} else if (!strncmp(value, "cpu family\t: ", 13)) {
			sscanf(value, "cpu family\t: %u",
			       &cpu_info->family);
		} else if (!strncmp(value, "model\t\t: ", 9)) {
			sscanf(value, "model\t\t: %u",
			       &cpu_info->model);
		} else if (!strncmp(value, "stepping\t: ", 10)) {
			sscanf(value, "stepping\t: %u",
			       &cpu_info->stepping);

			/* Stop parsing once every field has been read */
			if (cpu_info->vendor == X86_VENDOR_UNKNOWN ||
			    cpu_info->family == unknown ||
			    cpu_info->model == unknown ||
			    cpu_info->stepping == unknown) {
				ret = -EINVAL;
				goto out;
			}

			ret = 0;
			goto out;
		}
	}
	ret = -ENODEV;
out:
	fclose(fp);
	/* The CPUID-based checks below only apply to AMD and Intel CPUs */
	if (cpu_info->vendor != X86_VENDOR_AMD &&
	    cpu_info->vendor != X86_VENDOR_INTEL)
		return ret;

	cpuid_level	= cpuid_eax(0);
	ext_cpuid_level	= cpuid_eax(0x80000000);

	/* Invariant TSC: CPUID 0x80000007 EDX bit 8 */
	if (ext_cpuid_level >= 0x80000007 &&
	    (cpuid_edx(0x80000007) & (1 << 8)))
		cpu_info->caps |= CPUPOWER_CAP_INV_TSC;

	/* APERF/MPERF support: CPUID leaf 6 ECX bit 0 */
	if (cpuid_level >= 6 && (cpuid_ecx(6) & 0x1))
		cpu_info->caps |= CPUPOWER_CAP_APERF;

	/* AMD Core Performance Boost: CPUID 0x80000007 EDX bit 9 */
	if (cpu_info->vendor == X86_VENDOR_AMD) {
		if (ext_cpuid_level >= 0x80000007 &&
		    (cpuid_edx(0x80000007) & (1 << 9)))
			cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
	}

	if (cpu_info->vendor == X86_VENDOR_INTEL) {
		if (cpuid_level >= 6 &&
		    (cpuid_eax(6) & (1 << 1)))
			cpu_info->caps |= CPUPOWER_CAP_INTEL_IDA;
	}

	if (cpu_info->vendor == X86_VENDOR_INTEL) {
		/* Intel energy/performance bias: CPUID leaf 6 ECX bit 3 */
		if (cpuid_level >= 6 && (cpuid_ecx(6) & (1 << 3)))
			cpu_info->caps |= CPUPOWER_CAP_PERF_BIAS;

		/* Models whose turbo ratio can be read from an MSR */
		if (cpu_info->family == 6) {
			switch (cpu_info->model) {
			case 0x1A:	/* Nehalem: Core i7, Xeon 5500 series */
			case 0x1E:	/* Nehalem: Core i7/i5 (Lynnfield, Clarksfield) */
			case 0x1F:	/* Nehalem: Core i7/i5 */
			case 0x25:	/* Westmere client (Clarkdale, Arrandale) */
			case 0x2C:	/* Westmere-EP (Gulftown) */
				cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
			case 0x2A:	/* Sandy Bridge */
			case 0x2D:	/* Sandy Bridge Xeon (E5) */
				cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
				cpu_info->caps |= CPUPOWER_CAP_IS_SNB;
				break;
			case 0x2E:	/* Nehalem-EX Xeon */
			case 0x2F:	/* Westmere-EX Xeon */
			default:
				break;
			}
		}
	}

	return ret;
}
Example #20
static int __init detect_init_APIC(void)
{
    u32 h, l, features;

    /* Disabled by kernel option? */
    if (disable_apic)
        return -1;

    switch (boot_cpu_data.x86_vendor) {
    case X86_VENDOR_AMD:
        if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
                (boot_cpu_data.x86 >= 15))
            break;
        goto no_apic;
    case X86_VENDOR_INTEL:
        if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
                (boot_cpu_data.x86 == 5 && cpu_has_apic))
            break;
        goto no_apic;
    default:
        goto no_apic;
    }

    if (!cpu_has_apic) {

        if (!force_enable_local_apic) {
            pr_info("Local APIC disabled by BIOS -- "
                    "you can enable it with \"lapic\"\n");
            return -1;
        }

        rdmsr(MSR_IA32_APICBASE, l, h);
        if (!(l & MSR_IA32_APICBASE_ENABLE)) {
            pr_info("Local APIC disabled by BIOS -- reenabling.\n");
            l &= ~MSR_IA32_APICBASE_BASE;
            l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
            wrmsr(MSR_IA32_APICBASE, l, h);
            enabled_via_apicbase = 1;
        }
    }

    /* The APIC feature bit should now be enabled in `cpuid' */
    features = cpuid_edx(1);
    if (!(features & (1 << X86_FEATURE_APIC))) {
        pr_warning("Could not enable APIC!\n");
        return -1;
    }
    set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
    mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

    /* The BIOS may have set up the APIC at some other address */
    rdmsr(MSR_IA32_APICBASE, l, h);
    if (l & MSR_IA32_APICBASE_ENABLE)
        mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;

    pr_info("Found and enabled local APIC!\n");

    apic_pm_activate();

    return 0;

no_apic:
    pr_info("No local APIC present or hardware disabled\n");
    return -1;
}
Example #21
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_time_ops = xen_time_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_apic_ops = xen_apic_ops;
	pv_mmu_ops = xen_mmu_ops;

#ifdef CONFIG_X86_64
	/*
	 * Setup percpu state.  We only need to do this for 64-bit
	 * because 32-bit already has %fs set properly.
	 */
	load_percpu_segment(0);
#endif

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * set up the basic apic ops.
	 */
	set_xen_basic_apic_ops();
#endif

	xen_setup_features();

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	pgd = (pgd_t *)xen_start_info->pt_base;

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (!xen_initial_domain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

#ifdef CONFIG_X86_64
	/* Work out if we support NX */
	check_efer();
#endif

	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	local_irq_disable();
	early_boot_irqs_off();

	xen_raw_console_write("mapping kernel into physical memory\n");
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

	init_mm.pgd = pgd;

	/* keep using Xen gdt for now; no urgent need to change it */

	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;

	/* set the limit of our address space */
	xen_reserve_top();

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.wp_works_ok = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
	}

	xen_raw_console_write("about to get started...\n");

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}
Example #22
void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
	static const uint16_t spd_addr [] = {
		DIMM0, 0, 0, 0,
		DIMM1, 0, 0, 0,
	};

	int needs_reset;
	unsigned bsp_apicid = 0, nodes;
	struct mem_controller ctrl[8];

	if (!cpu_init_detectedx && boot_cpu()) {
		/* Nothing special needs to be done to find bus 0 */
		/* Allow the HT devices to be found */
		enumerate_ht_chain();
		sio_setup();
	}

	if (bist == 0)
		bsp_apicid = init_cpus(cpu_init_detectedx);

//	post_code(0x32);

	winbond_enable_serial(SERIAL_DEV, CONFIG_TTYS0_BASE);
	console_init();

	/* Halt if there was a built in self test failure */
	report_bist_failure(bist);

#if 0
	dump_pci_device(PCI_DEV(0, 0x18, 0));
#endif

	needs_reset = setup_coherent_ht_domain();

	wait_all_core0_started();
	// It is said that we should start core1 after all core0 launched
	start_other_cores();
	wait_all_other_cores_started(bsp_apicid);

#if CONFIG_SET_FIDVID
	/* Check to see if processor is capable of changing FIDVID  */
	/* otherwise it will throw a GP# when reading FIDVID_STATUS */
	if ((cpuid_edx(0x80000007) & 0x6) == 0x6) {
		msr_t msr;
		/* Read FIDVID_STATUS */
		msr = rdmsr(0xc0010042);
		printk(BIOS_DEBUG, "begin msr fid, vid: hi=0x%x, lo=0x%x\n", msr.hi, msr.lo);

		enable_fid_change();
		init_fidvid_bsp(bsp_apicid);

		msr = rdmsr(0xc0010042);
		printk(BIOS_DEBUG, "end msr fid, vid: hi=0x%x, lo=0x%x\n", msr.hi, msr.lo);
	}
#endif

	needs_reset |= ht_setup_chains_x();
	needs_reset |= ck804_early_setup_x();
	if (needs_reset) {
		printk(BIOS_INFO, "ht reset -\n");
		soft_reset();
	}

	allow_all_aps_stop(bsp_apicid);

	nodes = get_nodes();
	//It's the time to set ctrl now;
	fill_mem_ctrl(nodes, ctrl, spd_addr);

	enable_smbus();
#if 0
	dump_spd_registers(&cpu[0]);
	dump_smbus_registers();
#endif

	memreset_setup();
	sdram_initialize(nodes, ctrl);

#if 0
	print_pci_devices();
	dump_pci_devices();
#endif

	post_cache_as_ram();
}
Example #23
static int __init detect_init_APIC (void)
{
	u32 h, l, features;
	extern void get_cpu_vendor(struct cpuinfo_x86*);

	/* Disabled by DMI scan or kernel option? */
	if (enable_local_apic < 0)
		return -1;

	/* Workaround for us being called before identify_cpu(). */
	get_cpu_vendor(&boot_cpu_data);

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
		    (boot_cpu_data.x86 == 15))	    
			break;
		goto no_apic;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 6 ||
		    (boot_cpu_data.x86 == 15 && (cpu_has_apic || enable_local_apic > 0)) ||
		    (boot_cpu_data.x86 == 5 && cpu_has_apic))
			break;
		goto no_apic;
	default:
		goto no_apic;
	}

	if (!cpu_has_apic) {
		/*
		 * Over-ride BIOS and try to enable LAPIC
		 * only if "lapic" specified
		 */
		if (enable_local_apic != 1)
			goto no_apic;
		/*
		 * Some BIOSes disable the local APIC in the
		 * APIC_BASE MSR. This can only be done in
		 * software for Intel P6 and AMD K7 (Model > 1).
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
			apic_printk(APIC_VERBOSE, "Local APIC disabled "
					"by BIOS -- reenabling.\n");
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
			wrmsr(MSR_IA32_APICBASE, l, h);
			enabled_via_apicbase = 1;
		}
	}
	/*
	 * The APIC feature bit should now be enabled
	 * in `cpuid'
	 */
	features = cpuid_edx(1);
	if (!(features & (1 << X86_FEATURE_APIC))) {
		printk("Could not enable APIC!\n");
		return -1;
	}
	set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/* The BIOS may have set up the APIC at some other address */
	rdmsr(MSR_IA32_APICBASE, l, h);
	if (l & MSR_IA32_APICBASE_ENABLE)
		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;

	if (nmi_watchdog != NMI_NONE)
		nmi_watchdog = NMI_LOCAL_APIC;

	apic_printk(APIC_VERBOSE, "Found and enabled local APIC!\n");

	apic_pm_activate();

	return 0;

no_apic:
	printk("No local APIC present or hardware disabled\n");
	return -1;
}
Example #24
void arch_paging_init(void)
{
	memcpy(hv_paging, x86_64_paging, sizeof(x86_64_paging));
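	/* CPUID 0x80000001 EDX bit 26 advertises 1 GiB pages; without it,
	 * drop the 1 GiB page level from the hypervisor paging structures. */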
	if (!(cpuid_edx(0x80000001) & X86_FEATURE_GBPAGES))
		hv_paging[1].page_size = 0;
}
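All of the snippets above rely on a cpuid_edx() helper that each project defines for itself (the Linux kernel, Xen, coreboot and FFTW all ship their own). As a rough sketch only, assuming a GCC/Clang toolchain on x86 and using the compiler's <cpuid.h> helpers, a user-space equivalent could look like the following; it is an illustration, not any of the implementations used in the examples:

#include <stdint.h>
#include <cpuid.h>	/* GCC/Clang CPUID helpers */

/* Illustrative sketch: return EDX for the given CPUID leaf, or 0 if the
 * leaf is not supported on this CPU. */
static uint32_t cpuid_edx(uint32_t leaf)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(leaf, &eax, &ebx, &ecx, &edx))
		return 0;
	return edx;
}

For instance, cpuid_edx(1) & (1 << 25) tests for SSE as in Example #12, and cpuid_edx(0x80000007) & (1 << 8) tests for an invariant TSC as in Example #5.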