Example #1
int cpu_check(void)
{
#ifdef MACOSX
    return 0;
#else
#ifdef X86
    unsigned af, bf, cf, df;

    /*
     * CPUID leaf 0: EBX, EDX, ECX hold the vendor string; the three
     * constants below spell "AuthenticAMD".
     */
    x86_cpuid(0, af, bf, cf, df);
    if (bf == 0x68747541 && cf == 0x444D4163 && df == 0x69746E65)
        amd = 1;

    /* CPUID leaf 1: feature flags in EDX and ECX. */
    x86_cpuid(1, af, bf, cf, df);
#ifdef X86_SSE
    if (!(df & (1 << 25)))  /* EDX bit 25: SSE */
        return 1;
#endif
#ifdef X86_SSE2
    if (!(df & (1 << 26)))  /* EDX bit 26: SSE2 */
        return 1;
#endif
#ifdef X86_SSE3
    if (!(cf & 1))          /* ECX bit 0: SSE3 */
        return 1;
#endif
#endif
#endif
    return 0;
}
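The three constants tested above are the vendor string "AuthenticAMD" read as little-endian 32-bit words from EBX, EDX, and ECX. A minimal standalone sketch that recovers the string, using GCC/Clang's <cpuid.h> instead of this example's x86_cpuid macro:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned a, b, c, d;
	char vendor[13];

	if (!__get_cpuid(0, &a, &b, &c, &d))
		return 1;

	/* Leaf 0 stores the vendor string in EBX, EDX, ECX, in that order. */
	memcpy(vendor + 0, &b, 4);
	memcpy(vendor + 4, &d, 4);
	memcpy(vendor + 8, &c, 4);
	vendor[12] = '\0';

	printf("%s\n", vendor);	/* e.g. "AuthenticAMD" or "GenuineIntel" */
	return 0;
}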
Example #2
int
powernow_probe(struct cpu_info *ci)
{
	uint32_t regs[4];

	x86_cpuid(0x80000000, regs);

	/*
	 * We need CPUID(0x80000007); leaf 0x80000000 returns the
	 * highest supported extended leaf in regs[0] (EAX).
	 */
	if (regs[0] < 0x80000007)
		return 0;
	x86_cpuid(0x80000007, regs);

	/*
	 * For now we're only interested in FID and VID for frequency scaling.
	 */

	return (regs[3] & AMD_PN_FID_VID) == AMD_PN_FID_VID;
}
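Querying an extended leaf such as 0x80000007 without first checking the maximum supported leaf, as the function above does, can return garbage on older CPUs. For comparison, GCC/Clang's __get_cpuid() performs that bounds check internally; a sketch of the same probe, assuming AMD's documented bit positions (FID is EDX bit 1 and VID is EDX bit 2 of leaf 0x80000007):

#include <cpuid.h>

#define APM_FID	(1u << 1)	/* EDX bit 1: frequency ID control */
#define APM_VID	(1u << 2)	/* EDX bit 2: voltage ID control */

static int
has_fid_vid(void)
{
	unsigned a, b, c, d;

	/* __get_cpuid() returns 0 if leaf 0x80000007 is unsupported. */
	if (!__get_cpuid(0x80000007, &a, &b, &c, &d))
		return 0;

	return (d & (APM_FID | APM_VID)) == (APM_FID | APM_VID);
}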
Example #3
void
x86_errata(void)
{
	struct cpu_info *ci;
	uint32_t descs[4];
	errata_t *e, *ex;
	cpurev_t rev;
	int i, j, upgrade;
	static int again;

	if (cpu_vendor != CPUVENDOR_AMD)
		return;

	ci = curcpu();

	/* Leaf 0x80000001: descs[0] (EAX) holds the processor signature. */
	x86_cpuid(0x80000001, descs);

	/* Scan (revision, signature) pairs; OINK terminates the table. */
	for (i = 0;; i += 2) {
		if ((rev = cpurevs[i]) == OINK)
			return;
		if (cpurevs[i + 1] == descs[0])
			break;
	}

	ex = errata + sizeof(errata) / sizeof(errata[0]);
	for (upgrade = 0, e = errata; e < ex; e++) {
		if (e->e_reported)
			continue;
		if (e->e_set != NULL) {
			for (j = 0; e->e_set[j] != OINK; j++)
				if (e->e_set[j] == rev)
					break;
			if (e->e_set[j] == OINK)
				continue;
		}

		aprint_debug_dev(ci->ci_dev, "testing for erratum %d\n",
		    e->e_num);

		if (e->e_act == NULL)
			e->e_reported = TRUE;
		else if ((*e->e_act)(ci, e) == FALSE)
			continue;

		aprint_verbose_dev(ci->ci_dev, "erratum %d present\n",
		    e->e_num);
		upgrade = 1;
	}

	if (upgrade && !again) {
		again = 1;
		aprint_normal_dev(ci->ci_dev, "WARNING: errata present, BIOS upgrade "
		    "may be\n");
		aprint_normal_dev(ci->ci_dev, "WARNING: necessary to ensure reliable "
		    "operation\n");
	}
}
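The errata_t and cpurevs definitions are not part of this excerpt. A hypothetical minimal shape, inferred only from how the loop above walks the tables (the revision names and signature values below are placeholders, not NetBSD's real ones):

#include <stdbool.h>
#include <stdint.h>

struct cpu_info;				/* opaque here */

typedef enum { OINK, BH_E4, DH_E3 } cpurev_t;	/* OINK ends every list */

typedef struct errata {
	int		e_num;		/* erratum number */
	bool		e_reported;	/* report each erratum only once */
	const cpurev_t	*e_set;		/* affected revisions, OINK-terminated */
	bool		(*e_act)(struct cpu_info *, struct errata *);
} errata_t;

/* Flat (revision, CPUID 0x80000001 EAX signature) pairs, OINK-terminated. */
static const uint32_t cpurevs[] = {
	BH_E4, 0x00020fb1,	/* placeholder signature */
	DH_E3, 0x00020ff0,	/* placeholder signature */
	OINK
};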
Example #4
void
tmx86_get_longrun_status(u_int *frequency, u_int *voltage, u_int *percentage)
{
	u_long eflags;
	u_int descs[4];

	eflags = x86_read_psl();
	x86_disable_intr();

	/*
	 * Transmeta LongRun status leaf: EAX is the current frequency
	 * in MHz, EBX the current voltage in mV, and ECX the current
	 * performance level as a percentage of the maximum. Interrupts
	 * are disabled so the three values form a consistent snapshot.
	 */
	x86_cpuid(0x80860007, descs);
	*frequency = descs[0];
	*voltage = descs[1];
	*percentage = descs[2];

	x86_write_psl(eflags);
}
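Leaf 0x80860007 is Transmeta-specific. Before relying on it, a caller would normally confirm that the processor is a Transmeta part and that leaf 0x80860000 reports at least 0x80860007 in EAX, analogous to the 0x80000000 check in powernow_probe() above. A sketch using GCC/Clang's raw __cpuid macro (the guard assumes the vendor has already been identified as Transmeta):

#include <cpuid.h>

static int
has_longrun_status(void)
{
	unsigned a, b, c, d;

	/* EAX of 0x80860000 holds the highest supported Transmeta leaf. */
	__cpuid(0x80860000, a, b, c, d);
	return a >= 0x80860007;
}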
Example #5
void vm_init_paging(struct multiboot_info *boot_info)
{
	struct x86_cpuid_info cpuid_info;
	struct frame *pgdir_frame;
	struct frame *pgtab_frame;
	pte_t *pgtab;
	ulong_t paddr, mem_max;

	/*
	 * Use the CPUID instruction to check whether large pages
	 * (the PSE feature) are supported.
	 */
	PANIC_IF(!x86_cpuid(&cpuid_info), "GeekOS requires a Pentium-class CPU");
	PANIC_IF(!cpuid_info.feature_info_edx.pse, "Processor does not support PSE");
	cons_printf("CPU supports PSE\n");

	/*
	 * Enable PSE by setting the PSE bit in CR4.
	 */
	x86_set_cr4(x86_get_cr4() | CR4_PSE);

	/*
	 * Allocate kernel page directory.
	 */
	pgdir_frame = mem_alloc_frame(FRAME_KERN, 1);
	s_kernel_pagedir = mem_frame_to_pa(pgdir_frame);
	memset(s_kernel_pagedir, '\0', PAGE_SIZE);

	/*
	 * We will support at most 2G of physical memory.
	 */
	mem_max = ((ulong_t) boot_info->mem_upper) * 1024;
	if (mem_max > (1UL << 31)) {
		mem_max = 1UL << 31;
	}

	/*
	 * We need a page table for the low 4M of the kernel address space,
	 * since we want to leave the zero page unmapped (to catch null pointer derefs).
	 */
	pgtab_frame = mem_alloc_frame(FRAME_KERN, 1);
	pgtab = mem_frame_to_pa(pgtab_frame);
	memset(pgtab, '\0', PAGE_SIZE);

	/*
	 * Initialize low page table, leaving page 0 unmapped
	 */
	for (paddr = PAGE_SIZE; paddr < VM_PT_SPAN; paddr += PAGE_SIZE) {
		vm_set_pte(pgtab, VM_WRITE|VM_READ|VM_EXEC, paddr, paddr);
	}

	/*
	 * Add low page table to the kernel pagedir.
	 */
	vm_set_pde(s_kernel_pagedir, VM_WRITE|VM_READ|VM_EXEC, 0UL, (u32_t) pgtab);

	/*
	 * Use 4M pages to map the rest of the low 2G of memory
	 */
	for (paddr = VM_PT_SPAN; paddr < mem_max; paddr += VM_PT_SPAN) {
		vm_set_pde_4m(s_kernel_pagedir, VM_WRITE|VM_READ|VM_EXEC, paddr, paddr);
	}

	/*
	 * Turn on paging!
	 */
	x86_set_cr3((u32_t) s_kernel_pagedir); /* set the kernel page directory */
	x86_set_cr0(x86_get_cr0() | CR0_PG);   /* turn on the paging bit in cr0 */

	cons_printf("Paging enabled\n");
}
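For comparison, the PSE probe behind x86_cpuid()/feature_info_edx.pse boils down to testing EDX bit 3 of CPUID leaf 1. A minimal user-space equivalent using GCC/Clang's <cpuid.h>:

#include <cpuid.h>

static int
cpu_has_pse(void)
{
	unsigned a, b, c, d;

	if (!__get_cpuid(1, &a, &b, &c, &d))
		return 0;		/* no leaf 1: pre-Pentium CPU */

	return (d & (1u << 3)) != 0;	/* EDX bit 3: Page Size Extension */
}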
Example #6
uint32_t
acpicpu_md_flags(void)
{
	struct cpu_info *ci = curcpu();
	struct pci_attach_args pa;
	uint32_t family, val = 0;
	uint32_t regs[4];
	uint64_t msr;

	if (acpi_md_ncpus() == 1)
		val |= ACPICPU_FLAG_C_BM;

	if ((ci->ci_feat_val[1] & CPUID2_MONITOR) != 0)
		val |= ACPICPU_FLAG_C_FFH;

	/*
	 * By default, assume that both the local APIC timer
	 * and the TSC stall during C3 sleep.
	 */
	val |= ACPICPU_FLAG_C_APIC | ACPICPU_FLAG_C_TSC;

	/*
	 * Detect whether the TSC is invariant. If it is not, we keep
	 * the flag to note that the TSC will not run at a constant
	 * rate. Depending on the CPU, this may affect P- and T-state
	 * changes, but especially relevant are C-states: with a
	 * variant TSC, states deeper than C1 may completely stop
	 * the counter.
	 */
	if (tsc_is_invariant())
		val &= ~ACPICPU_FLAG_C_TSC;

	switch (cpu_vendor) {

	case CPUVENDOR_IDT:

		if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
			val |= ACPICPU_FLAG_P_FFH;

		if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
			val |= ACPICPU_FLAG_T_FFH;

		break;

	case CPUVENDOR_INTEL:

		/*
		 * Bus master control and arbitration should be
		 * available on all supported Intel CPUs (to be
		 * sure, this is double-checked later from the
		 * firmware data). These flags imply that it is
		 * not necessary to flush caches before C3 state.
		 */
		val |= ACPICPU_FLAG_C_BM | ACPICPU_FLAG_C_ARB;

		/*
		 * Check if we can use "native", MSR-based,
		 * access. If not, we have to resort to I/O.
		 */
		if ((ci->ci_feat_val[1] & CPUID2_EST) != 0)
			val |= ACPICPU_FLAG_P_FFH;

		if ((ci->ci_feat_val[0] & CPUID_ACPI) != 0)
			val |= ACPICPU_FLAG_T_FFH;

		/*
		 * Check whether MSR_APERF, MSR_MPERF, and Turbo
		 * Boost are available. Also see if we might have
		 * an invariant local APIC timer ("ARAT").
		 */
		if (cpuid_level >= 0x06) {

			x86_cpuid(0x00000006, regs);

			if ((regs[2] & CPUID_DSPM_HWF) != 0)
				val |= ACPICPU_FLAG_P_HWF;

			if ((regs[0] & CPUID_DSPM_IDA) != 0)
				val |= ACPICPU_FLAG_P_TURBO;

			if ((regs[0] & CPUID_DSPM_ARAT) != 0)
				val &= ~ACPICPU_FLAG_C_APIC;
		}

		break;

	case CPUVENDOR_AMD:

		x86_cpuid(0x80000000, regs);

		if (regs[0] < 0x80000007)
			break;

		x86_cpuid(0x80000007, regs);

		family = CPUID_TO_FAMILY(ci->ci_signature);

		switch (family) {

		case 0x0f:

			/*
			 * Disable C1E if present.
			 */
			if (rdmsr_safe(MSR_CMPHALT, &msr) != EFAULT)
				val |= ACPICPU_FLAG_C_C1E;

			/*
			 * Evaluate support for the "FID/VID
			 * algorithm" also used by powernow(4).
			 */
			if ((regs[3] & CPUID_APM_FID) == 0)
				break;

			if ((regs[3] & CPUID_APM_VID) == 0)
				break;

			val |= ACPICPU_FLAG_P_FFH | ACPICPU_FLAG_P_FIDVID;
			break;

		case 0x10:
		case 0x11:

			/*
			 * Disable C1E if present.
			 */
			if (rdmsr_safe(MSR_CMPHALT, &msr) != EFAULT)
				val |= ACPICPU_FLAG_C_C1E;

			/* FALLTHROUGH */

		case 0x12:
		case 0x14: /* AMD Fusion */
		case 0x15: /* AMD Bulldozer */

			/*
			 * Like with Intel, detect MSR-based P-states,
			 * and AMD's "turbo" (Core Performance Boost),
			 * respectively.
			 */
			if ((regs[3] & CPUID_APM_HWP) != 0)
				val |= ACPICPU_FLAG_P_FFH;

			if ((regs[3] & CPUID_APM_CPB) != 0)
				val |= ACPICPU_FLAG_P_TURBO;

			/*
			 * Also check for APERF and MPERF,
			 * first available in family 10h.
			 */
			if (cpuid_level >= 0x06) {

				x86_cpuid(0x00000006, regs);

				if ((regs[2] & CPUID_DSPM_HWF) != 0)
					val |= ACPICPU_FLAG_P_HWF;
			}

			break;
		}

		break;
	}

	/*
	 * There are several errata for the PIIX4.
	 */
	if (pci_find_device(&pa, acpicpu_md_quirk_piix4) != 0)
		val |= ACPICPU_FLAG_PIIX4;

	return val;
}
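The leaf 0x06 probes above rely on three well-defined bits: EAX bit 1 (IDA, i.e. Turbo Boost), EAX bit 2 (ARAT, the always-running APIC timer), and ECX bit 0 (the APERF/MPERF hardware coordination feedback capability). A standalone sketch of the same checks, with the bit positions spelled out in place of the CPUID_DSPM_* macros:

#include <cpuid.h>

static int
probe_dspm(int *turbo, int *arat, int *hwf)
{
	unsigned a, b, c, d;

	if (!__get_cpuid(0x06, &a, &b, &c, &d))
		return 0;

	*turbo = (a >> 1) & 1;	/* EAX bit 1: IDA / Turbo Boost */
	*arat  = (a >> 2) & 1;	/* EAX bit 2: invariant APIC timer */
	*hwf   = c & 1;		/* ECX bit 0: APERF/MPERF feedback */
	return 1;
}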