Example #1
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I586_CPU
	case CPU_586:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
#ifdef CPU_WT_ALLOC
			if (((cpu_id & 0x0f0) > 0) &&
			    ((cpu_id & 0x0f0) < 0x60) &&
			    ((cpu_id & 0x00f) > 3))
				enable_K5_wt_alloc();
			else if (((cpu_id & 0x0f0) > 0x80) ||
			    (((cpu_id & 0x0f0) == 0x80) &&
				(cpu_id & 0x00f) > 0x07))
				enable_K6_2_wt_alloc();
			else if ((cpu_id & 0x0f0) > 0x50)
				enable_K6_wt_alloc();
#endif
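			/* Family 5, model 0xa is the AMD Geode LX. */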
			if ((cpu_id & 0xf0) == 0xa0)
				/*
				 * Make sure the TSC runs through
				 * suspension, otherwise we can't use
				 * it as a timecounter.
				 */
				wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
			break;
		case CPU_VENDOR_CENTAUR:
			init_winchip();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		case CPU_VENDOR_RISE:
			init_rise();
			break;
		}
		break;
#endif
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_INTEL:
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
			break;
#ifdef CPU_ATHLON_SSE_HACK
		case CPU_VENDOR_AMD:
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
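				/*
				 * Clear the HWCR bit that hides SSE and
				 * re-read the CPUID feature flags.
				 */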
				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
			break;
#endif
		case CPU_VENDOR_CENTAUR:
			init_via();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
#if defined(PAE) || defined(PAE_TABLES)
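		/* Enable no-execute (NX) page protection if supported. */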
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
			elf32_nxstack = 1;
		}
#endif
		break;
#endif
	default:
		break;
	}
#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}
Example #2
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
				wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
#endif
		} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
			switch (cpu_id & 0xff0) {
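			/*
			 * Model 9 is the VIA C3 Nehemiah; init_via() is
			 * skipped on its earliest steppings.
			 */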
			case 0x690:
				if ((cpu_id & 0xf) < 3)
					break;
				/* fall through. */
			case 0x6a0:
			case 0x6d0:
			case 0x6f0:
				init_via();
				break;
			default:
				break;
			}
		}
#ifdef PAE
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
		}
#endif
		break;
#endif
	default:
		break;
	}
	enable_sse();

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * 	(Value * 8 = cache line size in bytes)
	 */
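	/* For example, a field value of 8 gives an 8 * 8 = 64-byte line. */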
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated when
	 * CLFLUSHing the APIC register window.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (cpu_vendor_id == CPU_VENDOR_INTEL && !(cpu_feature & CPUID_SS) &&
	    hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;
	/*
	 * Allow the CLFLUSH feature to be disabled manually via the
	 * hw.clflush_disable tunable.  This may help Xen guests on some
	 * AMD CPUs.
	 */
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS should flush the L1 cache itself because no PC-98 system
	 * supports non-Intel CPUs.  Use the wbinvd instruction before a DMA
	 * transfer when need_pre_dma_flush = 1, and the invd instruction
	 * after a DMA transfer when need_post_dma_flush = 1.  If your CPU
	 * upgrade product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option to your kernel configuration file;
	 * this option eliminates the unneeded cache flush instruction(s).
	 */
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
Example #3
void
initializecpu(void)
{
	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
			init_686_amd();
		} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
			switch (cpu_id & 0xff0) {
			case 0x690:
				if ((cpu_id & 0xf) < 3)
					break;
				/* fall through. */
			case 0x6a0:
			case 0x6d0:
			case 0x6f0:
				init_via();
				break;
			default:
				break;
			}
		}
		break;
#endif
	default:
		break;
	}
	enable_sse();
}