Example No. 1
/*
 * Boot time overrides that are not scaled against main memory
 */
void
init_param1(void)
{
#ifndef XEN
	vm_guest = detect_virtual();
#else
	vm_guest = VM_GUEST_XEN;
#endif
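	/*
	 * Pick the tick rate: honor the kern.hz loader tunable if set,
	 * otherwise default to a lower rate when running in a VM.
	 */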
	hz = -1;
	TUNABLE_INT_FETCH("kern.hz", &hz);
	if (hz == -1)
		hz = vm_guest > VM_GUEST_NO ? HZ_VM : HZ;
	tick = 1000000 / hz;

#ifdef VM_SWZONE_SIZE_MAX
	maxswzone = VM_SWZONE_SIZE_MAX;
#endif
	TUNABLE_LONG_FETCH("kern.maxswzone", &maxswzone);
#ifdef VM_BCACHE_SIZE_MAX
	maxbcache = VM_BCACHE_SIZE_MAX;
#endif
	TUNABLE_LONG_FETCH("kern.maxbcache", &maxbcache);
	msgbufsize = MSGBUF_SIZE;
	TUNABLE_INT_FETCH("kern.msgbufsize", &msgbufsize);

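	/*
	 * Default process VM limits (text, data and stack sizes, plus the
	 * stack growth increment), each overridable by a loader tunable.
	 */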
	maxtsiz = MAXTSIZ;
	TUNABLE_ULONG_FETCH("kern.maxtsiz", &maxtsiz);
	dfldsiz = DFLDSIZ;
	TUNABLE_ULONG_FETCH("kern.dfldsiz", &dfldsiz);
	maxdsiz = MAXDSIZ;
	TUNABLE_ULONG_FETCH("kern.maxdsiz", &maxdsiz);
	dflssiz = DFLSSIZ;
	TUNABLE_ULONG_FETCH("kern.dflssiz", &dflssiz);
	maxssiz = MAXSSIZ;
	TUNABLE_ULONG_FETCH("kern.maxssiz", &maxssiz);
	sgrowsiz = SGROWSIZ;
	TUNABLE_ULONG_FETCH("kern.sgrowsiz", &sgrowsiz);

	/*
	 * Let the administrator set {NGROUPS_MAX}, but disallow values
	 * less than NGROUPS_MAX which would violate POSIX.1-2008 or
	 * greater than INT_MAX-1 which would result in overflow.
	 */
	ngroups_max = NGROUPS_MAX;
	TUNABLE_INT_FETCH("kern.ngroups", &ngroups_max);
	if (ngroups_max < NGROUPS_MAX)
		ngroups_max = NGROUPS_MAX;

	/*
	 * Only allow the maximum pid to be lowered, never raised, and
	 * clamp it to a floor of 300 so that a too-low setting cannot
	 * leave the system unbootable.
	 */
	TUNABLE_INT_FETCH("kern.pid_max", &pid_max);
	if (pid_max > PID_MAX)
		pid_max = PID_MAX;
	else if (pid_max < 300)
		pid_max = 300;
}
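
The tunables above all follow one pattern: start from the compile-time default, let a loader tunable override it, then clamp the result to a safe range. Below is a minimal user-space sketch of the kern.pid_max clamp; the PID_MAX value and the clamp_pid_max helper are illustrative assumptions, not kernel API.

/*
 * Minimal sketch of the pid_max clamping logic above.  PID_MAX here is
 * an assumed illustrative value; the kernel takes its own definition
 * from its headers.
 */
#include <stdio.h>
#include <stdlib.h>

#define PID_MAX	99999			/* assumed for illustration */

static int
clamp_pid_max(int requested)		/* hypothetical helper */
{
	if (requested > PID_MAX)	/* never allow raising past PID_MAX */
		return (PID_MAX);
	if (requested < 300)		/* floor keeps the system bootable */
		return (300);
	return (requested);
}

int
main(int argc, char **argv)
{
	int req = (argc > 1) ? atoi(argv[1]) : PID_MAX;

	printf("kern.pid_max=%d -> %d\n", req, clamp_pid_max(req));
	return (0);
}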
Example No. 2
/*
 * Initialize CPU control registers
 */
void
initializecpu(int cpu)
{
	uint64_t msr;

	/* Check for FXSR and SSE support and enable if available. */
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}

	if (cpu == 0) {
		/* Check if we are running in a hypervisor. */
		vmm_guest = detect_virtual();
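		/* CPUID.1:ECX bit 31 is set when running under a hypervisor. */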
		if (vmm_guest == VMM_GUEST_NONE && (cpu_feature2 & CPUID2_VMM))
			vmm_guest = VMM_GUEST_UNKNOWN;
	}

#if !defined(CPU_DISABLE_AVX)
	/* Check for XSAVE and AVX support and enable if available. */
	if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE)
	     && (cpu_feature & CPUID_SSE)) {
		load_cr4(rcr4() | CR4_XSAVE);

		/* Adjust size of savefpu in npx.h before adding to mask. */
		xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE | CPU_XFEATURE_YMM, 0);
		cpu_xsave = 1;
	}
#endif

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0000) {
		case 0x100000:
		case 0x120000:
			/*
			 * Errata 721 is the cpu bug found by yours truly
			 * (Matthew Dillon).  A sequence of five or more
			 * popq instructions followed by a retq, under
			 * certain deep-recursion conditions, can leave
			 * %rsp improperly updated, almost always causing
			 * a seg-fault soon after.
			 *
			 * Do not install the workaround when we are running
			 * in a virtual machine.
			 */
			if (vmm_guest)
				break;

			msr = rdmsr(MSR_AMD_DE_CFG);
			if ((msr & 1) == 0) {
				if (cpu == 0)
					kprintf("Errata 721 workaround "
						"installed\n");
				msr |= 1;
				wrmsr(MSR_AMD_DE_CFG, msr);
			}
			break;
		}
	}

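	/* Enable no-execute (NX) page protection via the EFER MSR. */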
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
#if 0 /* JG */
		pg_nx = PG_NX;
#endif
	}
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    CPUID_TO_MODEL(cpu_id) >= 0xf)
		init_via();

	TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
	if (cpu_feature & CPUID_CLFSH) {
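		/* CPUID.1:EBX[15:8] gives the CLFLUSH line size in 8-byte units. */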
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

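		/*
		 * hw.clflush_enable: 0 forces CLFLUSH off; -1 (auto)
		 * disables it when running under a hypervisor.
		 */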
		if (hw_clflush_enable == 0 ||
		    ((hw_clflush_enable == -1) && vmm_guest))
			cpu_feature &= ~CPUID_CLFSH;
	}
}
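
The CLFLUSH handling above derives the cache-line flush size from CPUID leaf 1. Here is a user-space sketch of the same computation, using the GCC/Clang <cpuid.h> intrinsic (x86 only; an illustration of the bit layout, not the kernel's code path):

/*
 * Print the CLFLUSH line size the same way initializecpu() computes it:
 * CPUID.1:EBX[15:8] is the line size in 8-byte units.
 */
#include <stdio.h>
#include <cpuid.h>

#define CPUID_CLFSH	0x00080000	/* CPUID.1:EDX bit 19 */

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return (1);		/* CPUID leaf 1 unsupported */
	if (edx & CPUID_CLFSH)
		printf("clflush line size: %u bytes\n",
		    ((ebx >> 8) & 0xff) * 8);
	else
		printf("CLFLUSH not supported\n");
	return (0);
}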