Example #1
boolean_t
lapic_probe(void)
{
	uint32_t	lo;
	uint32_t	hi;

	if (cpuid_features() & CPUID_FEATURE_APIC)
		return TRUE;

	if (cpuid_family() == 6 || cpuid_family() == 15) {
		/*
		 * Mobile Pentiums:
		 * There may be a local APIC which wasn't enabled by BIOS.
		 * So we try to enable it explicitly.
		 */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		lo &= ~MSR_IA32_APIC_BASE_BASE;
		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
		lo |= MSR_IA32_APIC_BASE_ENABLE;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);

		/*
		 * Re-initialize cpu features info and re-check.
		 */
		cpuid_set_info();
		if (cpuid_features() & CPUID_FEATURE_APIC) {
			printf("Local APIC discovered and enabled\n");
			lapic_os_enabled = TRUE;
			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
			return TRUE;
		}
	}

	return FALSE;
}
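The probe above force-enables a dormant local APIC by rewriting IA32_APIC_BASE through rdmsr/wrmsr. For reference, a minimal sketch of such MSR accessors using GCC-style inline assembly; the helper names here are illustrative, not the kernel's own macros.

/*
 * Sketch only: read/write an MSR as the lo/hi pair lapic_probe() expects.
 * RDMSR returns the value in EDX:EAX for the MSR selected by ECX;
 * WRMSR takes the same register convention.
 */
static inline void
rdmsr_sketch(uint32_t msr, uint32_t *lo, uint32_t *hi)
{
	__asm__ volatile ("rdmsr" : "=a" (*lo), "=d" (*hi) : "c" (msr));
}

static inline void
wrmsr_sketch(uint32_t msr, uint32_t lo, uint32_t hi)
{
	__asm__ volatile ("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
}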
Example #2
/*
 * rtc_timer_init() is called at startup on the boot processor only.
 */
void
rtc_timer_init(void)
{
	int	TSC_deadline_timer = 0;

	/* See whether we can use the local apic in TSC-deadline mode */
	if ((cpuid_features() & CPUID_FEATURE_TSCTMR)) {
		TSC_deadline_timer = 1;
		PE_parse_boot_argn("TSC_deadline_timer", &TSC_deadline_timer,
				   sizeof(TSC_deadline_timer));
		printf("TSC Deadline Timer supported %s enabled\n",
			TSC_deadline_timer ? "and" : "but not");
	}

	if (TSC_deadline_timer) {
		rtc_timer = &rtc_timer_tsc_deadline;
		rtc_decrementer_max = UINT64_MAX;	/* effectively none */
		/*
		 * The min could be as low as 1nsec,
		 * but we're being conservative for now and making it the same
		 * as for the local apic timer.
		 */
		rtc_decrementer_min = 1*NSEC_PER_USEC;	/* 1 usec */
	} else {
		/*
		 * Compute the longest interval using LAPIC timer.
		 */
		rtc_decrementer_max = tmrCvt(0x7fffffffULL, busFCvtt2n);
		kprintf("maxDec: %lld\n", rtc_decrementer_max);
		rtc_decrementer_min = 1*NSEC_PER_USEC;	/* 1 usec */
	}

	/* Point LAPIC interrupts to hardclock() */
	lapic_set_timer_func((i386_intr_func_t) rtclock_intr);
}
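rtc_timer_init() only selects the timer mode; arming a deadline in TSC-deadline mode then amounts to writing an absolute TSC value into the IA32_TSC_DEADLINE MSR (0x6E0). A hedged sketch, with an illustrative helper name rather than the kernel's rtc_timer_tsc_deadline internals:

/* Sketch only: program an absolute TSC deadline via IA32_TSC_DEADLINE. */
#define MSR_IA32_TSC_DEADLINE_SKETCH	0x6E0

static inline void
tsc_deadline_arm_sketch(uint64_t deadline_tsc)
{
	uint32_t lo = (uint32_t) deadline_tsc;
	uint32_t hi = (uint32_t) (deadline_tsc >> 32);

	/* The LAPIC timer must already be configured for TSC-deadline mode. */
	__asm__ volatile ("wrmsr" : : "c" (MSR_IA32_TSC_DEADLINE_SKETCH),
			  "a" (lo), "d" (hi));
}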
Example #3
/*
 * Look for FPU and initialize it.
 * Called on each CPU.
 */
void
init_fpu(void)
{
	unsigned short	status, control;

	/*
	 * Check for FPU by initializing it,
	 * then trying to read the correct bit patterns from
	 * the control and status registers.
	 */
	set_cr0(get_cr0() & ~(CR0_EM|CR0_TS));	/* allow use of FPU */

	fninit();
	status = fnstsw();
	fnstcw(&control);

	if ((status & 0xff) == 0 &&
	    (control & 0x103f) == 0x3f)
	{
		fp_kind = FP_387;	/* assume we have a 387 compatible instruction set */
		/* Use FPU save/restore instructions if available */
		if (cpuid_features() & CPUID_FEATURE_FXSR) {
			fp_kind = FP_FXSR;
			set_cr4(get_cr4() | CR4_FXS);
			printf("Enabling XMM register save/restore");
			/* And allow SIMD instructions if present */
			if (cpuid_features() & CPUID_FEATURE_SSE) {
				printf(" and SSE/SSE2");
				set_cr4(get_cr4() | CR4_XMM);
			}
			printf(" opcodes\n");
		}

		/*
		 * Trap wait instructions.  Turn off FPU for now.
		 */
		set_cr0(get_cr0() | CR0_TS | CR0_MP);
	}
	else
	{
		/*
		 * NO FPU.
		 */
		fp_kind = FP_NO;
		set_cr0(get_cr0() | CR0_EM);
	}
}
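The probe depends on fninit/fnstsw/fnstcw reaching the hardware directly: after FNINIT, the status word should read 0 and the masked control-word bits should read 0x3f. A minimal sketch of those primitives with GCC-style inline assembly (the kernel wraps them in its own helpers):

/* Sketch only: raw x87 probe primitives. */
static inline void
fninit_sketch(void)
{
	__asm__ volatile ("fninit");
}

static inline unsigned short
fnstsw_sketch(void)
{
	unsigned short status;

	__asm__ volatile ("fnstsw %0" : "=a" (status));	/* status word -> AX */
	return status;
}

static inline void
fnstcw_sketch(unsigned short *control)
{
	__asm__ volatile ("fnstcw %0" : "=m" (*control));	/* control word -> memory */
}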
Example #4
static void
put_cpu_info (void)
{
  struct cpuid_basic_info cpu_info;
  cpuid_basic_info (&cpu_info);

  struct cpuid_processor_brand_string cpu_brand;
  cpuid_processor_brand_string (&cpu_brand);

  videoram_printf ("CPU: %.12s (%.72s)\n",
                   cpu_info.vendor_string,
                   cpu_brand.string);

  uint64_t cpu_feats = cpuid_features ();
# define FEAT_EDX(NAME) \
    if (cpu_feats & CPUID_FEAT_EDX_##NAME) \
      videoram_puts (#NAME " ", COLOR_NORMAL)
# define FEAT_ECX(NAME) \
    if (cpu_feats & CPUID_FEAT_ECX_##NAME) \
      videoram_puts (#NAME " ", COLOR_NORMAL)

  videoram_puts ("Features: ", COLOR_NORMAL);
  FEAT_EDX (FPU); FEAT_EDX (VME); FEAT_EDX (DE); FEAT_EDX (PSE);
  FEAT_EDX (TSC); FEAT_EDX (MSR); FEAT_EDX (PAE); FEAT_EDX (MCE);
  FEAT_EDX (CX8); FEAT_EDX (APIC); FEAT_EDX (SEP); FEAT_EDX (MTRR);
  FEAT_EDX (PGE); FEAT_EDX (MCA); FEAT_EDX (CMOV); FEAT_EDX (PAT);
  FEAT_EDX (PSE36); FEAT_EDX (PSN); FEAT_EDX (CLF); FEAT_EDX (DTES);
  FEAT_EDX (ACPI); FEAT_EDX (MMX); FEAT_EDX (FXSR); FEAT_EDX (SSE);
  FEAT_EDX (SSE2); FEAT_EDX (SS); FEAT_EDX (HTT); FEAT_EDX (TM1);
  FEAT_EDX (IA64); FEAT_EDX (PBE);
  videoram_put_ln ();

  videoram_puts ("More features: ", COLOR_NORMAL);
  FEAT_ECX (SSE3); FEAT_ECX (PCLMUL); FEAT_ECX (DTES64); FEAT_ECX (MONITOR);
  FEAT_ECX (DS_CPL); FEAT_ECX (VMX); FEAT_ECX (SMX); FEAT_ECX (EST);
  FEAT_ECX (TM2); FEAT_ECX (SSSE3); FEAT_ECX (CID); FEAT_ECX (FMA);
  FEAT_ECX (CX16); FEAT_ECX (ETPRD); FEAT_ECX (PDCM); FEAT_ECX (DCA);
  FEAT_ECX (SSE4_1); FEAT_ECX (SSE4_2); FEAT_ECX (x2APIC); FEAT_ECX (MOVBE);
  FEAT_ECX (POPCNT); FEAT_ECX (AES); FEAT_ECX (XSAVE); FEAT_ECX (OSXSAVE);
  FEAT_ECX (AVX);
  videoram_put_ln ();
  
  videoram_put_ln ();
}
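put_cpu_info() assumes cpuid_features() packs both feature registers of CPUID leaf 1 into one 64-bit word. A sketch of how such a word could be built, assuming EDX in the low half and ECX in the high half (the real layout behind CPUID_FEAT_EDX_* / CPUID_FEAT_ECX_* may differ):

/* Sketch only: combine CPUID.(EAX=1) EDX and ECX into one feature word. */
static uint64_t
cpuid_features_sketch (void)
{
  uint32_t eax, ebx, ecx, edx;

  __asm__ volatile ("cpuid"
                    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                    : "a" (1), "c" (0));
  return ((uint64_t) ecx << 32) | edx;   /* assumed packing: ECX high, EDX low */
}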
Example #5
void vmm_excp_mce()
{
   ia32_mcg_cap_t    cap;
   ia32_mci_status_t mci_sts;
   uint16_t          i;
   uint32_t          c=0,d;

   cpuid_features(c,d);

   if(! (d & (CPUID_EDX_FEAT_MCE|CPUID_EDX_FEAT_MCA)))
   {
      debug(EXCP, "MCE/MCA unsupported\n");
      return;
   }

   rd_msr_ia32_mcg_cap(cap);
   debug(EXCP, "MCE_CAP 0x%X\n", cap.raw);

   for(i=0 ; i<cap.count; i++)
   {
      rd_msr_ia32_mci_status(mci_sts, i);
      debug(EXCP, "MC%d_STS 0x%X\n", i, mci_sts.raw);
   }
}
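For context, the cap.count field read above corresponds to bits 7:0 of IA32_MCG_CAP, which is why it bounds the MCi_STATUS loop. A sketch of a bitfield overlay for that MSR (the project's actual ia32_mcg_cap_t may be laid out differently):

/* Sketch of an IA32_MCG_CAP overlay; field names are illustrative. */
typedef union mcg_cap_sketch
{
   uint64_t raw;
   struct
   {
      uint64_t count:8;      /* number of MCi_* bank register sets (bits 7:0) */
      uint64_t mcg_ctl_p:1;  /* IA32_MCG_CTL present (bit 8) */
      uint64_t rsvd:55;
   };
} mcg_cap_sketch_t;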
Example #6
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);
	
	switch (cpu_info.vector_unit) {
		case 9:
			bits |= kHasAVX1_0;
			/* fall thru */
		case 8:
			bits |= kHasSSE4_2;
			/* fall thru */
		case 7:
			bits |= kHasSSE4_1;
			/* fall thru */
		case 6:
			bits |= kHasSupplementalSSE3;
			/* fall thru */
		case 5:
			bits |= kHasSSE3;
			/* fall thru */
		case 4:
			bits |= kHasSSE2;
			/* fall thru */
		case 3:
			bits |= kHasSSE;
			/* fall thru */
		case 2:
			bits |= kHasMMX;
		default:
			break;
	}
	switch (cpu_info.cache_line_size) {
		case 128:
			bits |= kCache128;
			break;
		case 64:
			bits |= kCache64;
			break;
		case 32:
			bits |= kCache32;
			break;
		default:
			break;
	}
	cpus = commpage_cpus();			// how many CPUs do we have

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// we use %gs for TLS

#define setif(_bits, _bit, _condition) \
	if (_condition) _bits |= _bit

	setif(bits, kUP,         cpus == 1);
	setif(bits, k64Bit,      cpu_mode_is64bit());
	setif(bits, kSlow,       tscFreq <= SLOW_TSC_THRESHOLD);

	setif(bits, kHasAES,     cpuid_features() &
					CPUID_FEATURE_AES);
	setif(bits, kHasF16C,    cpuid_features() &
					CPUID_FEATURE_F16C);
	setif(bits, kHasRDRAND,  cpuid_features() &
					CPUID_FEATURE_RDRAND);
	setif(bits, kHasFMA,     cpuid_features() &
					CPUID_FEATURE_FMA);

	setif(bits, kHasBMI1,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI1);
	setif(bits, kHasBMI2,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI2);
	setif(bits, kHasRTM,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RTM);
	setif(bits, kHasHLE,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_HLE);
	setif(bits, kHasAVX2_0,  cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_AVX2);
	setif(bits, kHasRDSEED,  cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RDSEED);
	setif(bits, kHasADX,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_ADX);
	
	setif(bits, kHasMPX,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_MPX);
	setif(bits, kHasSGX,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_SGX);
	uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
	setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
				 (cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_ERMS));
	
	_cpu_capabilities = bits;		// set kernel version for use by drivers etc
}
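Most of the leaf-7 checks above come from CPUID.(EAX=7,ECX=0), which reports BMI1, BMI2, AVX2, HLE, RTM, ERMS, RDSEED, ADX, MPX and SGX in EBX. A minimal sketch of fetching that word, with an illustrative helper name rather than the kernel's cached cpuid_leaf7_features():

/* Sketch only: read the structured extended feature flags, subleaf 0. */
static inline uint32_t
cpuid_leaf7_ebx_sketch(void)
{
	uint32_t eax, ebx, ecx, edx;

	__asm__ volatile ("cpuid"
			  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			  : "a" (7), "c" (0));
	return ebx;
}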
Example #7
static void
commpage_init_cpu_capabilities( void )
{
	uint64_t bits;
	int cpus;
	ml_cpu_info_t cpu_info;

	bits = 0;
	ml_cpu_get_info(&cpu_info);

	switch (cpu_info.vector_unit) {
		case 9:
			bits |= kHasAVX1_0;
			/* fall thru */
		case 8:
			bits |= kHasSSE4_2;
			/* fall thru */
		case 7:
			bits |= kHasSSE4_1;
			/* fall thru */
		case 6:
			bits |= kHasSupplementalSSE3;
			/* fall thru */
		case 5:
			bits |= kHasSSE3;
			/* fall thru */
		case 4:
			bits |= kHasSSE2;
			/* fall thru */
		case 3:
			bits |= kHasSSE;
			/* fall thru */
		case 2:
			bits |= kHasMMX;
		default:
			break;
	}
	switch (cpu_info.cache_line_size) {
		case 128:
			bits |= kCache128;
			break;
		case 64:
			bits |= kCache64;
			break;
		case 32:
			bits |= kCache32;
			break;
		default:
			break;
	}
	cpus = commpage_cpus();			// how many CPUs do we have

	/** Sinetek: by default we'd like some reasonable values,
	 **  so that the userspace runs correctly.
	 **
	 ** On Mountain Lion, kHasSSE4_2 provides vanilla SSE2 routines.
	 ** On Mavericks, we need a bit more support: SSE3, SSE3X.
	 **/
	if (IsAmdCPU()) {
		bits |= kHasSSE4_2;
		bits &= ~kHasSupplementalSSE3;
#define MAVERICKS_AMD
#ifdef MAVERICKS_AMD
		bits |= kHasSSE3;
	//	bits |= kHasSupplementalSSE3;
		bits &= ~kHasSSE4_2;
#endif
	}

	bits |= (cpus << kNumCPUsShift);

	bits |= kFastThreadLocalStorage;	// we use %gs for TLS

#define setif(_bits, _bit, _condition) \
	if (_condition) _bits |= _bit

	setif(bits, kUP,         cpus == 1);
	setif(bits, k64Bit,      cpu_mode_is64bit());
	setif(bits, kSlow,       tscFreq <= SLOW_TSC_THRESHOLD);

	setif(bits, kHasAES,     cpuid_features() &
					CPUID_FEATURE_AES);
	setif(bits, kHasF16C,    cpuid_features() &
					CPUID_FEATURE_F16C);
	setif(bits, kHasRDRAND,  cpuid_features() &
					CPUID_FEATURE_RDRAND);
	setif(bits, kHasFMA,     cpuid_features() &
					CPUID_FEATURE_FMA);

	setif(bits, kHasBMI1,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI1);
	setif(bits, kHasBMI2,    cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_BMI2);
	setif(bits, kHasRTM,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_RTM);
	setif(bits, kHasHLE,     cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_HLE);
	setif(bits, kHasAVX2_0,  cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_AVX2);
	
	uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
	setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
				 (cpuid_leaf7_features() &
					CPUID_LEAF7_FEATURE_ENFSTRG));
	
	_cpu_capabilities = bits;		// set kernel version for use by drivers etc
}
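The AMD special-casing hinges on IsAmdCPU(). One common way to implement such a check is to compare the CPUID leaf-0 vendor string; a sketch under that assumption (the patched kernel may detect the vendor differently):

/*
 * Sketch only: CPUID leaf 0 returns the vendor string in EBX,EDX,ECX,
 * spelling "AuthenticAMD" on AMD parts. Assumes memcpy/memcmp are available.
 */
static boolean_t
is_amd_cpu_sketch(void)
{
	uint32_t eax, ebx, ecx, edx;
	char vendor[12];

	__asm__ volatile ("cpuid"
			  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			  : "a" (0), "c" (0));
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	return memcmp(vendor, "AuthenticAMD", 12) == 0;
}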
Example #8
void	pmap_pcid_configure(void) {
	int ccpu = cpu_number();
	uintptr_t cr4 = get_cr4();
	boolean_t pcid_present = FALSE;

	pmap_pcid_log("PCID configure invoked on CPU %d\n", ccpu);
	pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() !=0);
	pmap_assert(cpu_mode_is64bit());

	if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled, sizeof (pmap_pcid_disabled))) {
		pmap_pcid_log("PMAP: PCID feature disabled\n");
		printf("PMAP: PCID feature disabled, %u\n", pmap_pcid_disabled);
		kprintf("PMAP: PCID feature disabled %u\n", pmap_pcid_disabled);
	}
	 /* no_shared_cr3+PCID is currently unsupported */
#if	DEBUG
	if (pmap_pcid_disabled == FALSE)
		no_shared_cr3 = FALSE;
	else
		no_shared_cr3 = TRUE;
#else
	if (no_shared_cr3)
		pmap_pcid_disabled = TRUE;
#endif
	if (pmap_pcid_disabled || no_shared_cr3) {
		unsigned i;
		/* Reset PCID status, as we may have picked up
		 * strays if discovered prior to platform
		 * expert initialization.
		 */
		for (i = 0; i < real_ncpus; i++) {
			if (cpu_datap(i)) {
				cpu_datap(i)->cpu_pmap_pcid_enabled = FALSE;
			}
			pmap_pcid_ncpus = 0;
		}
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		return;
	}
	/* DRKTODO: assert if features haven't been discovered yet. Redundant
	 * invocation of cpu_mode_init and descendants masks this for now.
	 */
	if ((cpuid_features() & CPUID_FEATURE_PCID))
		pcid_present = TRUE;
	else {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		pmap_pcid_log("PMAP: PCID not detected CPU %d\n", ccpu);
		return;
	}
	if ((cr4 & (CR4_PCIDE | CR4_PGE)) == (CR4_PCIDE|CR4_PGE)) {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;
		pmap_pcid_log("PMAP: PCID already enabled %d\n", ccpu);
		return;
	}
	if (pcid_present == TRUE) {
		pmap_pcid_log("Pre-PCID:CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, cr4);

		if (cpu_number() >= PMAP_PCID_MAX_CPUS) {
			panic("PMAP_PCID_MAX_CPUS %d\n", cpu_number());
		}
		if ((get_cr4() & CR4_PGE) == 0) {
			set_cr4(get_cr4() | CR4_PGE);
			pmap_pcid_log("Toggled PGE ON (CPU: %d\n", ccpu);
		}
		set_cr4(get_cr4() | CR4_PCIDE);
		pmap_pcid_log("Post PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, get_cr4());
		tlb_flush_global();
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;

		if (OSIncrementAtomic(&pmap_pcid_ncpus) == machine_info.max_cpus) {
			pmap_pcid_log("All PCIDs enabled: real_ncpus: %d, pmap_pcid_ncpus: %d\n", real_ncpus, pmap_pcid_ncpus);
		}
		cpu_datap(ccpu)->cpu_pmap_pcid_coherentp =
		    cpu_datap(ccpu)->cpu_pmap_pcid_coherentp_kernel =
		    &(kernel_pmap->pmap_pcid_coherency_vector[ccpu]);
		cpu_datap(ccpu)->cpu_pcid_refcounts[0] = 1;
	}
}
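Once CR4.PCIDE is set as above, address-space switches can carry a PCID in the low 12 bits of CR3, and bit 63 of the value written tells the CPU not to flush that PCID's TLB entries. A sketch of composing such a CR3 value; the constant and helper names are illustrative, not the pmap layer's own:

/* Sketch only: build a PCID-tagged CR3 value. */
#define CR3_PCID_MASK_SKETCH	0xFFFULL	/* CR3[11:0] = PCID when CR4.PCIDE=1 */
#define CR3_NOFLUSH_SKETCH	(1ULL << 63)	/* suppress flush of this PCID's entries */

static inline uint64_t
cr3_with_pcid_sketch(uint64_t pml4_phys, uint16_t pcid, int preserve)
{
	uint64_t cr3 = (pml4_phys & ~CR3_PCID_MASK_SKETCH) |
	    ((uint64_t) pcid & CR3_PCID_MASK_SKETCH);

	if (preserve)
		cr3 |= CR3_NOFLUSH_SKETCH;
	return cr3;
}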
Example #9
/* -----------------------------------------------------------------------------
   vmx_is_available()
	Is the VMX facility available on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
	return (0 != (cpuid_features() & CPUID_FEATURE_VMX));
}
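CPUID only reports that VMX exists; before VMXON can succeed, IA32_FEATURE_CONTROL (MSR 0x3A) must also be either unlocked or locked with the appropriate VMXON-enable bit set. A sketch of that follow-up check, with illustrative names rather than the surrounding vmx code's own helpers:

/* Sketch only: gate VMX use on IA32_FEATURE_CONTROL as well as CPUID. */
#define MSR_IA32_FEATURE_CONTROL_SKETCH	0x3A
#define FC_LOCK_SKETCH			(1ULL << 0)	/* lock bit */
#define FC_VMXON_OUTSIDE_SMX_SKETCH	(1ULL << 2)	/* enable VMXON outside SMX */

static inline boolean_t
vmx_is_usable_sketch(uint64_t feature_control)
{
	if (!vmx_is_available())
		return FALSE;
	if ((feature_control & FC_LOCK_SKETCH) == 0)
		return TRUE;	/* unlocked: the kernel may still set the enable bits */
	return (0 != (feature_control & FC_VMXON_OUTSIDE_SMX_SKETCH));
}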