Example #1
static void
mca_get_availability(void)
{
	uint64_t	features = cpuid_info()->cpuid_features;
	uint32_t	family =  cpuid_info()->cpuid_family;

	mca_MCE_present = (features & CPUID_FEATURE_MCE) != 0;
	mca_MCA_present = (features & CPUID_FEATURE_MCA) != 0;
	mca_family = family;
	
	/*
	 * If MCA, the number of banks etc is reported by the IA32_MCG_CAP MSR.
	 */
	if (mca_MCA_present) {
		ia32_mcg_cap.u64 = rdmsr64(IA32_MCG_CAP);
		mca_error_bank_count = ia32_mcg_cap.bits.count;
		mca_control_MSR_present = ia32_mcg_cap.bits.mcg_ctl_p;
		mca_threshold_status_present = ia32_mcg_cap.bits.mcg_tes_p;
		mca_sw_error_recovery_present = ia32_mcg_cap.bits.mcg_ser_p;
		mca_cmci_present = ia32_mcg_cap.bits.mcg_ext_corr_err_p;
		if (family == 0x0F) {
			mca_extended_MSRs_present = ia32_mcg_cap.bits.mcg_ext_p;
			mca_extended_MSRs_count = ia32_mcg_cap.bits.mcg_ext_cnt;
		}
	}
}
Example #2
static void
mca_get_availability(void)
{
	uint64_t	features = cpuid_info()->cpuid_features;
	uint32_t	family =   cpuid_info()->cpuid_family;
	uint32_t	model =    cpuid_info()->cpuid_model;
	uint32_t	stepping = cpuid_info()->cpuid_stepping;

	mca_MCE_present = (features & CPUID_FEATURE_MCE) != 0;
	mca_MCA_present = (features & CPUID_FEATURE_MCA) != 0;
	mca_family = family;

	if ((model == CPUID_MODEL_HASWELL     && stepping < 3) ||
	    (model == CPUID_MODEL_HASWELL_ULT && stepping < 1) ||
	    (model == CPUID_MODEL_CRYSTALWELL && stepping < 1))
		panic("Haswell pre-C0 steppings are not supported");

	/*
	 * If MCA, the number of banks etc is reported by the IA32_MCG_CAP MSR.
	 */
	if (mca_MCA_present) {
		ia32_mcg_cap.u64 = rdmsr64(IA32_MCG_CAP);
		mca_error_bank_count = ia32_mcg_cap.bits.count;
		mca_control_MSR_present = ia32_mcg_cap.bits.mcg_ctl_p;
		mca_threshold_status_present = ia32_mcg_cap.bits.mcg_tes_p;
		mca_sw_error_recovery_present = ia32_mcg_cap.bits.mcg_ser_p;
		mca_cmci_present = ia32_mcg_cap.bits.mcg_ext_corr_err_p;
		if (family == 0x0F) {
			mca_extended_MSRs_present = ia32_mcg_cap.bits.mcg_ext_p;
			mca_extended_MSRs_count = ia32_mcg_cap.bits.mcg_ext_cnt;
		}
	}
}
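Example #3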
IOService* com_reidburke_air_IntelEnhancedSpeedStep::probe(IOService* provider, SInt32* score) {
	IOService* res = super::probe(provider, score);
	dbg("Probing for Intel processor...\n");
	
  info("vendor: ( %s )  \n", cpuid_info()->cpuid_vendor);
 // warn("vendor %s ", cpuid_info() );
  
	/* Make preliminary check */
	if ( (strcmp(cpuid_info()->cpuid_vendor, CPUID_VID_INTEL) == 0) // Check it's actually Intel
	&& ( cpuid_info()->cpuid_features & CPUID_FEATURE_EST) ) { // Check it supports EST
	  *score += 1000;
		dbg("Supported Intel processor found on your system\n");
		res = this;
	} else {
		warn("No Intel processor found, or your processor does not support SpeedStep."
		     "Kext will not load\n");
		res = NULL;
   
	}
	
	if (!isConstantTSC()) {
		ConstantTSC = false;
		if (RtcFixKernel)
			dbg("Your processor doesn't support constant_tsc, but you have a kernel which can compensate for it.\n");
		else
			warn("Your processor doesn't support constant_tsc and your kernel doesn't " 
			     "know how to compensate - Expect timing issues or switch to a kernel with RTC fix.\n");
	} else {
		ConstantTSC = true;
		Below1Ghz = true; // blindly, because we're not gonna recalibrate the clock so no fear of panic
	}
	
	return res;
}
Example #4
void version() {
    kprintf("%s %d.%d %s\n%s\nBuild Number %s\n", OS_NAME, VERSION_MAJOR, VESRION_MINOR, BUILD_TYPE, COPYRIGHT, BUILD_NUMBER);
    
    kprintf("CPU Vendor %s\n", cpuid_info()->cpuid_vendor);
    kprintf("CPU %s\n", cpuid_info()->cpuid_brand_string);
    kprintf("Memory %d GB\n", Platform_state.bootArgs->PhysicalMemorySize/GB);
}
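Example #5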
void checkForPenryn() {
	uint8_t cpumodel = (cpuid_info()->cpuid_extmodel << 4) + cpuid_info()->cpuid_model;
	Is45nmPenryn = (cpuid_info()->cpuid_family == 6) && (cpumodel >= 0x17);
	if (Is45nmPenryn)
    dbg("On your processor, voltages can be changed in 12.5 mV steps\n");
	else
		dbg("On your processor, voltages can be changed in 16 mV steps\n");
}
Example #6
static inline UInt8 get_cpu_number()
{
    UInt8 number = cpu_number() & 0xFF;
    
    if (cpuid_info()->thread_count > cpuid_info()->core_count) {
        return !(number % 2) ? number >> 1 : UINT8_MAX;
    }
    
    return number;
}
Example #7
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
			/ cpuid_info()->thread_count;

#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm__ */

}
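Example #8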
bool isConstantTSC() {
	/* Check for constant_tsc by getting family, model, stepping */
	/* Ref http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits 
	 * And http://www.intel.com/assets/pdf/appnote/241618.pdf
	 */
	uint8_t cpumodel = (cpuid_info()->cpuid_extmodel << 4) + cpuid_info()->cpuid_model;
	uint8_t cpufamily = cpuid_info()->cpuid_family;
	dbg("Processor Family %d, Model %d\n", cpufamily, cpumodel);
	if ((cpufamily == 0x6 && cpumodel < 14) // model 13 is Pentium M; 14+ is Core and above
	|| ( cpufamily == 0xf && cpumodel < 3)) // 0xF is Pentium 4; models below 3 don't support constant TSC
		// Ref - http://www.tomshardware.com/forum/128629-28-intel
		return false;
	else
		return true;
}
Example #9
static void
commpage_populate_one( 
	vm_map_t	submap,		// commpage32_map or commpage64_map
	char **		kernAddressPtr,	// &commPagePtr32 or &commPagePtr64
	size_t		area_used,	// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
	commpage_address_t base_offset,	// will become commPageBaseOffset
	commpage_time_data** time_data,	// &time_data32 or &time_data64
	const char*	signature,	// "commpage 32-bit" or "commpage 64-bit"
	vm_prot_t	uperm)
{
	uint8_t		c1;
	uint16_t	c2;
	int		c4;
	uint64_t	c8;
	uint32_t	cfamily;
	short   version = _COMM_PAGE_THIS_VERSION;

	next = 0;
	commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used, uperm );
	*kernAddressPtr = commPagePtr;				// save address either in commPagePtr32 or 64
	commPageBaseOffset = base_offset;

	*time_data = commpage_addr_of( _COMM_PAGE_TIME_DATA_START );

	/* Stuff in the constants.  We move things into the comm page in strictly
	* ascending order, so we can check for overlap and panic if so.
	* Note: the 32-bit cpu_capabilities vector is retained in addition to
	* the expanded 64-bit vector.
	*/
	commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature)));
	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64,&_cpu_capabilities,sizeof(_cpu_capabilities));
	commpage_stuff(_COMM_PAGE_VERSION,&version,sizeof(short));
	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(uint32_t));

	c2 = 32;  // default
	if (_cpu_capabilities & kCache64)
		c2 = 64;
	else if (_cpu_capabilities & kCache128)
		c2 = 128;
	commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);

	c4 = MP_SPIN_TRIES;
	commpage_stuff(_COMM_PAGE_SPIN_COUNT,&c4,4);

	/* machine_info valid after ml_get_max_cpus() */
	c1 = machine_info.physical_cpu_max;
	commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS,&c1,1);
	c1 = machine_info.logical_cpu_max;
	commpage_stuff(_COMM_PAGE_LOGICAL_CPUS,&c1,1);

	c8 = ml_cpu_cache_size(0);
	commpage_stuff(_COMM_PAGE_MEMORY_SIZE, &c8, 8);

	cfamily = cpuid_info()->cpuid_cpufamily;
	commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4);

	if (next > _COMM_PAGE_END)
		panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr);

}
Example #10
static void
mca_report_cpu_info(void)
{
	i386_cpu_info_t *infop = cpuid_info();

	kdb_printf(" family: %d model: %d stepping: %d microcode: %d\n",
		infop->cpuid_family,
		infop->cpuid_model,
		infop->cpuid_stepping,
		infop->cpuid_microcode_version);
	kdb_printf(" %s\n", infop->cpuid_brand_string);
}
Example #11
bool
random_device::hasRDRAND()
{
    if (!hasIntelCpu())
        return false;

    CPUIDinfo info;
    cpuid_info(&info, 1, 0);
    static constexpr unsigned int RDRAND_FLAG = (1 << 30);
    if ((info.ECX & RDRAND_FLAG) == RDRAND_FLAG)
        return true;
    return false;
}
Example #12
u16
get_amd_core_num()
{
	int	ret;
	struct cpuid_data	data;
	u16	num, i;

	ret = cpuid_info(0x80000008, &data);

	if (ret == 0) {
		num = (data.ecx >> 12) &0xf;

		if (num == 0) {
			num = (data.ecx & 0xff) + 1;
		} else {
			i = 1;
			num = i << num;
		}

		return (num);
	}
Example #13
inline UInt32 get_cpu_number()
{
    return cpu_number() % cpuid_info()->core_count;
}
Example #14
int 
diagCall64(x86_saved_state_t * state)
{
	uint64_t	curpos, i, j;
	uint64_t	selector, data;
	uint64_t	currNap, durNap;
	x86_saved_state64_t	*regs;
	boolean_t 	diagflag;
	uint32_t	rval = 0;

	assert(is_saved_state64(state));
	regs = saved_state64(state);

	diagflag = ((dgWork.dgFlags & enaDiagSCs) != 0);
	selector = regs->rdi;

	switch (selector) {	/* Select the routine */
	case dgRuptStat:	/* Suck Interruption statistics */
		(void) ml_set_interrupts_enabled(TRUE);
		data = regs->rsi; /* Get the number of processors */

		if (data == 0) { /* If no location is specified for data, clear all
				  * counts
				  */
			for (i = 0; i < real_ncpus; i++) {	/* Cycle through
								 * processors */
				for (j = 0; j < 256; j++)
					cpu_data_ptr[i]->cpu_hwIntCnt[j] = 0;
			}

			lastRuptClear = mach_absolute_time();	/* Get the time of clear */
			rval = 1;	/* Normal return */
			break;
		}

		(void) copyout((char *) &real_ncpus, data, sizeof(real_ncpus));	/* Copy out number of
										 * processors */
		currNap = mach_absolute_time();	/* Get the time now */
		durNap = currNap - lastRuptClear;	/* Get the last interval
							 * duration */
		if (durNap == 0)
			durNap = 1;	/* This is a very short time, make it
					 * bigger */

		curpos = data + sizeof(real_ncpus);	/* Point to the next
							 * available spot */

		for (i = 0; i < real_ncpus; i++) {	/* Move 'em all out */
			(void) copyout((char *) &durNap, curpos, 8);	/* Copy out the time
									 * since last clear */
			(void) copyout((char *) &cpu_data_ptr[i]->cpu_hwIntCnt, curpos + 8, 256 * sizeof(uint32_t));	/* Copy out interrupt
															 * data for this
															 * processor */
			curpos = curpos + (256 * sizeof(uint32_t) + 8);	/* Point to next out put
									 * slot */
		}
		rval = 1;
		break;

	case dgPowerStat:
	{
		uint32_t c2l = 0, c2h = 0, c3l = 0, c3h = 0, c6l = 0, c6h = 0, c7l = 0, c7h = 0;
		uint32_t pkg_unit_l = 0, pkg_unit_h = 0, pkg_ecl = 0, pkg_ech = 0;

		pkg_energy_statistics_t pkes;
		core_energy_stat_t cest;

		bzero(&pkes, sizeof(pkes));
		bzero(&cest, sizeof(cest));

		pkes.pkes_version = 1ULL;
		rdmsr_carefully(MSR_IA32_PKG_C2_RESIDENCY, &c2l, &c2h);
		rdmsr_carefully(MSR_IA32_PKG_C3_RESIDENCY, &c3l, &c3h);
		rdmsr_carefully(MSR_IA32_PKG_C6_RESIDENCY, &c6l, &c6h);
		rdmsr_carefully(MSR_IA32_PKG_C7_RESIDENCY, &c7l, &c7h);

		pkes.pkg_cres[0][0] = ((uint64_t)c2h << 32) | c2l;
		pkes.pkg_cres[0][1] = ((uint64_t)c3h << 32) | c3l;
		pkes.pkg_cres[0][2] = ((uint64_t)c6h << 32) | c6l;
		pkes.pkg_cres[0][3] = ((uint64_t)c7h << 32) | c7l;

		uint32_t cpumodel = cpuid_info()->cpuid_model;
		boolean_t c8avail;
		switch (cpumodel) {
		case CPUID_MODEL_HASWELL_ULT:
			c8avail = TRUE;
			break;
		default:
			c8avail = FALSE;
			break;
		}
		uint64_t c8r = ~0ULL, c9r = ~0ULL, c10r = ~0ULL;

		if (c8avail) {
			rdmsr64_carefully(MSR_IA32_PKG_C8_RESIDENCY, &c8r);
			rdmsr64_carefully(MSR_IA32_PKG_C9_RESIDENCY, &c9r);
			rdmsr64_carefully(MSR_IA32_PKG_C10_RESIDENCY, &c10r);
		}

		pkes.pkg_cres[0][4] = c8r;
		pkes.pkg_cres[0][5] = c9r;
		pkes.pkg_cres[0][6] = c10r;

		pkes.ddr_energy = ~0ULL;
		rdmsr64_carefully(MSR_IA32_DDR_ENERGY_STATUS, &pkes.ddr_energy);
		pkes.llc_flushed_cycles = ~0ULL;
		rdmsr64_carefully(MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER, &pkes.llc_flushed_cycles);

		pkes.ring_ratio_instantaneous = ~0ULL;
		rdmsr64_carefully(MSR_IA32_RING_PERF_STATUS, &pkes.ring_ratio_instantaneous);

		pkes.IA_frequency_clipping_cause = ~0ULL;
		rdmsr64_carefully(MSR_IA32_IA_PERF_LIMIT_REASONS, &pkes.IA_frequency_clipping_cause);

		pkes.GT_frequency_clipping_cause = ~0ULL;
		rdmsr64_carefully(MSR_IA32_GT_PERF_LIMIT_REASONS, &pkes.GT_frequency_clipping_cause);

		rdmsr_carefully(MSR_IA32_PKG_POWER_SKU_UNIT, &pkg_unit_l, &pkg_unit_h);
		rdmsr_carefully(MSR_IA32_PKG_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pkg_power_unit = ((uint64_t)pkg_unit_h << 32) | pkg_unit_l;
		pkes.pkg_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		rdmsr_carefully(MSR_IA32_PP0_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pp0_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		rdmsr_carefully(MSR_IA32_PP1_ENERGY_STATUS, &pkg_ecl, &pkg_ech);
		pkes.pp1_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl;

		pkes.pkg_idle_exits = current_cpu_datap()->lcpu.package->package_idle_exits;
		pkes.ncpus = real_ncpus;

		(void) ml_set_interrupts_enabled(TRUE);

		copyout(&pkes, regs->rsi, sizeof(pkes));
		curpos = regs->rsi + sizeof(pkes);

		mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_powerstats, NULL);
		
		for (i = 0; i < real_ncpus; i++) {
			(void) ml_set_interrupts_enabled(FALSE);

			cest.caperf = cpu_data_ptr[i]->cpu_aperf;
			cest.cmperf = cpu_data_ptr[i]->cpu_mperf;
			cest.ccres[0] = cpu_data_ptr[i]->cpu_c3res;
			cest.ccres[1] = cpu_data_ptr[i]->cpu_c6res;
			cest.ccres[2] = cpu_data_ptr[i]->cpu_c7res;

			bcopy(&cpu_data_ptr[i]->cpu_rtimes[0], &cest.crtimes[0], sizeof(cest.crtimes));
			bcopy(&cpu_data_ptr[i]->cpu_itimes[0], &cest.citimes[0], sizeof(cest.citimes));

			cest.citime_total = cpu_data_ptr[i]->cpu_itime_total;
			cest.crtime_total = cpu_data_ptr[i]->cpu_rtime_total;
 			cest.cpu_idle_exits = cpu_data_ptr[i]->cpu_idle_exits;
 			cest.cpu_insns = cpu_data_ptr[i]->cpu_cur_insns;
 			cest.cpu_ucc = cpu_data_ptr[i]->cpu_cur_ucc;
 			cest.cpu_urc = cpu_data_ptr[i]->cpu_cur_urc;
 			(void) ml_set_interrupts_enabled(TRUE);

			copyout(&cest, curpos, sizeof(cest));
			curpos += sizeof(cest);
		}
		rval = 1;
	}
		break;
 	case dgEnaPMC:
 	{
 		boolean_t enable = TRUE;
		uint32_t cpuinfo[4];
		/* Require architectural PMC v2 or higher, corresponding to
		 * Merom+, or equivalent virtualised facility.
		 */
		do_cpuid(0xA, &cpuinfo[0]);
		if ((cpuinfo[0] & 0xFF) >= 2) {
			mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_pmc_control, &enable);
			diag_pmc_enabled = TRUE;
		}
 		rval = 1;
 	}
 	break;
#if	DEBUG
	case dgGzallocTest:
	{
		(void) ml_set_interrupts_enabled(TRUE);
		if (diagflag) {
			unsigned *ptr = (unsigned *)kalloc(1024);
			kfree(ptr, 1024);
			*ptr = 0x42;
		}
	}
	break;
#endif

#if PERMIT_PERMCHECK	
	case	dgPermCheck:
	{
		(void) ml_set_interrupts_enabled(TRUE);
		if (diagflag)
			rval = pmap_permissions_verify(kernel_pmap, kernel_map, 0, ~0ULL);
	}
 		break;
#endif /* PERMIT_PERMCHECK */
	default:		/* Handle invalid ones */
		rval = 0;	/* Return an exception */
	}

	regs->rax = rval;

	return rval;
}
Example #15
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
    cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#elif defined(__arm__)
    kprintf("sysctl_mib_init: NEED ARM DEFINES\n");
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
#define is_capability_set(k) (((_get_cpu_capabilities() & (k)) == (k)) ? 1 : 0)
	mmx_flag		= is_capability_set(kHasMMX);
	sse_flag		= is_capability_set(kHasSSE);
	sse2_flag		= is_capability_set(kHasSSE2);
	sse3_flag		= is_capability_set(kHasSSE3);
	supplementalsse3_flag	= is_capability_set(kHasSupplementalSSE3);
	sse4_1_flag		= is_capability_set(kHasSSE4_1);
	sse4_2_flag		= is_capability_set(kHasSSE4_2);
	x86_64_flag		= is_capability_set(k64Bit);
	aes_flag		= is_capability_set(kHasAES);
	avx1_0_flag		= is_capability_set(kHasAVX1_0);
	rdrand_flag		= is_capability_set(kHasRDRAND);
	f16c_flag		= is_capability_set(kHasF16C);
	enfstrg_flag		= is_capability_set(kHasENFSTRG);

	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
			/ cpuid_info()->thread_count;
#elif defined(__arm__)
    kprintf("sysctl_mib_init: shortcircuiting to finish, reimplement\n");
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64 && !__arm__ */

}
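Example #16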
bool com_reidburke_air_IntelEnhancedSpeedStep::init(OSDictionary* dict) {
	bool res = super::init(dict);
	info("Initializing xnu-speedstep-air\n");
  
  // read data
  cpuid_update_generic_info();
  
	/* Allocate our spinlock for later use */
	Lock = IOSimpleLockAlloc();
	/* Check for a patched kernel which properly implements rtc_clock_stepped() */
	uint64_t magic = -1; // means autodetect
	
	OSBoolean* debugMsgs = (OSBoolean*) dict->getObject("DebugMessages");
	if (debugMsgs != 0)
		DebugOn = debugMsgs->getValue();
	else
		DebugOn = false;
	
	OSNumber* kernelFeatures = (OSNumber*) dict->getObject("KernelFeatures");
	if (kernelFeatures != 0)
		magic = kernelFeatures->unsigned8BitValue();
	
	if (magic == 255) nanoseconds_to_absolutetime(~(0), &magic); //255uint = -1 int
	
	if (magic == 1) {
		RtcFixKernel = true;
		Below1Ghz	= false;
	} else if (magic == 2) {
		RtcFixKernel = true;
		Below1Ghz	= true;
	} else if (magic == 3) {
		RtcFixKernel = false;
		Below1Ghz = true;
	} else {
		RtcFixKernel = false;
		Below1Ghz	= false;
	}
	
	checkForNby2Ratio(); // check and store in global variable before loading pstate override
	if (getFSB() == false)
		return false;
	
	OSArray* overrideTable = (OSArray*) dict->getObject("PStateTable");
	if (overrideTable != 0 )
		loadPStateOverride(overrideTable);
	
	OSNumber* defaultState = (OSNumber*) dict->getObject("DefaultPState");
	if (defaultState != 0)
		DefaultPState = defaultState->unsigned8BitValue();
	else
		DefaultPState = -1; // indicate no default state

	OSNumber* maxLatency = (OSNumber*) dict->getObject("Latency");
	if (maxLatency != 0)
		MaxLatency = maxLatency->unsigned32BitValue();
	else
		MaxLatency = 0;
	
 	
	/* Make preliminary check */
	if ( (strcmp(cpuid_info()->cpuid_vendor, CPUID_VID_INTEL) == 0) // Check it's actually Intel
	&& ( cpuid_info()->cpuid_features & CPUID_FEATURE_EST) ) { // Check it supports EST

		autostart = (OSNumber*) dict->getObject("AutoStart");
		if (autostart != 0)
			info("do autostart %d\n", autostart->unsigned8BitValue()); // only dereference after the null check

		if (autostart != 0 && autostart->unsigned8BitValue() == 1) {
			Throttler = new AutoThrottler;
			if (Throttler) {
				dbg("Throttler instantiated.\n");
				OSNumber* targetload = (OSNumber*) dict->getObject("TargetCPULoad");
				if (targetload != 0)
					Throttler->targetCPULoad = (targetload->unsigned16BitValue()) * 10;
				else
					Throttler->targetCPULoad = 700;
			}
		}
	}
  
	totalThrottles = 0;
	frequencyUsage[0] = '\0';
	
	/* Return whatever the superclass returned */
	return res;
}
Example #17
bool SMBIOSResolver::start(IOService * provider)
{
	if( super::start(provider) != true ) return false;	// Oh no	
	if( IOService::getResourceService()->getProperty("SMBIOS-Resolver") ) return false;	// We should exist only once	
	if( !IOService::getResourceService()->getProperty("SMBIOS") ) return false;	// AppleSMBIOS.kext didn't start, so we bail out
	
	IOService * iosRoot = getServiceRoot();
	if( !iosRoot ) return false;	// Unable to get IOServiceRoot
	
	int doVerbose = 0;
	// PE_parse_boot_arg("smbios", &doVerbose);	// bootarg SMBIOS=1 will give a verbose output to log (when I find something verbose worth outputting)
	
	// Dictionary from plist
	OSDictionary * hwDict = OSDynamicCast( OSDictionary, getProperty("Override"));
	
	//	/rom/version
	IORegistryEntry * dtROMNode = fromPath("/rom", gIODTPlane);
	if( dtROMNode )
	{
		OSString * romVersion = OSDynamicCast( OSString, hwDict->getObject("rom-version"));
		if(romVersion->getLength() > 0) dtROMNode->setProperty("version", OSData::withBytes(romVersion->getCStringNoCopy(), romVersion->getLength() + 1) );
		dtROMNode->release();
	}
	else
	{
		return false;	// No /rom node in IODeviceTree plane
	}
	
	// root entries
	OSObject * dictString = 0;
	
	dictString = hwDict->getObject("manufacturer");
	if(dictString)
	{
		OSString * rootManufacturer = OSDynamicCast( OSString, dictString);
		if(rootManufacturer->getLength() > 1) iosRoot->setProperty("manufacturer", OSData::withBytes(rootManufacturer->getCStringNoCopy(), rootManufacturer->getLength() + 1) );
	}
	
	dictString = hwDict->getObject("system-type");
	if(dictString)
	{
		OSData * systemType = OSDynamicCast( OSData, dictString);
		if(systemType) iosRoot->setProperty("system-type", systemType );
	}
	
	dictString = hwDict->getObject("compatible");
	if(dictString) 
	{
		OSString * rootCompatible = OSDynamicCast( OSString, dictString);
		if(rootCompatible->getLength() > 1) iosRoot->setProperty("compatible", OSData::withBytes(rootCompatible->getCStringNoCopy(), rootCompatible->getLength() + 1) );
	}
	
	dictString = hwDict->getObject("product-name");
	if(dictString) 
	{
		OSString * rootProductName = OSDynamicCast( OSString, dictString);
		if(rootProductName->getLength() > 1) iosRoot->setProperty("product-name", OSData::withBytes(rootProductName->getCStringNoCopy(), rootProductName->getLength() + 1) );
	}
	
	dictString = hwDict->getObject("model");
	if(dictString) 
	{
		OSString * rootModel = OSDynamicCast( OSString, dictString);
		if(rootModel->getLength() > 1)
		{
			iosRoot->setProperty("model", OSData::withBytes(rootModel->getCStringNoCopy(), rootModel->getLength() + 1) );
			iosRoot->setName(rootModel->getCStringNoCopy());
		}
	}
	
	dictString = hwDict->getObject("version");
	if(dictString) 
	{
		OSString * rootVersion = OSDynamicCast( OSString, dictString);
		if(rootVersion->getLength() > 1) iosRoot->setProperty("version", OSData::withBytes(rootVersion->getCStringNoCopy(), rootVersion->getLength() + 1) );
	}
	
	dictString = hwDict->getObject("board-id");
	if(dictString) 
	{
		OSString * rootBoardId = OSDynamicCast( OSString, dictString);
		if(rootBoardId->getLength() > 1) iosRoot->setProperty("board-id", OSData::withBytes(rootBoardId->getCStringNoCopy(), rootBoardId->getLength() + 1) );
	}
	
	dictString = hwDict->getObject("serial-number");
	if(dictString) 
	{
		OSString * rootSerial = OSDynamicCast( OSString, dictString);
		if(rootSerial->getLength() > 1)
		{
			UInt8 length = rootSerial->getLength();
			const char *serialNumberString = rootSerial->getCStringNoCopy();
			
			// The serial-number property in the IORegistry is a 43-byte data object.
			// Bytes 0 through 2 are the last three bytes of the serial number string.
			// Bytes 11 through 20, inclusive, are the serial number string itself.
			// All other bytes are '\0'.
			OSData * data = OSData::withCapacity(43);
			if (data)
			{
				data->appendBytes(serialNumberString + (length - 3), 3);
				data->appendBytes(NULL, 10);
				data->appendBytes(serialNumberString, length);
				data->appendBytes(NULL, 43 - length - 10 - 3);
				iosRoot->setProperty("serial-number", data);
				data->release();
			}
			
			iosRoot->setProperty(kIOPlatformSerialNumberKey, rootSerial);
		}
	}
	
	dictString = hwDict->getObject("UUID-key");
	if(dictString) 
	{
		OSString * rootUUIDKey = OSDynamicCast( OSString, hwDict->getObject("UUID-key"));
		iosRoot->setProperty(kIOPlatformUUIDKey, rootUUIDKey);
		publishResource(kIOPlatformUUIDKey, rootUUIDKey);
	}
	
	bool useEfiBus = false;
	UInt64 fsbFrequency = 0;
	UInt64 msr;
	dictString = hwDict->getObject("use-efi-bus");
	if (dictString) useEfiBus = (OSDynamicCast(OSBoolean, dictString))->getValue(); 
	IORegistryEntry * efiPlatform = fromPath("/efi/platform", gIODTPlane);
	if (efiPlatform && useEfiBus)
	{
		OSData * efiFSBFreq = OSDynamicCast(OSData, efiPlatform->getProperty("FSBFrequency"));
		bcopy(efiFSBFreq->getBytesNoCopy(), &fsbFrequency, efiFSBFreq->getLength());
		efiPlatform->release();
	}
	else
	{	// No /efi/platform found
		fsbFrequency = gPEClockFrequencyInfo.bus_frequency_hz;	// Value previously set by AppleSMBIOS 
		if (!strncmp(cpuid_info()->cpuid_vendor, CPUID_VID_INTEL, sizeof(CPUID_VID_INTEL)) && (cpuid_info()->cpuid_features & CPUID_FEATURE_SSE2)) fsbFrequency /= 4;
	}

	dictString = hwDict->getObject("hardcode-bus");
	if(dictString) 
	{
		fsbFrequency = (OSDynamicCast(OSNumber, dictString))->unsigned64BitValue();
		if (fsbFrequency)
		{
			if (fsbFrequency <= 10000) fsbFrequency *= 1000000;
		}
		else
		{
			if (!strncmp(cpuid_info()->cpuid_vendor, CPUID_VID_INTEL, sizeof(CPUID_VID_INTEL)))
			{
				if ((cpuid_info()->cpuid_family == 0x0f) && (cpuid_info()->cpuid_model >= 2))
				{
					msr = rdmsr64(0x0000002C);
					switch ((msr >> 16) & 0x7) {
						case 0:
							if (cpuid_info()->cpuid_model == 2) fsbFrequency = 100 * 1000000;
							else 
							{
								fsbFrequency = (800 * 1000000) / 3;	// 266
								fsbFrequency++;
							}
							break;
						case 1:
							fsbFrequency = (400 * 1000000) / 3;	//	133
							break;
						case 2:
							fsbFrequency = (600 * 1000000) / 3;	// 200
							break;
						case 3:
							fsbFrequency = (500 * 1000000) / 3;	//	166
							fsbFrequency++;
							break;
						case 4:
							fsbFrequency = (1000 * 1000000) / 3;	//	333
							break;
						default:
							break;
					}
				}
				else
				{
					fsbFrequency = 100 * 1000000;
				}
				
				if (cpuid_info()->cpuid_family == 0x06)
				{
					msr = rdmsr64(0x000000CD);
					switch (msr & 0x7) {
						case 0:
							fsbFrequency = (800 * 1000000) / 3;	//	266
							fsbFrequency++;
							break;
						case 1:
							fsbFrequency = (400 * 1000000) / 3;	//	133
							break;
						case 2:
							fsbFrequency = (600 * 1000000) / 3;	//	200
							break;
						case 3:
							fsbFrequency = (500 * 1000000) / 3;	//	166
							fsbFrequency++;
							break;
						case 4:
							fsbFrequency = (1000 * 1000000) / 3;//	333
							break;
						case 5:
							fsbFrequency = (300 * 1000000) / 3;	//	100
							break;
						case 6:
							fsbFrequency = (1200 * 1000000) / 3;//	400
							break;
						case 7:		// should check
							fsbFrequency = (1400 * 1000000) / 3;//	466
							fsbFrequency++;
							break;
						default:
							break;
					}
				}
				 
			}
		}
Example #18
int
get_processor_name(char *cpu_name, int size)
{
	int	ret;
	struct cpuid_data	data1, data2, data3;
	int	i, j, b, c;

	if (size < 49) {
		return (1);
	}

	ret = cpuid_info(0x80000002, &data1);

	if (ret) {
		return (1);
	}

	ret = cpuid_info(0x80000003, &data2);

	if (ret) {
		return (1);
	}

	ret = cpuid_info(0x80000004, &data3);

	if (ret) {
		return (1);
	}	

	(void) snprintf(cpu_name, size,
	"%.4s%.4s%.4s%.4s%.4s%.4s%.4s%.4s%.4s%.4s%.4s%.4s",
	(char *)&data1.eax, (char *)&data1.ebx,
	(char *)&data1.ecx, (char *)&data1.edx,
	(char *)&data2.eax, (char *)&data2.ebx,
	(char *)&data2.ecx, (char *)&data2.edx,
	(char *)&data3.eax, (char *)&data3.ebx,
	(char *)&data3.ecx, (char *)&data3.edx);

	i = 0;
	j = 0;
	c = cpu_name[i];

	while (isblank(c)) {
		i++;
		c = cpu_name[i];
	}

	while (cpu_name[i] != '\0') {
		cpu_name[j] = cpu_name[i];
		c = cpu_name[i];

		if (isblank(c)) {
			c = cpu_name[i + 1];
			while (isblank(c)) {
				i++;
				c = cpu_name[i + 1];
			}
		}
		i++;
		j++;
	}

	cpu_name[j] = '\0';

	return (0);
}
Example #19
File: cpu.c Project: argp/xnu
void
cpu_init(void)
{
	cpu_data_t     *cdp = getCpuDatap();
	arm_cpu_info_t *cpu_info_p;

	if (cdp->cpu_type != CPU_TYPE_ARM) {

		cdp->cpu_type = CPU_TYPE_ARM;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv4T:
		case CPU_ARCH_ARMv5T:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V4T;
			break;
		case CPU_ARCH_ARMv5TE:
		case CPU_ARCH_ARMv5TEJ:
			if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL)
				cdp->cpu_subtype = CPU_SUBTYPE_ARM_XSCALE;
			else
				cdp->cpu_subtype = CPU_SUBTYPE_ARM_V5TEJ;
			break;
		case CPU_ARCH_ARMv6:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V6;
			break;
		case CPU_ARCH_ARMv7:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7;
			break;
		case CPU_ARCH_ARMv7f:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7F;
			break;
		case CPU_ARCH_ARMv7s:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7S;
			break;
		case CPU_ARCH_ARMv7k:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7K;
			break;
		default:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_ALL;
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;

}
Example #20
	ret = cpuid_info(0x80000008, &data);

	if (ret == 0) {
		num = (data.ecx >> 12) &0xf;

		if (num == 0) {
			num = (data.ecx & 0xff) + 1;
		} else {
			i = 1;
			num = i << num;
		}

		return (num);
	}

	ret = cpuid_info(0x1, &data);

	if (ret) {
		return (1);
	}

	if ((data.edx & (1 << 28)) == 0) {
		return (1);
	}

	num = (data.ebx >> 16) & 0xff;

	ret = cpuid_info(0x80000001, &data);

	if (ret) {
		return (1);