Example #1
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
			/ cpuid_info()->thread_count;

#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64__ */

}
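
The sysctl_register_oid(&sysctl__hw_cputhreadtype) call above implies that the hw.cputhreadtype OID is declared somewhere in the file but not auto-registered, so the node only shows up on machines that actually report a thread type. A minimal sketch of such a declaration follows; the exact flag combination is an assumption (BSD-style SYSCTL_INT with CTLFLAG_NOAUTO to suppress automatic registration), not a copy of the file's real declaration.

#include <sys/sysctl.h>

/*
 * Hypothetical declaration of the optional hw.cputhreadtype OID.
 * CTLFLAG_NOAUTO keeps it out of the automatic registration pass, so the
 * node only appears after the explicit sysctl_register_oid() call above.
 * (In the real file, cputhreadtype is presumably already a file-scope
 * variable; it is redeclared here only to keep the sketch self-contained.)
 */
static int cputhreadtype = CPU_THREADTYPE_NONE;

SYSCTL_INT(_hw, OID_AUTO, cputhreadtype,
	CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
	&cputhreadtype, 0, "");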
Example #2
static void
commpage_populate_one(
	vm_map_t	submap,		// commpage32_map or commpage64_map
	char **		kernAddressPtr,	// &commPagePtr32 or &commPagePtr64
	size_t		area_used,	// _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED
	commpage_address_t base_offset,	// will become commPageBaseOffset
	commpage_time_data** time_data,	// &time_data32 or &time_data64
	const char*	signature,	// "commpage 32-bit" or "commpage 64-bit"
	vm_prot_t	uperm)
{
	uint8_t		c1;
	uint16_t	c2;
	int		c4;
	uint64_t	c8;
	uint32_t	cfamily;
	short   version = _COMM_PAGE_THIS_VERSION;

	next = 0;
	commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used, uperm );
	*kernAddressPtr = commPagePtr;				// save address either in commPagePtr32 or 64
	commPageBaseOffset = base_offset;

	*time_data = commpage_addr_of( _COMM_PAGE_TIME_DATA_START );

	/* Stuff in the constants.  We move things into the comm page in strictly
	 * ascending order, so we can check for overlap and panic if so.
	 * Note: the 32-bit cpu_capabilities vector is retained in addition to
	 * the expanded 64-bit vector.
	 */
	commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature)));
	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64,&_cpu_capabilities,sizeof(_cpu_capabilities));
	commpage_stuff(_COMM_PAGE_VERSION,&version,sizeof(short));
	commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(uint32_t));

	c2 = 32;  // default
	if (_cpu_capabilities & kCache64)
		c2 = 64;
	else if (_cpu_capabilities & kCache128)
		c2 = 128;
	commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2);

	c4 = MP_SPIN_TRIES;
	commpage_stuff(_COMM_PAGE_SPIN_COUNT,&c4,4);

	/* machine_info valid after ml_get_max_cpus() */
	c1 = machine_info.physical_cpu_max;
	commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS,&c1,1);
	c1 = machine_info.logical_cpu_max;
	commpage_stuff(_COMM_PAGE_LOGICAL_CPUS,&c1,1);

	c8 = ml_cpu_cache_size(0);
	commpage_stuff(_COMM_PAGE_MEMORY_SIZE, &c8, 8);

	cfamily = cpuid_info()->cpuid_cpufamily;
	commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4);

	if (next > _COMM_PAGE_END)
		panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr);

}
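
The "strictly ascending order" rule described in the comment inside commpage_populate_one() is what makes the final next > _COMM_PAGE_END check meaningful: every store advances a cursor, and a store below the cursor means two constants overlap. A sketch of what an overlap-checking commpage_stuff() helper could look like is below; it assumes the module-level next cursor and the commpage_addr_of() helper already used above, and is illustrative rather than the file's actual implementation.

/*
 * Illustrative sketch: copy 'length' bytes into the comm page at the given
 * commpage address, enforcing the strictly-ascending rule.  'next' tracks
 * the first unused commpage offset; a store below it would overlap data
 * that was already stuffed, so panic instead of silently clobbering it.
 */
static void
commpage_stuff(commpage_address_t address, const void *source, int length)
{
	void *dest = commpage_addr_of(address);

	if (address < next)
		panic("commpage overlap at 0x%08x (next = 0x%08x)", address, next);

	bcopy(source, dest, length);

	next = address + length;
}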
Example #3
/******************************************************************************
 * Generic MIB initialisation.
 *
 * This is a hack, and should be replaced with SYSINITs
 * at some point.
 */
void
sysctl_mib_init(void)
{
	cputype = cpu_type();
	cpusubtype = cpu_subtype();
	cputhreadtype = cpu_threadtype();
#if defined(__i386__) || defined (__x86_64__)
	cpu64bit = (_get_cpu_capabilities() & k64Bit) == k64Bit;
#elif defined(__arm__)
	kprintf("sysctl_mib_init: NEED ARM DEFINES\n");
#else
#error Unsupported arch
#endif

	/*
	 * Populate the optional portion of the hw.* MIB.
	 *
	 * XXX This could be broken out into parts of the code
	 *     that actually directly relate to the functions in
	 *     question.
	 */

	if (cputhreadtype != CPU_THREADTYPE_NONE) {
		sysctl_register_oid(&sysctl__hw_cputhreadtype);
	}

#if defined (__i386__) || defined (__x86_64__)
#define is_capability_set(k) (((_get_cpu_capabilities() & (k)) == (k)) ? 1 : 0)
	mmx_flag		= is_capability_set(kHasMMX);
	sse_flag		= is_capability_set(kHasSSE);
	sse2_flag		= is_capability_set(kHasSSE2);
	sse3_flag		= is_capability_set(kHasSSE3);
	supplementalsse3_flag	= is_capability_set(kHasSupplementalSSE3);
	sse4_1_flag		= is_capability_set(kHasSSE4_1);
	sse4_2_flag		= is_capability_set(kHasSSE4_2);
	x86_64_flag		= is_capability_set(k64Bit);
	aes_flag		= is_capability_set(kHasAES);
	avx1_0_flag		= is_capability_set(kHasAVX1_0);
	rdrand_flag		= is_capability_set(kHasRDRAND);
	f16c_flag		= is_capability_set(kHasF16C);
	enfstrg_flag		= is_capability_set(kHasENFSTRG);

	/* hw.cpufamily */
	cpufamily = cpuid_cpufamily();

	/* hw.cacheconfig */
	cacheconfig[0] = ml_cpu_cache_sharing(0);
	cacheconfig[1] = ml_cpu_cache_sharing(1);
	cacheconfig[2] = ml_cpu_cache_sharing(2);
	cacheconfig[3] = ml_cpu_cache_sharing(3);
	cacheconfig[4] = 0;

	/* hw.cachesize */
	cachesize[0] = ml_cpu_cache_size(0);
	cachesize[1] = ml_cpu_cache_size(1);
	cachesize[2] = ml_cpu_cache_size(2);
	cachesize[3] = ml_cpu_cache_size(3);
	cachesize[4] = 0;

	/* hw.packages */
	packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count)
			/ cpuid_info()->thread_count;
#elif defined(__arm__)
    kprintf("sysctl_mib_init: shortcircuiting to finish, reimplement\n");
#else
#error unknown architecture
#endif /* !__i386__ && !__x86_64__ && !__arm__ */

}
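
The hw.packages expression used in both sysctl_mib_init() variants is a ceiling division: taking ml_cpu_cache_sharing(0) as the count of logical CPUs sharing memory (i.e. all of them) and cpuid_info()->thread_count as the threads per package, rounding the former up to a multiple of the latter and dividing gives the package count. A standalone sketch with made-up numbers (12 logical CPUs, 8 threads per package) shows the arithmetic:

#include <stdio.h>

/* roundup() as defined in <sys/param.h>: round x up to a multiple of y. */
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int
main(void)
{
	/* Illustrative values only: not read from real hardware. */
	unsigned logical_cpus    = 12;	/* stands in for ml_cpu_cache_sharing(0) */
	unsigned threads_per_pkg = 8;	/* stands in for cpuid_info()->thread_count */

	unsigned packages = roundup(logical_cpus, threads_per_pkg) / threads_per_pkg;

	/* roundup(12, 8) = 16, and 16 / 8 = 2 packages. */
	printf("hw.packages = %u\n", packages);
	return 0;
}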