/*
 * Supporting some variables requires us to do "real" work.  We
 * gather some of that here.
 *
 * Generic hw.* sysctl handler.  The OID being serviced is encoded in
 * arg2 (one of the HW_* constants), optionally OR'd with CTLHW_RETQUAD
 * to request a 64-bit ("quad") result for the OIDs that support it.
 *
 * Returns 0 on success, EINVAL when the underlying information is
 * unavailable, or ENOTSUP for an unrecognized OID.
 */
static int
sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1,
    int arg2, struct sysctl_req *req)
{
	char dummy[65];				/* scratch for machine/model name; 64 chars + NUL */
	int epochTemp;
	ml_cpu_info_t cpu_info;
	int val, doquad;
	long long qval;
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	/*
	 * Test and mask off the 'return quad' flag.
	 * Note that only some things here support it.
	 */
	doquad = arg2 & CTLHW_RETQUAD;
	arg2 &= ~CTLHW_RETQUAD;

	ml_cpu_get_info(&cpu_info);

#define BSD_HOST 1
	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	/*
	 * Handle various OIDs.
	 *
	 * OIDs that can return int or quad set val and qval and then break.
	 * Errors and int-only values return inline.
	 */
	switch (arg2) {
	/* CPU-count OIDs come straight from host_basic_info; int-only. */
	case HW_NCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.max_cpus));
		} else {
			return(EINVAL);
		}
	case HW_AVAILCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.avail_cpus));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_PHYSICALCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.physical_cpu));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_PHYSICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.physical_cpu_max));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_LOGICALCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.logical_cpu));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_LOGICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.logical_cpu_max));
		} else {
			return(EINVAL);
		}
	/* Page size is per-task (the caller's map), not a global constant. */
	case HW_PAGESIZE:
	{
		vm_map_t map = get_task_map(current_task());
		val = vm_map_page_size(map);
		qval = (long long)val;
		break;
	}
	case HW_CACHELINE:
		val = cpu_info.cache_line_size;
		qval = (long long)val;
		break;
	case HW_L1ICACHESIZE:
		val = cpu_info.l1_icache_size;
		qval = (long long)val;
		break;
	case HW_L1DCACHESIZE:
		val = cpu_info.l1_dcache_size;
		qval = (long long)val;
		break;
	/* 0xFFFFFFFF is the "cache not present" sentinel from ml_cpu_get_info(). */
	case HW_L2CACHESIZE:
		if (cpu_info.l2_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		val = cpu_info.l2_cache_size;
		qval = (long long)val;
		break;
	case HW_L3CACHESIZE:
		if (cpu_info.l3_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		val = cpu_info.l3_cache_size;
		qval = (long long)val;
		break;

		/*
		 * Deprecated variables.  We still support these for
		 * backwards compatibility purposes only.
		 */
	case HW_MACHINE:
		/* Returned string includes its NUL terminator. */
		bzero(dummy, sizeof(dummy));
		if(!PEGetMachineName(dummy,64))
			return(EINVAL);
		dummy[64] = 0;
		return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1));
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if(!PEGetModelName(dummy,64))
			return(EINVAL);
		dummy[64] = 0;
		return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1));
	case HW_USERMEM:
	{
		/*
		 * NOTE(review): int arithmetic — this legacy OID wraps for
		 * results above 2 GB; kept as-is for compatibility.
		 */
		int usermem = mem_size - vm_page_wire_count * page_size;

		return(SYSCTL_RETURN(req, usermem));
	}
	case HW_EPOCH:
		/* -1 from PEGetPlatformEpoch() means "epoch unknown". */
		epochTemp = PEGetPlatformEpoch();
		if (epochTemp == -1)
			return(EINVAL);
		return(SYSCTL_RETURN(req, epochTemp));
	case HW_VECTORUNIT:
	{
		/* Collapse the vector-unit level to a boolean "has one". */
		int vector = cpu_info.vector_unit == 0? 0 : 1;
		return(SYSCTL_RETURN(req, vector));
	}
	case HW_L2SETTINGS:
		if (cpu_info.l2_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		return(SYSCTL_RETURN(req, cpu_info.l2_settings));
	case HW_L3SETTINGS:
		if (cpu_info.l3_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		return(SYSCTL_RETURN(req, cpu_info.l3_settings));
	default:
		return(ENOTSUP);
	}
	/*
	 * Callers may come to us with either int or quad buffers.
	 */
	if (doquad) {
		return(SYSCTL_RETURN(req, qval));
	}
	return(SYSCTL_RETURN(req, val));
}
static void commpage_init_cpu_capabilities( void ) { uint64_t bits; int cpus; ml_cpu_info_t cpu_info; bits = 0; ml_cpu_get_info(&cpu_info); switch (cpu_info.vector_unit) { case 9: bits |= kHasAVX1_0; /* fall thru */ case 8: bits |= kHasSSE4_2; /* fall thru */ case 7: bits |= kHasSSE4_1; /* fall thru */ case 6: bits |= kHasSupplementalSSE3; /* fall thru */ case 5: bits |= kHasSSE3; /* fall thru */ case 4: bits |= kHasSSE2; /* fall thru */ case 3: bits |= kHasSSE; /* fall thru */ case 2: bits |= kHasMMX; default: break; } switch (cpu_info.cache_line_size) { case 128: bits |= kCache128; break; case 64: bits |= kCache64; break; case 32: bits |= kCache32; break; default: break; } cpus = commpage_cpus(); // how many CPUs do we have bits |= (cpus << kNumCPUsShift); bits |= kFastThreadLocalStorage; // we use %gs for TLS #define setif(_bits, _bit, _condition) \ if (_condition) _bits |= _bit setif(bits, kUP, cpus == 1); setif(bits, k64Bit, cpu_mode_is64bit()); setif(bits, kSlow, tscFreq <= SLOW_TSC_THRESHOLD); setif(bits, kHasAES, cpuid_features() & CPUID_FEATURE_AES); setif(bits, kHasF16C, cpuid_features() & CPUID_FEATURE_F16C); setif(bits, kHasRDRAND, cpuid_features() & CPUID_FEATURE_RDRAND); setif(bits, kHasFMA, cpuid_features() & CPUID_FEATURE_FMA); setif(bits, kHasBMI1, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_BMI1); setif(bits, kHasBMI2, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_BMI2); setif(bits, kHasRTM, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_RTM); setif(bits, kHasHLE, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_HLE); setif(bits, kHasAVX2_0, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_AVX2); setif(bits, kHasRDSEED, cpuid_features() & CPUID_LEAF7_FEATURE_RDSEED); setif(bits, kHasADX, cpuid_features() & CPUID_LEAF7_FEATURE_ADX); setif(bits, kHasMPX, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_MPX); setif(bits, kHasSGX, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SGX); uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE); setif(bits, kHasENFSTRG, 
(misc_enable & 1ULL) && (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_ERMS)); _cpu_capabilities = bits; // set kernel version for use by drivers etc }
static void commpage_init_cpu_capabilities( void ) { uint64_t bits; int cpus; ml_cpu_info_t cpu_info; bits = 0; ml_cpu_get_info(&cpu_info); switch (cpu_info.vector_unit) { case 9: bits |= kHasAVX1_0; /* fall thru */ case 8: bits |= kHasSSE4_2; /* fall thru */ case 7: bits |= kHasSSE4_1; /* fall thru */ case 6: bits |= kHasSupplementalSSE3; /* fall thru */ case 5: bits |= kHasSSE3; /* fall thru */ case 4: bits |= kHasSSE2; /* fall thru */ case 3: bits |= kHasSSE; /* fall thru */ case 2: bits |= kHasMMX; default: break; } switch (cpu_info.cache_line_size) { case 128: bits |= kCache128; break; case 64: bits |= kCache64; break; case 32: bits |= kCache32; break; default: break; } cpus = commpage_cpus(); // how many CPUs do we have /** Sinetek: by default we'd like some reasonable values, ** so that the userspace runs correctly. ** ** On Mountain Lion, kHasSSE4_2 provides vanilla SSE2 routines. ** On Mavericks, we need a bit more support: SSE3, SSE3X. **/ if (IsAmdCPU()) { bits |= kHasSSE4_2; bits &= ~kHasSupplementalSSE3; #define MAVERICKS_AMD #ifdef MAVERICKS_AMD bits |= kHasSSE3; // bits |= kHasSupplementalSSE3; bits &= ~kHasSSE4_2; #endif } bits |= (cpus << kNumCPUsShift); bits |= kFastThreadLocalStorage; // we use %gs for TLS #define setif(_bits, _bit, _condition) \ if (_condition) _bits |= _bit setif(bits, kUP, cpus == 1); setif(bits, k64Bit, cpu_mode_is64bit()); setif(bits, kSlow, tscFreq <= SLOW_TSC_THRESHOLD); setif(bits, kHasAES, cpuid_features() & CPUID_FEATURE_AES); setif(bits, kHasF16C, cpuid_features() & CPUID_FEATURE_F16C); setif(bits, kHasRDRAND, cpuid_features() & CPUID_FEATURE_RDRAND); setif(bits, kHasFMA, cpuid_features() & CPUID_FEATURE_FMA); setif(bits, kHasBMI1, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_BMI1); setif(bits, kHasBMI2, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_BMI2); setif(bits, kHasRTM, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_RTM); setif(bits, kHasHLE, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_HLE); setif(bits, 
kHasAVX2_0, cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_AVX2); uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE); setif(bits, kHasENFSTRG, (misc_enable & 1ULL) && (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_ENFSTRG)); _cpu_capabilities = bits; // set kernel version for use by drivers etc }