// Return the machine's physical RAM size in bytes, caching the result in a
// function-local static after the first successful query.
// NOTE(review): the lazy init is unsynchronized; concurrent first calls could
// race on physicalMemorySize — confirm first use is single-threaded.
static uint64_t physicalMemorySizeInBytes() {
    static uint64_t physicalMemorySize = 0;
    if (!physicalMemorySize) {
#if defined(Q_OS_MACX)
        // Mach: HOST_BASIC_INFO reports total RAM in max_mem.
        host_basic_info_data_t hostInfo;
        mach_port_t host = mach_host_self();
        mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
        kern_return_t r = host_info(host, HOST_BASIC_INFO, (host_info_t)&hostInfo, &count);
        // Release the send right obtained from mach_host_self().
        mach_port_deallocate(mach_task_self(), host);
        if (r == KERN_SUCCESS)
            physicalMemorySize = hostInfo.max_mem;
#elif defined(Q_OS_WIN)
        MEMORYSTATUSEX statex;
        statex.dwLength = sizeof(statex);
        // NOTE(review): GlobalMemoryStatusEx's return value is not checked;
        // on failure ullTotalPhys is indeterminate — confirm acceptable.
        GlobalMemoryStatusEx(&statex);
        physicalMemorySize = static_cast<uint64_t>(statex.ullTotalPhys);
#else
        // POSIX fallback: total pages times page size.
        long pageSize = sysconf(_SC_PAGESIZE);
        long numberOfPages = sysconf(_SC_PHYS_PAGES);
        if (pageSize > 0 && numberOfPages > 0)
            physicalMemorySize = static_cast<uint64_t>(pageSize) * static_cast<uint64_t>(numberOfPages);
#endif
    }
    return physicalMemorySize;
}
/* * NXGetLocalArchInfo() returns the NXArchInfo matching the cputype and * cpusubtype of the local host. NULL is returned if there is no matching * entry in the ArchInfoTable. */ const NXArchInfo * NXGetLocalArchInfo(void) { struct host_basic_info hbi; kern_return_t ret; unsigned int count; mach_port_t my_mach_host_self; count = HOST_BASIC_INFO_COUNT; my_mach_host_self = mach_host_self(); ret = host_info(my_mach_host_self, HOST_BASIC_INFO, (host_info_t)&hbi, &count); mach_port_deallocate(mach_task_self(), my_mach_host_self); if(ret != KERN_SUCCESS) return(NULL); /* * There is a "bug" in the kernel for compatiblity that on * an 030 machine host_info() returns cpusubtype * CPU_SUBTYPE_MC680x0_ALL and not CPU_SUBTYPE_MC68030_ONLY. */ if(hbi.cpu_type == CPU_TYPE_MC680x0 && hbi.cpu_subtype == CPU_SUBTYPE_MC680x0_ALL) hbi.cpu_subtype = CPU_SUBTYPE_MC68030_ONLY; return(NXGetArchInfoFromCpuType(hbi.cpu_type, hbi.cpu_subtype)); }
/*
 * Fill *p with the host's HOST_BASIC_INFO.  Exits via err() when the
 * Mach call fails.
 */
void ProcessList_getHostInfo(host_basic_info_data_t *p) {
   mach_msg_type_number_t info_size = HOST_BASIC_INFO_COUNT;
   mach_port_t host = mach_host_self();

   if (0 != host_info(host, HOST_BASIC_INFO, (host_info_t)p, &info_size)) {
      err(2, "Unable to retrieve host info\n");
   }
   /* Fix: release the send right from mach_host_self() (was leaked). */
   mach_port_deallocate(mach_task_self(), host);
}
/* Return the OS-provided number of processors.  Unlike other methods
   such as reading sysfs on Linux, this method is not virtualizable;
   thus it's only used as a fall-back method, allowing
   `topo_set_fsys_root ()' to have the desired effect. */
unsigned
topo_fallback_nbprocessors(void) {
#if HAVE_DECL__SC_NPROCESSORS_ONLN
  /* POSIX: processors currently online. */
  return sysconf(_SC_NPROCESSORS_ONLN);
#elif HAVE_DECL__SC_NPROC_ONLN
  return sysconf(_SC_NPROC_ONLN);
#elif HAVE_DECL__SC_NPROCESSORS_CONF
  /* Configured (not necessarily online) processors. */
  return sysconf(_SC_NPROCESSORS_CONF);
#elif HAVE_DECL__SC_NPROC_CONF
  return sysconf(_SC_NPROC_CONF);
#elif defined(HAVE_HOST_INFO) && HAVE_HOST_INFO
  /* Darwin/Mach fallback.
     NOTE(review): host_info()'s return value is unchecked and the
     mach_host_self() send right is never deallocated — confirm this
     is acceptable for a function called rarely. */
  struct host_basic_info info;
  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
  host_info(mach_host_self(), HOST_BASIC_INFO, (integer_t*) &info, &count);
  return info.avail_cpus;
#elif defined(WIN_SYS)
  SYSTEM_INFO sysinfo;
  GetSystemInfo(&sysinfo);
  return sysinfo.dwNumberOfProcessors;
#else
#warning No known way to discover number of available processors on this system
#warning topo_fallback_nbprocessors will default to 1
  return 1;
#endif
}
/* GetNbProcessors return the number of processors on this machine. */ unsigned DlThread::GetNbProcessors() { #ifdef _WIN32 /* NOTE: This function will return the "logical" number of processors on the system. So for example, the "hyperthreaded" processors might give 2 here even if there is 1 processor onboard */ SYSTEM_INFO sysInfo; GetSystemInfo(&sysInfo); return sysInfo.dwNumberOfProcessors; #endif #ifdef LINUX return ::get_nprocs_conf(); #endif #ifdef IRIX return ::sysmp(MP_NPROCS); #endif #ifdef DARWIN host_basic_info_data_t hostInfo; mach_msg_type_number_t infoCount; infoCount = HOST_BASIC_INFO_COUNT; host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t)&hostInfo, &infoCount ); return (unsigned int)(hostInfo.max_cpus); #endif }
static cpu_type_t current_kernel_arch(void) { struct host_basic_info hi; unsigned int size; kern_return_t kret; cpu_type_t current_arch; int ret, mib[4]; size_t len; struct kinfo_proc kp; size = sizeof(hi)/sizeof(int); kret = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&hi, &size); if (kret != KERN_SUCCESS) { return 0; } current_arch = hi.cpu_type; /* Now determine if the kernel is running in 64-bit mode */ mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_PID; mib[3] = 0; /* kernproc, pid 0 */ len = sizeof(kp); ret = sysctl(mib, sizeof(mib)/sizeof(mib[0]), &kp, &len, NULL, 0); if (ret == -1) { return 0; } if (kp.kp_proc.p_flag & P_LP64) { current_arch |= CPU_ARCH_ABI64; } return current_arch; }
/*{{{ arg_host_switch_char */
/*
 * Return the command-line switch prefix character appropriate for the
 * host operating system: '/' on DOS/VMS-style systems, '-' elsewhere.
 */
char arg_host_switch_char (void)
{
    /* Fix: initialise so the function can never return an indeterminate
       value (the SERVER-branch switch below had no default case). */
    char prefix_character = '-';
#if (defined(HOST_OS_IS_MSDOS) || defined(HOST_OS_IS_VMS))
    prefix_character = '/';
#elif (defined(HOST_OS_IS_UNIX))
    prefix_character = '-';
#elif (defined(HOST_OS_IS_SERVER))
    {
        int host, os, board;
        host_info (&host, &os, &board);
        switch (os)
        {
            case _IMS_OS_DOS:
            case _IMS_OS_VMS:
                prefix_character = '/';
                break;
            case _IMS_OS_HELIOS:
            case _IMS_OS_SUNOS:
                prefix_character = '-';
                break;
            default:
                /* Fix: unknown server OS now falls back to '-' instead of
                   leaving prefix_character uninitialised. */
                prefix_character = '-';
                break;
        }
    }
#else
#include <assert.h>
    assert(0);
#endif
    return (prefix_character);
}
static void initCapabilities(void) { // Discover our CPU type mach_port_t host = mach_host_self(); mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; kern_return_t returnValue = host_info(host, HOST_BASIC_INFO, reinterpret_cast<host_info_t>(&gHostBasicInfo), &count); mach_port_deallocate(mach_task_self(), host); if (returnValue != KERN_SUCCESS) LOG_ERROR("%s : host_info(%d) : %s.\n", __FUNCTION__, returnValue, mach_error_string(returnValue)); }
void server_thread_init(void) { kern_return_t kr; static int first_time = 1; policy_rr_base_data_t rr_base; policy_rr_limit_data_t rr_limit; struct host_basic_info hbi; mach_msg_type_number_t count; if (first_time) first_time = 0; else panic("server_thread_init called again"); /* * Set the server's task scheduling to be POLICY_RR. */ rr_base.quantum = 10; /* XXX should be larger ? */ rr_base.base_priority = BASEPRI_SERVER; rr_limit.max_priority = BASEPRI_SERVER; kr = task_set_policy(mach_task_self(), default_processor_set, POLICY_RR, (policy_base_t) &rr_base, POLICY_RR_BASE_COUNT, (policy_limit_t) &rr_limit, POLICY_RR_LIMIT_COUNT, TRUE); if (kr != KERN_SUCCESS) { MACH3_DEBUG(3, kr, ("server_thread_init: task_set_policy")); } count = HOST_BASIC_INFO_COUNT; kr = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &count); if (kr != KERN_SUCCESS) { MACH3_DEBUG(1, kr, ("server_thread_init: host_info")); host_cpus = 1; } else { host_cpus = hbi.avail_cpus; } cpu_type = hbi.cpu_type; cpu_subtype = hbi.cpu_subtype; slot_name(hbi.cpu_type, hbi.cpu_subtype, &cpu_name, &cpu_subname); #if CONFIG_OSFMACH3_DEBUG printk("Using %d processor%s (type \"%s\" subtype \"%s\").\n", host_cpus, host_cpus > 1 ? "s" : "", cpu_name, cpu_subname); #endif /* CONFIG_OSFMACH3_DEBUG */ server_thread_set_kernel_limit(); }
/* Report (0/1) whether the local host is an ARMv6 Mach-O machine. */
int
arm_mach_o_query_v6 ()
{
  host_basic_info_data_t basic;
  mach_msg_type_number_t basic_count = HOST_BASIC_INFO_COUNT;

  host_info (mach_host_self (), HOST_BASIC_INFO,
	     (host_info_t) &basic, &basic_count);
  if (basic.cpu_type != BFD_MACH_O_CPU_TYPE_ARM)
    return 0;
  return basic.cpu_subtype == BFD_MACH_O_CPU_SUBTYPE_ARM_6;
}
/* Return the host's physical CPU count, or 1 when it cannot be read. */
__private_extern__
int chudxnu_phys_cpu_count(void)
{
    host_basic_info_data_t basic;
    mach_msg_type_number_t basic_count = HOST_BASIC_INFO_COUNT;

    if(host_info(host_self(), HOST_BASIC_INFO,
                 (integer_t *)&basic, &basic_count) != KERN_SUCCESS) {
        return 1; // fall back to 1, 0 doesn't make sense at all
    }
    return basic.max_cpus;
}
/* this function is a lightly modified version of some code from Apple's developer homepage to detect G5 CPUs at runtime */ main() { host_basic_info_data_t hostInfo; mach_msg_type_number_t infoCount; boolean_t is_G5; infoCount = HOST_BASIC_INFO_COUNT; host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&hostInfo, &infoCount); is_G5 = ((hostInfo.cpu_type == CPU_TYPE_POWERPC) && (hostInfo.cpu_subtype == CPU_SUBTYPE_POWERPC_970)); if (is_G5) printf("1"); }
/*
 * Win32-style GetSystemInfo emulation on Mach: fills in the page size
 * and the number of available processors.
 */
EXPORT void GetSystemInfo(LPSYSTEM_INFO lpSystemInfo){
	host_name_port_t myhost;
	host_basic_info_data_t hinfo;
	vm_size_t page_size;
	mach_msg_type_number_t count;

	myhost = mach_host_self();
	count = HOST_BASIC_INFO_COUNT;
	host_info(myhost, HOST_BASIC_INFO, (host_info_t) &hinfo, &count);
	host_page_size(myhost, &page_size);
	/* Fix: release the send right from mach_host_self() (was leaked). */
	mach_port_deallocate(mach_task_self(), myhost);

	lpSystemInfo->dwPageSize = page_size;
	lpSystemInfo->dwNumberOfProcessors = hinfo.avail_cpus;
	return;
}
/* Return the number of logical CPU cores, or 0 on failure. */
size_t ncores() {
#if EB_SYS_DARWIN
    host_basic_info_data_t info;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
    mach_port_t host = mach_host_self();
    kern_return_t r = host_info(host, HOST_BASIC_INFO, (host_info_t)&info, &count);
    /* Fix: release the send right from mach_host_self() (was leaked). */
    mach_port_deallocate(mach_task_self(), host);
    eb_assert_or_recover(r == KERN_SUCCESS, return 0);
    eb_assert_or_recover(count == HOST_BASIC_INFO_COUNT, return 0);
    /* Fix: dropped the tautological `<= SIZE_MAX` upper-bound check
       (logical_cpu is a fixed-width integer that always satisfies it). */
    eb_assert_or_recover(info.logical_cpu > 0, return 0);
    return (size_t)info.logical_cpu;
#elif EB_SYS_LINUX
    long ncores = sysconf(_SC_NPROCESSORS_ONLN);
    eb_assert_or_recover(ncores > 0, return 0);
    return (size_t)ncores;
#else
    /* Fix: previously control fell off the end of this value-returning
       function on unsupported platforms (undefined behavior). */
    return 0;
#endif
}
/*
 * Return the host's physical memory size in mebibytes, or 0 on failure.
 */
unsigned int GetTotalMemory()
{
    kern_return_t ke;
    mach_port_t host;
    struct host_basic_info hbi;
    mach_msg_type_number_t memsz;

    host = mach_host_self();
    memsz = sizeof(hbi) / sizeof(integer_t);
    ke = host_info(host, HOST_BASIC_INFO, (host_info_t) &hbi, &memsz);
    /* Fix: release the send right from mach_host_self() (was leaked). */
    mach_port_deallocate(mach_task_self(), host);
    if (ke != KERN_SUCCESS) {
        return 0;
    }
    /* max_mem is in bytes; >> 20 converts to MiB. */
    return hbi.max_mem >> 20;
}
/* procfs generator: render a Linux-style /proc/vmstat from Mach
   vm_statistics.  Returns 0 on success or an error code. */
static error_t
rootdir_gc_vmstat (void *hook, char **contents, ssize_t *contents_len)
{
  host_basic_info_data_t hbi;
  mach_msg_type_number_t cnt;
  struct vm_statistics vmstats;
  error_t err;

  err = vm_statistics (mach_task_self (), &vmstats);
  if (err)
    return EIO;

  cnt = HOST_BASIC_INFO_COUNT;
  err = host_info (mach_host_self (), HOST_BASIC_INFO,
		   (host_info_t) &hbi, &cnt);
  if (err)
    return err;
  assert (cnt == HOST_BASIC_INFO_COUNT);

  *contents_len = asprintf (contents,
      "nr_free_pages %lu\n"
      "nr_inactive_anon %lu\n"
      "nr_active_anon %lu\n"
      "nr_inactive_file %lu\n"
      "nr_active_file %lu\n"
      "nr_unevictable %lu\n"
      "nr_mlock %lu\n"
      "pgpgin %lu\n"
      "pgpgout %lu\n"
      "pgfault %lu\n",
      (long unsigned) vmstats.free_count,
      /* FIXME: how can we distinguish the anon/file pages? Maybe we can
         ask the default pager how many it manages? */
      (long unsigned) vmstats.inactive_count,
      (long unsigned) vmstats.active_count,
      (long unsigned) 0,
      (long unsigned) 0,
      (long unsigned) vmstats.wire_count,
      (long unsigned) vmstats.wire_count,
      (long unsigned) vmstats.pageins,
      (long unsigned) vmstats.pageouts,
      (long unsigned) vmstats.faults);
  /* Fix: asprintf returns -1 (leaving *contents undefined) on allocation
     failure; report ENOMEM instead of returning success with a bogus
     negative length. */
  if (*contents_len < 0)
    return ENOMEM;

  return 0;
}
/* procfs generator: render a Linux-style /proc/meminfo from Mach host
   info, vm_statistics and default-pager swap info.  Returns 0 on
   success or an error code. */
static error_t
rootdir_gc_meminfo (void *hook, char **contents, ssize_t *contents_len)
{
  host_basic_info_data_t hbi;
  mach_msg_type_number_t cnt;
  struct vm_statistics vmstats;
  default_pager_info_t swap;
  error_t err;

  err = vm_statistics (mach_task_self (), &vmstats);
  if (err)
    return EIO;

  cnt = HOST_BASIC_INFO_COUNT;
  err = host_info (mach_host_self (), HOST_BASIC_INFO,
		   (host_info_t) &hbi, &cnt);
  if (err)
    return err;

  err = get_swapinfo (&swap);
  if (err)
    return err;
  assert (cnt == HOST_BASIC_INFO_COUNT);

  *contents_len = asprintf (contents,
      "MemTotal: %14lu kB\n"
      "MemFree:  %14lu kB\n"
      "Active:   %14lu kB\n"
      "Inactive: %14lu kB\n"
      "Mlocked:  %14lu kB\n"
      "SwapTotal:%14lu kB\n"
      "SwapFree: %14lu kB\n"
      ,
      /* TODO: check that these are really 1024-bytes kBs. */
      (long unsigned) hbi.memory_size / 1024,
      (long unsigned) vmstats.free_count * PAGE_SIZE / 1024,
      (long unsigned) vmstats.active_count * PAGE_SIZE / 1024,
      (long unsigned) vmstats.inactive_count * PAGE_SIZE / 1024,
      (long unsigned) vmstats.wire_count * PAGE_SIZE / 1024,
      (long unsigned) swap.dpi_total_space / 1024,
      (long unsigned) swap.dpi_free_space / 1024);
  /* Fix: asprintf returns -1 (leaving *contents undefined) on allocation
     failure; report ENOMEM instead of returning success with a bogus
     negative length. */
  if (*contents_len < 0)
    return ENOMEM;

  return 0;
}
int osd_num_processors(void) { int processors = 1; struct host_basic_info host_basic_info; unsigned int count; kern_return_t r; mach_port_t my_mach_host_self; count = HOST_BASIC_INFO_COUNT; my_mach_host_self = mach_host_self(); if ( ( r = host_info(my_mach_host_self, HOST_BASIC_INFO, (host_info_t)(&host_basic_info), &count)) == KERN_SUCCESS ) { processors = host_basic_info.avail_cpus; } mach_port_deallocate(mach_task_self(), my_mach_host_self); return processors; }
/*
 * Return a human-readable description of the host CPU, formatted as
 * "type (subtype)" via slot_name(), caching the result in the global
 * `cpu' buffer on first call (guarded by the global `got_cpu' flag).
 * NOTE(review): writes with unbounded sprintf into the global buffer;
 * assumes the slot_name() strings fit — confirm the size of `cpu'.
 */
char *platform_get_nextopenstep_runtime_cpu(void)
{
    kern_return_t ret;
    struct host_basic_info hi;
    unsigned int count = HOST_BASIC_INFO_COUNT;
    char *cpu_name = NULL;
    char *cpu_subname = NULL;

    if (!got_cpu) {
        ret = host_info(host_self(), HOST_BASIC_INFO, (host_info_t)&hi, &count);
        if (ret != KERN_SUCCESS) {
            sprintf(cpu, "Unknown CPU");
        } else {
            /* slot_name() yields printable names for the type/subtype pair. */
            slot_name(hi.cpu_type, hi.cpu_subtype, &cpu_name, &cpu_subname);
            sprintf(cpu, "%s (%s)", cpu_name, cpu_subname);
        }
        got_cpu = 1;
    }
    return cpu;
}
/*
 * Configure the default pager's task scheduling: look up the host's
 * system priority and switch the task to round-robin (POLICY_RR) at
 * that priority on the default processor set.
 */
void
default_pager_set_policy(
	mach_port_t master_host_port)
{
	host_priority_info_data_t host_pri_info;
	mach_port_t default_processor_set_name;
	mach_port_t default_processor_set;
	policy_rr_base_data_t rr_base;
	policy_rr_limit_data_t rr_limit;
	kern_return_t r;
	mach_msg_type_number_t count;
	static char here[] = "default_pager_set_policy";

	count = HOST_PRIORITY_INFO_COUNT;
	r = host_info(mach_host_self(),
		      HOST_PRIORITY_INFO,
		      (host_info_t) &host_pri_info,
		      &count);
	if (r != KERN_SUCCESS)
	    dprintf(("Could not get host priority info. Error = 0x%x\n", r));
	/* NOTE(review): when host_info() fails, host_pri_info below is read
	   uninitialized — confirm whether an early return or a default
	   priority would be more appropriate here. */

	rr_base.quantum = 0;
	rr_base.base_priority = host_pri_info.system_priority;
	rr_limit.max_priority = host_pri_info.system_priority;

	/* Resolve the privileged port for the default processor set; both
	   calls' results are deliberately ignored (best effort). */
	(void)processor_set_default(mach_host_self(),
				    &default_processor_set_name);
	(void)host_processor_set_priv(master_host_port,
				      default_processor_set_name,
				      &default_processor_set);
	r = task_set_policy(default_pager_self, default_processor_set,
			    POLICY_RR,
			    (policy_base_t) & rr_base, POLICY_RR_BASE_COUNT,
			    (policy_limit_t) & rr_limit, POLICY_RR_LIMIT_COUNT,
			    TRUE);
	if (r != KERN_SUCCESS)
	    dprintf(("task_set_policy returned 0x%x %s\n",
		     r, mach_error_string(r)));
}
/* procfs generator: render a Linux-style /proc/loadavg (1-, 5- and
   15-minute averages).  The "1/0 0" fields (runnable/total tasks, last
   pid) are not tracked here and are hardcoded. */
static error_t
rootdir_gc_loadavg (void *hook, char **contents, ssize_t *contents_len)
{
  host_load_info_data_t hli;
  mach_msg_type_number_t cnt;
  error_t err;

  cnt = HOST_LOAD_INFO_COUNT;
  err = host_info (mach_host_self (), HOST_LOAD_INFO,
		   (host_info_t) &hli, &cnt);
  if (err)
    return err;
  assert (cnt == HOST_LOAD_INFO_COUNT);

  *contents_len = asprintf (contents,
      "%.2f %.2f %.2f 1/0 0\n",
      hli.avenrun[0] / (double) LOAD_SCALE,
      hli.avenrun[1] / (double) LOAD_SCALE,
      hli.avenrun[2] / (double) LOAD_SCALE);
  /* Fix: asprintf returns -1 (leaving *contents undefined) on allocation
     failure; report ENOMEM instead of returning success with a bogus
     negative length. */
  if (*contents_len < 0)
    return ENOMEM;

  return 0;
}
/* Return the number of processors to report to the core. */
int osd_get_num_processors(void)
{
	// JJG: Override this for mamehub to ensure consistency
	// (the early return deliberately pins the count to 1; the
	// Mach-based detection below is intentionally kept but
	// unreachable).
	return 1;

	int processors = 1;
	struct host_basic_info host_basic_info;
	unsigned int count;
	kern_return_t r;
	mach_port_t my_mach_host_self;

	count = HOST_BASIC_INFO_COUNT;
	my_mach_host_self = mach_host_self();
	if ( ( r = host_info(my_mach_host_self, HOST_BASIC_INFO,
			     (host_info_t)(&host_basic_info),
			     &count)) == KERN_SUCCESS )
	{
		processors = host_basic_info.avail_cpus;
	}
	mach_port_deallocate(mach_task_self(), my_mach_host_self);

	return processors;
}
/*
 * Make another Mach call and get the hardware information.
 * Fills the global `machine' and `processor' buffers with the CPU
 * architecture and processor names obtained from slot_name().
 */
void detectHardware(void)
{
    kern_return_t kret;
    struct host_basic_info kbi;
    unsigned int count = HOST_BASIC_INFO_COUNT;
    char *pCpuType, *pCpuSubtype;

    kret = host_info(host_self(), HOST_BASIC_INFO, (host_info_t)&kbi, &count);
    if (kret != KERN_SUCCESS) {
        mach_error("host_info() failed.", kret);
    } else {
        slot_name(kbi.cpu_type,      /* Architecture */
                  kbi.cpu_subtype,   /* Processor */
                  &pCpuType,
                  &pCpuSubtype);
        /* NOTE(review): copying a fixed 32 bytes may read past the end of
           the slot_name() strings when they are shorter than 32 bytes and
           does not guarantee NUL termination — confirm the declared sizes
           of the `machine' and `processor' globals. */
        bcopy(pCpuType, machine, 32);
        bcopy(pCpuSubtype, processor, 32);
    }
}
/*
 * _dyld_init() is the start off point for the dynamic link editor.  It is
 * called before any part of an executable program runs.  This is done either
 * in the executable runtime startoff or by the kernel as a result of an exec(2)
 * system call (which goes through __dyld_start to get here).
 *
 * This routine causes the dynamic shared libraries an executable uses to be
 * mapped, sets up the executable and the libraries to call the dynamic link
 * editor when a lazy reference to a symbol is first used, resolves all non-lazy
 * symbol references needed to start running the program and then returns to
 * the executable program to start up the program.
 */
unsigned long
_dyld_init(
struct mach_header *mh,
unsigned long argc,
char **argv,
char **envp)
{
    unsigned int count;
    kern_return_t r;
    unsigned long entry_point;
    mach_port_t my_mach_host_self;
#ifndef __MACH30__
    struct section *s;
#endif

#ifdef MALLOC_DEBUG
    extern void cthread_init(void);
	cthread_init();
#endif

	/* set lock for dyld data structures */
	set_lock();

	/*
	 * Get the cputype and cpusubtype of the machine we're running on.
	 */
	count = HOST_BASIC_INFO_COUNT;
	my_mach_host_self = mach_host_self();
	if((r = host_info(my_mach_host_self, HOST_BASIC_INFO,
			  (host_info_t)(&host_basic_info), &count)) !=
	   KERN_SUCCESS){
	    mach_port_deallocate(mach_task_self(), my_mach_host_self);
	    /* NOTE(review): on this failure path the host port is
	       deallocated here AND again just below — looks like a double
	       mach_port_deallocate unless mach_error() never returns;
	       confirm mach_error()'s behavior here. */
	    mach_error(r, "can't get host basic info");
	}
	mach_port_deallocate(mach_task_self(), my_mach_host_self);

#if defined(__GONZO_BUNSEN_BEAKER__) && defined(__ppc__)
	/* Record vector-unit availability for these PowerPC subtypes. */
	if(host_basic_info.cpu_type == CPU_TYPE_POWERPC &&
	   (host_basic_info.cpu_subtype == CPU_SUBTYPE_POWERPC_7400 ||
	    host_basic_info.cpu_subtype == CPU_SUBTYPE_POWERPC_7450 ||
	    host_basic_info.cpu_subtype == CPU_SUBTYPE_POWERPC_970))
	    processor_has_vec = TRUE;
#endif

	/*
	 * Pickup the environment variables for the dynamic link editor.
	 */
	pickup_environment_variables(envp);

	/*
	 * Make initial trace entry if requested.
	 */
	DYLD_TRACE_INIT_START(0);

	/*
	 * Create the executable's path from the exec_path and the current
	 * working directory (if needed).  If we did not pick up the exec_path
	 * (we are running with an old kernel) use argv[0] if has a slash in it
	 * as it is a path relative to the current working directory.  Of course
	 * argv[0] may not have anything to do with the filename being executed
	 * in all cases but it is likely to be right.
	 */
	if(exec_path != NULL)
	    create_executables_path(exec_path);
	else if(strchr(argv[0], '/') != NULL)
	    create_executables_path(argv[0]);
	if(dyld_executable_path_debug == TRUE)
	    printf("executables_path = %s\n",
		   executables_path == NULL ? "NULL" : executables_path);

#ifdef DYLD_PROFILING
	s = (struct section *)getsectbynamefromheader(
	    &_mh_dylinker_header, SEG_TEXT, SECT_TEXT);
	monstartup((char *)(s->addr + dyld_image_vmaddr_slide),
		   (char *)(s->addr + dyld_image_vmaddr_slide + s->size));
#endif

#ifndef __MACH30__
	/*
	 * See if the profile server for shared pcsample buffers exists.
	 * Then if so try to setup a pcsample buffer for dyld itself.
	 */
	profile_server = profile_server_exists();
	if(profile_server == TRUE){
	    s = (struct section *)getsectbynamefromheader(
		&_mh_dylinker_header, SEG_TEXT, SECT_TEXT);
	    shared_pcsample_buffer("/usr/lib/dyld", s, dyld_image_vmaddr_slide);
	}
#endif /* __MACH30__ */

	/*
	 * Start off by loading the executable image as the first object image
	 * that make up the program.  This in turn will load the dynamic shared
	 * libraries the executable uses and the libraries those libraries use
	 * to the list of library images that make up the program.
	 */
	if((mh->flags & MH_FORCE_FLAT) != 0 || dyld_force_flat_namespace == TRUE)
	    force_flat_namespace = TRUE;
	if((mh->flags & MH_NOFIXPREBINDING) == MH_NOFIXPREBINDING)
	    dyld_no_fix_prebinding = TRUE;
	executable_prebound = (mh->flags & MH_PREBOUND) == MH_PREBOUND;
	load_executable_image(argv[0], mh, &entry_point);

	/*
	 * If the prebinding set is still set then try to setup this program to
	 * use the prebound state in it's images.  If any of these fail then
	 * undo any prebinding and bind as usual.
	 */
	if((mh->flags & MH_PREBOUND) != MH_PREBOUND){
	    /*
	     * The executable is not prebound but if the libraries are setup
	     * for prebinding and the executable when built had no undefined
	     * symbols then try to use the prebound libraries.  This is for
	     * the flat namespace case (and only some sub cases, see the
	     * comments in try_to_use_prebound_libraries()).  If this fails
	     * then the two-level namespace cases are handled by the routine
	     * find_twolevel_prebound_lib_subtrees() which is called below.
	     */
	    if(prebinding == TRUE){
		if((mh->flags & MH_NOUNDEFS) == MH_NOUNDEFS){
		    try_to_use_prebound_libraries();
		}
		else{
		    if(dyld_prebind_debug != 0)
			print("dyld: %s: prebinding disabled because "
			      "executable not marked with MH_NOUNDEFS\n",
			      argv[0]);
		    prebinding = FALSE;
		}
	    }
	}
	else if(prebinding == TRUE){
	    set_images_to_prebound();
	}
	if(prebinding == FALSE){
	    /*
	     * The program was not fully prebound but if we are not forcing
	     * flat namespace semantics we can still use any sub trees of
	     * libraries that are all two-level namespace and prebound.
	     */
	    if(force_flat_namespace == FALSE)
		find_twolevel_prebound_lib_subtrees();
	    /*
	     * First undo any images that were prebound.
	     */
	    undo_prebound_images(FALSE);
	    /*
	     * Build the initial list of non-lazy symbol references based on the
	     * executable.
	     */
	    if((mh->flags & MH_BINDATLOAD) != 0 || dyld_bind_at_launch == TRUE)
		executable_bind_at_load = TRUE;
	    setup_initial_undefined_list(FALSE);
	    /*
	     * With the undefined list set up link in the needed modules.
	     */
	    link_in_need_modules(FALSE, FALSE, NULL);
	}
	else{
	    if(dyld_prebind_debug != 0){
		if((mh->flags & MH_PREBOUND) != MH_PREBOUND)
		    print("dyld: %s: prebinding enabled using only prebound "
			  "libraries\n", argv[0]);
		else
		    print("dyld: %s: prebinding enabled\n", argv[0]);
	    }
	}

	/*
	 * Now with the program about to be launched set
	 * all_twolevel_modules_prebound to TRUE if all libraries are two-level,
	 * prebound and all modules in them are linked.
	 */
	set_all_twolevel_modules_prebound();

	launched = TRUE;

	/*
	 * If DYLD_EBADEXEC_ONLY is set then print a message as the program
	 * will launch.
	 */
	if(dyld_ebadexec_only == TRUE){
	    error("executable: %s will be launched (DYLD_EBADEXEC_ONLY set, "
		  "program not started)", argv[0]);
	    link_edit_error(DYLD_FILE_ACCESS, EBADEXEC, argv[0]);
	}

	if(dyld_print_libraries_post_launch == TRUE)
	    dyld_print_libraries = TRUE;

	/* release lock for dyld data structures */
	release_lock();

	DYLD_TRACE_INIT_END(0);

	/*
	 * Return the address of the executable's entry point which is used if
	 * this routine was called from __dyld_start.  Otherwise this was called
	 * from the runtime startoff of the executable and this return value is
	 * ignored.
	 */
	return(entry_point);
}
/* * get_arch_from_host() gets the architecture from the host this is running on * and returns zero if the architecture is not known and zero if the * architecture is known. If the parameters family_arch_flag and * specific_arch_flag are not NULL they get fill in with the family * architecture and specific architecure for the host. If the architecture * is unknown and the parameters are not NULL then all fields are set to zero. */ __private_extern__ int get_arch_from_host( struct arch_flag *family_arch_flag, struct arch_flag *specific_arch_flag) { struct host_basic_info host_basic_info; unsigned int count; kern_return_t r; mach_port_t my_mach_host_self; if(family_arch_flag != NULL) memset(family_arch_flag, '\0', sizeof(struct arch_flag)); if(specific_arch_flag != NULL) memset(specific_arch_flag, '\0', sizeof(struct arch_flag)); count = HOST_BASIC_INFO_COUNT; my_mach_host_self = mach_host_self(); if((r = host_info(my_mach_host_self, HOST_BASIC_INFO, (host_info_t)(&host_basic_info), &count)) != KERN_SUCCESS) { mach_port_deallocate(mach_task_self(), my_mach_host_self); return(0); } mach_port_deallocate(mach_task_self(), my_mach_host_self); if(family_arch_flag != NULL) { family_arch_flag->cputype = host_basic_info.cpu_type; } if(specific_arch_flag != NULL) { specific_arch_flag->cputype = host_basic_info.cpu_type; specific_arch_flag->cpusubtype = host_basic_info.cpu_subtype; } switch(host_basic_info.cpu_type) { case CPU_TYPE_MC680x0: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_MC680x0_ALL: case CPU_SUBTYPE_MC68030_ONLY: if(family_arch_flag != NULL) { family_arch_flag->name = "m68k"; family_arch_flag->cpusubtype = CPU_SUBTYPE_MC680x0_ALL; } if(specific_arch_flag != NULL) { specific_arch_flag->name = "m68030"; /* * There is a "bug" in the kernel for compatiblity that on * an 030 machine host_info() returns cpusubtype * CPU_SUBTYPE_MC680x0_ALL and not CPU_SUBTYPE_MC68030_ONLY. 
*/ specific_arch_flag->cpusubtype = CPU_SUBTYPE_MC68030_ONLY; } return(1); case CPU_SUBTYPE_MC68040: if(family_arch_flag != NULL) { family_arch_flag->name = "m68k"; family_arch_flag->cpusubtype = CPU_SUBTYPE_MC680x0_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "m68040"; return(1); } break; case CPU_TYPE_POWERPC: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_POWERPC_ALL: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc"; return(1); case CPU_SUBTYPE_POWERPC_601: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc601"; return(1); case CPU_SUBTYPE_POWERPC_603: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc603"; return(1); case CPU_SUBTYPE_POWERPC_603e: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc603e"; return(1); case CPU_SUBTYPE_POWERPC_603ev: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc603ev"; return(1); case CPU_SUBTYPE_POWERPC_604: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc604"; return(1); case CPU_SUBTYPE_POWERPC_604e: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc604e"; return(1); case 
CPU_SUBTYPE_POWERPC_750: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc750"; return(1); case CPU_SUBTYPE_POWERPC_7400: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc7400"; return(1); case CPU_SUBTYPE_POWERPC_7450: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc7450"; return(1); case CPU_SUBTYPE_POWERPC_970: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "ppc970"; return(1); default: if(family_arch_flag != NULL) { family_arch_flag->name = "ppc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_POWERPC_ALL; } if(specific_arch_flag != NULL) { specific_arch_flag->name = savestr("PowerPC cpusubtype 1234567890"); if(specific_arch_flag->name != NULL) sprintf(specific_arch_flag->name, "PowerPC cpusubtype %u", host_basic_info.cpu_subtype); } return(1); } break; case CPU_TYPE_VEO: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_VEO_1: if(family_arch_flag != NULL) { family_arch_flag->name = "veo"; family_arch_flag->cpusubtype = CPU_SUBTYPE_VEO_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "veo1"; return(1); case CPU_SUBTYPE_VEO_2: if(family_arch_flag != NULL) { family_arch_flag->name = "veo"; family_arch_flag->cpusubtype = CPU_SUBTYPE_VEO_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "veo2"; return(1); case CPU_SUBTYPE_VEO_3: if(family_arch_flag != NULL) { family_arch_flag->name = "veo"; family_arch_flag->cpusubtype = CPU_SUBTYPE_VEO_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "veo3"; 
return(1); case CPU_SUBTYPE_VEO_4: if(family_arch_flag != NULL) { family_arch_flag->name = "veo"; family_arch_flag->cpusubtype = CPU_SUBTYPE_VEO_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "veo4"; return(1); default: if(family_arch_flag != NULL) { family_arch_flag->name = "veo"; family_arch_flag->cpusubtype = CPU_SUBTYPE_VEO_ALL; } if(specific_arch_flag != NULL) { specific_arch_flag->name = savestr("VEO cpusubtype 1234567890"); sprintf(specific_arch_flag->name, "VEO cpusubtype %u", host_basic_info.cpu_subtype); } return(1); } break; case CPU_TYPE_MC88000: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_MC88000_ALL: case CPU_SUBTYPE_MC88110: if(family_arch_flag != NULL) { family_arch_flag->name = "m88k"; family_arch_flag->cpusubtype = CPU_SUBTYPE_MC88000_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "m88k"; return(1); } break; case CPU_TYPE_I386: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_I386_ALL: /* case CPU_SUBTYPE_386: same value as above */ if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "i386"; return(1); case CPU_SUBTYPE_486: if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "i486"; return(1); case CPU_SUBTYPE_486SX: if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "i486SX"; return(1); case CPU_SUBTYPE_PENT: /* same as CPU_SUBTYPE_586 */ if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "pentium"; return(1); case CPU_SUBTYPE_PENTPRO: if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; 
family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "pentpro"; return(1); case CPU_SUBTYPE_PENTII_M3: if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "pentIIm3"; return(1); case CPU_SUBTYPE_PENTII_M5: if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "pentIIm5"; return(1); case CPU_SUBTYPE_PENTIUM_4: if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "pentium4"; return(1); default: if(family_arch_flag != NULL) { family_arch_flag->name = "i386"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I386_ALL; } if(specific_arch_flag != NULL) { specific_arch_flag->name = savestr("Intel family 12 model 12345678"); if(specific_arch_flag->name != NULL) sprintf(specific_arch_flag->name, "Intel family %u model %u", CPU_SUBTYPE_INTEL_FAMILY(host_basic_info.cpu_subtype), CPU_SUBTYPE_INTEL_MODEL(host_basic_info.cpu_subtype)); } return(1); } break; case CPU_TYPE_I860: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_I860_ALL: case CPU_SUBTYPE_I860_860: if(family_arch_flag != NULL) { family_arch_flag->name = "i860"; family_arch_flag->cpusubtype = CPU_SUBTYPE_I860_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "i860"; return(1); } break; case CPU_TYPE_HPPA: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_HPPA_ALL: if(family_arch_flag != NULL) { family_arch_flag->name = "hppa"; family_arch_flag->cpusubtype = CPU_SUBTYPE_HPPA_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "hppa"; return(1); case CPU_SUBTYPE_HPPA_7100LC: if(family_arch_flag != NULL) { family_arch_flag->name = "hppa"; 
family_arch_flag->cpusubtype = CPU_SUBTYPE_HPPA_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "hppa7100LC"; return(1); } break; case CPU_TYPE_SPARC: switch(host_basic_info.cpu_subtype) { case /*CPU_SUBTYPE_SPARC_ALL*/0: if(family_arch_flag != NULL) { family_arch_flag->name = "sparc"; family_arch_flag->cpusubtype = CPU_SUBTYPE_SPARC_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "sparc"; return(1); } break; case CPU_TYPE_ARM: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_ARM_ALL: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "arm"; return(1); case CPU_SUBTYPE_ARM_V4T: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv4t"; return(1); case CPU_SUBTYPE_ARM_V5TEJ: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv5"; return(1); case CPU_SUBTYPE_ARM_XSCALE: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "xscale"; return(1); case CPU_SUBTYPE_ARM_V6: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv6"; return(1); case CPU_SUBTYPE_ARM_V6M: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv6m"; return(1); case CPU_SUBTYPE_ARM_V7: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag 
!= NULL) specific_arch_flag->name = "armv7"; return(1); case CPU_SUBTYPE_ARM_V7F: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv7f"; return(1); case CPU_SUBTYPE_ARM_V7S: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv7s"; return(1); case CPU_SUBTYPE_ARM_V7K: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv7k"; return(1); case CPU_SUBTYPE_ARM_V7M: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv7m"; return(1); case CPU_SUBTYPE_ARM_V7EM: if(family_arch_flag != NULL) { family_arch_flag->name = "arm"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "armv7em"; return(1); } break; case CPU_TYPE_ARM64: switch(host_basic_info.cpu_subtype) { case CPU_SUBTYPE_ARM64_ALL: if(family_arch_flag != NULL) { family_arch_flag->name = "arm64"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM64_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "arm64"; return(1); case CPU_SUBTYPE_ARM64_V8: if(family_arch_flag != NULL) { family_arch_flag->name = "arm64"; family_arch_flag->cpusubtype = CPU_SUBTYPE_ARM64_ALL; } if(specific_arch_flag != NULL) specific_arch_flag->name = "arm64v8"; return(1); } break; } return(0); }
/* ** PR_GetPhysicalMemorySize() ** ** Implementation notes: ** Every platform does it a bit different. ** bytes is the returned value. ** for each platform's "if defined" section ** declare your local variable ** do your thing, assign to bytes. ** */ PR_IMPLEMENT(PRUint64) PR_GetPhysicalMemorySize(void) { PRUint64 bytes = 0; #if defined(LINUX) || defined(SOLARIS) long pageSize = sysconf(_SC_PAGESIZE); long pageCount = sysconf(_SC_PHYS_PAGES); bytes = (PRUint64) pageSize * pageCount; #elif defined(HPUX) struct pst_static info; int result = pstat_getstatic(&info, sizeof(info), 1, 0); if (result == 1) bytes = (PRUint64) info.physical_memory * info.page_size; #elif defined(DARWIN) struct host_basic_info hInfo; mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; int result = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hInfo, &count); if (result == KERN_SUCCESS) bytes = hInfo.max_mem; #elif defined(WIN32) /* Try to use the newer GlobalMemoryStatusEx API for Windows 2000+. */ GlobalMemoryStatusExFn globalMemory = (GlobalMemoryStatusExFn) NULL; HMODULE module = GetModuleHandleW(L"kernel32.dll"); if (module) { globalMemory = (GlobalMemoryStatusExFn)GetProcAddress(module, "GlobalMemoryStatusEx"); if (globalMemory) { PR_MEMORYSTATUSEX memStat; memStat.dwLength = sizeof(memStat); if (globalMemory(&memStat)) bytes = memStat.ullTotalPhys; } } if (!bytes) { /* Fall back to the older API. */ MEMORYSTATUS memStat; memset(&memStat, 0, sizeof(memStat)); GlobalMemoryStatus(&memStat); bytes = memStat.dwTotalPhys; } #elif defined(OS2) ULONG ulPhysMem; DosQuerySysInfo(QSV_TOTPHYSMEM, QSV_TOTPHYSMEM, &ulPhysMem, sizeof(ulPhysMem)); bytes = ulPhysMem; #elif defined(AIX) if (odm_initialize() == 0) { int how_many; struct CuAt *obj = getattr("sys0", "realmem", 0, &how_many); if (obj != NULL) { bytes = (PRUint64) atoi(obj->value) * 1024; free(obj); } odm_terminate(); } #else PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0); #endif return bytes; } /* end PR_GetPhysicalMemorySize() */
/*
** PR_GetPhysicalMemorySize()
**
** Returns the total amount of physical RAM in bytes, or 0 if the size
** cannot be determined (or the platform is unsupported, in which case
** PR_NOT_IMPLEMENTED_ERROR is set).
**
** Implementation notes:
**  Every platform does it a bit different.
**  bytes is the returned value.
**  for each platform's "if defined" section
**    declare your local variable
**    do your thing, assign to bytes.
*/
PR_IMPLEMENT(PRUint64) PR_GetPhysicalMemorySize(void)
{
    PRUint64 bytes = 0;

#if defined(LINUX) || defined(SOLARIS)
    long pageSize = sysconf(_SC_PAGESIZE);
    long pageCount = sysconf(_SC_PHYS_PAGES);

    /* sysconf() returns -1 on failure; only multiply valid results. */
    if (pageSize >= 0 && pageCount >= 0)
        bytes = (PRUint64) pageSize * pageCount;

#elif defined(NETBSD) || defined(OPENBSD)
    int mib[2];
    int rc;
    uint64_t memSize;
    size_t len = sizeof(memSize);

    /* HW_PHYSMEM64 yields the full 64-bit physical memory size. */
    mib[0] = CTL_HW;
    mib[1] = HW_PHYSMEM64;
    rc = sysctl(mib, 2, &memSize, &len, NULL, 0);
    if (-1 != rc) {
        bytes = memSize;
    }

#elif defined(HPUX)
    struct pst_static info;
    int result = pstat_getstatic(&info, sizeof(info), 1, 0);

    if (result == 1)
        bytes = (PRUint64) info.physical_memory * info.page_size;

#elif defined(DARWIN)
    /* mach_host_self() returns a send right; release it with
     * mach_port_deallocate() so the call does not leak a port right. */
    mach_port_t mach_host = mach_host_self();
    struct host_basic_info hInfo;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

    int result = host_info(mach_host, HOST_BASIC_INFO,
                           (host_info_t) &hInfo, &count);
    mach_port_deallocate(mach_task_self(), mach_host);
    if (result == KERN_SUCCESS)
        bytes = hInfo.max_mem;

#elif defined(WIN32)
    MEMORYSTATUSEX memStat;
    memStat.dwLength = sizeof(memStat);

    if (GlobalMemoryStatusEx(&memStat))
        bytes = memStat.ullTotalPhys;

#elif defined(OS2)
    ULONG ulPhysMem;
    DosQuerySysInfo(QSV_TOTPHYSMEM,
                    QSV_TOTPHYSMEM,
                    &ulPhysMem,
                    sizeof(ulPhysMem));
    bytes = ulPhysMem;

#elif defined(AIX)
    if (odm_initialize() == 0) {
        int how_many;
        struct CuAt *obj = getattr("sys0", "realmem", 0, &how_many);

        /* "realmem" is reported in kilobytes. */
        if (obj != NULL) {
            bytes = (PRUint64) atoi(obj->value) * 1024;
            free(obj);
        }
        odm_terminate();
    }

#else
    PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0);
#endif

    return bytes;
} /* end PR_GetPhysicalMemorySize() */
/*
 * Supporting some variables requires us to do "real" work.  We
 * gather some of that here.
 *
 * Generic sysctl handler for the "hw" tree: arg2 selects which HW_*
 * variable is being queried (optionally OR'd with CTLHW_RETQUAD to
 * request a 64-bit result).  Returns 0 on success or an errno value.
 */
static int
sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1,
	int arg2, struct sysctl_req *req)
{
	char dummy[65];                 /* scratch for machine/model name strings */
	int epochTemp;
	ml_cpu_info_t cpu_info;
	int val, doquad;
	long long qval;
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	/*
	 * Test and mask off the 'return quad' flag.
	 * Note that only some things here support it.
	 */
	doquad = arg2 & CTLHW_RETQUAD;
	arg2 &= ~CTLHW_RETQUAD;

	ml_cpu_get_info(&cpu_info);

#define BSD_HOST 1
	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	/*
	 * Handle various OIDs.
	 *
	 * OIDs that can return int or quad set val and qval and then break.
	 * Errors and int-only values return inline.
	 */
	switch (arg2) {
	/* CPU-count OIDs come straight from host_info(); fail with EINVAL
	 * if that call did not succeed. */
	case HW_NCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.max_cpus));
		} else {
			return(EINVAL);
		}
	case HW_AVAILCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.avail_cpus));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_PHYSICALCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.physical_cpu));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_PHYSICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.physical_cpu_max));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_LOGICALCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.logical_cpu));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_LOGICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.logical_cpu_max));
		} else {
			return(EINVAL);
		}
	case HW_PAGESIZE:
	{
		/* Page size as seen by the calling task's VM map. */
		vm_map_t map = get_task_map(current_task());
		val = vm_map_page_size(map);
		qval = (long long)val;
		break;
	}
	case HW_CACHELINE:
		val = cpu_info.cache_line_size;
		qval = (long long)val;
		break;
	case HW_L1ICACHESIZE:
		val = cpu_info.l1_icache_size;
		qval = (long long)val;
		break;
	case HW_L1DCACHESIZE:
		val = cpu_info.l1_dcache_size;
		qval = (long long)val;
		break;
	case HW_L2CACHESIZE:
		/* 0xFFFFFFFF marks "no such cache" in ml_cpu_info_t. */
		if (cpu_info.l2_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		val = cpu_info.l2_cache_size;
		qval = (long long)val;
		break;
	case HW_L3CACHESIZE:
		if (cpu_info.l3_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		val = cpu_info.l3_cache_size;
		qval = (long long)val;
		break;

		/*
		 * Deprecated variables.  We still support these for
		 * backwards compatibility purposes only.
		 */
	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if(!PEGetMachineName(dummy,64))
			return(EINVAL);
		dummy[64] = 0;	/* dummy is 65 bytes: always NUL-terminated */
		return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1));
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if(!PEGetModelName(dummy,64))
			return(EINVAL);
		dummy[64] = 0;
		return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1));
	case HW_USERMEM:
	{
		/* Non-wired memory; relies on kernel globals mem_size,
		 * vm_page_wire_count and page_size. */
		int usermem = mem_size - vm_page_wire_count * page_size;

		return(SYSCTL_RETURN(req, usermem));
	}
	case HW_EPOCH:
		epochTemp = PEGetPlatformEpoch();
		if (epochTemp == -1)
			return(EINVAL);
		return(SYSCTL_RETURN(req, epochTemp));
	case HW_VECTORUNIT:
	{
		/* Collapse the reported value to a 0/1 flag. */
		int vector = cpu_info.vector_unit == 0? 0 : 1;
		return(SYSCTL_RETURN(req, vector));
	}
	case HW_L2SETTINGS:
		if (cpu_info.l2_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		return(SYSCTL_RETURN(req, cpu_info.l2_settings));
	case HW_L3SETTINGS:
		if (cpu_info.l3_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		return(SYSCTL_RETURN(req, cpu_info.l3_settings));
	default:
		return(ENOTSUP);
	}
	/*
	 * Callers may come to us with either int or quad buffers.
	 */
	if (doquad) {
		return(SYSCTL_RETURN(req, qval));
	}
	return(SYSCTL_RETURN(req, val));
}
/*
 * Read handler for the /proc/osfmach3 entries: formats the requested
 * kernel statistic into a freshly allocated page and copies the slice
 * at file->f_pos back to the user buffer.  Returns the number of bytes
 * transferred, 0 at EOF, or a negative errno.
 */
static int
proc_readosfmach3(
	struct inode	*inode,
	struct file	*file,
	char		*buf,
	int		count)
{
	char * page;
	int length;
	unsigned int ino;
	kern_return_t kr;

	if (count < 0)
		return -EINVAL;
	/* One page of scratch space for the formatted text. */
	if (!(page = (char*) __get_free_page(GFP_KERNEL)))
		return -ENOMEM;
	ino = inode->i_ino;	/* inode number selects which entry this is */
	switch (ino) {
	    case PROC_OSFMACH3_VERSION:
		kr = host_kernel_version(host_port, page);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(2, kr,
				    ("proc_readosfmach3: host_kernel_version"));
		}
		/* NOTE(review): on failure the page contents are
		 * uninitialized, yet strlen(page) is still taken below —
		 * looks like this relies on the call succeeding; verify. */
		length = strlen(page);
		break;
	    case PROC_OSFMACH3_VM_STATISTICS:
	    {
		extern struct vm_statistics osfmach3_vm_stats;

		osfmach3_update_vm_info();
		length = sprintf(page,
				 "free_count      : %d\n"
				 "active_count    : %d\n"
				 "inactive_count  : %d\n"
				 "wire_count      : %d\n"
				 "zero_fill_count : %d\n"
				 "reactivations   : %d\n"
				 "pageins         : %d\n"
				 "pageouts        : %d\n"
				 "faults          : %d\n"
				 "cow_faults      : %d\n"
				 "lookups         : %d\n"
				 "hits            : %d\n",
				 osfmach3_vm_stats.free_count,
				 osfmach3_vm_stats.active_count,
				 osfmach3_vm_stats.inactive_count,
				 osfmach3_vm_stats.wire_count,
				 osfmach3_vm_stats.zero_fill_count,
				 osfmach3_vm_stats.reactivations,
				 osfmach3_vm_stats.pageins,
				 osfmach3_vm_stats.pageouts,
				 osfmach3_vm_stats.faults,
				 osfmach3_vm_stats.cow_faults,
				 osfmach3_vm_stats.lookups,
				 osfmach3_vm_stats.hits);
		break;
	    }
	    case PROC_OSFMACH3_HOST_SCHED_INFO:
	    {
		struct host_sched_info sched_info;
		/* local count (in bytes) shadows the parameter deliberately */
		int count = sizeof(sched_info);

		kr = host_info(host_port, HOST_SCHED_INFO,
			       (host_info_t)&sched_info, &count);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(2, kr, ("proc_readosfmach3: host_info"));
		}
		/* NOTE(review): sched_info is read below even when
		 * host_info() failed — confirm this is acceptable here. */
		length = sprintf(page,
				 "min_timeout : %d ms\n"
				 "min_quantum : %d ms\n",
				 sched_info.min_timeout,
				 sched_info.min_quantum);
		break;
	    }
	    case PROC_OSFMACH3_HOST_BASIC_INFO:
	    {
		struct host_basic_info basic_info;
		int count = sizeof(basic_info);

		kr = host_info(host_port, HOST_BASIC_INFO,
			       (host_info_t)&basic_info, &count);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(2, kr, ("proc_readosfmach3: host_info"));
		}
		length = sprintf(page,
				 "max_cpus    : %d\n"
				 "avail_cpus  : %d\n"
				 "memory_size : %d Mb\n",
				 basic_info.max_cpus,
				 basic_info.avail_cpus,
				 basic_info.memory_size / (1024 * 1024));
		break;
	    }
	    default:
		free_page((unsigned long) page);
		return -EBADF;
	}
	/* Past end of the formatted text: report EOF. */
	if (file->f_pos >= length) {
		free_page((unsigned long) page);
		return 0;
	}
	/* Clamp the read so it does not run past the formatted text. */
	if (count + file->f_pos > length)
		count = length - file->f_pos;
	/*
	 * Copy the bytes
	 */
	memcpy_tofs(buf, page + file->f_pos, count);
	free_page((unsigned long) page);
	file->f_pos += count;		/* Move down the file */
	return count;			/* all transfered correctly */
}
/*
 * hostinfo-style utility entry point: prints the Mach kernel version,
 * processor configuration, physical memory size, and load statistics
 * for the local host.
 *
 * NOTE(review): relies on file-scope globals `hi` (host_basic_info)
 * and `version` (kernel version buffer) declared elsewhere in this
 * file — confirm their definitions.
 */
int
main(int argc, char *argv[])
{
	kern_return_t			ret;
	unsigned int			size, count;
	char				*cpu_name, *cpu_subname;
	int				i;
	int				mib[2];
	size_t				len;
	uint64_t			memsize;
	processor_set_name_port_t	default_pset;
	host_name_port_t		host;
	struct processor_set_basic_info	basic_info;
	struct processor_set_load_info	load_info;

	host = mach_host_self();
	ret = host_kernel_version(host, version);
	if (ret != KERN_SUCCESS) {
		mach_error(argv[0], ret);
		exit(EXIT_FAILURE);
	}
	printf("Mach kernel version:\n\t %s\n", version);

	/* host_info() wants the buffer size in units of int. */
	size = sizeof(hi)/sizeof(int);
	ret = host_info(host, HOST_BASIC_INFO, (host_info_t)&hi, &size);
	if (ret != KERN_SUCCESS) {
		mach_error(argv[0], ret);
		exit(EXIT_FAILURE);
	}

	ret = processor_set_default(host, &default_pset);
	if (ret != KERN_SUCCESS) {
		mach_error(argv[0], ret);
		exit(EXIT_FAILURE);
	}

	count = PROCESSOR_SET_BASIC_INFO_COUNT;
	/* &host doubles as the returned owning-host port here. */
	ret = processor_set_info(default_pset, PROCESSOR_SET_BASIC_INFO,
				 &host, (processor_set_info_t)&basic_info, &count);
	if (ret != KERN_SUCCESS) {
		mach_error(argv[0], ret);
		exit(EXIT_FAILURE);
	}

	count = PROCESSOR_SET_LOAD_INFO_COUNT;
	ret = processor_set_statistics(default_pset, PROCESSOR_SET_LOAD_INFO,
				       (processor_set_info_t)&load_info, &count);
	if (ret != KERN_SUCCESS) {
		mach_error(argv[0], ret);
		exit(EXIT_FAILURE);
	}

	/* Physical memory size comes from sysctl, not host_info. */
	mib[0] = CTL_HW;
	mib[1] = HW_MEMSIZE;
	len = sizeof(memsize);
	memsize = 0L;
	if(sysctl(mib, 2, &memsize, &len, NULL, 0 ) == -1) {
		perror("sysctl");
		exit(EXIT_FAILURE);
	}

	if (hi.max_cpus > 1)
		printf("Kernel configured for up to %d processors.\n",
			hi.max_cpus);
	else
		printf("Kernel configured for a single processor only.\n");
	printf("%d processor%s physically available.\n", hi.physical_cpu,
		(hi.physical_cpu > 1) ? "s are" : " is");
	printf("%d processor%s logically available.\n", hi.logical_cpu,
		(hi.logical_cpu > 1) ? "s are" : " is");
	printf("Processor type:");
	slot_name(hi.cpu_type, hi.cpu_subtype, &cpu_name, &cpu_subname);
	printf(" %s (%s)\n", cpu_name, cpu_subname);
	printf("Processor%s active:", (hi.avail_cpus > 1) ? "s" : "");
	for (i = 0; i < hi.avail_cpus; i++)
		printf(" %d", i);
	printf("\n");
	/* Report in GB when >= 1 GB, otherwise MB. */
	if (((float)memsize / (1024.0 * 1024.0)) >= 1024.0)
		printf("Primary memory available: %.2f gigabytes\n",
			(float)memsize/(1024.0*1024.0*1024.0));
	else
		printf("Primary memory available: %.2f megabytes\n",
			(float)memsize/(1024.0*1024.0));
	printf("Default processor set: %d tasks, %d threads, %d processors\n",
		load_info.task_count, load_info.thread_count, basic_info.processor_count);
	/* Fixed-point values scaled by LOAD_SCALE; print as X.YY. */
	printf("Load average: %d.%02d, Mach factor: %d.%02d\n",
		load_info.load_average/LOAD_SCALE,
		(load_info.load_average%LOAD_SCALE)/10,
		load_info.mach_factor/LOAD_SCALE,
		(load_info.mach_factor%LOAD_SCALE)/10);
	exit(0);
}