/**
 * @brief Retrieves the CPU usage statistics for the domain.
 *
 * Allocates a typed-parameter array sized by the domain's CPU count and
 * fills it via virDomainGetCPUStats().  The array is freed before
 * returning (the original implementation leaked it).
 *
 * @return true if the CPU statistics were retrieved without error,
 *         otherwise false.
 */
bool Domain::cpustats()
{
    unsigned int nparams = cpu_count();
    virTypedParameterPtr params = static_cast<virTypedParameterPtr>(
        calloc(nparams, sizeof(virTypedParameter)));
    if (params == nullptr)
        return false;  // allocation failure

    int start_cpu = 0;              // start querying at the first CPU
    unsigned int ncpus = cpu_count();
    unsigned int flags = 0;

    // virDomainGetCPUStats() returns the number of parameters filled per
    // CPU on success and -1 on failure, so success is rc >= 0.  The old
    // "== 0" comparison treated most successful calls as errors.
    // NOTE(review): nparams is presumably the per-CPU parameter capacity;
    // confirm the array is large enough (libvirt expects nparams * ncpus
    // entries for per-CPU queries).
    int rc = virDomainGetCPUStats(m_domain, params, nparams, start_cpu,
                                  ncpus, flags);

    // Free the array itself; string-valued params (if any were returned)
    // would additionally need virTypedParamsClear().
    free(params);
    return rc >= 0;
}
/// Registers every UCI engine option in the options map with its default
/// value, allowed range and, where needed, the callback fired when the GUI
/// changes it.  NOTE(review): insertion order appears significant (options
/// are presumably listed to the GUI in registration order) — preserve it.
void init(OptionsMap& o) {

  // Default thread count: all detected cores, capped at the build limit.
  int cpus = std::min(cpu_count(), MAX_THREADS);
  // Minimum depth at which the search may split work across threads;
  // use a shallower split point on machines with fewer than 8 cores.
  int msd = cpus < 8 ? 4 : 7;

  // Logging / book configuration
  o["Use Debug Log"] = Option(false, on_logger);
  o["Use Search Log"] = Option(false);
  o["Search Log Filename"] = Option("SearchLog.txt");
  o["Book File"] = Option("book.bin");
  o["Best Book Move"] = Option(false);

  // Evaluation weights (100 = nominal; on_eval re-reads them on change)
  o["Contempt Factor"] = Option(0, -50, 50);
  o["Mobility (Middle Game)"] = Option(100, 0, 200, on_eval);
  o["Mobility (Endgame)"] = Option(100, 0, 200, on_eval);
  o["Passed Pawns (Middle Game)"] = Option(100, 0, 200, on_eval);
  o["Passed Pawns (Endgame)"] = Option(100, 0, 200, on_eval);
  o["Space"] = Option(100, 0, 200, on_eval);

  // Parallel-search configuration (on_threads reconfigures the pool)
  o["Min Split Depth"] = Option(msd, 4, 7, on_threads);
  o["Max Threads per Split Point"] = Option(5, 4, 8, on_threads);
  o["Threads"] = Option(cpus, 1, MAX_THREADS, on_threads);
  o["Use Sleeping Threads"] = Option(true, on_threads);

  // Transposition table (size in MB)
  o["Hash"] = Option(32, 4, 8192, on_hash_size);
  o["Clear Hash"] = Option(on_clear_hash);

  // Standard UCI behaviour switches
  o["Ponder"] = Option(true);
  o["OwnBook"] = Option(false);
  o["MultiPV"] = Option(1, 1, 500);
  o["Skill Level"] = Option(20, 0, 20);

  // Time-management tuning (values in moves or milliseconds)
  o["Emergency Move Horizon"] = Option(40, 0, 50);
  o["Emergency Base Time"] = Option(200, 0, 30000);
  o["Emergency Move Time"] = Option(70, 0, 5000);
  o["Minimum Thinking Time"] = Option(20, 0, 5000);
  o["Slow Mover"] = Option(100, 10, 1000);

  // Variant / analysis modes
  o["UCI_Chess960"] = Option(false);
  o["UCI_AnalyseMode"] = Option(false, on_eval);
}
/*{{{ void ccsp_start_threads (void)*/
/*
 * Spins up one additional scheduler thread per remaining CPU.  The
 * calling thread already counts as the first one, so cpu_count() - 1
 * new threads are created.
 */
void ccsp_start_threads (void)
{
	int remaining;

	/* one thread is already running, so start cpu_count() - 1 more */
	for (remaining = cpu_count () - 1; remaining != 0; remaining--) {
		ccsp_new_thread ();
	}
}
/*
 * Test driver for the KVM paravirtual clock.
 * Usage: <prog> [loops] [wallclock-seconds] [threshold]
 * Runs wallclock and cycle-stability/performance tests on every CPU and
 * returns 1 if any test reported an error, 0 otherwise.
 * NOTE(review): loops, sec and threshold are assigned without local
 * declarations here — presumably file-scope globals; confirm.
 */
int main(int ac, char **av)
{
    int nerr = 0;
    int ncpus;
    int i;

    /* Optional positional overrides for the test parameters. */
    if (ac > 1)
        loops = atol(av[1]);
    if (ac > 2)
        sec = atol(av[2]);
    if (ac > 3)
        threshold = atol(av[3]);

    smp_init();

    /* Clamp to the number of CPUs the harness can drive. */
    ncpus = cpu_count();
    if (ncpus > MAX_CPU)
        ncpus = MAX_CPU;
    /* Initialize the kvmclock on every CPU before testing. */
    for (i = 0; i < ncpus; ++i)
        on_cpu(i, kvm_clock_init, (void *)0);

    /* Wallclock test only runs when the host reference time was given. */
    if (ac > 2) {
        printf("Wallclock test, threshold %ld\n", threshold);
        printf("Seconds get from host: %ld\n", sec);
        for (i = 0; i < ncpus; ++i)
            on_cpu(i, wallclock_test, &nerr);
    }

    /* Raw cycle: informational only — instability is reported, not counted. */
    printf("Check the stability of raw cycle ...\n");
    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT | PVCLOCK_RAW_CYCLE_BIT);
    if (cycle_test(ncpus, 1, &ti[0]))
        printf("Raw cycle is not stable\n");
    else
        printf("Raw cycle is stable\n");

    /* Adjusted (monotonic) cycle: failures count toward the exit status. */
    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
    printf("Monotonic cycle test:\n");
    nerr += cycle_test(ncpus, 1, &ti[1]);

    /* Performance measurements — results go into ti[], errors ignored. */
    printf("Measure the performance of raw cycle ...\n");
    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT | PVCLOCK_RAW_CYCLE_BIT);
    cycle_test(ncpus, 0, &ti[2]);

    printf("Measure the performance of adjusted cycle ...\n");
    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
    cycle_test(ncpus, 0, &ti[3]);

    /* Tear down the kvmclock on every CPU. */
    for (i = 0; i < ncpus; ++i)
        on_cpu(i, kvm_clock_clear, (void *)0);

    return nerr > 0 ? 1 : 0;
}
/*
 * Reports the number of processors present on this machine.
 * Returns 0 on success.
 */
int main()
{
    /* Number of available cores, as reported by cpu_count(). */
    long numb_cores = cpu_count();
    printf("%ld processors present\n", numb_cores);

    /* if (mthread(numb_cores)) ...  -- left disabled in the original */

    /* Fixed: the original ended with a stray ";" after the closing brace
     * (invalid in strict C) and had no explicit return. */
    return 0;
}
/*
 * Number of worker threads to use: honours the STRM_THREAD_MAX
 * environment variable when it holds a positive integer, otherwise
 * falls back to the number of CPUs reported by cpu_count().
 */
static int thread_count()
{
    const char *override = getenv("STRM_THREAD_MAX");

    if (override != NULL) {
        int requested = atoi(override);
        if (requested > 0)
            return requested;   /* explicit override wins */
    }
    return cpu_count();
}
/*
 * Test driver for the KVM paravirtual clock.
 * Usage: <prog> [loops] [wallclock-seconds] [threshold]
 * Runs wallclock and cycle-stability/performance tests on every CPU and
 * returns 1 if any test reported an error, 0 otherwise.
 */
int main(int ac, char **av)
{
    int ncpus;
    int nerr = 0, i;
    long loops = DEFAULT_TEST_LOOPS;
    long sec = 0;
    long threshold = DEFAULT_THRESHOLD;

    /* Optional positional overrides for the test parameters. */
    if (ac > 1)
        loops = atol(av[1]);
    if (ac > 2)
        sec = atol(av[2]);
    if (ac > 3)
        threshold = atol(av[3]);

    smp_init();

    /* Clamp to the number of CPUs the harness can drive. */
    ncpus = cpu_count();
    if (ncpus > MAX_CPU)
        ncpus = MAX_CPU;
    /* Initialize the kvmclock on every CPU before testing. */
    for (i = 0; i < ncpus; ++i)
        on_cpu(i, kvm_clock_init, (void *)0);

    /* Wallclock test only runs when the host reference time was given. */
    if (ac > 2)
        nerr += wallclock_test(sec, threshold);

    /* Raw cycle: informational only — instability is reported, not counted. */
    printf("Check the stability of raw cycle ...\n");
    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT | PVCLOCK_RAW_CYCLE_BIT);
    if (cycle_test(ncpus, loops, 1, &ti[0]))
        printf("Raw cycle is not stable\n");
    else
        printf("Raw cycle is stable\n");

    /* Adjusted (monotonic) cycle: failures count toward the exit status. */
    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
    printf("Monotonic cycle test:\n");
    nerr += cycle_test(ncpus, loops, 1, &ti[1]);

    /* Performance measurements — results go into ti[], errors ignored. */
    printf("Measure the performance of raw cycle ...\n");
    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT | PVCLOCK_RAW_CYCLE_BIT);
    cycle_test(ncpus, loops, 0, &ti[2]);

    printf("Measure the performance of adjusted cycle ...\n");
    pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
    cycle_test(ncpus, loops, 0, &ti[3]);

    /* Tear down the kvmclock on every CPU. */
    for (i = 0; i < ncpus; ++i)
        on_cpu(i, kvm_clock_clear, (void *)0);

    return nerr > 0 ? 1 : 0;
}
/*{{{ unsigned int ccsp_spin_us (void)*/
/*
 * Scheduler spin time in microseconds.  Returns 0 on uniprocessor
 * machines, the value of the CCSP_SCHEDULER_SPIN environment variable
 * when set to a non-negative number, and 16 otherwise.
 */
unsigned int ccsp_spin_us (void)
{
	char *setting;

	if (cpu_count () < 2) {
		/* only one CPU: spinning can never help */
		return 0;
	}

	setting = getenv ("CCSP_SCHEDULER_SPIN");
	if (setting != NULL) {
		long requested = strtol (setting, NULL, 10);
		if (requested >= 0) {
			return (unsigned int) requested;
		}
	}

	/* default spin time */
	return 16;
}
void init_threads(int mt) { max_threads=mt-1; if(max_threads<0) { max_threads=cpu_count()-1; } if(max_threads<0) max_threads=0; thread_id=do_alloc(max_threads, sizeof(*thread_id)); num_threads=0; threads_started=0; thread_cond_init(&thread_not_needed); thread_mutex_init(&thread_num_mutex); set_concurrency(1); fprintf(stderr, "maximum threads: %d\n", max_threads+1); fprintf(LOG, "maximum threads: %d\n", max_threads+1); }
/*
 * Computes per-CPU (or aggregate, when percpu is false) utilisation
 * percentages relative to a previously captured snapshot.
 *
 * prev_times: reference snapshot from cpu_times(); must not be NULL.
 * Returns a malloc'd array of ncpus doubles (caller frees), or NULL on
 * error.  Fixed: the original leaked `percentage` on the error path and
 * never checked the calloc result.
 */
double *cpu_util_percent(bool percpu, CpuTimes *prev_times)
{
    CpuTimes *current = NULL;
    double *percentage = NULL;
    int i, ncpus = percpu ? cpu_count(1) : 1;

    check(prev_times, "Need a reference point. prev_times can't be NULL");

    percentage = (double *)calloc(ncpus, sizeof(double));
    check(percentage, "Couldn't allocate utilisation array");

    current = cpu_times(percpu);
    check(current, "Couldn't obtain CPU times");

    for (i = 0; i < ncpus; i++) {
        percentage[i] =
            calculate_cpu_util_percentage(prev_times + i, current + i);
    }
    free(current);
    return percentage;
error:
    free(current);
    free(percentage);  /* was leaked on this path in the original */
    return NULL;
}
/*
 * Identity-maps the MMIO base addresses of every I/O APIC and every
 * CPU's local APIC into the virtual address space, so APIC registers
 * can be accessed after paging is enabled.
 */
void apic_map(void)
{
    uint i;
    /* Supervisor-only, writable mappings for device registers.
     * NOTE(review): no cache-disable bit is set here — confirm vmm_map()
     * handles MMIO cacheability elsewhere. */
    struct vmm_flags flags = {.present = 1, .writeable = 1, .privileged = 1};

    /* map each lapic and ioapic base addr */
    for(i=0; i<IO_APIC_NUM; i++) {
        /* A zero base address marks the end of the populated table. */
        if(io_apic_tbl[i].base_addr == 0) {
            //*(byte *)(io_apic_tbl[i].base_addr) = '0';
            break;
        }
        /* identity map: virtual == physical */
        vmm_map(flags, io_apic_tbl[i].base_addr, io_apic_tbl[i].base_addr);
    }

    /* One local APIC per CPU; map each at its reported base. */
    for(i=0; i<cpu_count(); i++) {
        uint lapic_base = cpu_get(i)->lapic_base;
        vmm_map(flags, lapic_base, lapic_base);
    }
}
/*
 * Builds per-CPU (or aggregate, when percpu is false) CpuTimes deltas
 * expressed as percentages relative to a previous snapshot.
 *
 * prev_times: reference snapshot from cpu_times(); must not be NULL.
 * Returns a calloc'd array of ncpus CpuTimes (caller frees), or NULL on
 * error.
 */
CpuTimes *cpu_times_percent(bool percpu, CpuTimes *prev_times)
{
    CpuTimes *snapshot = NULL;
    CpuTimes *result;
    int idx;
    int total = percpu ? cpu_count(1) : 1;

    check(prev_times, "Need a reference point. prev_times can't be NULL");

    snapshot = cpu_times(percpu);
    check(snapshot, "Couldn't obtain CPU times");

    result = (CpuTimes *)calloc(total, sizeof(CpuTimes));
    check_mem(result);

    for (idx = 0; idx < total; idx++) {
        /* helper returns a heap-allocated delta; copy it out and free it */
        CpuTimes *delta = calculate_cpu_times_percentage(prev_times + idx,
                                                         snapshot + idx);
        result[idx] = *delta;
        free(delta);
    }

    free(snapshot);
    return result;

error:
    free(snapshot);
    return NULL;
}
/*
 * Parses the server's command-line options into file-scope configuration
 * globals (srv_socket/sk_count, vp_count, logdir, thread limits, username,
 * listenq_size, flags).  Exits via err_quit()/usage() on invalid input and
 * applies defaults (one process per CPU, bind to 0.0.0.0) afterwards.
 */
static void parse_arguments(int argc, char *argv[])
{
  extern char *optarg;
  int opt;
  char *c;

  while ((opt = getopt(argc, argv, "b:p:l:t:u:q:aiSh")) != EOF) {
    switch (opt) {
    case 'b':
      /* -b addr: add a bind address (duplicated, since optarg is reused) */
      if (sk_count >= MAX_BIND_ADDRS)
        err_quit(errfd, "ERROR: max number of bind addresses (%d) exceeded",
                 MAX_BIND_ADDRS);
      if ((c = strdup(optarg)) == NULL)
        err_sys_quit(errfd, "ERROR: strdup");
      srv_socket[sk_count++].addr = c;
      break;
    case 'p':
      /* -p n: number of server processes (must be >= 1) */
      vp_count = atoi(optarg);
      if (vp_count < 1)
        err_quit(errfd, "ERROR: invalid number of processes: %s", optarg);
      break;
    case 'l':
      /* -l dir: logging directory */
      logdir = optarg;
      break;
    case 't':
      /* -t wait[:max]: waiting-thread count, optionally ":"-separated
       * from the maximum thread count */
      max_wait_threads = (int) strtol(optarg, &c, 10);
      if (*c++ == ':')
        max_threads = atoi(c);
      if (max_wait_threads < 0 || max_threads < 0)
        err_quit(errfd, "ERROR: invalid number of threads: %s", optarg);
      break;
    case 'u':
      /* -u name: user to run as */
      username = optarg;
      break;
    case 'q':
      /* -q n: listen(2) backlog size (must be >= 1) */
      listenq_size = atoi(optarg);
      if (listenq_size < 1)
        err_quit(errfd, "ERROR: invalid listen queue size: %s", optarg);
      break;
    case 'a':
      /* -a: enable access logging */
      log_access = 1;
      break;
    case 'i':
      /* -i: run in the foreground (interactive) */
      interactive_mode = 1;
      break;
    case 'S':
      /*
       * Serialization decision is tricky on some platforms. For example,
       * Solaris 2.6 and above has kernel sockets implementation, so supposedly
       * there is no need for serialization. The ST library may be compiled
       * on one OS version, but used on another, so the need for serialization
       * should be determined at run time by the application. Since it's just
       * an example, the serialization decision is left up to user.
       * Only on platforms where the serialization is never needed on any OS
       * version st_netfd_serialize_accept() is a no-op.
       */
      serialize_accept = 1;
      break;
    case 'h':
    case '?':
      usage(argv[0]);
    }
  }

  /* A log directory is mandatory unless running interactively. */
  if (logdir == NULL && !interactive_mode) {
    err_report(errfd, "ERROR: logging directory is required\n");
    usage(argv[0]);
  }
  if (getuid() == 0 && username == NULL)
    err_report(errfd, "WARNING: running as super-user!");

  /* Defaults: one process per CPU (at least one), bind to all interfaces. */
  if (vp_count == 0 && (vp_count = cpu_count()) < 1)
    vp_count = 1;
  if (sk_count == 0) {
    sk_count = 1;
    srv_socket[0].addr = "0.0.0.0";
  }
}
/*
 * This program acts as a generic gateway. It listens for connections
 * to a local address ('-l' option). Upon accepting a client connection,
 * it connects to the specified remote address ('-r' option) and then
 * just pumps the data through without any modification.
 */
int main(int argc, char *argv[])
{
  extern char *optarg;
  int opt, sock, n;
  int laddr, raddr, num_procs;
  int serialize_accept = 0;
  struct sockaddr_in lcl_addr, cli_addr;
  st_netfd_t cli_nfd, srv_nfd;

  prog = argv[0];
  num_procs = laddr = raddr = 0;

  /* Parse arguments */
  while((opt = getopt(argc, argv, "l:r:p:Sh")) != EOF) {
    switch (opt) {
    case 'l':
      /* -l [host]:port — local address to listen on */
      read_address(optarg, &lcl_addr);
      laddr = 1;
      break;
    case 'r':
      /* -r host:port — remote address to forward to (wildcard rejected) */
      read_address(optarg, &rmt_addr);
      if (rmt_addr.sin_addr.s_addr == INADDR_ANY) {
        fprintf(stderr, "%s: invalid remote address: %s\n", prog, optarg);
        exit(1);
      }
      raddr = 1;
      break;
    case 'p':
      /* -p n — number of server processes (must be >= 1) */
      num_procs = atoi(optarg);
      if (num_procs < 1) {
        fprintf(stderr, "%s: invalid number of processes: %s\n", prog,
                optarg);
        exit(1);
      }
      break;
    case 'S':
      /*
       * Serialization decision is tricky on some platforms. For example,
       * Solaris 2.6 and above has kernel sockets implementation, so supposedly
       * there is no need for serialization. The ST library may be compiled
       * on one OS version, but used on another, so the need for serialization
       * should be determined at run time by the application. Since it's just
       * an example, the serialization decision is left up to user.
       * Only on platforms where the serialization is never needed on any OS
       * version st_netfd_serialize_accept() is a no-op.
       */
      serialize_accept = 1;
      break;
    case 'h':
    case '?':
      fprintf(stderr, "Usage: %s -l <[host]:port> -r <host:port> "
              "[-p <num_processes>] [-S]\n", prog);
      exit(1);
    }
  }

  /* Both addresses are mandatory. */
  if (!laddr) {
    fprintf(stderr, "%s: local address required\n", prog);
    exit(1);
  }
  if (!raddr) {
    fprintf(stderr, "%s: remote address required\n", prog);
    exit(1);
  }

  /* Default process count: one per CPU. */
  if (num_procs == 0)
    num_procs = cpu_count();

  fprintf(stderr, "%s: starting proxy daemon on %s:%d\n", prog,
          inet_ntoa(lcl_addr.sin_addr), ntohs(lcl_addr.sin_port));

  /* Start the daemon */
  start_daemon();

  /* Initialize the ST library */
  if (st_init() < 0) {
    print_sys_error("st_init");
    exit(1);
  }

  /* Create and bind listening socket */
  if ((sock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
    print_sys_error("socket");
    exit(1);
  }
  n = 1;
  /* Allow quick restarts of the daemon on the same port. */
  if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&n, sizeof(n)) < 0) {
    print_sys_error("setsockopt");
    exit(1);
  }
  if (bind(sock, (struct sockaddr *)&lcl_addr, sizeof(lcl_addr)) < 0) {
    print_sys_error("bind");
    exit(1);
  }
  listen(sock, 128);
  if ((srv_nfd = st_netfd_open_socket(sock)) == NULL) {
    print_sys_error("st_netfd_open");
    exit(1);
  }
  /* See the comment regarding serialization decision above */
  if (num_procs > 1 && serialize_accept &&
      st_netfd_serialize_accept(srv_nfd) < 0) {
    print_sys_error("st_netfd_serialize_accept");
    exit(1);
  }

  /* Start server processes */
  set_concurrency(num_procs);

  /* Accept loop: one ST thread per client connection. */
  for ( ; ; ) {
    n = sizeof(cli_addr);
    cli_nfd = st_accept(srv_nfd, (struct sockaddr *)&cli_addr, &n, -1);
    if (cli_nfd == NULL) {
      print_sys_error("st_accept");
      exit(1);
    }
    if (st_thread_create(handle_request, cli_nfd, 0, 0) == NULL) {
      print_sys_error("st_thread_create");
      exit(1);
    }
  }

  /* NOTREACHED */
  return 1;
}
int topology_init(void) { struct topology_functions funcs = topology_funcs; if (topology_initialized) { return EXIT_SUCCESS; } if (init_configuration()) { ERROR_PLAIN_PRINT(Cannot initialize configuration module to check for topology file name); return EXIT_FAILURE; } if ((config.topologyCfgFileName == NULL) || access(config.topologyCfgFileName, R_OK)) { cpu_set_t cpuSet; CPU_ZERO(&cpuSet); sched_getaffinity(0,sizeof(cpu_set_t), &cpuSet); if (cpu_count(&cpuSet) < sysconf(_SC_NPROCESSORS_CONF)) { funcs.init_cpuInfo = proc_init_cpuInfo; funcs.init_cpuFeatures = proc_init_cpuFeatures; funcs.init_nodeTopology = proc_init_nodeTopology; funcs.init_cacheTopology = proc_init_cacheTopology; cpuid_topology.activeHWThreads = ((cpu_count(&cpuSet) < sysconf(_SC_NPROCESSORS_CONF)) ? cpu_count(&cpuSet) : sysconf(_SC_NPROCESSORS_CONF)); } else { cpuid_topology.activeHWThreads = sysconf(_SC_NPROCESSORS_CONF); } funcs.init_cpuInfo(cpuSet); topology_setName(); funcs.init_cpuFeatures(); funcs.init_nodeTopology(cpuSet); topology_setupTree(); funcs.init_cacheTopology(); sched_setaffinity(0, sizeof(cpu_set_t), &cpuSet); } else { cpu_set_t cpuSet; CPU_ZERO(&cpuSet); sched_getaffinity(0,sizeof(cpu_set_t), &cpuSet); DEBUG_PRINT(DEBUGLEV_INFO, Reading topology information from %s, config.topologyCfgFileName); readTopologyFile(config.topologyCfgFileName); cpuid_topology.activeHWThreads = 0; for (int i=0;i<cpuid_topology.numHWThreads;i++) { if (CPU_ISSET(cpuid_topology.threadPool[i].apicId, &cpuSet)) { cpuid_topology.activeHWThreads++; cpuid_topology.threadPool[i].inCpuSet = 1; } } topology_setName(); topology_setupTree(); } topology_initialized = 1; return EXIT_SUCCESS; }