static void generate_mass_array(data_t a[])
{
    data_t (*rand_next)();
    int dist, i;

    /* Target mean is 1/N; accept only samples within [m/2, 3m/2]. */
    m = mu = 1.0 / (data_t) N;
    min = m * 0.5;
    max = m * 1.5;

    dist = fixed_flag ? 2 : rand_func() % 3;
    switch (dist) {
    case 0:     /* uniform on [1/2N, 3/2N) */
        rand_next = rand_uniform;
        break;
    case 1:     /* normal N(1/N, 1/10N) */
        sigma = m * 1e-1;
        rand_next = rand_normal;
        break;
    default:    /* Laplace La(1/N, 1/10N) */
        b = m * 1e-1;
        rand_next = rand_laplace;
        break;
    }

    /* Rejection sampling: redraw until the value falls inside [min, max]. */
    for (i = 0; i < N; i++)
        do
            a[i] = rand_next();
        while (a[i] < min || a[i] > max);
}
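/*
 * A minimal sketch of the three generators selected above, assuming data_t is
 * a floating-point type and that min, max, mu, sigma and b are the file-scope
 * globals set by generate_mass_array. The real rand_uniform / rand_normal /
 * rand_laplace may be implemented differently; urand01 is a hypothetical
 * helper built on rand() from <stdlib.h>.
 */
#include <math.h>
#include <stdlib.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static double urand01(void)
{
    /* uniform sample in [0, 1) */
    return (double) rand() / ((double) RAND_MAX + 1.0);
}

static data_t rand_uniform(void)
{
    /* uniform on [min, max) */
    return (data_t) (min + (max - min) * urand01());
}

static data_t rand_normal(void)
{
    /* N(mu, sigma) via the Box-Muller transform */
    double u1 = urand01(), u2 = urand01();
    if (u1 <= 0.0)
        u1 = 1e-12;                 /* avoid log(0) */
    return (data_t) (mu + sigma * sqrt(-2.0 * log(u1)) * cos(2.0 * M_PI * u2));
}

static data_t rand_laplace(void)
{
    /* La(mu, b) via the inverse CDF: x = mu - b * sgn(u) * ln(1 - 2|u|) */
    double u = urand01() - 0.5;     /* u in [-0.5, 0.5) */
    return (data_t) (mu - b * (u < 0.0 ? -1.0 : 1.0) * log(1.0 - 2.0 * fabs(u)));
}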
int main(int argc, char **argv)
{
    int seed = (int) time(NULL);

    srand(seed);
    for (int i = 0; i < 5; i++) {
        printf("i: %d\n", rand_next(16));
    }
    return 0;
}
static int f_an_wait(char *s, char *s2)
{
    u32_t time;

    /* The first argument is either an encoded integer or a real string
       pointer; values below 0x20000000 are taken as a literal wait time. */
    if ((u32_t) s >= 0x20000000 && strcmp("rand", s) == 0) {
        if (_argc < 2) {
            time = rand_next() % 1000;
        } else {
            time = rand_next() % (u32_t) s2;
        }
    } else {
        time = (u32_t) s;
    }

    if (time > 20000)
        time = rand_next() % 20000;
    if (time == 0)
        time = 1;

    print("wait %i ms\n", time);
    annoy_wait(time);
    return 0;
}
static void
ensure_initialized (gboolean *enable_worker_tracking)
{
	ThreadPoolHillClimbing *hc;
	const char *threads_per_cpu_env;
	gint threads_per_cpu;
	gint threads_count;

	if (enable_worker_tracking) {
		// TODO implement some kind of switch to have the possibility to use it
		*enable_worker_tracking = FALSE;
	}

	if (status >= STATUS_INITIALIZED)
		return;
	if (status == STATUS_INITIALIZING || InterlockedCompareExchange (&status, STATUS_INITIALIZING, STATUS_NOT_INITIALIZED) != STATUS_NOT_INITIALIZED) {
		while (status == STATUS_INITIALIZING)
			mono_thread_info_yield ();
		g_assert (status >= STATUS_INITIALIZED);
		return;
	}

	g_assert (!threadpool);
	threadpool = g_new0 (ThreadPool, 1);
	g_assert (threadpool);

	threadpool->domains = g_ptr_array_new ();
	mono_mutex_init_recursive (&threadpool->domains_lock);

	threadpool->parked_threads = g_ptr_array_new ();
	mono_mutex_init (&threadpool->parked_threads_lock);

	threadpool->working_threads = g_ptr_array_new ();
	mono_mutex_init (&threadpool->working_threads_lock);

	threadpool->heuristic_adjustment_interval = 10;
	mono_mutex_init (&threadpool->heuristic_lock);

	mono_rand_open ();

	hc = &threadpool->heuristic_hill_climbing;

	hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
	hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
	hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
	hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
	hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
	hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
	hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
	hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
	hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
	hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
	hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
	hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
	hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
	hc->current_control_setting = 0;
	hc->total_samples = 0;
	hc->last_thread_count = 0;
	hc->average_throughput_noise = 0;
	hc->elapsed_since_last_change = 0;
	hc->accumulated_completion_count = 0;
	hc->accumulated_sample_duration = 0;
	hc->samples = g_new0 (gdouble, hc->samples_to_measure);
	hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
	hc->random_interval_generator = rand_create ();
	hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);

	if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
		threads_per_cpu = 1;
	else
		threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);

	threads_count = mono_cpu_count () * threads_per_cpu;

	threadpool->limit_worker_min = threadpool->limit_io_min = threads_count;
	threadpool->limit_worker_max = threadpool->limit_io_max = threads_count * 100;

	threadpool->counters._.max_working = threadpool->limit_worker_min;

	threadpool->cpu_usage_state = g_new0 (MonoCpuUsageState, 1);

	threadpool->suspended = FALSE;

	status = STATUS_INITIALIZED;
}
void
mono_threadpool_worker_init (MonoThreadPoolWorkerCallback callback)
{
	ThreadPoolHillClimbing *hc;
	const char *threads_per_cpu_env;
	gint threads_per_cpu;
	gint threads_count;

	mono_refcount_init (&worker, destroy);

	worker.callback = callback;

	mono_coop_mutex_init (&worker.parked_threads_lock);
	worker.parked_threads_count = 0;
	mono_coop_cond_init (&worker.parked_threads_cond);

	worker.worker_creation_current_second = -1;
	mono_coop_mutex_init (&worker.worker_creation_lock);

	worker.heuristic_adjustment_interval = 10;
	mono_coop_mutex_init (&worker.heuristic_lock);

	mono_rand_open ();

	hc = &worker.heuristic_hill_climbing;

	hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
	hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
	hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
	hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
	hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
	hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
	hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
	hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
	hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
	hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
	hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
	hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
	hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
	hc->current_control_setting = 0;
	hc->total_samples = 0;
	hc->last_thread_count = 0;
	hc->average_throughput_noise = 0;
	hc->elapsed_since_last_change = 0;
	hc->accumulated_completion_count = 0;
	hc->accumulated_sample_duration = 0;
	hc->samples = g_new0 (gdouble, hc->samples_to_measure);
	hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
	hc->random_interval_generator = rand_create ();
	hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);

	if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
		threads_per_cpu = 1;
	else
		threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);

	threads_count = mono_cpu_count () * threads_per_cpu;

	worker.limit_worker_min = threads_count;

#if defined (PLATFORM_ANDROID) || defined (HOST_IOS)
	worker.limit_worker_max = CLAMP (threads_count * 100, MIN (threads_count, 200), MAX (threads_count, 200));
#else
	worker.limit_worker_max = threads_count * 100;
#endif

	worker.counters._.max_working = worker.limit_worker_min;

	worker.cpu_usage_state = g_new0 (MonoCpuUsageState, 1);

	worker.suspended = FALSE;

	worker.monitor_status = MONITOR_STATUS_NOT_RUNNING;
}
static int f_rand()
{
    print("%08x\n", rand_next());
    return 0;
}