void * high_prio_thread(void *arg) { nsec_t high_start, high_end, high_get_lock; unsigned int i; stats_container_init(&cpu_delay_dat, iterations); stats_container_init(&cpu_delay_hist, HIST_BUCKETS); stats_quantiles_init(&cpu_delay_quantiles, (int)log10(iterations)); printf("High prio thread started\n"); for (i = 0; i < iterations; i++) { /* Wait for all threads to reach barrier wait. When woken up, low prio thread will own the mutex */ pthread_barrier_wait(&bar1); high_start = rt_gettime(); pthread_mutex_lock(&lock); high_end = rt_gettime(); high_get_lock = high_end - low_unlock; busy_work_ms(high_work_time); pthread_mutex_unlock(&lock); rec.x = i; rec.y = high_get_lock / NS_PER_US; stats_container_append(&cpu_delay_dat, rec); /* Wait for all threads to finish this iteration */ pthread_barrier_wait(&bar2); } stats_hist(&cpu_delay_hist, &cpu_delay_dat); stats_container_save("samples", "pi_perf Latency Scatter Plot", "Iteration", "Latency (us)", &cpu_delay_dat, "points"); stats_container_save("hist", "pi_perf Latency Histogram", "Latency (us)", "Samples", &cpu_delay_hist, "steps"); printf("Time taken for high prio thread to get the lock once released by low prio thread\n"); printf("Min delay = %ld us\n", stats_min(&cpu_delay_dat)); printf("Max delay = %ld us\n", stats_max(&cpu_delay_dat)); printf("Average delay = %4.2f us\n", stats_avg(&cpu_delay_dat)); printf("Standard Deviation = %4.2f us\n", stats_stddev(&cpu_delay_dat)); printf("Quantiles:\n"); stats_quantiles_calc(&cpu_delay_dat, &cpu_delay_quantiles); stats_quantiles_print(&cpu_delay_quantiles); max_pi_delay = stats_max(&cpu_delay_dat); return NULL; }
void test_signal(long iter, long nthreads) { int i; int j; int k; pthread_t *pt; unsigned long max = 0; unsigned long min = 0; stats_container_t dat; stats_record_t rec; stats_container_init(&dat,iter * nthreads); pt = (pthread_t *)malloc(sizeof(*pt) * nthreads); if (pt == NULL) { fprintf(stderr, "Out of memory\n"); exit(-1); } for (j = 0; j < nthreads; j++) { child_waiting[j] = 0; pt[j] = create_thread_(j); } for (i = 0; i < (iter - 1) * nthreads; i+=nthreads) { for (j = 0 , k = i; j < nthreads; j++ , k++) { wake_child(j, broadcast_flag); rec.x = k; rec.y = latency; stats_container_append(&dat, rec); pthread_mutex_lock(&child_mutex); child_waiting[j] = 0; pthread_mutex_unlock(&child_mutex); } } for (j = 0; j < nthreads; j++) { wake_child(j, broadcast_flag); pthread_mutex_lock(&child_mutex); child_waiting[j] = 3; pthread_mutex_unlock(&child_mutex); if (pthread_join(pt[j], NULL) != 0) { fprintf(stderr, "%d: ", j); perror("pthread_join"); exit(-1); } } min = (unsigned long)-1; for (i = 0; i < iter * nthreads; i++){ latency = dat.records[i].y; if (latency > PASS_US) fail = 1; min = MIN(min, latency); max = MAX(max, latency); } printf("Recording statistics...\n"); printf("Minimum: %lu us\n", min); printf("Maximum: %lu us\n", max); printf("Average: %f us\n", stats_avg(&dat)); printf("Standard Deviation: %f\n", stats_stddev(&dat)); }
void *thread_worker(void* arg) { struct timespec start, stop; int i; unsigned long long delta; unsigned long long min=-1, max=0; stats_container_t dat; stats_record_t rec; stats_container_init(&dat, NUMRUNS); for (i=0; i < NUMRUNS; i++) { do_work(1); /* warm cache */ /* do test */ clock_gettime(CLOCK_MONOTONIC, &start); do_work(NUMLOOPS); clock_gettime(CLOCK_MONOTONIC, &stop); /* calc delta, min and max */ delta = ts_sub(stop, start); if (delta < min) min = delta; if (delta> max) max = delta; rec.x = i; rec.y = delta; stats_container_append(&dat, rec); printf("delta: %llu ns\n", delta); usleep(1); /* let other things happen */ } printf("max jitter: "); print_unit(max - min); stats_container_save("samples", "Scheduling Jitter Scatter Plot",\ "Iteration", "Delay (ns)", &dat, "points"); return NULL; }
void * low_prio_thread(void *arg) { nsec_t low_start, low_hold; unsigned int i; stats_container_init(&low_dat, iterations); printf("Low prio thread started\n"); for (i = 0; i < iterations; i++) { pthread_mutex_lock(&lock); /* Wait for all threads to reach barrier wait. Since we already own the mutex, high prio thread will boost our priority. */ pthread_barrier_wait(&bar1); low_start = rt_gettime(); busy_work_ms(low_work_time); low_unlock = rt_gettime(); low_hold = low_unlock - low_start; pthread_mutex_unlock(&lock); rec.x = i; rec.y = low_hold / NS_PER_US; stats_container_append(&low_dat, rec); if (i == iterations-1) end = 1; /* Wait for all threads to finish this iteration */ pthread_barrier_wait(&bar2); } return NULL; }
/*
 * Run `iterations` periodic calc(loops) bursts with the given period,
 * recording each burst's execution time.  The test fails (returns non-zero)
 * if any period deadline is missed, either before the burst is scheduled or
 * because the burst overran the period.  Saves a scatter plot and a
 * histogram of the execution times and prints summary statistics.
 *
 * Fixes vs. previous version:
 *  - stats_hist() was never called, so the saved histogram was empty;
 *  - samples_filename leaked when the second asprintf() failed;
 *  - the second asprintf() error message wrongly said "samples".
 */
int periodic_thread(nsec_t period, int iterations, int loops)
{
	stats_container_t dat;
	stats_container_t hist;
	stats_quantiles_t quantiles;
	stats_record_t rec;
	int i = 0;
	int fail = 0;
	nsec_t next, now;
	nsec_t exe_start, exe_end, exe_time;
	char *samples_filename;
	char *hist_filename;

	stats_container_init(&dat, iterations);
	stats_container_init(&hist, HIST_BUCKETS);
	stats_quantiles_init(&quantiles, (int)log10(iterations));

	if (asprintf(&samples_filename, "%s-samples", filename_prefix) == -1) {
		fprintf(stderr, "Failed to allocate string for samples filename\n");
		return -1;
	}
	if (asprintf(&hist_filename, "%s-hist", filename_prefix) == -1) {
		fprintf(stderr, "Failed to allocate string for histogram filename\n");
		free(samples_filename);
		return -1;
	}

	next = rt_gettime();
	while (i < iterations) {
		next += period;
		now = rt_gettime();
		/* Deadline already passed before we even got scheduled. */
		if (now > next) {
			printf("Missed period, aborting (didn't get scheduled in time)\n");
			fail = 1;
			break;
		}
		exe_start = rt_gettime();
		calc(loops);
		exe_end = rt_gettime();
		exe_time = exe_end - exe_start;
		rec.x = i;
		rec.y = exe_time / NS_PER_US;
		stats_container_append(&dat, rec);
		i++;
		now = rt_gettime();
		/* The calculation itself overran the period. */
		if (now > next) {
			printf("Missed period, aborting (calc took too long)\n");
			fail = 1;
			break;
		}
		rt_nanosleep(next - now);
	}

	/* Bucket the samples before saving; previously hist was saved empty. */
	stats_hist(&hist, &dat);
	stats_container_save(samples_filename, "Periodic CPU Load Scatter Plot",
			     "Iteration", "Runtime (us)", &dat, "points");
	stats_container_save(hist_filename, "Periodic CPU Load Histogram",
			     "Runtime (us)", "Samples", &hist, "steps");

	printf("  Execution Time Statistics:\n");
	printf("Min: %ld us\n", stats_min(&dat));
	printf("Max: %ld us\n", stats_max(&dat));
	printf("Avg: %.4f us\n", stats_avg(&dat));
	printf("StdDev: %.4f us\n", stats_stddev(&dat));
	printf("Quantiles:\n");
	stats_quantiles_calc(&dat, &quantiles);
	stats_quantiles_print(&quantiles);
	printf("Criteria: no missed periods\n");
	printf("Result: %s\n", fail ? "FAIL" : "PASS");

	free(samples_filename);
	free(hist_filename);
	return fail;
}
int main(int argc, char *argv[]) { int per_id; setup(); pass_criteria = PASS_US; rt_init("d:l:ht:i:", parse_args, argc, argv); printf("-------------------------------\n"); printf("Scheduling Latency\n"); printf("-------------------------------\n\n"); if (load_ms*NS_PER_MS >= period-OVERHEAD) { printf("ERROR: load must be < period - %d us\n", OVERHEAD/NS_PER_US); exit(1); } if (iterations == 0) iterations = DEFAULT_ITERATIONS; if (iterations < MIN_ITERATIONS) { printf("Too few iterations (%d), use min iteration instead (%d)\n", iterations, MIN_ITERATIONS); iterations = MIN_ITERATIONS; } printf("Running %d iterations with a period of %llu ms\n", iterations, period/NS_PER_MS); printf("Periodic load duration: %d ms\n", load_ms); printf("Expected running time: %d s\n", (int)(iterations*((float)period / NS_PER_SEC))); if (stats_container_init(&dat, iterations)) exit(1); if (stats_container_init(&hist, HIST_BUCKETS)) { stats_container_free(&dat); exit(1); } /* use the highest value for the quantiles */ if (stats_quantiles_init(&quantiles, (int)log10(iterations))) { stats_container_free(&hist); stats_container_free(&dat); exit(1); } /* wait one quarter second to execute */ start = rt_gettime() + 250 * NS_PER_MS; per_id = create_fifo_thread(periodic_thread, (void*)0, PRIO); join_thread(per_id); join_threads(); printf("\nCriteria: latencies < %d us\n", (int)pass_criteria); printf("Result: %s\n", ret ? "FAIL" : "PASS"); stats_container_free(&dat); stats_container_free(&hist); stats_quantiles_free(&quantiles); return ret; }
int main(int argc, char *argv[]) { int i, j, k, err; unsigned long long delta; unsigned long long max, min; struct sched_param param; stats_container_t dat; stats_container_t hist; stats_quantiles_t quantiles; stats_record_t rec; struct timespec *start_data; struct timespec *stop_data; if (stats_cmdline(argc, argv) < 0) { printf("usage: %s help\n", argv[0]); exit(1); } if (iterations < MIN_ITERATION) { iterations = MIN_ITERATION; printf("user \"iterations\" value is too small (use: %d)\n", iterations); } stats_container_init(&dat, iterations); stats_container_init(&hist, HIST_BUCKETS); stats_quantiles_init(&quantiles, (int)log10(iterations)); setup(); mlockall(MCL_CURRENT | MCL_FUTURE); start_data = calloc(iterations, sizeof(struct timespec)); if (start_data == NULL) { printf("Memory allocation Failed (too many Iteration: %d)\n", iterations); exit(1); } stop_data = calloc(iterations, sizeof(struct timespec)); if (stop_data == NULL) { printf("Memory allocation Failed (too many Iteration: %d)\n", iterations); free(start_data); exit(1); } /* switch to SCHED_FIFO 99 */ param.sched_priority = sched_get_priority_max(SCHED_FIFO); err = sched_setscheduler(0, SCHED_FIFO, ¶m); /* Check that the user has the appropriate privileges */ if (err) { if (errno == EPERM) { fprintf(stderr, "This program runs with a scheduling policy of SCHED_FIFO at priority %d\n", param.sched_priority); fprintf(stderr, "You don't have the necessary privileges to create such a real-time process.\n"); } else { fprintf(stderr, "Failed to set scheduler, errno %d\n", errno); } exit(1); } printf("\n----------------------\n"); printf("Gettimeofday() Latency\n"); printf("----------------------\n"); printf("Iterations: %d\n\n", iterations); /* collect iterations pairs of gtod calls */ max = min = 0; if (latency_threshold) { latency_trace_enable(); latency_trace_start(); } /* This loop runs for a long time, hence can cause soft lockups. Calling sleep periodically avoids this. 
*/ for (i = 0; i < (iterations / 10000); i++) { for (j = 0; j < 10000; j++) { k = (i * 10000) + j; clock_gettime(CLOCK_MONOTONIC, &start_data[k]); clock_gettime(CLOCK_MONOTONIC, &stop_data[k]); } usleep(1000); } for (i = 0; i < iterations; i++) { delta = timespec_subtract(&start_data[i], &stop_data[i]); rec.x = i; rec.y = delta; stats_container_append(&dat, rec); if (i == 0 || delta < min) min = delta; if (delta > max) max = delta; if (latency_threshold && delta > latency_threshold) break; } if (latency_threshold) { latency_trace_stop(); if (i != iterations) { printf ("Latency threshold (%lluus) exceeded at iteration %d\n", latency_threshold, i); latency_trace_print(); stats_container_resize(&dat, i + 1); } } stats_hist(&hist, &dat); stats_container_save(filenames[SCATTER_FILENAME], titles[SCATTER_TITLE], labels[SCATTER_LABELX], labels[SCATTER_LABELY], &dat, "points"); stats_container_save(filenames[HIST_FILENAME], titles[HIST_TITLE], labels[HIST_LABELX], labels[HIST_LABELY], &hist, "steps"); /* report on deltas */ printf("Min: %llu ns\n", min); printf("Max: %llu ns\n", max); printf("Avg: %.4f ns\n", stats_avg(&dat)); printf("StdDev: %.4f ns\n", stats_stddev(&dat)); printf("Quantiles:\n"); stats_quantiles_calc(&dat, &quantiles); stats_quantiles_print(&quantiles); stats_container_free(&dat); stats_container_free(&hist); stats_quantiles_free(&quantiles); return 0; }
void *signal_receiving_thread(void *arg) { int i, ret, sig; long delta; long max, min; sigset_t set, oset; stats_container_t dat; stats_container_t hist; stats_quantiles_t quantiles; stats_record_t rec; stats_container_init(&dat, ITERATIONS); stats_container_init(&hist, HIST_BUCKETS); stats_quantiles_init(&quantiles, (int)log10(ITERATIONS)); debug(DBG_DEBUG, "Signal receiving thread running\n"); if ((sigaddset(&set, SIGNALNUMBER))) { perror("sigaddset:"); exit(1); } if ((ret = pthread_sigmask(SIG_BLOCK, &set, &oset))) { printf("pthread_sigmask returned %d\n", ret); exit(1); } /* Let the sending thread know that receiver is ready */ atomic_set(1, &flag); debug(DBG_DEBUG, "Signal receiving thread ready to receive\n"); if (latency_threshold) { latency_trace_enable(); latency_trace_start(); } /* Warm up */ for (i = 0; i < 5; i++) { sigwait(&set, &sig); atomic_set(1, &flag); } max = min = 0; fail = 0; debug(DBG_INFO, "\n\n"); for (i = 0; i < ITERATIONS; i++) { sigwait(&set, &sig); end = rt_gettime(); delta = (end - begin)/NS_PER_US; rec.x = i; rec.y = delta; stats_container_append(&dat, rec); if (i == 0 || delta < min) min = delta; if (delta > max) max = delta; if (delta > pass_criteria) fail++; debug(DBG_INFO, "Iteration %d: Took %ld us. 
Max = %ld us, " "Min = %ld us\n", i, delta, max, min); fflush(stdout); buffer_print(); if (latency_threshold && (delta > latency_threshold)) { atomic_set(2, &flag); break; } atomic_set(1, &flag); } if (latency_threshold) { latency_trace_stop(); if (i != ITERATIONS) { printf("Latency threshold (%luus) exceeded at iteration %d\n", latency_threshold, i); fflush(stdout); buffer_print(); latency_trace_print(); stats_container_resize(&dat, i + 1); } } stats_hist(&hist, &dat); stats_container_save("samples", "pthread_kill Latency Scatter Plot", "Iteration", "Latency (us)", &dat, "points"); stats_container_save("hist", "pthread_kill Latency Histogram", "Latency (us)", "Samples", &hist, "steps"); printf("\n"); printf("Min: %lu us\n", stats_min(&dat)); printf("Max: %lu us\n", stats_max(&dat)); printf("Avg: %.4f us\n", stats_avg(&dat)); printf("StdDev: %.4f us\n", stats_stddev(&dat)); printf("Quantiles:\n"); stats_quantiles_calc(&dat, &quantiles); stats_quantiles_print(&quantiles); printf("Failures: %d\n", fail); printf("Criteria: Time < %d us\n", (int)pass_criteria); printf("Result: %s", fail ? "FAIL" : "PASS"); printf("\n\n"); return NULL; }
int main(int argc, char *argv[]) { int i; setup(); rt_init("hi:", parse_args, argc, argv); if (iterations < 100) { fprintf(stderr, "Number of iteration cannot be less than 100.\n"); exit(1); } printf("------------------------------------\n"); printf("Periodic CPU Load Execution Variance\n"); printf("------------------------------------\n\n"); printf("Running %d iterations per thread\n", iterations); printf("Thread Group A:\n"); printf(" threads: %d\n", THREADS_PER_GROUP); printf(" priority: %d\n", PRIO_A); printf(" period: %d ms\n", PERIOD_A/NS_PER_MS); printf("Thread Group B:\n"); printf(" threads: %d\n", THREADS_PER_GROUP); printf(" priority: %d\n", PRIO_B); printf(" period: %d ms\n", PERIOD_B/NS_PER_MS); printf("Thread Group C:\n"); printf(" threads: %d\n", THREADS_PER_GROUP); printf(" priority: %d\n", PRIO_C); printf(" period: %d ms\n", PERIOD_C/NS_PER_MS); printf("\n"); for (i=0; i<(THREADS_PER_GROUP * NUM_GROUPS); i++) { stats_container_init(&dat[i], iterations); stats_quantiles_init(&quantiles[i], (int)log10(iterations)); } struct periodic_arg parg_a = {PERIOD_A, iterations, calc, (void *)CALC_LOOPS_A }; struct periodic_arg parg_b = {PERIOD_B, iterations, calc, (void *)CALC_LOOPS_B }; struct periodic_arg parg_c = {PERIOD_C, iterations, calc, (void *)CALC_LOOPS_C }; for (i=0; i < THREADS_PER_GROUP; i++) create_fifo_thread(periodic_thread, (void*)&parg_a, PRIO_A); for (i=0; i < THREADS_PER_GROUP; i++) create_fifo_thread(periodic_thread, (void*)&parg_b, PRIO_B); for (i=0; i < THREADS_PER_GROUP; i++) create_fifo_thread(periodic_thread, (void*)&parg_c, PRIO_C); join_threads(); printf("\nExecution Time Statistics:\n\n"); for (i=0; i<(THREADS_PER_GROUP * NUM_GROUPS); i++) { printf("TID %d (%c)\n", i, groupname[i>>2]); printf(" Min: %ld us\n", stats_min(&dat[i])); printf(" Max: %ld us\n", stats_max(&dat[i])); printf(" Avg: %f us\n", stats_avg(&dat[i])); printf(" StdDev: %f us\n\n", stats_stddev(&dat[i])); printf(" Quantiles:\n"); stats_quantiles_calc(&dat[i], 
&quantiles[i]); stats_quantiles_print(&quantiles[i]); printf("Criteria: TID %d did not miss a period\n", i); printf("Result: %s\n", fail[i] ? "FAIL":"PASS"); printf("\n"); if (fail[i]) ret = 1; } // FIXME: define pass criteria // printf("\nCriteria: latencies < %d us\n", PASS_US); // printf("Result: %s\n", ret ? "FAIL" : "PASS"); for (i=0; i<(THREADS_PER_GROUP * NUM_GROUPS); i++) { stats_container_free(&dat[i]); stats_quantiles_free(&quantiles[i]); } return ret; }
void main_thread(void) { int ret, i, j; nsec_t start, end; long smin = 0, smax = 0, cmin = 0, cmax = 0, delta = 0; float savg, cavg; int cpuid; if ( stats_container_init(&sdat, iterations) || stats_container_init(&shist, HIST_BUCKETS) || stats_container_init(&cdat, iterations) || stats_container_init(&chist, HIST_BUCKETS) ) { fprintf (stderr, "Cannot init stats container\n"); exit(1); } tids = malloc(sizeof(int) * numcpus); if (!tids) { perror("malloc"); exit(1); } memset(tids, 0, numcpus); cpuid = set_affinity(); if (cpuid == -1) { fprintf(stderr, "Main thread: Can't set affinity.\n"); exit(1); } /* run matrix mult operation sequentially */ curdat = &sdat; curdat->index = iterations-1; printf("\nRunning sequential operations\n"); start = rt_gettime(); for (i = 0; i < iterations; i++) matrix_mult_record(MATRIX_SIZE, i); end = rt_gettime(); delta = (long)((end - start)/NS_PER_US); savg = delta/iterations; /* don't use the stats record, use the total time recorded */ smin = stats_min(&sdat); smax = stats_max(&sdat); printf("Min: %ld us\n", smin); printf("Max: %ld us\n", smax); printf("Avg: %.4f us\n", savg); printf("StdDev: %.4f us\n", stats_stddev(&sdat)); if ( stats_hist(&shist, &sdat) || stats_container_save("sequential", "Matrix Multiplication Sequential Execution Runtime Scatter Plot", "Iteration", "Runtime (us)", &sdat, "points") || stats_container_save("sequential_hist", "Matrix Multiplicatoin Sequential Execution Runtime Histogram", "Runtime (us)", "Samples", &shist, "steps") ) { fprintf(stderr, "Warning: could not save sequential mults stats\n"); } pthread_barrier_init(&mult_start, NULL, numcpus+1); set_priority(PRIO); curdat = &cdat; curdat->index = iterations-1; online_cpu_id = -1; /* Redispatch cpus */ /* Create numcpus-1 concurrent threads */ for (j = 0; j < numcpus; j++) { tids[j] = create_fifo_thread(concurrent_thread, NULL, PRIO); if (tids[j] == -1) { printf("Thread creation failed (max threads exceeded?)\n"); exit(1); } } /* run matrix mult operation 
concurrently */ printf("\nRunning concurrent operations\n"); pthread_barrier_wait(&mult_start); start = rt_gettime(); join_threads(); end = rt_gettime(); delta = (long)((end - start)/NS_PER_US); cavg = delta/iterations; /* don't use the stats record, use the total time recorded */ cmin = stats_min(&cdat); cmax = stats_max(&cdat); printf("Min: %ld us\n", cmin); printf("Max: %ld us\n", cmax); printf("Avg: %.4f us\n", cavg); printf("StdDev: %.4f us\n", stats_stddev(&cdat)); if ( stats_hist(&chist, &cdat) || stats_container_save("concurrent", "Matrix Multiplication Concurrent Execution Runtime Scatter Plot", "Iteration", "Runtime (us)", &cdat, "points") || stats_container_save("concurrent_hist", "Matrix Multiplication Concurrent Execution Runtime Histogram", "Iteration", "Runtime (us)", &chist, "steps") ) { fprintf(stderr, "Warning: could not save concurrent mults stats\n"); } printf("\nConcurrent Multipliers:\n"); printf("Min: %.4f\n", (float)smin/cmin); printf("Max: %.4f\n", (float)smax/cmax); printf("Avg: %.4f\n", (float)savg/cavg); ret = 1; if (savg > (cavg * criteria)) ret = 0; printf("\nCriteria: %.2f * average concurrent time < average sequential time\n", criteria); printf("Result: %s\n", ret ? "FAIL" : "PASS"); return; }
int main(int argc, char **argv) { pthread_t *threads; long i; int ret; struct timespec intv; struct sched_param param; rt_init("a:r:t:e:l:h:", parse_args, argc, argv); signal(SIGINT, stop_log); if (argc >= (optind + 1)) nr_tasks = atoi(argv[optind]); else { numcpus = sysconf(_SC_NPROCESSORS_ONLN); nr_tasks = numcpus + 1; } intervals = malloc(sizeof(stats_container_t) * nr_tasks); if (!intervals) debug(DBG_ERR, "malloc failed\n"); memset(intervals, 0, sizeof(stats_container_t) * nr_tasks); intervals_length = malloc(sizeof(stats_container_t) * nr_tasks); if (!intervals_length) debug(DBG_ERR, "malloc failed\n"); memset(intervals_length, 0, sizeof(stats_container_t) * nr_tasks); if (!intervals_loops) debug(DBG_ERR, "malloc failed\n"); intervals_loops = malloc(sizeof(stats_container_t) * nr_tasks); memset(intervals_loops, 0, sizeof(stats_container_t) * nr_tasks); threads = malloc(sizeof(*threads) * nr_tasks); if (!threads) debug(DBG_ERR, "malloc failed\n"); memset(threads, 0, sizeof(*threads) * nr_tasks); ret = pthread_barrier_init(&start_barrier, NULL, nr_tasks + 1); ret = pthread_barrier_init(&end_barrier, NULL, nr_tasks + 1); if (ret < 0) debug(DBG_ERR, "pthread_barrier_init failed: %s\n", strerror(ret)); for (i = 0; i < nr_tasks; i++) { stats_container_init(&intervals[i], nr_runs); stats_container_init(&intervals_length[i], nr_runs); stats_container_init(&intervals_loops[i], nr_runs); } thread_pids = malloc(sizeof(long) * nr_tasks); if (!thread_pids) debug(DBG_ERR, "malloc thread_pids failed\n"); for (i = 0; i < nr_tasks; i++) { threads[i] = create_fifo_thread(start_task, (void *)i, prio_start + i); } /* * Progress bar uses stderr to let users see it when * redirecting output. So we convert stderr to use line * buffering so the progress bar doesn't flicker. 
*/ setlinebuf(stderr); /* up our prio above all tasks */ memset(¶m, 0, sizeof(param)); param.sched_priority = nr_tasks + prio_start; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) debug(DBG_WARN, "Warning, can't set priority of" "main thread !\n"); intv.tv_sec = INTERVAL / NS_PER_SEC; intv.tv_nsec = INTERVAL % (1 * NS_PER_SEC); print_progress_bar(0); setup_ftrace_marker(); for (loop = 0; loop < nr_runs; loop++) { unsigned long long end; now = rt_gettime() / NS_PER_US; ftrace_write("Loop %d now=%lld\n", loop, now); pthread_barrier_wait(&start_barrier); ftrace_write("All running!!!\n"); rt_nanosleep(intv.tv_nsec); print_progress_bar((loop * 100) / nr_runs); end = rt_gettime() / NS_PER_US; ftrace_write("Loop %d end now=%lld diff=%lld\n", loop, end, end - now); ret = pthread_barrier_wait(&end_barrier); if (stop || (check && check_times(loop))) { loop++; nr_runs = loop; break; } } putc('\n', stderr); pthread_barrier_wait(&start_barrier); done = 1; pthread_barrier_wait(&end_barrier); join_threads(); print_results(); if (stop) { /* * We use this test in bash while loops * So if we hit Ctrl-C then let the while * loop know to break. */ if (check < 0) exit(-1); else exit(1); } if (check < 0) exit(-1); else exit(0); return 0; }