int main(void)
{
  test_atomic();
  test_atomic_acquire();
  test_atomic_release();
  test_atomic_read();
  test_atomic_write();
  test_atomic_full();
  test_atomic_release_write();
  test_atomic_acquire_read();
  test_atomic_dd_acquire_read();
# if defined(AO_HAVE_fetch_and_add1) && defined(AO_HAVE_fetch_and_sub1)
    run_parallel(4, add1sub1_thr, add1sub1_test, "add1/sub1");
# endif
# if defined(AO_HAVE_store_release_write) && defined(AO_HAVE_load_acquire_read)
    run_parallel(3, acqrel_thr, acqrel_test,
                 "store_release_write/load_acquire_read");
# endif
# if defined(AO_HAVE_test_and_set_acquire)
    run_parallel(5, test_and_set_thr, test_and_set_test, "test_and_set");
# endif
  test_atomic_emulation();
  return 0;
}
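/*
 * The run_parallel() helper called above is defined elsewhere in the
 * test harness.  What follows is a minimal pthread-based sketch of
 * such a helper, assuming the signatures implied by the call sites;
 * the thr_func/test_func typedefs and the reporting format here are
 * hypothetical, and the real harness may differ.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef void *(*thr_func)(void *);
typedef int (*test_func)(void);   /* assumed: returns nonzero on success */

static void run_parallel(int nthreads, thr_func thr, test_func check,
                         const char *name)
{
  pthread_t *threads = malloc(sizeof(pthread_t) * (size_t)nthreads);
  int i;

  if (threads == NULL) {
    fprintf(stderr, "%s: out of memory\n", name);
    exit(1);
  }
  printf("Testing %s\n", name);
  /* Start all workers, passing each its index as the thread argument. */
  for (i = 0; i < nthreads; ++i) {
    if (pthread_create(&threads[i], NULL, thr, (void *)(long)i) != 0) {
      fprintf(stderr, "%s: thread creation failed\n", name);
      exit(1);
    }
  }
  /* Wait for completion, then run the single-threaded result check. */
  for (i = 0; i < nthreads; ++i)
    pthread_join(threads[i], NULL);
  if (!check()) {
    fprintf(stderr, "%s failed\n", name);
    exit(1);
  }
  free(threads);
}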
static void run_parallel_two_CPUs(enum queue_behavior_type type,
				  uint32_t loops, int q_size, int prefill)
{
	struct alf_queue *queue = NULL;
	cpumask_t cpumask;

	if (!(queue = alloc_and_init_queue(q_size, prefill)))
		return; /* fail */

	/* Restrict the CPUs to run on */
	cpumask_clear(&cpumask);
	cpumask_set_cpu(0, &cpumask);
	cpumask_set_cpu(1, &cpumask);

	if (type & SPSC) {
		run_parallel("alf_queue_SPSC_parallel_two_CPUs",
			     loops, &cpumask, 0, queue,
			     time_bench_CPU_enq_or_deq_spsc);
	} else if (type & MPMC) {
		run_parallel("alf_queue_MPMC_parallel_two_CPUs",
			     loops, &cpumask, 0, queue,
			     time_bench_CPU_enq_or_deq_mpmc);
	} else {
		pr_err("%s() WRONG TYPE!!! FIX\n", __func__);
	}

	alf_queue_free(queue);
}
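/*
 * The alloc_and_init_queue() helper used by the benchmarks is not
 * shown.  Below is a plausible sketch, assuming the alf_queue_alloc()
 * and alf_mp_enqueue() API from the alf_queue patchset; the exact
 * signatures and return conventions are assumptions and may differ
 * from the real helper.
 */
static struct alf_queue *alloc_and_init_queue(int q_size, int prefill)
{
	struct alf_queue *queue;
	void *dummy = (void *)1; /* arbitrary non-NULL token */
	int i;

	queue = alf_queue_alloc(q_size, GFP_KERNEL);
	if (IS_ERR_OR_NULL(queue))
		return NULL;

	/* Optionally prefill so dequeuers have work from the start;
	 * alf_mp_enqueue() is assumed to return the number of elements
	 * actually enqueued. */
	for (i = 0; i < prefill; i++) {
		if (alf_mp_enqueue(1, queue, &dummy) != 1) {
			alf_queue_free(queue);
			return NULL;
		}
	}
	return queue;
}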
int main(int argc, char **argv)
{
  int nthreads;

  if (1 == argc) {
# if !defined(HAVE_MMAP)
      nthreads = 3;
# else
      nthreads = 10;
# endif
  } else if (2 == argc) {
    nthreads = atoi(argv[1]);
    if (nthreads < 1 || nthreads > MAX_NTHREADS) {
      fprintf(stderr, "Invalid # of threads argument\n");
      exit(1);
    }
  } else {
    fprintf(stderr, "Usage: %s [# of threads]\n", argv[0]);
    exit(1);
  }
  printf("Performing %d reversals of %d element lists in %d threads\n",
         N_REVERSALS, LIST_LENGTH, nthreads);
  AO_malloc_enable_mmap();
  run_parallel(nthreads, run_one_test, dummy_test, "AO_malloc/AO_free");
  return 0;
}
int main(int argc, char **argv)
{
  int nthreads;

  if (1 == argc) {
    nthreads = DEFAULT_NTHREADS;
  } else if (2 == argc) {
    nthreads = atoi(argv[1]);
    if (nthreads < 1 || nthreads > MAX_NTHREADS) {
      fprintf(stderr, "Invalid # of threads argument\n");
      exit(1);
    }
  } else {
    fprintf(stderr, "Usage: %s [# of threads]\n", argv[0]);
    exit(1);
  }
  printf("Performing %d reversals of %d element lists in %d threads\n",
         N_REVERSALS, LIST_LENGTH, nthreads);
  AO_malloc_enable_mmap();

  /* Test various corner cases. */
  AO_free(NULL);
  AO_free(AO_malloc(0));
# ifdef HAVE_MMAP
    AO_free(AO_malloc(CHUNK_SIZE - (sizeof(AO_t)-1))); /* large alloc */
# endif

  run_parallel(nthreads, run_one_test, dummy_test, "AO_malloc/AO_free");
  return 0;
}
static void run_parallel_many_CPUs_bulk(enum queue_behavior_type type,
					uint32_t loops, int q_size,
					int prefill, int CPUs, int bulk)
{
	struct alf_queue *queue = NULL;
	cpumask_t cpumask;
	int i;

	if (CPUs == 0)
		return;

	if (!(queue = alloc_and_init_queue(q_size, prefill)))
		return; /* fail */

	/* Restrict the CPUs to run on */
	if (verbose)
		pr_info("Limit to %d parallel CPUs (bulk:%d)\n", CPUs, bulk);
	cpumask_clear(&cpumask);
	for (i = 0; i < CPUs; i++)
		cpumask_set_cpu(i, &cpumask);

	if (type & SPSC) {
		if (CPUs > 2) {
			pr_err("%s() ERR SPSC does not support CPUs > 2\n",
			       __func__);
			goto out;
		}
		run_parallel("alf_queue_BULK_SPSC_parallel_many_CPUs",
			     loops, &cpumask, bulk, queue,
			     time_bench_CPU_BULK_enq_or_deq_spsc);
	} else if (type & MPMC) {
		run_parallel("alf_queue_BULK_MPMC_parallel_many_CPUs",
			     loops, &cpumask, bulk, queue,
			     time_bench_CPU_BULK_enq_or_deq_mpmc);
	} else {
		pr_err("%s() WRONG TYPE!!! FIX\n", __func__);
	}
out:
	alf_queue_free(queue);
}
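/*
 * For reference, a hypothetical invocation of the bulk benchmark,
 * matching the parameter order of the signature above (type, loops,
 * queue size, prefill, CPU count, bulk factor); the concrete numbers
 * are illustrative only:
 *
 *   MPMC benchmark, 1,000,000 loops on a 1024-slot queue, no prefill,
 *   spread across 4 CPUs, moving 16 elements per bulk enq/deq.
 */
/* run_parallel_many_CPUs_bulk(MPMC, 1000000, 1024, 0, 4, 16); */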
int main(void)
{
    struct timer tm;

    init_timer(&tm);
    pthread_barrier_init(&barrier0, NULL, 2);
    pthread_barrier_init(&barrier1, NULL, 2);

    start_timer(&tm);
    int ret1 = run_parallel();
    stop_timer(&tm);
    print_time(&tm);

    int ret2 = run_serial();
    if (ret1 != ret2) {
        printf("Mismatch\n");
        printf("par=%d,ser=%d\n", ret1, ret2);
    } else {
        printf("Success %d\n", ret1);
    }
    return 0;
}
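/*
 * The struct timer helpers (init_timer, start_timer, stop_timer,
 * print_time) are not shown.  A minimal sketch follows, assuming the
 * struct only tracks start/stop timestamps; it uses a monotonic clock
 * so the measurement is immune to wall-clock adjustments.  The real
 * helpers may carry more state or print in a different format.
 */
#include <stdio.h>
#include <time.h>

struct timer {
    struct timespec start;
    struct timespec stop;
};

static void init_timer(struct timer *t)
{
    t->start.tv_sec = t->start.tv_nsec = 0;
    t->stop.tv_sec = t->stop.tv_nsec = 0;
}

static void start_timer(struct timer *t)
{
    clock_gettime(CLOCK_MONOTONIC, &t->start);
}

static void stop_timer(struct timer *t)
{
    clock_gettime(CLOCK_MONOTONIC, &t->stop);
}

static void print_time(const struct timer *t)
{
    /* Elapsed seconds = whole-second delta plus nanosecond delta. */
    double secs = (double)(t->stop.tv_sec - t->start.tv_sec)
                + (double)(t->stop.tv_nsec - t->start.tv_nsec) / 1e9;
    printf("elapsed: %.6f s\n", secs);
}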