/*
  Run one full deadlock-detector ("waiting_threads") stress pass and
  report per-cycle-length and per-wait-time statistics via TAP diag().

  Uses file-scope state: the wt_cycle_stats / wt_wait_stats counter
  arrays, wt_success_stats, the shared loop counter cnt, and the
  wt_wait_table of wait-time bucket upper bounds.
*/
void do_one_test()
{
  double sum, sum0;
  DBUG_ENTER("do_one_test");

  /* clear all statistics gathered by any previous pass */
  reset(wt_cycle_stats);
  reset(wt_wait_stats);
  wt_success_stats=0;
  cnt=0;
  test_concurrently("waiting_threads", test_wt, THREADS, CYCLES);

  /* first pass: grand total of detected deadlock cycles (both stat sets) */
  sum=sum0=0;
  for (cnt=0; cnt < WT_CYCLE_STATS; cnt++)
    sum+= wt_cycle_stats[0][cnt] + wt_cycle_stats[1][cnt];
  /* second pass: print each non-empty bucket with a cumulative percentage */
  for (cnt=0; cnt < WT_CYCLE_STATS; cnt++)
    if (wt_cycle_stats[0][cnt] + wt_cycle_stats[1][cnt] > 0)
    {
      sum0+=wt_cycle_stats[0][cnt] + wt_cycle_stats[1][cnt];
      diag("deadlock cycles of length %2u: %4u %4u %8.2f %%", cnt,
           wt_cycle_stats[0][cnt], wt_cycle_stats[1][cnt], 1e2*sum0/sum);
    }
  /*
    NOTE(review): here cnt == WT_CYCLE_STATS, so this reads one slot past
    the loop bound — presumably the arrays have an extra overflow bucket
    for cycles deeper than the table; confirm against the declarations.
  */
  diag("depth exceeded: %u %u",
       wt_cycle_stats[0][cnt], wt_cycle_stats[1][cnt]);
  for (cnt=0; cnt < WT_WAIT_STATS; cnt++)
    if (wt_wait_stats[cnt]>0)
      diag("deadlock waits up to %7llu us: %5u",
           wt_wait_table[cnt], wt_wait_stats[cnt]);
  /* likewise cnt == WT_WAIT_STATS here: the timed-out overflow bucket */
  diag("timed out: %u", wt_wait_stats[cnt]);
  diag("successes: %u", wt_success_stats);
  DBUG_VOID_RETURN;
}
void do_tests() { plan(4); bad= my_atomic_initialize(); ok(!bad, "my_atomic_initialize() returned %d", bad); my_atomic_rwlock_init(&rwl); b32= c32= 0; test_concurrently("my_atomic_add32", test_atomic_add, THREADS, CYCLES); b32= c32= 0; test_concurrently("my_atomic_fas32", test_atomic_fas, THREADS, CYCLES); b32= c32= 0; test_concurrently("my_atomic_cas32", test_atomic_cas, THREADS, CYCLES); my_atomic_rwlock_destroy(&rwl); }
void do_tests() { plan(4); lf_alloc_init(&lf_allocator, sizeof(TLA), offsetof(TLA, not_used)); lf_hash_init(&lf_hash, sizeof(int), LF_HASH_UNIQUE, 0, sizeof(int), 0, &my_charset_bin); bad= my_atomic_initialize(); ok(!bad, "my_atomic_initialize() returned %d", bad); test_concurrently("lf_pinbox", test_lf_pinbox, N= THREADS, CYCLES); test_concurrently("lf_alloc", test_lf_alloc, N= THREADS, CYCLES); test_concurrently("lf_hash", test_lf_hash, N= THREADS, CYCLES/10); lf_hash_destroy(&lf_hash); lf_alloc_destroy(&lf_allocator); }
void do_tests() { plan(6); bad= my_atomic_initialize(); ok(!bad, "my_atomic_initialize() returned %d", bad); my_atomic_rwlock_init(&rwl); b32= c32= 0; test_concurrently("my_atomic_add32", test_atomic_add, THREADS, CYCLES); b32= c32= 0; test_concurrently("my_atomic_fas32", test_atomic_fas, THREADS, CYCLES); b32= c32= 0; test_concurrently("my_atomic_cas32", test_atomic_cas, THREADS, CYCLES); { /* If b is not volatile, the wrong assembly code is generated on OSX Lion as the variable is optimized away as a constant. See Bug#62533 / Bug#13030056. Another workaround is to specify architecture explicitly using e.g. CFLAGS/CXXFLAGS= "-m64". */ volatile int64 b=0x1000200030004000LL; a64=0; my_atomic_add64(&a64, b); ok(a64==b, "add64"); } a64=0; test_concurrently("my_atomic_add64", test_atomic_add64, THREADS, CYCLES); my_atomic_rwlock_destroy(&rwl); /* workaround until we know why it crashes randomly on some machine (BUG#22320). */ sleep(2); }
/*
  TAP driver for the my_timer test suite.

  Declares 49 test points, produced by the initialization check below
  plus the individual timer scenarios it invokes.
*/
void do_tests()
{
  plan(49);

  ok(my_timer_init_ext() == 0, "my_timer_init_ext");

  /* single-threaded scenarios */
  test_create_and_delete();
  test_reset();
  test_timer();
  test_timer_reuse();
  test_independent_timers();

  /* concurrent per-thread timers */
  test_concurrently("per-thread", test_timer_per_thread, THREADS, 5);

  test_reinitialization();

  my_timer_deinit();
}