/*
 * Mixed readers/writers over the C++ wrapper: half the threads bump the
 * counter in bursts of `inc` while holding the write lock; the other half
 * check under the read lock that the counter is always a whole multiple of
 * `inc` — i.e. no reader ever observes a partially applied burst.
 */
TEST(sfrlock, contended_reads_writes_sanity_cpp)
{
    volatile unsigned counter = 0;
    const unsigned nthreads = 10;
    const unsigned inc = 100;
    SFRLock lock;

    measure_time_concurrent(nthreads, [&](unsigned tid) {
        if (tid < nthreads / 2) {
            /* Writer: repeat/5 bursts of `inc` increments each. */
            for (unsigned i = 0; i < repeat / 5; ++i) {
                {
                    std::lock_guard<SFRWriteLock> guard(lock.writeLock());
                    for (unsigned step = 0; step < inc; ++step)
                        counter++;
                }
                usleep(1);
            }
        } else {
            /* Reader: the counter must stay a multiple of `inc`. */
            for (unsigned i = 0; i < repeat; ++i) {
                std::lock_guard<SFRReadLock> guard(lock.readLock());
                EXPECT_EQ(counter % inc, 0);
            }
        }
    });

    /* Each of the nthreads/2 writers contributed repeat/5 bursts of `inc`. */
    EXPECT_EQ(counter, inc * (repeat / 5) * (nthreads / 2));
}
/*
 * C-API single-writer/multi-reader sanity test: thread 0 increments the
 * counter in bursts of `inc` under the write lock; every other thread
 * verifies under the read lock that the counter is always a whole multiple
 * of `inc` (no torn/partial burst is ever visible to a reader).
 */
TEST(sfrlock, contended_reads_write_sanity)
{
    volatile unsigned cnt = 0;
    const unsigned thread_count = 5;
    const unsigned inc = 100;
    sfrlock_t sfrlock;

    sfrlock_init(&sfrlock);
    measure_time_concurrent(thread_count, [&](unsigned idx) {
        if (!idx) {
            /* Writer thread: repeat/5 bursts of `inc` increments. */
            for (unsigned j = repeat / 5; j; j--) {
                sfrlock_wrlock(&sfrlock);
                for (unsigned k = inc; k; k--) {
                    cnt++;
                }
                sfrlock_wrunlock(&sfrlock);
                usleep(1);
            }
        } else {
            /* Reader threads: counter must stay a multiple of `inc`. */
            for (unsigned j = repeat; j; j--) {
                sfrlock_rdlock(&sfrlock);
                EXPECT_EQ(cnt % inc, 0);
                sfrlock_rdunlock(&sfrlock);
            }
        }
    });

    /*
     * The writer performed exactly repeat/5 bursts of `inc` increments.
     * Note the parentheses: `inc * (repeat / 5)` matches the loop's
     * integer-division trip count, whereas the previous `inc * repeat / 5`
     * (i.e. `(inc * repeat) / 5`) would disagree whenever `repeat` is not
     * a multiple of 5. Mirrors the expectation in the _cpp variant.
     */
    EXPECT_EQ(cnt, inc * (repeat / 5));
}
/*
 * Micro-benchmark, not a correctness test: measures the cost of a
 * contended write-lock/unlock cycle for sfrlock_t and prints the same
 * workload's cost under pthread_rwlock_t and pthread_mutex_t, with the
 * relative difference versus the sfrlock_t baseline.
 */
TEST(sfrlock, contended_write_cost)
{
    const unsigned thread_count = 5;
    sfrlock_t sfrlock;
    pthread_rwlock_t rwlock;
    pthread_mutex_t mutex;
    double baseline;
    double elapsed;

    sfrlock_init(&sfrlock);
    pthread_rwlock_init(&rwlock, nullptr);
    pthread_mutex_init(&mutex, nullptr);

    /* Baseline: sfrlock_t write lock/unlock in a tight loop. */
    baseline = measure_time_concurrent(thread_count, [&](unsigned) {
        for (unsigned i = repeat; i; i--) {
            sfrlock_wrlock(&sfrlock);
            sfrlock_wrunlock(&sfrlock);
        }
    });
    printf("sfrlock_t time: %lf ms\n", baseline / 1e6);

    /* Same workload through pthread_rwlock_t. */
    elapsed = measure_time_concurrent(thread_count, [&](unsigned) {
        for (unsigned i = repeat; i; i--) {
            pthread_rwlock_wrlock(&rwlock);
            pthread_rwlock_unlock(&rwlock);
        }
    });
    printf("pthread_rwlock_t time: %lf ms (%+.2lf%%)\n", elapsed / 1e6,
           -(1 - (elapsed / baseline)) * 100);

    /* Same workload through pthread_mutex_t. */
    elapsed = measure_time_concurrent(thread_count, [&](unsigned) {
        for (unsigned i = repeat; i; i--) {
            pthread_mutex_lock(&mutex);
            pthread_mutex_unlock(&mutex);
        }
    });
    printf("pthread_mutex_t time: %lf ms (%+.2lf%%)\n", elapsed / 1e6,
           -(1 - (elapsed / baseline)) * 100);

    pthread_rwlock_destroy(&rwlock);
    pthread_mutex_destroy(&mutex);
}
/*
 * Hammers the C++ read-lock guard from 20 concurrent threads; readers
 * must never block each other. Each thread also takes one final read
 * lock it never releases — presumably to check that exiting with a read
 * lock still held is harmless (NOTE(review): mirrors the C-API test;
 * confirm this leak is intentional).
 */
TEST(sfrlock, concurrent_reads_sanity_cpp)
{
    const unsigned reader_count = 20;
    SFRLock lock;

    measure_time_concurrent(reader_count, [&](unsigned) {
        for (unsigned iter = 0; iter < repeat; ++iter) {
            std::lock_guard<SFRReadLock> guard(lock.readLock());
        }
        /* Deliberately left locked — see note above. */
        lock.readLock().lock();
    });
}
/*
 * Write-lock mutual exclusion via the C++ wrapper: five threads each
 * increment a plain (non-atomic) counter `repeat` times under the write
 * lock; the final total is exact only if the lock truly serialized every
 * increment.
 */
TEST(sfrlock, contended_writes_sanity_cpp)
{
    unsigned total = 0;
    const unsigned writer_count = 5;
    SFRLock lock;

    measure_time_concurrent(writer_count, [&](unsigned) {
        for (unsigned iter = 0; iter < repeat; ++iter) {
            std::lock_guard<SFRWriteLock> guard(lock.writeLock());
            total++;
        }
    });

    /* No increment may be lost to a race. */
    EXPECT_EQ(total, writer_count * repeat);
}
/*
 * C-API twin of the _cpp readers test: 20 threads cycle the read lock;
 * readers must never block each other. Each thread also takes one final
 * read lock it never releases — presumably to check that exiting with a
 * read lock still held is harmless (NOTE(review): confirm intent).
 */
TEST(sfrlock, concurrent_reads_sanity)
{
    const unsigned reader_count = 20;
    sfrlock_t sfrlock;

    sfrlock_init(&sfrlock);
    measure_time_concurrent(reader_count, [&](unsigned) {
        for (unsigned iter = 0; iter < repeat; ++iter) {
            sfrlock_rdlock(&sfrlock);
            sfrlock_rdunlock(&sfrlock);
        }
        /* Deliberately left locked — see note above. */
        sfrlock_rdlock(&sfrlock);
    });
}
/*
 * Write-lock mutual exclusion via the C API: five threads each increment
 * a plain (non-atomic) counter `repeat` times under the write lock; the
 * final total is exact only if every increment was serialized.
 */
TEST(sfrlock, contended_writes_sanity)
{
    unsigned total = 0;
    const unsigned writer_count = 5;
    sfrlock_t sfrlock;

    sfrlock_init(&sfrlock);
    measure_time_concurrent(writer_count, [&](unsigned) {
        for (unsigned iter = 0; iter < repeat; ++iter) {
            sfrlock_wrlock(&sfrlock);
            total++;
            sfrlock_wrunlock(&sfrlock);
        }
    });

    /* No increment may be lost to a race. */
    EXPECT_EQ(total, writer_count * repeat);
}
/*
 * Mutual-exclusion check for cwlock: cwlock_lock() returns true only for
 * the winning thread. Whenever a thread wins, the atomic `owners`
 * counter must go 0 -> 1 -> 0, proving no two winners coexist.
 */
TEST(cwlock, contended_single_acquirer)
{
    const unsigned thread_count = 30;
    cwlock_t cwlock;
    unsigned owners = 0;

    cwlock_init(&cwlock);
    measure_time_concurrent(thread_count, [&](unsigned) {
        for (unsigned iter = 0; iter < repeat; ++iter) {
            if (!cwlock_lock(&cwlock))
                continue;
            /* Winner: must be the only thread inside. */
            EXPECT_EQ(__sync_fetch_and_add(&owners, 1), 0);
            EXPECT_EQ(__sync_sub_and_fetch(&owners, 1), 0);
            cwlock_unlock(&cwlock);
        }
    });
}
/*
 * Verifies that cwlock releases waiters only after the winner finishes:
 * the single winning thread sleeps long enough for all 29 other threads
 * to block, then publishes its work (`inside`) before unlocking. Every
 * thread — winner and losers alike — must then observe `inside == 1`.
 */
TEST(cwlock, contended_release_only_once_done)
{
    const unsigned thread_count = 30;
    cwlock_t cwlock;
    unsigned inside = 0;
    unsigned owners = 0;

    cwlock_init(&cwlock);
    measure_time_concurrent(thread_count, [&](unsigned) {
        if (cwlock_lock(&cwlock)) {
            /* The winner must be alone in the critical section. */
            EXPECT_EQ(__sync_fetch_and_add(&owners, 1), 0);
            /*
             * Sleep for half a second so that every other thread blocks
             * before we increment `inside` and release them.
             */
            usleep(500000);
            __sync_fetch_and_add(&inside, 1);
            EXPECT_EQ(__sync_sub_and_fetch(&owners, 1), 0);
            cwlock_unlock(&cwlock);
        }
        /* Losers resume only after the winner's work is visible. */
        EXPECT_EQ(inside, 1);
    });
}