static void BM_lookupEventTag_NOT(benchmark::State& state) {
  prechargeEventMap();
  while (set.find(notTag) != set.end()) {
    ++notTag;
    if (notTag >= USHRT_MAX) notTag = 1;
  }
  while (state.KeepRunning()) {
    size_t len;
    android_lookupEventTag_len(map, &len, notTag);
  }
  ++notTag;
  if (notTag >= USHRT_MAX) notTag = 1;
}
static void BM_AdeptJacobianReverse(benchmark::State &state) {
  while (state.KeepRunning()) {
    double jac[9];
    adept::adouble vector[3];
    adept::set_values(vector, 3, g_vector);
    adept::adouble bivector[3];
    adept::set_values(bivector, 3, g_bivector);
    g_stack.new_recording();
    adept::adouble vec_ip_biv[3] = {0.0, 0.0, 0.0};
    InnerProductVectorBivector(vector, bivector, vec_ip_biv);
    g_stack.independent(vector, 3);
    g_stack.dependent(vec_ip_biv, 3);
    g_stack.jacobian_reverse(jac);
  }
}
static void BM_lexer(benchmark::State& state) {
  while (state.KeepRunning()) {
    std::ifstream fin("benchmarks/test.sexp");
    if (!fin) {
      throw std::runtime_error("failed to open benchmarks/test.sexp");
    } else {
      sexp::Lexer lexer(fin);
      while (lexer.getNextToken() != sexp::Lexer::TOKEN_EOF);
    }
  }
}
static void Iterate_all_files(benchmark::State& state) {
  std::unique_ptr<TemporaryFile> temp_file(CreateZip());
  ZipArchiveHandle handle;
  void* iteration_cookie;
  ZipEntry data;
  ZipString name;

  while (state.KeepRunning()) {
    OpenArchive(temp_file->path, &handle);
    StartIteration(handle, &iteration_cookie);
    while (Next(iteration_cookie, &data, &name) == 0) {
    }
    EndIteration(iteration_cookie);
    CloseArchive(handle);
  }
}
static void FindEntry_no_match(benchmark::State& state) {
  // Create a temporary zip archive.
  std::unique_ptr<TemporaryFile> temp_file(CreateZip());
  ZipArchiveHandle handle;
  ZipEntry data;

  // In order to walk through all file names in the archive, look for a name
  // that does not exist in the archive.
  std::string_view name("thisFileNameDoesNotExist");

  // Start the benchmark.
  while (state.KeepRunning()) {
    OpenArchive(temp_file->path, &handle);
    FindEntry(handle, name, &data);
    CloseArchive(handle);
  }
}
static void DuplicateInputs(benchmark::State& state) {
  const CScript SCRIPT_PUB{CScript(OP_TRUE)};
  const CChainParams& chainparams = Params();

  CBlock block{};
  CMutableTransaction coinbaseTx{};
  CMutableTransaction naughtyTx{};

  CBlockIndex* pindexPrev = ::ChainActive().Tip();
  assert(pindexPrev != nullptr);
  block.nBits = GetNextWorkRequired(pindexPrev, &block, chainparams.GetConsensus());
  block.nNonce = 0;
  auto nHeight = pindexPrev->nHeight + 1;

  // Make a coinbase TX
  coinbaseTx.vin.resize(1);
  coinbaseTx.vin[0].prevout.SetNull();
  coinbaseTx.vout.resize(1);
  coinbaseTx.vout[0].scriptPubKey = SCRIPT_PUB;
  coinbaseTx.vout[0].nValue = GetBlockSubsidy(nHeight, chainparams.GetConsensus());
  coinbaseTx.vin[0].scriptSig = CScript() << nHeight << OP_0;

  naughtyTx.vout.resize(1);
  naughtyTx.vout[0].nValue = 0;
  naughtyTx.vout[0].scriptPubKey = SCRIPT_PUB;

  uint64_t n_inputs = (((MAX_BLOCK_SERIALIZED_SIZE / WITNESS_SCALE_FACTOR) -
                        (CTransaction(coinbaseTx).GetTotalSize() + CTransaction(naughtyTx).GetTotalSize())) /
                       41) - 100;
  for (uint64_t x = 0; x < (n_inputs - 1); ++x) {
    naughtyTx.vin.emplace_back(GetRandHash(), 0, CScript(), 0);
  }
  naughtyTx.vin.emplace_back(naughtyTx.vin.back());

  block.vtx.push_back(MakeTransactionRef(std::move(coinbaseTx)));
  block.vtx.push_back(MakeTransactionRef(std::move(naughtyTx)));

  block.hashMerkleRoot = BlockMerkleRoot(block);

  while (state.KeepRunning()) {
    CValidationState cvstate{};
    assert(!CheckBlock(block, cvstate, chainparams.GetConsensus(), false, false));
    assert(cvstate.GetRejectReason() == "bad-txns-inputs-duplicate");
  }
}
BENCHMARK_F( Tcp4Fixture, AddEntryToTcp4Target )( benchmark::State& state ) {
  char buffer[1024];

  for( auto _ : state ) {
    if( stumpless_add_entry( target, entry ) <= 0 ) {
      state.SkipWithError( "could not send an entry to the tcp target" );
    }

    recv_from_handle( accepted, buffer, 1024 );
  }

  state.counters["CallsToAlloc"] = ( double ) tcp4_memory_counter.malloc_count;
  state.counters["MemoryAllocated"] = ( double ) tcp4_memory_counter.alloc_total;
  state.counters["CallsToRealloc"] = ( double ) tcp4_memory_counter.realloc_count;
  state.counters["CallsToFree"] = ( double ) tcp4_memory_counter.free_count;
  state.counters["MemoryFreed"] = ( double ) tcp4_memory_counter.free_total;
}
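// The benchmark above uses BENCHMARK_F, which both defines and registers a
// fixture-based benchmark; `target`, `entry`, `accepted`, and
// `tcp4_memory_counter` are expected to be set up by the fixture or by helper
// code elsewhere. The class below is only a minimal sketch of the fixture
// shape this assumes, based on Google Benchmark's ::benchmark::Fixture API;
// the setup details are placeholders, not the project's real code.
class Tcp4Fixture : public ::benchmark::Fixture {
 protected:
  struct stumpless_target *target;   // TCP target the entries are sent to
  struct stumpless_entry *entry;     // entry reused on every iteration

  void SetUp( const ::benchmark::State& state ) override {
    // open the TCP4 target, build the entry, start the listening socket that
    // recv_from_handle() reads from, and reset the memory counters
    // (project-specific helpers, omitted here)
  }

  void TearDown( const ::benchmark::State& state ) override {
    // close the target and listening socket, destroy the entry
  }
};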
static void DATABASE_store_append(benchmark::State& state) {
  // Serialize the example result set into a string.
  std::string content;
  auto qd = getExampleQueryData(20, 100);
  serializeQueryDataJSON(qd, content);

  size_t k = 0;
  while (state.KeepRunning()) {
    setDatabaseValue(kPersistentSettings, "key" + std::to_string(k), content);
    deleteDatabaseValue(kPersistentSettings, "key" + std::to_string(k));
    k++;
  }

  // All benchmarks will share a single database handle.
  for (size_t i = 0; i < k; ++i) {
    // deleteDatabaseValue(kPersistentSettings, "key" + std::to_string(i));
  }
}
static void BnBExhaustion(benchmark::State& state) {
  // Setup
  std::vector<OutputGroup> utxo_pool;
  CoinSet selection;
  CAmount value_ret = 0;
  CAmount not_input_fees = 0;

  while (state.KeepRunning()) {
    // Benchmark
    CAmount target = make_hard_case(17, utxo_pool);
    SelectCoinsBnB(utxo_pool, target, 0, selection, value_ret, not_input_fees); // Should exhaust

    // Cleanup
    utxo_pool.clear();
    selection.clear();
  }
}
static void BM_WindowsCriticalSection(benchmark::State& state) {
  if (state.thread_index == 0) {
    InitializeCriticalSection(&CriticalSection);
  }
  while (state.KeepRunning()) {
    EnterCriticalSection(&CriticalSection);
    LeaveCriticalSection(&CriticalSection);
  }
  if (state.thread_index == 0) {
    DeleteCriticalSection(&CriticalSection);
  }
}
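// Because BM_WindowsCriticalSection guards initialization and teardown on
// state.thread_index == 0, it is intended to run with several benchmark
// threads contending on the same critical section. A minimal registration
// sketch, assuming Google Benchmark's ThreadRange() API; the thread counts
// are illustrative, not taken from the original.
BENCHMARK(BM_WindowsCriticalSection)->ThreadRange(1, 8);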
[[gnu::noinline]] void bm(benchmark::State& state, D d){
  std::random_device rd;
  std::mt19937 gen(rd());
  std::uniform_int_distribution< T > dis(
    std::numeric_limits< T >::min(),
    std::numeric_limits< T >::max()
  );

  auto m = make_matrix_fn(d, [&dis, &gen](auto, auto){
    return dis(gen);
  }, maker::heap_t());

  while(state.KeepRunning()){
    auto res = sobel_x< T >(m);
    benchmark::DoNotOptimize(res);
  }
}
static void DeserializeAndCheckBlockTest(benchmark::State& state) {
  CDataStream stream((const char*)block_bench::block413567,
                     (const char*)&block_bench::block413567[sizeof(block_bench::block413567)],
                     SER_NETWORK, PROTOCOL_VERSION);
  char a = '\0';
  stream.write(&a, 1); // Prevent compaction

  const auto chainParams = CreateChainParams(CBaseChainParams::MAIN);

  while (state.KeepRunning()) {
    CBlock block; // Note that CBlock caches its checked state, so we need to recreate it here
    stream >> block;
    assert(stream.Rewind(sizeof(block_bench::block413567)));

    CValidationState validationState;
    assert(CheckBlock(block, validationState, chainParams->GetConsensus()));
  }
}
[[gnu::noinline]] void BM_sobel(benchmark::State& state, D d){
  using value_type = InputType;

  std::random_device rd;
  std::mt19937 gen(rd());
  std::uniform_int_distribution< value_type > dis(
    std::numeric_limits< value_type >::min(),
    std::numeric_limits< value_type >::max()
  );

  auto m = make_matrix_fn(d, [&dis, &gen](auto, auto){
    return dis(gen);
  });

  while(state.KeepRunning()){
    auto res = sobel_x< ResultType >(m);
  }
}
static void BENCH_Dart_count_multi_threaded(benchmark::State& state) {
  while (state.KeepRunning()) {
    uint32 nb_darts_2 = 0u;
    std::vector<uint32> nb_darts_per_thread(cgogn::nb_threads() + 2);
    for (auto& n : nb_darts_per_thread)
      n = 0u;
    nb_darts_2 = 0u;

    bench_map.parallel_foreach_dart([&nb_darts_per_thread] (cgogn::Dart, uint32 thread_index)
    {
      nb_darts_per_thread[thread_index]++;
    });

    for (uint32 n : nb_darts_per_thread)
      nb_darts_2 += n;

    cgogn_assert(nb_darts_2 == bench_map.nb_darts());
  }
}
static void RollingBloom(benchmark::State& state) {
  CRollingBloomFilter filter(120000, 0.000001);
  std::vector<unsigned char> data(32);
  uint32_t count = 0;
  uint64_t match = 0;
  while (state.KeepRunning()) {
    count++;
    data[0] = count;
    data[1] = count >> 8;
    data[2] = count >> 16;
    data[3] = count >> 24;
    filter.insert(data);

    data[0] = count >> 24;
    data[1] = count >> 16;
    data[2] = count >> 8;
    data[3] = count;
    match += filter.contains(data);
  }
}
static void BM_ALIGNED_NOREMAINDER(benchmark::State& state) {
  const int n{40};
  while (state.KeepRunning()) {
    // state.PauseTiming();
    // il::Array<float> v{n, 0.0f, il::align, 32, 0};
    float w[40] __attribute__((aligned(32)));
    // state.ResumeTiming();
    // float* const w{v.data()};
    // __assume(n % 8 == 0);
    // __assume_aligned(w, 32);
    //#pragma omp simd aligned(w: 32)
    il::escape((void*)w);
    il::clobber();
    for (int i = 0; i < 40; ++i) {
      w[i] = (w[i] / 5.3f) * (w[i] * w[i] + w[i]) - (12.5f / (w[i] + 0.3f)) +
             (w[i] / (14.3f / (w[i] + 1.4f))) - (w[i] / 23.0f) +
             (14.8f / (2.4f + w[i]));
    }
    il::escape((void*)w);
    il::clobber();
  }
}
// This Benchmark tests the CheckQueue with a slightly realistic workload,
// where checks all contain a prevector that is indirect 50% of the time
// and there is a little bit of work done between calls to Add.
static void CCheckQueueSpeedPrevectorJob(benchmark::State& state) {
  struct PrevectorJob {
    prevector<PREVECTOR_SIZE, uint8_t> p;
    PrevectorJob(){
    }
    PrevectorJob(FastRandomContext& insecure_rand){
      p.resize(insecure_rand.randrange(PREVECTOR_SIZE*2));
    }
    bool operator()() {
      return true;
    }
    void swap(PrevectorJob& x){p.swap(x.p);};
  };
  CCheckQueue<PrevectorJob> queue {QUEUE_BATCH_SIZE};
  boost::thread_group tg;
  for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) {
    tg.create_thread([&]{queue.Thread();});
  }
  while (state.KeepRunning()) {
    // Make insecure_rand here so that each iteration is identical.
    FastRandomContext insecure_rand(true);
    CCheckQueueControl<PrevectorJob> control(&queue);
    std::vector<std::vector<PrevectorJob>> vBatches(BATCHES);
    for (auto& vChecks : vBatches) {
      vChecks.reserve(BATCH_SIZE);
      for (size_t x = 0; x < BATCH_SIZE; ++x)
        vChecks.emplace_back(insecure_rand);
      control.Add(vChecks);
    }
    // control waits for completion by RAII, but
    // it is done explicitly here for clarity
    control.Wait();
  }
  tg.interrupt_all();
  tg.join_all();
}
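// The CCheckQueue benchmarks depend on several tuning constants defined
// elsewhere in the source file. The definitions below are a sketch with
// representative values in the spirit of Bitcoin's bench/checkqueue.cpp; the
// exact numbers in the original may differ.
static const int MIN_CORES = 2;
static const size_t BATCHES = 101;
static const size_t BATCH_SIZE = 30;
static const int PREVECTOR_SIZE = 28;
static const unsigned int QUEUE_BATCH_SIZE = 128;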
[[gnu::noinline]] void BM_sobel(benchmark::State& state, std::pair< int, int > d){
  using value_type = InputType;

  std::random_device rd;
  std::mt19937 gen(rd());
  std::uniform_int_distribution< value_type > dis(
    std::numeric_limits< value_type >::min(),
    std::numeric_limits< value_type >::max()
  );

  boost::numeric::ublas::matrix< InputType > m(d.first, d.second);
  for(size_t y = 0; y < m.size2(); ++y){
    for(size_t x = 0; x < m.size1(); ++x){
      m(x, y) = dis(gen);
    }
  }

  while(state.KeepRunning()){
    auto res = uBLAS::sobel_x< ResultType >(m);
  }
}
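// Because BM_sobel takes the image dimensions as an extra argument, it cannot
// be registered with plain BENCHMARK(). A minimal registration sketch,
// assuming Google Benchmark's BENCHMARK_CAPTURE macro and that InputType and
// ResultType are typedefs already in scope (if BM_sobel is a template, its
// arguments would have to be spelled out explicitly); the 1024x1024 size is
// illustrative, not taken from the original.
BENCHMARK_CAPTURE(BM_sobel, uBLAS_1024x1024, std::make_pair(1024, 1024));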
static void CCheckQueueSpeed(benchmark::State& state) {
  struct FakeJobNoWork {
    bool operator()() {
      return true;
    }
    void swap(FakeJobNoWork& x){};
  };
  CCheckQueue<FakeJobNoWork> queue {QUEUE_BATCH_SIZE};
  boost::thread_group tg;
  for (auto x = 0; x < std::max(MIN_CORES, GetNumCores()); ++x) {
    tg.create_thread([&]{queue.Thread();});
  }
  while (state.KeepRunning()) {
    CCheckQueueControl<FakeJobNoWork> control(&queue);

    // We call Add a number of times to simulate the behavior of adding
    // a block of transactions at once.
    std::vector<std::vector<FakeJobNoWork>> vBatches(BATCHES);
    for (auto& vChecks : vBatches) {
      vChecks.resize(BATCH_SIZE);
    }

    for (auto& vChecks : vBatches) {
      // We can't make vChecks in the inner loop because we want to measure
      // the cost of getting the memory to each thread and we might get the same
      // memory
      control.Add(vChecks);
    }
    // control waits for completion by RAII, but
    // it is done explicitly here for clarity
    control.Wait();
  }
  tg.interrupt_all();
  tg.join_all();
}
static void BM_SetValue(benchmark::State& state) {
  std::set<typename Q::Point> data =
    ConstructRandomSet<typename Q::Point>(state.range_x(), state.range_x());
  int64_t cpt = 0;
  while (state.KeepRunning()) {
    state.PauseTiming();
    typename Q::Domain dom(typename Q::Point().diagonal(0),
                           typename Q::Point().diagonal(state.range_x()));
    Q image( dom );
    for (typename std::set<typename Q::Point>::const_iterator it = data.begin(), itend = data.end();
         it != itend; ++it) {
      state.ResumeTiming();
      image.setValue( *it , 42);
      state.PauseTiming();
      cpt++;
    }
  }
  // const int64_t items_processed =
  //   static_cast<int64_t>(state.iterations())*state.range_x();
  state.SetItemsProcessed(cpt);
}
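// BM_SetValue is parameterized both on the container type Q (presumably a
// template parameter declared just above it in the original source) and on a
// runtime size read through state.range_x(). A minimal registration sketch,
// assuming Google Benchmark's BENCHMARK_TEMPLATE and the older Range() API
// that pairs with range_x(); ImageType and the 16..1024 bounds are
// placeholders, not taken from the original.
BENCHMARK_TEMPLATE(BM_SetValue, ImageType)->Range(16, 1024);  // ImageType is a placeholder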
static void BM_MotorSpinPoint(benchmark::State &state) {
  while (state.KeepRunning()) {
    MotorSpinPoint(g_motor, g_point, g_point_spin_motor);
  }
}
// Sanity test: this should loop ten times, and
// min/max/average should be close to 100ms.
static void Sleep100ms(benchmark::State& state) {
  while (state.KeepRunning()) {
    MilliSleep(100);
  }
}
static void bench_create(benchmark::State &state) {
  while (state.KeepRunning()) {
    std::vector<int> v;
    (void)v;
  }
}
static void bench_reserve(benchmark::State &state) {
  while (state.KeepRunning()) {
    std::vector<int> v;
    v.reserve(1);
  }
}
static void BM_StringCreation(benchmark::State& state) {
  while (state.KeepRunning())
    std::string empty_string;
}
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
  std::string x = "hello";
  while (state.KeepRunning())
    std::string copy(x);
}
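// The two string benchmarks above follow Google Benchmark's canonical
// example. To actually run them, each function is registered with BENCHMARK()
// and the binary needs a main(), e.g. via BENCHMARK_MAIN(). This registration
// block is a minimal sketch and not part of the original listing.
BENCHMARK(BM_StringCreation);
BENCHMARK(BM_StringCopy);

BENCHMARK_MAIN();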
static void bench_push_back(benchmark::State &state) {
  while (state.KeepRunning()) {
    std::vector<int> v;
    v.push_back(42);
  }
}
static void BM_InnerProductVectorBivector2(benchmark::State &state) {
  while (state.KeepRunning()) {
    InnerProductVectorBivector2(g_vector, g_bivector, g_vec_ip_biv);
  }
}
void Problem3_Divisor_List(benchmark::State& state) {
  Problem3 p;
  while(state.KeepRunning()) {
    benchmark::DoNotOptimize(p.divisor_list(600851475143ull));
  }
}
void storeTest(benchmark::State& state_) {
  while (state_.KeepRunning())
    for (int i = 0; i < state_.range_x(); ++i)
      store(i, i);
}
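// storeTest scales its inner loop with state_.range_x(), so it is meant to be
// registered with an argument range. A minimal sketch, assuming the older
// Google Benchmark Range() API that matches range_x(); the bounds are
// illustrative, not taken from the original.
BENCHMARK(storeTest)->Range(8, 8 << 10);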