void TestSuite::run_suite(
    const IFilter&      filter,
    ITestListener&      test_listener,
    TestResult&         test_suite_result,
    TestResult&         cumulated_result) const
{
    TestResult local_cumulated_result(cumulated_result);
    local_cumulated_result.merge(test_suite_result);

    bool has_begun_suite = false;

    for (size_t i = 0; i < impl->m_factories.size(); ++i)
    {
        ITestCaseFactory& factory = *impl->m_factories[i];

        // Skip test cases that aren't let through by the filter.
        if (!filter.accepts(factory.get_name()))
            continue;

        if (!has_begun_suite)
        {
            // Tell the listener that a test suite is about to be executed.
            test_listener.begin_suite(*this);
            test_suite_result.signal_suite_execution();
            has_begun_suite = true;
        }

        // Tell the listener that a test case is about to be executed.
        test_listener.begin_case(*this, factory.get_name());

        // Instantiate and run the test case.
        TestResult test_case_result;
        run_case(factory, test_listener, test_case_result);

        // Accumulate the test results.
        test_suite_result.merge(test_case_result);
        local_cumulated_result.merge(test_case_result);

        // Tell the listener that the test case execution has ended.
        test_listener.end_case(
            *this,
            factory.get_name(),
            test_suite_result,
            test_case_result,
            local_cumulated_result);
    }

    if (has_begun_suite)
    {
        // Report a test suite failure if one or more test cases failed.
        if (test_suite_result.get_case_failure_count() > 0)
            test_suite_result.signal_suite_failure();

        // Tell the listener that the test suite execution has ended.
        test_listener.end_suite(
            *this,
            test_suite_result,
            cumulated_result);
    }
}
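#include <cstring>
#include <string>

// Illustrative sketch, not part of the original file: a minimal IFilter
// implementation that the run_suite() loop above could be driven with.
// The exact signature of IFilter::accepts() is an assumption inferred from
// the call sites (it receives the name returned by get_name()); the
// substring matching strategy is purely an example.
class SubstringFilter
  : public IFilter
{
  public:
    explicit SubstringFilter(const char* pattern)
      : m_pattern(pattern)
    {
    }

    // Accept any suite or case whose name contains the pattern.
    virtual bool accepts(const char* name) const
    {
        return std::strstr(name, m_pattern.c_str()) != 0;
    }

  private:
    const std::string m_pattern;
};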
// Run those test suites whose names pass a given filter.
void TestSuiteRepository::run(
    const IFilter&      filter,
    ITestListener&      test_listener,
    TestResult&         cumulated_result) const
{
    for (size_t i = 0; i < impl->m_suites.size(); ++i)
    {
        TestSuite& test_suite = *impl->m_suites[i];

        // If the suite's name passes the filter, run all of its test cases;
        // otherwise forward the filter so that individual test cases whose
        // names pass it still get a chance to run.
        if (filter.accepts(test_suite.get_name()))
            test_suite.run(test_listener, cumulated_result);
        else test_suite.run(filter, test_listener, cumulated_result);
    }
}
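// Illustrative usage sketch, not part of the original file: driving the
// repository end to end with a filter that lets everything through.  Only
// run() and get_case_failure_count() are taken from the code above; the
// pass-through filter and the helper function are assumptions.
struct PassThroughFilter
  : public IFilter
{
    virtual bool accepts(const char*) const { return true; }
};

bool run_all_suites(
    const TestSuiteRepository&  repository,
    ITestListener&              test_listener)
{
    PassThroughFilter filter;
    TestResult cumulated_result;

    repository.run(filter, test_listener, cumulated_result);

    // Report success only if no test case failed.
    return cumulated_result.get_case_failure_count() == 0;
}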
void BenchmarkSuiteRepository::run(
    const IFilter&      filter,
    BenchmarkResult&    result) const
{
    for (size_t i = 0; i < impl->m_suites.size(); ++i)
    {
        BenchmarkSuite& suite = *impl->m_suites[i];

        // Create a benchmark result for this benchmark suite.
        BenchmarkResult suite_result;
        suite_result.add_listeners(result);

        // Run the benchmark suite.
        if (filter.accepts(suite.get_name()))
            suite.run(suite_result);
        else suite.run(filter, suite_result);

        // Merge the benchmark suite result into the final benchmark result.
        result.merge(suite_result);
    }
}
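#include <cstddef>

// Hypothetical sketch, for illustration only: the per-suite result pattern
// used above.  Each suite gets a fresh result that forwards events to the
// parent's listeners (via add_listeners()) while keeping its own tallies,
// and merge() then folds those tallies back into the final result.  The
// real BenchmarkResult carries more state; the counter names below are
// invented for the sketch.
struct CounterResult
{
    std::size_t m_suite_execution_count;
    std::size_t m_case_failure_count;

    CounterResult()
      : m_suite_execution_count(0)
      , m_case_failure_count(0)
    {
    }

    void merge(const CounterResult& rhs)
    {
        // Merging is additive: the parent absorbs the child's tallies.
        m_suite_execution_count += rhs.m_suite_execution_count;
        m_case_failure_count += rhs.m_case_failure_count;
    }
};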
void BenchmarkSuite::run(
    const IFilter&      filter,
    BenchmarkResult&    suite_result) const
{
    BenchmarkingThreadContext benchmarking_context;
    bool has_begun_suite = false;

    for (size_t i = 0; i < impl->m_factories.size(); ++i)
    {
        IBenchmarkCaseFactory* factory = impl->m_factories[i];

        // Skip benchmark cases that aren't let through by the filter.
        if (!filter.accepts(factory->get_name()))
            continue;

        if (!has_begun_suite)
        {
            // Tell the listeners that a benchmark suite is about to be executed.
            suite_result.begin_suite(*this);
            suite_result.signal_suite_execution();
            has_begun_suite = true;
        }

        // Instantiate the benchmark case.
        auto_ptr<IBenchmarkCase> benchmark(factory->create());

        // Recreate the stopwatch (and the underlying timer) for every benchmark
        // case, since the CPU frequency will fluctuate quite a bit depending on
        // the CPU load. We need an up-to-date frequency estimation in order to
        // compute accurate call rates.
        Impl::StopwatchType stopwatch(100000);

        // Tell the listeners that a benchmark case is about to be executed.
        suite_result.begin_case(*this, *benchmark.get());

        // The try/catch is only compiled into release builds (NDEBUG defined);
        // in debug builds exceptions propagate uncaught, e.g. to the debugger.
#ifdef NDEBUG
        try
#endif
        {
            suite_result.signal_case_execution();

            // Estimate benchmarking parameters.
            Impl::BenchmarkParams params;
            Impl::estimate_benchmark_params(
                benchmark.get(),
                stopwatch,
                params);

            // Measure the overhead of calling IBenchmarkCase::run().
            const double overhead =
                Impl::measure_call_overhead(stopwatch, params);

            // Run the benchmark case.
            const double execution_time =
                Impl::measure_iteration_runtime(
                    benchmark.get(),
                    stopwatch,
                    params);

            // Gather the timing results.
            TimingResult timing_result;
            timing_result.m_iteration_count = params.m_iteration_count;
            timing_result.m_measurement_count = params.m_measurement_count;
            timing_result.m_frequency = static_cast<double>(stopwatch.get_timer().frequency());
            timing_result.m_ticks = execution_time > overhead ? execution_time - overhead : 0.0;

            // Post the timing result.
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                timing_result);
        }
#ifdef NDEBUG
        catch (const exception& e)
        {
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                "an unexpected exception was caught: %s.",
                e.what());
            suite_result.signal_case_failure();
        }
        catch (...)
        {
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                "an unexpected exception was caught (no details available).");
            suite_result.signal_case_failure();
        }
#endif

        // Tell the listeners that the benchmark case execution has ended.
        suite_result.end_case(*this, *benchmark.get());
    }

    if (has_begun_suite)
    {
        // Report a benchmark suite failure if one or more benchmark cases failed.
        if (suite_result.get_case_failure_count() > 0)
            suite_result.signal_suite_failure();

        // Tell the listeners that the benchmark suite execution has ended.
        suite_result.end_suite(*this);
    }
}
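// Hypothetical helper, for illustration only: how the TimingResult fields
// filled in above combine into a call rate.  It assumes m_ticks covers the
// m_iteration_count iterations of one measurement and that m_frequency is
// expressed in timer ticks per second; the field names come from the code
// above, but this interpretation is inferred, not confirmed by the source.
double calls_per_second(const TimingResult& timing_result)
{
    if (timing_result.m_ticks == 0.0)
        return 0.0;

    // Seconds spent in a single call to IBenchmarkCase::run().
    const double seconds_per_call =
        timing_result.m_ticks /
        (timing_result.m_frequency * timing_result.m_iteration_count);

    return 1.0 / seconds_per_call;
}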
void BenchmarkSuite::run(
    const IFilter&      filter,
    BenchmarkResult&    suite_result) const
{
    BenchmarkingThreadContext benchmarking_context;
    bool has_begun_suite = false;

    for (size_t i = 0; i < impl->m_factories.size(); ++i)
    {
        IBenchmarkCaseFactory* factory = impl->m_factories[i];

        // Skip benchmark cases that aren't let through by the filter.
        if (!filter.accepts(factory->get_name()))
            continue;

        if (!has_begun_suite)
        {
            // Tell the listeners that a benchmark suite is about to be executed.
            suite_result.begin_suite(*this);
            suite_result.signal_suite_execution();
            has_begun_suite = true;
        }

        // Instantiate the benchmark case.
        unique_ptr<IBenchmarkCase> benchmark(factory->create());

        // Recreate the stopwatch (and the underlying timer) for every benchmark
        // case, since the CPU frequency will fluctuate quite a bit depending on
        // the CPU load. We need an up-to-date frequency estimation in order to
        // compute accurate call rates.
        Impl::StopwatchType stopwatch(100000);

        // Tell the listeners that a benchmark case is about to be executed.
        suite_result.begin_case(*this, *benchmark.get());

#ifdef NDEBUG
        try
#endif
        {
            suite_result.signal_case_execution();

            // Estimate benchmarking parameters.
            const size_t measurement_count =
                Impl::compute_measurement_count(benchmark.get(), stopwatch);

            // Measure the overhead of calling IBenchmarkCase::run().
            const double overhead_ticks =
                Impl::measure_call_overhead_ticks(stopwatch, measurement_count);

            // Run the benchmark case.
            const double runtime_ticks =
                Impl::measure_runtime(
                    benchmark.get(),
                    stopwatch,
                    BenchmarkSuite::Impl::measure_runtime_ticks,
                    measurement_count);

#ifdef GENERATE_BENCHMARK_PLOTS
            vector<Vector2d> points;

            for (size_t j = 0; j < 100; ++j)
            {
                const double ticks =
                    Impl::measure_runtime(
                        benchmark.get(),
                        stopwatch,
                        BenchmarkSuite::Impl::measure_runtime_ticks,
                        max<size_t>(1, measurement_count / 100));
                points.emplace_back(
                    static_cast<double>(j),
                    ticks > overhead_ticks ? ticks - overhead_ticks : 0.0);
            }

            stringstream sstr;
            sstr << "unit benchmarks/plots/";
            sstr << get_name() << "_" << benchmark->get_name();
            sstr << ".gnuplot";

            GnuplotFile plotfile;
            plotfile.new_plot().set_points(points);
            plotfile.write(sstr.str());
#endif

            // Gather the timing results.
            TimingResult timing_result;
            timing_result.m_iteration_count = 1;
            timing_result.m_measurement_count = measurement_count;
            timing_result.m_frequency = static_cast<double>(stopwatch.get_timer().frequency());
            timing_result.m_ticks = runtime_ticks > overhead_ticks ? runtime_ticks - overhead_ticks : 0.0;

            // Post the timing result.
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                timing_result);
        }
#ifdef NDEBUG
        catch (const exception& e)
        {
            if (e.what()[0] != '\0')
            {
                suite_result.write(
                    *this,
                    *benchmark.get(),
                    __FILE__,
                    __LINE__,
                    "an unexpected exception was caught: %s",
                    e.what());
            }
            else
            {
                suite_result.write(
                    *this,
                    *benchmark.get(),
                    __FILE__,
                    __LINE__,
                    "an unexpected exception was caught (no details available).");
            }

            suite_result.signal_case_failure();
        }
        catch (...)
        {
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                "an unexpected exception was caught (no details available).");
            suite_result.signal_case_failure();
        }
#endif

        // Tell the listeners that the benchmark case execution has ended.
        suite_result.end_case(*this, *benchmark.get());
    }

    if (has_begun_suite)
    {
        // Report a benchmark suite failure if one or more benchmark cases failed.
        if (suite_result.get_case_failure_count() > 0)
            suite_result.signal_suite_failure();

        // Tell the listeners that the benchmark suite execution has ended.
        suite_result.end_suite(*this);
    }
}
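// Illustrative restatement, not part of the framework: the clamped overhead
// subtraction both versions of BenchmarkSuite::run() perform when filling in
// m_ticks.  The cost of invoking an empty IBenchmarkCase::run() is measured
// with the same stopwatch and subtracted from the measured runtime, clamped
// at zero so that measurement noise can never produce a negative timing.
inline double net_ticks(const double runtime_ticks, const double overhead_ticks)
{
    return runtime_ticks > overhead_ticks
        ? runtime_ticks - overhead_ticks
        : 0.0;
}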