void BenchmarkRunnerThread::run()
{
    set_current_thread_name("benchmarks");

    auto_release_ptr<XMLFileBenchmarkListener> xmlfile_listener(
        create_xmlfile_benchmark_listener());

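    // Results land in <tests root>/unit benchmarks/results/benchmark.<timestamp>.xml.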
    const string xmlfile_name = "benchmark." + get_time_stamp_string() + ".xml";
    const bf::path xmlfile_path =
          bf::path(Application::get_tests_root_path())
        / "unit benchmarks"
        / "results"
        / xmlfile_name;

    if (!xmlfile_listener->open(xmlfile_path.string().c_str()))
    {
        emit signal_cannot_create_benchmark_file();
        return;
    }

    BenchmarkResult result;
    result.add_listener(xmlfile_listener.get());

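    // Run all registered benchmark suites from the tests root directory,
    // then restore the previous working directory.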
    const bf::path old_current_path =
        Application::change_current_directory_to_tests_root_path();

    BenchmarkSuiteRepository::instance().run(result);

    bf::current_path(old_current_path);

    emit signal_finished();
}
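The thread reports back to the UI exclusively through the two signals emitted above. A hypothetical wiring on the GUI side follows; the receiver class, its slot names, and the thread being default-constructible are all assumptions for illustration, not taken from the source.

// Sketch only: "panel" stands for some QObject-derived receiver with
// matching slots; none of these receiver names appear in the source.
auto* thread = new BenchmarkRunnerThread();

QObject::connect(
    thread, &BenchmarkRunnerThread::signal_finished,
    panel, &ResultsPanel::on_benchmarks_finished);

QObject::connect(
    thread, &BenchmarkRunnerThread::signal_cannot_create_benchmark_file,
    panel, &ResultsPanel::on_benchmark_file_error);

thread->start();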
Example #2
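This runner executes one benchmark for up to BenchLoops iterations. Each iteration is bracketed by PreSetUp/SetUp and TearDown/PostTearDown hooks around the measured Run() call; a null result or any exception aborts the loop, and all progress and errors are reported through the Printer.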
bool Benchmark::Exec() {
  bool Success = true;

  Results.reserve(BenchLoops);

  Printer->BenchmarkStart(BenchName);

  for(unsigned I = 0; I < BenchLoops && Success; ++I) {
    Printer->IterationStart(BenchName, I);

    try {
      BenchmarkResult *Result;

      PreSetUp();
      SetUp();
      Result = Run(BenchClass);
      TearDown();
      PostTearDown();

      // A result has been computed.
      if(Result) {
        Results.push_back(Result);
        Success = Result->IsSuccess();

      // Catastrophic error.
      } else
        Success = false;

    } catch(const Error &Ex) {
      Printer->FrameworkError(BenchName, Ex.what());
      Success = false;

    } catch(const cl::Error &Ex) {
      Printer->OpenCLError(BenchName, Ex.what());
      Success = false;

    } catch(const std::exception &Ex) {
      Printer->UnknownError(BenchName, Ex.what());
      Success = false;
    }

    Printer->IterationEnd(BenchName, I);
  }

  Printer->BenchmarkEnd(BenchName);

  Printer->BenchmarkResults(BenchName, Success, Results);

  return Success;
}
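Exec() above is a template-method driver: a fixed skeleton calls virtual hooks around the measured Run(), with a per-iteration exception fence so the first failure stops the run. Below is a self-contained sketch of the same pattern; every name in it is illustrative and not taken from the framework above.

#include <exception>
#include <iostream>
#include <vector>

// Minimal stand-in for the framework above: Exec() drives virtual hooks,
// and any hook may abort the run by throwing.
class MiniBench {
public:
  virtual ~MiniBench() = default;

  bool Exec(unsigned Loops) {
    for (unsigned I = 0; I < Loops; ++I) {
      try {
        SetUp();
        const double Sample = Run();  // The measured section.
        TearDown();
        Samples.push_back(Sample);
      } catch (const std::exception &Ex) {
        std::cerr << "iteration " << I << " failed: " << Ex.what() << '\n';
        return false;  // Stop at the first failure.
      }
    }
    return true;
  }

protected:
  virtual void SetUp() {}
  virtual double Run() = 0;
  virtual void TearDown() {}

  std::vector<double> Samples;
};

// Usage: a trivial concrete benchmark overriding only the mandatory hook.
struct SumBench : MiniBench {
  double Run() override {
    double S = 0.0;
    for (int K = 0; K < 1000000; ++K)
      S += K;
    return S;
  }
};

Example #3

This example computes row-wise slice definitions for a distributed matrix computation; the number of slices is derived from the size of the benchmarked node set and the work-queue size.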
vector<MatrixSlice> MatrixOnlineSchedulingRowwise::getSliceDefinitions(
    const Matrix<float>& result,
    const BenchmarkResult& nodeSet)
{
    return slicer.layout(
        result.rows(),
        result.columns(),
        nodeSet.size() * getWorkQueueSize() * getWorkQueueSize(),
        1);
}
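Example #4

This overload of BenchmarkSuiteRepository::run() threads an IFilter through the repository: each suite gets its own BenchmarkResult wired to the caller's listeners, and the per-suite results are merged back into the aggregate result.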
void BenchmarkSuiteRepository::run(
    const IFilter&      filter,
    BenchmarkResult&    result) const
{
    for (size_t i = 0; i < impl->m_suites.size(); ++i)
    {
        BenchmarkSuite& suite = *impl->m_suites[i];

        // Create a benchmark result for this benchmark suite.
        BenchmarkResult suite_result;
        suite_result.add_listeners(result);

        // Run the benchmark suite: if the filter accepts the suite's name as a
        // whole, run all of its cases; otherwise let the suite apply the filter
        // to individual benchmark cases.
        if (filter.accepts(suite.get_name()))
            suite.run(suite_result);
        else
            suite.run(filter, suite_result);

        // Merge the benchmark suite result into the final benchmark result.
        result.merge(suite_result);
    }
}
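The repository only ever uses the filter through accepts(). A minimal sketch of an allow-list filter follows; the accepts(const char*) signature is an assumption inferred from the calls above, not confirmed by the source.

#include <set>
#include <string>
#include <utility>

// Hypothetical filter: passes exactly the names placed on an allow-list.
class AllowListFilter : public IFilter
{
  public:
    explicit AllowListFilter(std::set<std::string> names)
      : m_names(std::move(names)) {}

    bool accepts(const char* name) const override
    {
        return m_names.count(name) > 0;
    }

  private:
    std::set<std::string> m_names;
};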
Example #5
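This is the core of the benchmarking framework. For every case the filter lets through, it recreates the stopwatch, estimates benchmarking parameters, measures the overhead of calling IBenchmarkCase::run(), measures the iteration runtime, and posts an overhead-corrected TimingResult. In release builds (NDEBUG) exceptions are caught and reported as case failures instead of propagating.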
void BenchmarkSuite::run(
    const IFilter&      filter,
    BenchmarkResult&    suite_result) const
{
    BenchmarkingThreadContext benchmarking_context;
    bool has_begun_suite = false;

    for (size_t i = 0; i < impl->m_factories.size(); ++i)
    {
        IBenchmarkCaseFactory* factory = impl->m_factories[i];

        // Skip benchmark cases that aren't let through by the filter.
        if (!filter.accepts(factory->get_name()))
            continue;

        if (!has_begun_suite)
        {
            // Tell the listeners that a benchmark suite is about to be executed.
            suite_result.begin_suite(*this);
            suite_result.signal_suite_execution();
            has_begun_suite = true;
        }

        // Instantiate the benchmark case.
        auto_ptr<IBenchmarkCase> benchmark(factory->create());

        // Recreate the stopwatch (and the underlying timer) for every benchmark
        // case, since the CPU frequency will fluctuate quite a bit depending on
        // the CPU load.  We need an up-to-date frequency estimation in order to
        // compute accurate call rates.
        Impl::StopwatchType stopwatch(100000);

        // Tell the listeners that a benchmark case is about to be executed.
        suite_result.begin_case(*this, *benchmark.get());

#ifdef NDEBUG
        try
#endif
        {
            suite_result.signal_case_execution();

            // Estimate benchmarking parameters.
            Impl::BenchmarkParams params;
            Impl::estimate_benchmark_params(
                benchmark.get(),
                stopwatch,
                params);

            // Measure the overhead of calling IBenchmarkCase::run().
            const double overhead =
                Impl::measure_call_overhead(stopwatch, params);

            // Run the benchmark case.
            const double execution_time =
                Impl::measure_iteration_runtime(
                    benchmark.get(),
                    stopwatch,
                    params);

            // Gather the timing results.
            TimingResult timing_result;
            timing_result.m_iteration_count = params.m_iteration_count;
            timing_result.m_measurement_count = params.m_measurement_count;
            timing_result.m_frequency = static_cast<double>(stopwatch.get_timer().frequency());
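            // Clamp at zero: for very short benchmarks, timer jitter can make
            // the measured call overhead exceed the measured execution time.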
            timing_result.m_ticks = execution_time > overhead ? execution_time - overhead : 0.0;

            // Post the timing result.
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                timing_result);
        }
#ifdef NDEBUG
        catch (const exception& e)
        {
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                "an unexpected exception was caught: %s.",
                e.what());

            suite_result.signal_case_failure();
        }
        catch (...)
        {
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                "an unexpected exception was caught (no details available).");

            suite_result.signal_case_failure();
        }
#endif

        // Tell the listeners that the benchmark case execution has ended.
        suite_result.end_case(*this, *benchmark.get());
    }

    if (has_begun_suite)
    {
        // Report a benchmark suite failure if one or more benchmark cases failed.
        if (suite_result.get_case_failure_count() > 0)
            suite_result.signal_suite_failure();

        // Tell the listeners that the benchmark suite execution has ended.
        suite_result.end_suite(*this);
    }
}
Example #6
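This is a later revision of the function shown in Example #5: auto_ptr has become unique_ptr, parameter estimation has been reduced to a single measurement count, and an optional GENERATE_BENCHMARK_PLOTS compile-time flag dumps 100 smaller measurement batches to a Gnuplot file so timing stability can be inspected visually.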
void BenchmarkSuite::run(
    const IFilter&      filter,
    BenchmarkResult&    suite_result) const
{
    BenchmarkingThreadContext benchmarking_context;
    bool has_begun_suite = false;

    for (size_t i = 0; i < impl->m_factories.size(); ++i)
    {
        IBenchmarkCaseFactory* factory = impl->m_factories[i];

        // Skip benchmark cases that aren't let through by the filter.
        if (!filter.accepts(factory->get_name()))
            continue;

        if (!has_begun_suite)
        {
            // Tell the listeners that a benchmark suite is about to be executed.
            suite_result.begin_suite(*this);
            suite_result.signal_suite_execution();
            has_begun_suite = true;
        }

        // Instantiate the benchmark case.
        unique_ptr<IBenchmarkCase> benchmark(factory->create());

        // Recreate the stopwatch (and the underlying timer) for every benchmark
        // case, since the CPU frequency will fluctuate quite a bit depending on
        // the CPU load.  We need an up-to-date frequency estimation in order to
        // compute accurate call rates.
        Impl::StopwatchType stopwatch(100000);

        // Tell the listeners that a benchmark case is about to be executed.
        suite_result.begin_case(*this, *benchmark.get());

#ifdef NDEBUG
        try
#endif
        {
            suite_result.signal_case_execution();

            // Estimate benchmarking parameters.
            const size_t measurement_count =
                Impl::compute_measurement_count(benchmark.get(), stopwatch);

            // Measure the overhead of calling IBenchmarkCase::run().
            const double overhead_ticks =
                Impl::measure_call_overhead_ticks(stopwatch, measurement_count);

            // Run the benchmark case.
            const double runtime_ticks =
                Impl::measure_runtime(
                    benchmark.get(),
                    stopwatch,
                    BenchmarkSuite::Impl::measure_runtime_ticks,
                    measurement_count);

#ifdef GENERATE_BENCHMARK_PLOTS
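            // Take 100 smaller measurement batches and write them out as a
            // scatter plot of batch index versus overhead-corrected ticks.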
            vector<Vector2d> points;

            for (size_t j = 0; j < 100; ++j)
            {
                const double ticks =
                    Impl::measure_runtime(
                        benchmark.get(),
                        stopwatch,
                        BenchmarkSuite::Impl::measure_runtime_ticks,
                        max<size_t>(1, measurement_count / 100));
                points.emplace_back(
                    static_cast<double>(j),
                    ticks > overhead_ticks ? ticks - overhead_ticks : 0.0);
            }

            stringstream sstr;
            sstr << "unit benchmarks/plots/";
            sstr << get_name() << "_" << benchmark->get_name();
            sstr << ".gnuplot";

            GnuplotFile plotfile;
            plotfile.new_plot().set_points(points);
            plotfile.write(sstr.str());
#endif

            // Gather the timing results.
            TimingResult timing_result;
            timing_result.m_iteration_count = 1;
            timing_result.m_measurement_count = measurement_count;
            timing_result.m_frequency = static_cast<double>(stopwatch.get_timer().frequency());
            timing_result.m_ticks = runtime_ticks > overhead_ticks ? runtime_ticks - overhead_ticks : 0.0;

            // Post the timing result.
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                timing_result);
        }
#ifdef NDEBUG
        catch (const exception& e)
        {
            if (e.what()[0] != '\0')
            {
                suite_result.write(
                    *this,
                    *benchmark.get(),
                    __FILE__,
                    __LINE__,
                    "an unexpected exception was caught: %s",
                    e.what());
            }
            else
            {
                suite_result.write(
                    *this,
                    *benchmark.get(),
                    __FILE__,
                    __LINE__,
                    "an unexpected exception was caught (no details available).");
            }

            suite_result.signal_case_failure();
        }
        catch (...)
        {
            suite_result.write(
                *this,
                *benchmark.get(),
                __FILE__,
                __LINE__,
                "an unexpected exception was caught (no details available).");

            suite_result.signal_case_failure();
        }
#endif

        // Tell the listeners that the benchmark case execution has ended.
        suite_result.end_case(*this, *benchmark.get());
    }

    if (has_begun_suite)
    {
        // Report a benchmark suite failure if one or more benchmark cases failed.
        if (suite_result.get_case_failure_count() > 0)
            suite_result.signal_suite_failure();

        // Tell the listeners that the benchmark suite execution has ended.
        suite_result.end_suite(*this);
    }
}