Пример #1
0
/* Sanity/perf test for _gtk_rbtree_reorder(): build a tree in shuffled
 * order, reorder it back, then verify every node sits at its sorted
 * position. In perf mode a much larger tree is used and the elapsed
 * time is reported. */
static void
test_reorder (void)
{
  guint count = g_test_perf () ? 1000000 : 100;
  gint *perm;
  GtkRBTree *tree;
  GtkRBNode *iter;
  guint idx;
  double seconds;

  perm = fisher_yates_shuffle (count);
  tree = create_unsorted_tree (perm, count);

  g_test_timer_start ();
  _gtk_rbtree_reorder (tree, perm, count);
  seconds = g_test_timer_elapsed ();

  if (g_test_perf ())
    g_test_minimized_result (seconds, "reordering rbtree with %u items: %gsec", count, seconds);

  /* Internal consistency check of the tree structure. */
  _gtk_rbtree_test (tree);

  /* After reordering, an in-order walk must visit heights 0..count-1. */
  idx = 0;
  for (iter = _gtk_rbtree_first (tree); iter != NULL; iter = _gtk_rbtree_next (tree, iter))
    {
      g_assert (GTK_RBNODE_GET_HEIGHT (iter) == idx);
      idx++;
    }
  g_assert (idx == count);

  _gtk_rbtree_free (tree);
}
Пример #2
0
/* Benchmark: push one million messages through log4g_error() and report
 * the sustained logging rate in messages per second. */
void
perf_002(G_GNUC_UNUSED gpointer *fixture, G_GNUC_UNUSED gconstpointer data)
{
	gint total = 1000000;
	gint i;
	gdouble seconds;

	g_test_timer_start();
	for (i = 0; i < total; ++i) {
		log4g_error("%d log this message", i);
	}
	seconds = g_test_timer_elapsed();

	g_test_minimized_result(seconds, "logged messages, rate=%d/second",
			(gint)(total / seconds));
}
Пример #3
0
/* Baseline benchmark for perf_002: write one million lines directly to a
 * file with fprintf() and report the throughput, for comparison against
 * the log4g logging path.
 *
 * Fixes over the previous version: the fopen() result is now checked
 * (fprintf(NULL, ...) is undefined behavior), and the FILE handle is
 * closed instead of being leaked. */
void
perf_003(G_GNUC_UNUSED gpointer *fixture, G_GNUC_UNUSED gconstpointer data)
{
	gint log = 1000000;
	FILE *file = fopen("tests/file.txt", "w");
	if (file == NULL) {
		/* Abort the test loudly rather than crash in fprintf() below. */
		g_error("perf_003: failed to open tests/file.txt for writing");
		return;
	}
	g_test_timer_start();
	for (gint i = 0; i < log; ++i) {
		fprintf(file, "%d log this message\n", i);
	}
	gdouble e = g_test_timer_elapsed();
	/* Close after stopping the timer so the measurement stays comparable
	 * with earlier runs that did not include the final flush. */
	fclose(file);
	g_test_minimized_result(e, "logged messages, rate=%d/second",
			(gint)(log / e));
}
Пример #4
0
/* Perf test: create n_tasks tasks, attach NUM_IMMEDIATES immediate
 * callbacks to each, run the main loop until all tasks complete, and
 * report the elapsed wall-clock time. Does nothing unless the test
 * suite runs in performance mode (g_test_perf()). */
static void
test_immediate_performance_n_tasks(TestFixture *fixture,
                                   const void  *data,
                                   int          n_tasks)
{
    int i, j;

    if (!g_test_perf())
        return;

    /* this has to be set up front or there's a race in using it to
     * decide to quit mainloop, because task runner starts running
     * tasks right away, doesn't wait for our local mainloop
     */
    fixture->tasks_started_count = n_tasks;

    /* start here, to include task creation. Also, immediates can start
     * running right away, before we block in main loop.
     */
    g_test_timer_start();

    for (i = 0; i < n_tasks; ++i) {
        HrtTask *task;

        task =
            hrt_task_runner_create_task(fixture->runner);

#define NUM_IMMEDIATES 4
        for (j = 0; j < NUM_IMMEDIATES; ++j) {
            hrt_task_add_immediate(task,
                                   on_immediate_for_performance_many_tasks,
                                   fixture,
                                   on_dnotify_bump_count);
        }
    }

    /* Blocks until the quit condition (all tasks completed) is reached. */
    g_main_loop_run(fixture->loop);

    g_test_minimized_result(g_test_timer_elapsed(),
                            "Run %d tasks with %d immediates each",
                            n_tasks, NUM_IMMEDIATES);

    /* Every task must have finished, and every immediate's destroy
     * notify must have fired exactly once. */
    g_assert_cmpint(fixture->tasks_completed_count, ==, n_tasks);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, NUM_IMMEDIATES * n_tasks);

#undef NUM_IMMEDIATES
}
Пример #5
0
/* Perf test: create NUM_TASKS tasks and attach n_watchers immediate
 * callbacks to each (interleaved across tasks — see comment below),
 * then run the main loop to completion and report the elapsed time.
 * Does nothing unless the test suite runs in performance mode. */
static void
test_immediate_performance_n_watchers(TestFixture *fixture,
                                      const void  *data,
                                      int          n_watchers)
{
    int i, j;

    if (!g_test_perf())
        return;

    /* this has to be set up front or there's a race in using it to
     * decide to quit mainloop, because task runner starts running
     * tasks right away, doesn't wait for our local mainloop
     */
    fixture->tasks_started_count = NUM_TASKS;

    /* start here, to include task creation. Also, immediates can start
     * running right away, before we block in main loop.
     */
    g_test_timer_start();

    for (i = 0; i < NUM_TASKS; ++i) {
        HrtTask *task;

        task =
            hrt_task_runner_create_task(fixture->runner);

        fixture->tasks[i].task = task;
    }

    /* If we added n_watchers immediates to task 0, then task 1, then 2,
     * etc.  then we'd never use any parallelism because we'd just
     * have one task active at a time using only one thread.  By doing
     * the loop this way we get some use of multiple threads in
     * theory. Also this is more "real world" in that most likely
     * tasks do some work, add an event loop source, do some work,
     * etc. instead of just adding a pile of sources from the
     * same task all at once. This more "real world" scenario is
     * less efficient and slows down the benchmark.
     */
    for (j = 0; j < n_watchers; ++j) {
        for (i = 0; i < NUM_TASKS; ++i) {
            HrtTask *task = fixture->tasks[i].task;
            hrt_task_add_immediate(task,
                                   on_immediate_for_performance_many_watchers,
                                   fixture,
                                   on_dnotify_bump_count);
        }
    }

    /* Blocks until the quit condition (all tasks completed) is reached. */
    g_main_loop_run(fixture->loop);

    g_test_minimized_result(g_test_timer_elapsed(),
                            "Run %d tasks with %d immediates each",
                            NUM_TASKS, n_watchers);

    /* Every task must have finished, and every immediate's destroy
     * notify must have fired exactly once. */
    g_assert_cmpint(fixture->tasks_completed_count, ==, NUM_TASKS);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, n_watchers * NUM_TASKS);
}