/* the main point of this test is that the immediates run in serial
 * and each runs exactly once
 */
static void
test_one_task_many_immediates(TestFixture *fixture,
                              const void  *data)
{
    HrtTask *task;
    int i;

    task = hrt_task_runner_create_task(fixture->runner);
    fixture->tasks_started_count += 1;

    /* note that we serialize all immediates for a task so making this
     * a large number will take forever
     */
#define NUM_IMMEDIATES 7
    for (i = 0; i < NUM_IMMEDIATES; ++i) {
        hrt_task_add_immediate(task,
                               on_immediate_many_for_one_task,
                               fixture,
                               on_dnotify_bump_count);
    }

    g_main_loop_run(fixture->loop);

    g_assert_cmpint(fixture->tasks_completed_count, ==, 1);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, NUM_IMMEDIATES);

    /* we should have run each immediate exactly once */
    g_assert_cmpint(fixture->tasks[0].immediates_run_count, ==, NUM_IMMEDIATES);

    /* BUGFIX: the macro previously leaked out of this function; a
     * later test redefines NUM_IMMEDIATES to a different value, which
     * is a conflicting macro redefinition. The sibling tests all
     * #undef it, so do the same here.
     */
#undef NUM_IMMEDIATES
}
/* "socket-accepted" signal handler for the HioServer: wraps the newly
 * accepted fd in a fresh task, passing the container's SpiderMonkey
 * runtime to the task as the "runtime" argument.
 */
static gboolean
on_server_socket_accepted(HioServer *server,
                          int        fd,
                          void      *data)
{
    HwfContainer *container;
    HrtTask *connection_task;
    GValue runtime_value = { 0, };

    container = HWF_CONTAINER(data);

    hrt_debug("Creating connection for accepted socket %d", fd);

    connection_task = hrt_task_runner_create_task(container->runner);

    g_value_init(&runtime_value, HJS_TYPE_RUNTIME_SPIDERMONKEY);
    g_value_set_object(&runtime_value, container->runtime);
    hrt_task_add_arg(connection_task, "runtime", &runtime_value);
    g_value_unset(&runtime_value);

    hio_connection_process_socket(HWF_TYPE_CONNECTION_CONTAINER,
                                  connection_task,
                                  fd);

    /* the watchers that hio_connection_process_socket() adds hold
     * their own refs on the task, so we can drop ours now
     */
    g_object_unref(connection_task);

    return TRUE;
}
/* Benchmark helper: creates n_tasks tasks with NUM_IMMEDIATES
 * immediates each and reports the elapsed wall-clock time.
 * Only runs when the suite is in performance mode (g_test_perf()).
 */
static void
test_immediate_performance_n_tasks(TestFixture *fixture,
                                   const void  *data,
                                   int          n_tasks)
{
    int i, j;

    if (!g_test_perf())
        return;

    /* this has to be set up front or there's a race in using it to
     * decide to quit mainloop, because task runner starts running
     * tasks right away, doesn't wait for our local mainloop
     */
    fixture->tasks_started_count = n_tasks;

    /* start here, to include task creation. Also, immediates can start
     * running right away, before we block in main loop.
     */
    g_test_timer_start();

    for (i = 0; i < n_tasks; ++i) {
        HrtTask *task;

        task = hrt_task_runner_create_task(fixture->runner);

#define NUM_IMMEDIATES 4
        for (j = 0; j < NUM_IMMEDIATES; ++j) {
            hrt_task_add_immediate(task,
                                   on_immediate_for_performance_many_tasks,
                                   fixture,
                                   on_dnotify_bump_count);
        }
    }

    g_main_loop_run(fixture->loop);

    g_test_minimized_result(g_test_timer_elapsed(),
                            "Run %d tasks with %d immediates each",
                            n_tasks, NUM_IMMEDIATES);

    /* every task completed, and every immediate's dnotify fired */
    g_assert_cmpint(fixture->tasks_completed_count, ==, n_tasks);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, NUM_IMMEDIATES * n_tasks);
#undef NUM_IMMEDIATES
}
/* Verifies hrt_task_block_completion(): when the fixture asks for
 * blocking, the task must not complete until unblocked (so the
 * completion-check timeout gets a chance to run); otherwise the task
 * completes right away and the timeout must not have run.
 */
static void
test_immediate_block_completion(TestFixture *fixture,
                                const void  *data)
{
    HrtTask *task = hrt_task_runner_create_task(fixture->runner);

    fixture->tasks[0].task = task;
    fixture->tasks_started_count += 1;

    if (fixture->completion_should_be_blocked)
        hrt_task_block_completion(task);

    fixture->tasks[0].watcher =
        hrt_task_add_immediate(task,
                               on_immediate_for_block_completion_test,
                               fixture,
                               on_dnotify_bump_count);

    /* When the task is completed, the main loop quits. From the
     * task, we add a timeout to the main loop. If the main loop quits
     * before the timeout runs, then we completed without waiting to
     * unblock.
     */
    g_main_loop_run(fixture->loop);

    g_source_remove(fixture->completion_check_timeout_id);

    g_assert_cmpint(fixture->tasks_completed_count, ==, 1);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, 1);
    g_assert_cmpint(fixture->tasks[0].immediates_run_count, ==, 1);

    if (fixture->completion_should_be_blocked)
        g_assert(fixture->completion_check_timeout_ran);
    else
        g_assert(!fixture->completion_check_timeout_ran);
}
/* A single immediate watcher that keeps itself alive: the task
 * completes once, the dnotify fires once, but the callback itself
 * must have been invoked SEVERAL_TIMES.
 */
static void
test_immediate_runs_several_times(TestFixture *fixture,
                                  const void  *data)
{
    HrtTask *task = hrt_task_runner_create_task(fixture->runner);

    fixture->tasks_started_count += 1;

    hrt_task_add_immediate(task,
                           on_immediate_runs_several_times,
                           fixture,
                           on_dnotify_bump_count);

    g_main_loop_run(fixture->loop);

    g_assert_cmpint(fixture->tasks_completed_count, ==, 1);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, 1);
    g_assert_cmpint(fixture->times_run, ==, SEVERAL_TIMES);
}
/* A single immediate that sleeps and then returns FALSE: the task
 * still completes exactly once and the immediate runs exactly once.
 */
static void
test_immediate_that_sleeps_return_false(TestFixture *fixture,
                                        const void  *data)
{
    HrtTask *task = hrt_task_runner_create_task(fixture->runner);

    fixture->tasks_started_count += 1;

    hrt_task_add_immediate(task,
                           on_immediate_sleep_return_false,
                           fixture,
                           on_dnotify_bump_count);

    g_main_loop_run(fixture->loop);

    g_assert_cmpint(fixture->tasks_completed_count, ==, 1);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, 1);
    g_assert_cmpint(fixture->tasks[0].immediates_run_count, ==, 1);
}
/* Benchmark helper: creates NUM_TASKS tasks up front, then adds
 * n_watchers immediates to each, interleaved across tasks so multiple
 * threads stay busy. Only runs in performance mode (g_test_perf()).
 */
static void
test_immediate_performance_n_watchers(TestFixture *fixture,
                                      const void  *data,
                                      int          n_watchers)
{
    int i, j;

    if (!g_test_perf())
        return;

    /* this has to be set up front or there's a race in using it to
     * decide to quit mainloop, because task runner starts running
     * tasks right away, doesn't wait for our local mainloop
     */
    fixture->tasks_started_count = NUM_TASKS;

    /* start here, to include task creation. Also, immediates can start
     * running right away, before we block in main loop.
     */
    g_test_timer_start();

    for (i = 0; i < NUM_TASKS; ++i) {
        HrtTask *task;

        task = hrt_task_runner_create_task(fixture->runner);
        fixture->tasks[i].task = task;
    }

    /* If we added n_watchers immediates to task 0, then task 1, then 2,
     * etc. then we'd never use any parallelism because we'd just
     * have one task active at a time using only one thread. By doing
     * the loop this way we get some use of multiple threads in
     * theory. Also this is more "real world" in that most likely
     * tasks do some work, add an event loop source, do some work,
     * etc. instead of just adding a pile of sources from the
     * same task all at once. This more "real world" scenario is
     * less efficient and slows down the benchmark.
     */
    for (j = 0; j < n_watchers; ++j) {
        for (i = 0; i < NUM_TASKS; ++i) {
            HrtTask *task = fixture->tasks[i].task;

            hrt_task_add_immediate(task,
                                   on_immediate_for_performance_many_watchers,
                                   fixture,
                                   on_dnotify_bump_count);
        }
    }

    g_main_loop_run(fixture->loop);

    g_test_minimized_result(g_test_timer_elapsed(),
                            "Run %d tasks with %d immediates each",
                            NUM_TASKS, n_watchers);

    /* every task completed, and every immediate's dnotify fired */
    g_assert_cmpint(fixture->tasks_completed_count, ==, NUM_TASKS);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, n_watchers * NUM_TASKS);
}
/* Runs NUM_TASKS tasks, each with NUM_IMMEDIATES immediates, and
 * checks that every immediate ran exactly once and that immediates
 * belonging to different tasks overlapped in time at least once
 * (i.e. tasks actually ran concurrently).
 */
static void
test_many_tasks_many_immediates(TestFixture *fixture,
                                const void  *data)
{
    int i, j;
    gboolean some_overlap;
    GTimer *timer;
    GString *overlap_report;

    /* this has to be set up front of there's a race in using it to
     * decide to quit mainloop, because task runner starts running
     * tasks right away, doesn't wait for our local mainloop
     */
    fixture->tasks_started_count = NUM_TASKS;

    for (i = 0; i < NUM_TASKS; ++i) {
        HrtTask *task = hrt_task_runner_create_task(fixture->runner);

        fixture->tasks[i].task = task;

        /* note that we serialize all immediates for a task so making this
         * a large number will take forever
         */
#define NUM_IMMEDIATES 7
        for (j = 0; j < NUM_IMMEDIATES; ++j) {
            hrt_task_add_immediate(task,
                                   on_immediate_for_many_tasks,
                                   fixture,
                                   on_dnotify_bump_count);
        }
    }

    timer = g_timer_new();

    g_main_loop_run(fixture->loop);

    /* we don't want an assertion based on timing, will fail too often,
     * but print it for manual checking sometimes.
     */
    g_test_message("%g seconds elapsed to run lots of tasks that should have each taken 0.7 seconds\n",
                   g_timer_elapsed(timer, NULL));
    g_timer_destroy(timer);

    g_assert_cmpint(fixture->tasks_completed_count, ==, NUM_TASKS);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, NUM_IMMEDIATES * NUM_TASKS);

    /* we should have run each immediate exactly once */
    for (i = 0; i < NUM_TASKS; ++i) {
        g_assert(fixture->tasks[i].immediates_run_count == NUM_IMMEDIATES);
    }

    /* unfortunately this isn't strictly guaranteed, but it should be
     * nearly certain. If it keeps failing, increase number of immediates
     * and number of tasks.
     */
    some_overlap = FALSE;
    for (i = 0; i < NUM_TASKS; ++i) {
        if (fixture->tasks[i].saw_another_immediate_in_an_immediate_count > 0) {
            some_overlap = TRUE;
            break;
        }
    }
    g_assert(some_overlap);

    overlap_report = g_string_new(NULL);
    for (i = 0; i < NUM_TASKS; ++i) {
        g_string_append_printf(overlap_report,
                               " %d",
                               fixture->tasks[i].saw_another_immediate_in_an_immediate_count);
    }
    g_test_message("# of immediates of %d run during at least one other task's immediate:\n %s\n",
                   NUM_IMMEDIATES,
                   overlap_report->str);
    g_string_free(overlap_report, TRUE);
#undef NUM_IMMEDIATES
}
/* Common fixture setup for the output-stream tests.
 *
 * Picks the stream descriptions (a table of various descs when a
 * chain is in use, otherwise the single desc passed in via 'data'),
 * creates the main loop and task runner, then distributes tasks over
 * fixture->write_tasks[] / fixture->stream_tasks[] (and, with a
 * chain, fixture->chain_task) according to task_scenario:
 *   ALL_ONE      - one task shared by everything
 *   THREE        - one task for writes, one for streams, one for the chain
 *   ALL_DISTINCT - a separate task per stream and per writer
 *
 * Ref-counting note: each slot in write_tasks[]/stream_tasks[] holds
 * its own ref; the creating-scope ref is either handed to
 * fixture->chain_task or dropped.
 */
static void
setup(OutputTestFixture *fixture,
      const void        *data,
      TaskScenario       task_scenario,
      gboolean           with_chain)
{
    if (with_chain) {
        fixture->stream_descs = &various_stream_descs[0];
        fixture->n_stream_descs = G_N_ELEMENTS(various_stream_descs);
    } else {
        /* without a chain, 'data' is the single stream desc to use */
        fixture->stream_descs = data;
        fixture->n_stream_descs = 1;
    }

    fixture->loop = g_main_loop_new(NULL, FALSE);

    fixture->runner = g_object_new(HRT_TYPE_TASK_RUNNER,
                                   "event-loop-type",
                                   HRT_EVENT_LOOP_EV,
                                   NULL);

    g_signal_connect(G_OBJECT(fixture->runner),
                     "tasks-completed",
                     G_CALLBACK(on_tasks_completed),
                     fixture);

    switch (task_scenario) {
    case TASK_SCENARIO_ALL_ONE:
        {
            HrtTask *task;
            int i;

            fixture->tasks_started_count = 1;

            /* one task shared by every writer and stream slot;
             * each slot takes its own ref
             */
            task = hrt_task_runner_create_task(fixture->runner);
            for (i = 0; i < N_STREAMS_IN_CHAIN; ++i) {
                fixture->write_tasks[i] = g_object_ref(task);
                fixture->stream_tasks[i] = g_object_ref(task);
            }

            if (with_chain) {
                /* chain_task takes over the creation ref */
                fixture->chain_task = task;
            } else {
                g_object_unref(task);
            }
        }
        break;
    case TASK_SCENARIO_THREE:
        {
            HrtTask *task;
            int i;

            /* with no chain there's no chain task so really two ;-) */
            if (with_chain) {
                task = hrt_task_runner_create_task(fixture->runner);
                fixture->chain_task = task;
                fixture->tasks_started_count = 3;
            } else {
                fixture->tasks_started_count = 2;
            }

            /* one task shared by all writers */
            task = hrt_task_runner_create_task(fixture->runner);
            for (i = 0; i < N_STREAMS_IN_CHAIN; ++i) {
                fixture->write_tasks[i] = g_object_ref(task);
            }
            g_object_unref(task);

            /* one task shared by all streams */
            task = hrt_task_runner_create_task(fixture->runner);
            for (i = 0; i < N_STREAMS_IN_CHAIN; ++i) {
                fixture->stream_tasks[i] = g_object_ref(task);
            }
            g_object_unref(task);
        }
        break;
    case TASK_SCENARIO_ALL_DISTINCT:
        {
            int i;

            if (with_chain) {
                HrtTask *task;

                task = hrt_task_runner_create_task(fixture->runner);
                fixture->chain_task = task;
                fixture->tasks_started_count = 1 + N_STREAMS_IN_CHAIN * 2;
            } else {
                fixture->tasks_started_count = N_STREAMS_IN_CHAIN * 2;
            }

            /* a fresh task per writer and per stream; the creation
             * ref is kept by the slot itself
             */
            for (i = 0; i < N_STREAMS_IN_CHAIN; ++i) {
                fixture->write_tasks[i] =
                    hrt_task_runner_create_task(fixture->runner);
                fixture->stream_tasks[i] =
                    hrt_task_runner_create_task(fixture->runner);
            }
        }
        break;
    }

    if (with_chain) {
        fixture->chain = hio_output_chain_new(fixture->chain_task);
    }

    create_socketpair(&fixture->read_fd, &fixture->write_fd);
}