static void test_hash_speed(const void *opaque)
{
    size_t chunk_size = (size_t)opaque;
    uint8_t *in = NULL, *out = NULL;
    size_t out_len = 0;
    double total = 0.0;
    struct iovec iov;
    int ret;

    in = g_new0(uint8_t, chunk_size);
    memset(in, g_test_rand_int(), chunk_size);

    iov.iov_base = (char *)in;
    iov.iov_len = chunk_size;

    g_test_timer_start();
    do {
        ret = qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256,
                                  &iov, 1, &out, &out_len,
                                  NULL);
        g_assert(ret == 0);

        total += chunk_size;
    } while (g_test_timer_elapsed() < 5.0);

    total /= MiB;
    g_print("sha256: ");
    g_print("Testing chunk_size %zu bytes ", chunk_size);
    g_print("done: %.2f MB in %.2f secs: ", total, g_test_timer_last());
    g_print("%.2f MB/sec\n", total / g_test_timer_last());

    g_free(out);
    g_free(in);
}
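/*
 * A minimal sketch (not from the original harness) of how the data-driven
 * benchmark above might be registered. The test paths and chunk sizes are
 * illustrative assumptions; the chunk size is passed through the
 * gconstpointer argument exactly as test_hash_speed() expects.
 */
#include <glib.h>
#include <stdint.h>

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    g_test_add_data_func("/crypto/benchmark/hash-speed-4KB",
                         (void *)(uintptr_t)4096, test_hash_speed);
    g_test_add_data_func("/crypto/benchmark/hash-speed-64KB",
                         (void *)(uintptr_t)65536, test_hash_speed);
    return g_test_run();
}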
void test_creation(GThreadFunc f, sc_int32 count, sc_int thread_count)
{
    int test_count = count / thread_count;
    g_message("Threads count: %d, Test per thread: %d", thread_count, test_count);

    tGThreadVector threads;
    threads.reserve(thread_count);

    print_storage_statistics();

    g_test_timer_start();

    for (size_t i = 0; i < thread_count; ++i)
    {
        GThread * thread = g_thread_try_new(0, f, GINT_TO_POINTER(test_count), 0);
        if (thread == 0)
            continue;
        threads.push_back(thread);
    }

    for (size_t i = 0; i < thread_count; ++i)
        g_assert(GPOINTER_TO_INT(g_thread_join(threads[i])) == test_count);

    printf("Time: %lf\n", g_test_timer_elapsed());

    print_storage_statistics();
}
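/*
 * A hypothetical worker compatible with test_creation(); the body is an
 * assumption and the real work is elided. The contract implied above is
 * that each thread receives its per-thread test count via the data pointer
 * and must return that same count, or the g_thread_join() assertion fails.
 */
gpointer create_node_thread(gpointer data)
{
    int const count = GPOINTER_TO_INT(data);
    for (int i = 0; i < count; ++i)
    {
        /* create one element per iteration (the sc-memory call itself is elided) */
    }
    return GINT_TO_POINTER(count);
}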
static void
test_reorder (void)
{
  guint n = g_test_perf () ? 1000000 : 100;
  GtkRBTree *tree;
  GtkRBNode *node;
  gint *reorder;
  guint i;
  double elapsed;

  reorder = fisher_yates_shuffle (n);
  tree = create_unsorted_tree (reorder, n);

  g_test_timer_start ();

  _gtk_rbtree_reorder (tree, reorder, n);

  elapsed = g_test_timer_elapsed ();
  if (g_test_perf ())
    g_test_minimized_result (elapsed, "reordering rbtree with %u items: %gsec", n, elapsed);

  _gtk_rbtree_test (tree);

  for (node = _gtk_rbtree_first (tree), i = 0;
       node != NULL;
       node = _gtk_rbtree_next (tree, node), i++)
    {
      g_assert (GTK_RBNODE_GET_HEIGHT (node) == i);
    }
  g_assert (i == n);

  _gtk_rbtree_free (tree);
}
static void
wmem_time_allocators(void)
{
    double simple_time, block_time;

    g_test_timer_start();
    wmem_time_allocator(WMEM_ALLOCATOR_SIMPLE);
    simple_time = g_test_timer_elapsed();

    g_test_timer_start();
    wmem_time_allocator(WMEM_ALLOCATOR_BLOCK);
    block_time = g_test_timer_elapsed();

    printf("(simple: %lf; block: %lf) ", simple_time, block_time);
    g_assert(simple_time > block_time);
}
gpointer start_save_threaded(gpointer data)
{
    g_test_timer_start();
    sc_memory_save(s_default_ctx);
    printf("Save time: %lf\n", g_test_timer_elapsed());
    return 0;
}
void
perf_002(G_GNUC_UNUSED gpointer *fixture, G_GNUC_UNUSED gconstpointer data)
{
    gint log = 1000000;

    g_test_timer_start();
    for (gint i = 0; i < log; ++i) {
        log4g_error("%d log this message", i);
    }
    gdouble e = g_test_timer_elapsed();

    g_test_minimized_result(e, "logged messages, rate=%d/second", (gint)(log / e));
}
void
perf_003(G_GNUC_UNUSED gpointer *fixture, G_GNUC_UNUSED gconstpointer data)
{
    gint log = 1000000;
    FILE *file = fopen("tests/file.txt", "w");
    g_assert(file != NULL);

    g_test_timer_start();
    for (gint i = 0; i < log; ++i) {
        fprintf(file, "%d log this message\n", i);
    }
    /* flush and close before reading the timer so buffered writes are
     * counted in the measurement and the handle is not leaked */
    fclose(file);
    gdouble e = g_test_timer_elapsed();

    g_test_minimized_result(e, "logged messages, rate=%d/second", (gint)(log / e));
}
void test_g_test_timer()
{
    double ret_time1, ret_time2;

    g_test_timer_start();
    ret_time1 = g_test_timer_elapsed();
    ret_time2 = g_test_timer_last();

    if (!(ret_time1 == ret_time2))
    {
        std_log(LOG_FILENAME_LINE, "g_test_timer* didn't work as expected");
        assert_failed = 1;
    }
}
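/*
 * The check above relies on the documented GLib timer contract, restated
 * here as a stand-alone sketch: g_test_timer_elapsed() reports seconds
 * since g_test_timer_start(), and g_test_timer_last() repeats whatever the
 * most recent g_test_timer_elapsed() call returned.
 */
#include <glib.h>

static void timer_contract_example(void)
{
    g_test_timer_start();
    double elapsed = g_test_timer_elapsed();
    g_assert_cmpfloat(g_test_timer_last(), ==, elapsed);
}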
static void
test_echo (Fixture *f,
    gconstpointer context G_GNUC_UNUSED)
{
  guint count = 2000;
  guint sent;
  guint received = 0;
  double elapsed;

  if (g_test_perf ())
    count = 100000;

  add_echo_filter (f);

  g_test_timer_start ();

  for (sent = 0; sent < count; sent++)
    {
      DBusMessage *m = dbus_message_new_method_call (
          dbus_bus_get_unique_name (f->right_conn), "/",
          "com.example", "Spam");
      DBusPendingCall *pc;

      if (m == NULL)
        g_error ("OOM");

      if (!dbus_connection_send_with_reply (f->left_conn, m, &pc,
                                            DBUS_TIMEOUT_INFINITE) ||
          pc == NULL)
        g_error ("OOM");

      if (dbus_pending_call_get_completed (pc))
        pc_count (pc, &received);
      else if (!dbus_pending_call_set_notify (pc, pc_count, &received, NULL))
        g_error ("OOM");

      dbus_pending_call_unref (pc);
      dbus_message_unref (m);
    }

  while (received < count)
    g_main_context_iteration (NULL, TRUE);

  elapsed = g_test_timer_elapsed ();

  g_test_maximized_result (count / elapsed, "%u messages / %f seconds",
      count, elapsed);
}
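/*
 * A plausible shape for the pc_count() helper assumed by test_echo(); the
 * body is an assumption, not the original. It is used both as a direct call
 * and as a DBusPendingCallNotifyFunction, so it only needs to bump the
 * counter that test_echo() polls in its main-loop spin.
 */
static void
pc_count (DBusPendingCall *pc,
    void *data)
{
  guint *received = data;

  (*received)++;
}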
static void
test_immediate_performance_n_tasks(TestFixture *fixture,
                                   const void  *data,
                                   int          n_tasks)
{
    int i, j;

    if (!g_test_perf())
        return;

    /* this has to be set up front or there's a race in using it to
     * decide to quit mainloop, because task runner starts running
     * tasks right away, doesn't wait for our local mainloop
     */
    fixture->tasks_started_count = n_tasks;

    /* start here, to include task creation. Also, immediates can start
     * running right away, before we block in main loop.
     */
    g_test_timer_start();

    for (i = 0; i < n_tasks; ++i) {
        HrtTask *task;

        task = hrt_task_runner_create_task(fixture->runner);

#define NUM_IMMEDIATES 4
        for (j = 0; j < NUM_IMMEDIATES; ++j) {
            hrt_task_add_immediate(task,
                                   on_immediate_for_performance_many_tasks,
                                   fixture,
                                   on_dnotify_bump_count);
        }
    }

    g_main_loop_run(fixture->loop);

    g_test_minimized_result(g_test_timer_elapsed(),
                            "Run %d tasks with %d immediates each",
                            n_tasks, NUM_IMMEDIATES);

    g_assert_cmpint(fixture->tasks_completed_count, ==, n_tasks);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, NUM_IMMEDIATES * n_tasks);
#undef NUM_IMMEDIATES
}
static void
magic_uri_performance (void)
{
    gsize i;

    g_test_timer_start ();

    for (i = 0; i < 1000; i++)
    {
        magic_uri_uri ();
        magic_uri_idn ();
        magic_uri_search ();
        magic_uri_pseudo ();
    }

    g_print ("\nTime needed for URI tests: %f ", g_test_timer_elapsed ());
}
static void perf_lifecycle(void)
{
    Coroutine *coroutine;
    unsigned int i, max;
    double duration;

    max = 1000000;

    g_test_timer_start();
    for (i = 0; i < max; i++) {
        coroutine = qemu_coroutine_create(empty_coroutine);
        qemu_coroutine_enter(coroutine, NULL);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Lifecycle %u iterations: %f s\n", max, duration);
}
static void perf_yield(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;
    Coroutine *coroutine = qemu_coroutine_create(yield_loop);

    g_test_timer_start();
    while (i > 0) {
        qemu_coroutine_enter(coroutine, &i);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Yield %u iterations: %f s\n", maxcycles, duration);
}
void test_combined_creation()
{
    int thread_count = g_thread_count;
    int test_count = (g_task_count) / thread_count;
    g_message("Threads count: %d, Test per thread: %d", thread_count, test_count);

    tGThreadVector threads;
    threads.reserve(thread_count);

    s_default_ctx = sc_memory_initialize(&params);
    print_storage_statistics();

    g_test_timer_start();

    for (size_t i = 0; i < thread_count; ++i)
    {
        GThreadFunc f = create_node_thread;
        switch (g_random_int() % 3)
        {
        case 0:
            f = create_link_thread;
            break;
        case 1:
            f = create_arc_thread;
            break;
        default:
            break;
        }

        GThread * thread = g_thread_try_new(0, f, GINT_TO_POINTER(test_count), 0);
        if (thread == 0)
            continue;
        threads.push_back(thread);
    }

    for (size_t i = 0; i < thread_count; ++i)
        g_assert(GPOINTER_TO_INT(g_thread_join(threads[i])) == test_count);

    printf("Time: %lf\n", g_test_timer_elapsed());

    print_storage_statistics();

    sc_memory_shutdown(SC_FALSE);
}
static void
perform_for (GrindFunc grind_func, const char *str, const char *label)
{
  gsize len;
  gulong bytes_ground;
  gdouble time_elapsed;
  gdouble result;

  len = strlen (str);
  bytes_ground = (gulong) len * NUM_ITERATIONS;

  g_test_timer_start ();

  grind_func (str, len);

  time_elapsed = g_test_timer_elapsed ();
  result = ((gdouble) bytes_ground / time_elapsed) * 1.0e-6;

  g_test_maximized_result (result, "%-9s %6.1f MB/s", label, result);
}
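/*
 * perform_for() only times a callback, so any UTF-8 routine can be ground
 * through it. A minimal sketch of such a grinder, assuming the same
 * NUM_ITERATIONS constant that perform_for() uses for its byte count and
 * using g_utf8_validate() as the workload; the function name and the
 * iteration count shown here are illustrative, not from the original suite.
 */
#define NUM_ITERATIONS 500000

static void
grind_utf8_validate (const char *str, gsize len)
{
  gsize i;

  for (i = 0; i < NUM_ITERATIONS; i++)
    g_utf8_validate (str, len, NULL);
}

/* Usage: perform_for (grind_utf8_validate, some_utf8_text, "validate"); */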
static void perf_nesting(void)
{
    unsigned int i, maxcycles, maxnesting;
    double duration;

    maxcycles = 10000;
    maxnesting = 1000;
    Coroutine *root;

    g_test_timer_start();
    for (i = 0; i < maxcycles; i++) {
        NestData nd = {
            .n_enter  = 0,
            .n_return = 0,
            .max      = maxnesting,
        };

        root = qemu_coroutine_create(nest);
        qemu_coroutine_enter(root, &nd);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Nesting %u iterations of %u depth each: %f s\n",
                   maxcycles, maxnesting, duration);
}

/*
 * Yield benchmark
 */

static void coroutine_fn yield_loop(void *opaque)
{
    unsigned int *counter = opaque;

    while ((*counter) > 0) {
        (*counter)--;
        qemu_coroutine_yield();
    }
}
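/*
 * A minimal sketch of how these coroutine benchmarks could be wired into a
 * GLib test binary; the test paths are assumptions. Guarding the perf cases
 * with g_test_perf() keeps them out of quick test runs, since g_test_perf()
 * is only TRUE when the binary runs in performance mode (e.g. -m perf).
 */
int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);
    if (g_test_perf()) {
        g_test_add_func("/perf/lifecycle", perf_lifecycle);
        g_test_add_func("/perf/nesting", perf_nesting);
        g_test_add_func("/perf/yield", perf_yield);
    }
    return g_test_run();
}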
static void
test_immediate_performance_n_watchers(TestFixture *fixture,
                                      const void  *data,
                                      int          n_watchers)
{
    int i, j;

    if (!g_test_perf())
        return;

    /* this has to be set up front or there's a race in using it to
     * decide to quit mainloop, because task runner starts running
     * tasks right away, doesn't wait for our local mainloop
     */
    fixture->tasks_started_count = NUM_TASKS;

    /* start here, to include task creation. Also, immediates can start
     * running right away, before we block in main loop.
     */
    g_test_timer_start();

    for (i = 0; i < NUM_TASKS; ++i) {
        HrtTask *task;

        task = hrt_task_runner_create_task(fixture->runner);

        fixture->tasks[i].task = task;
    }

    /* If we added n_watchers immediates to task 0, then task 1, then 2,
     * etc. then we'd never use any parallelism because we'd just
     * have one task active at a time using only one thread. By doing
     * the loop this way we get some use of multiple threads in
     * theory. Also this is more "real world" in that most likely
     * tasks do some work, add an event loop source, do some work,
     * etc. instead of just adding a pile of sources from the
     * same task all at once. This more "real world" scenario is
     * less efficient and slows down the benchmark.
     */
    for (j = 0; j < n_watchers; ++j) {
        for (i = 0; i < NUM_TASKS; ++i) {
            HrtTask *task = fixture->tasks[i].task;

            hrt_task_add_immediate(task,
                                   on_immediate_for_performance_many_watchers,
                                   fixture,
                                   on_dnotify_bump_count);
        }
    }

    g_main_loop_run(fixture->loop);

    g_test_minimized_result(g_test_timer_elapsed(),
                            "Run %d tasks with %d immediates each",
                            NUM_TASKS, n_watchers);

    g_assert_cmpint(fixture->tasks_completed_count, ==, NUM_TASKS);
    g_assert_cmpint(fixture->tasks_completed_count, ==,
                    fixture->tasks_started_count);
    g_assert_cmpint(fixture->dnotify_count, ==, n_watchers * NUM_TASKS);
}