tb_pool_ref_t tb_pool_init(tb_allocator_ref_t allocator, tb_large_pool_ref_t large_pool)
{
    // done
    tb_pool_impl_t* pool = tb_null;
    tb_bool_t       done = tb_false;
    do
    {
        // an explicit allocator takes precedence: the pool becomes a thin
        // wrapper around it and needs no lock or small pool of its own
        if (allocator)
        {
            // make pool from the allocator
            pool = (tb_pool_impl_t*)tb_allocator_malloc0(allocator, sizeof(tb_pool_impl_t));
            tb_assert_and_check_break(pool);

            // save allocator
            pool->allocator = allocator;

            // ok
            done = tb_true;
            break;
        }

        // no large pool given? fall back to the global default
        if (!large_pool) large_pool = tb_large_pool();
        tb_assert_and_check_break(large_pool);

        // make pool from the large pool
        pool = (tb_pool_impl_t*)tb_large_pool_malloc0(large_pool, sizeof(tb_pool_impl_t), tb_null);
        tb_assert_and_check_break(pool);

        // init lock
        if (!tb_spinlock_init(&pool->lock)) break;

        // wire up the large pool and a small pool layered on top of it
        pool->large_pool = large_pool;
        pool->small_pool = tb_small_pool_init(large_pool);
        tb_assert_and_check_break(pool->small_pool);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&pool->lock, TB_TRACE_MODULE_NAME);
#endif

        // ok
        done = tb_true;

    } while (0);

    // failed? roll back the partially constructed pool
    if (!done)
    {
        if (pool) tb_pool_exit((tb_pool_ref_t)pool);
        pool = tb_null;
    }

    // ok?
    return (tb_pool_ref_t)pool;
}
tb_void_t tb_demo_small_allocator_perf() { // done tb_allocator_ref_t small_allocator = tb_null; tb_allocator_ref_t large_allocator = tb_null; do { // init small allocator small_allocator = tb_small_allocator_init(tb_null); tb_assert_and_check_break(small_allocator); // init large allocator large_allocator = tb_large_allocator_init(tb_null, 0); tb_assert_and_check_break(large_allocator); // make data list tb_size_t maxn = 100000; tb_pointer_t* list = (tb_pointer_t*)tb_allocator_large_nalloc0(large_allocator, maxn, sizeof(tb_pointer_t), tb_null); tb_assert_and_check_break(list); // done __tb_volatile__ tb_size_t indx = 0; __tb_volatile__ tb_hong_t time = tb_mclock(); __tb_volatile__ tb_size_t rand = 0xbeaf; for (indx = 0; indx < maxn; indx++) { // make data list[indx] = tb_allocator_malloc0(small_allocator, (rand & 3071) + 1); tb_assert_and_check_break(list[indx]); // make rand rand = (rand * 10807 + 1) & 0xffffffff; // re-make data if (!(indx & 31)) { list[indx] = tb_allocator_ralloc(small_allocator, list[indx], (rand & 3071) + 1); tb_assert_and_check_break(list[indx]); } // free data __tb_volatile__ tb_size_t size = rand & 15; if (size > 5 && indx) { size -= 5; while (size--) { // the free index tb_size_t free_indx = rand % indx; // free it if (list[free_indx]) tb_allocator_free(small_allocator, list[free_indx]); list[free_indx] = tb_null; } } } time = tb_mclock() - time; #ifdef __tb_debug__ // dump small_allocator tb_allocator_dump(small_allocator); #endif // trace tb_trace_i("time: %lld ms", time); // clear small_allocator tb_allocator_clear(small_allocator); // exit list tb_allocator_large_free(large_allocator, list); } while (0); // exit small allocator if (small_allocator) tb_allocator_exit(small_allocator); small_allocator = tb_null; // exit large allocator if (large_allocator) tb_allocator_exit(large_allocator); large_allocator = tb_null; }