tb_allocator_ref_t tb_default_allocator_init(tb_allocator_ref_t large_allocator)
{
    // the large allocator backs all internal storage, so it is mandatory
    tb_assert_and_check_return_val(large_allocator, tb_null);

    tb_bool_t                  succeed = tb_false;
    tb_default_allocator_ref_t impl    = tb_null;
    do
    {
        // allocate zero-filled storage for the allocator object itself
        impl = (tb_default_allocator_ref_t)tb_allocator_large_malloc0(large_allocator, sizeof(tb_default_allocator_t), tb_null);
        tb_assert_and_check_break(impl);

        // wire up the base allocator vtable
        impl->base.type   = TB_ALLOCATOR_TYPE_DEFAULT;
        impl->base.flag   = TB_ALLOCATOR_FLAG_NONE;
        impl->base.malloc = tb_default_allocator_malloc;
        impl->base.ralloc = tb_default_allocator_ralloc;
        impl->base.free   = tb_default_allocator_free;
        impl->base.exit   = tb_default_allocator_exit;
#ifdef __tb_debug__
        impl->base.dump   = tb_default_allocator_dump;
        impl->base.have   = tb_default_allocator_have;
#endif

        // the spinlock serializes concurrent calls through the base allocator
        if (!tb_spinlock_init(&impl->base.lock)) break;

        // attach the backing allocators: large directly, small layered on top of it
        impl->large_allocator = large_allocator;
        impl->small_allocator = tb_small_allocator_init(large_allocator);
        tb_assert_and_check_break(impl->small_allocator);

#ifdef TB_LOCK_PROFILER_ENABLE
        // make the lock visible to the lock profiler
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->base.lock, TB_TRACE_MODULE_NAME);
#endif

        // every step completed
        succeed = tb_true;

    } while (0);

    // roll back the partially constructed allocator on any failure
    if (!succeed)
    {
        if (impl) tb_default_allocator_exit((tb_allocator_ref_t)impl);
        impl = tb_null;
    }

    // return the new allocator, or tb_null on failure
    return (tb_allocator_ref_t)impl;
}
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_fixed_pool_ref_t tb_fixed_pool_init_(tb_allocator_ref_t large_allocator, tb_size_t slot_size, tb_size_t item_size, tb_bool_t for_small, tb_fixed_pool_item_init_func_t item_init, tb_fixed_pool_item_exit_func_t item_exit, tb_cpointer_t priv) { // check tb_assert_and_check_return_val(item_size, tb_null); // done tb_bool_t ok = tb_false; tb_fixed_pool_t* pool = tb_null; do { // no allocator? uses the global allocator if (!large_allocator) large_allocator = tb_allocator(); tb_assert_and_check_break(large_allocator); // make pool pool = (tb_fixed_pool_t*)tb_allocator_large_malloc0(large_allocator, sizeof(tb_fixed_pool_t), tb_null); tb_assert_and_check_break(pool); // init pool pool->large_allocator = large_allocator; pool->slot_size = slot_size? slot_size : (tb_page_size() >> 4); pool->item_size = item_size; pool->func_init = item_init; pool->func_exit = item_exit; pool->func_priv = priv; pool->for_small = for_small; tb_assert_and_check_break(pool->slot_size); // init partial slots tb_list_entry_init(&pool->partial_slots, tb_fixed_pool_slot_t, entry, tb_null); // init full slots tb_list_entry_init(&pool->full_slots, tb_fixed_pool_slot_t, entry, tb_null); // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (pool) tb_fixed_pool_exit((tb_fixed_pool_ref_t)pool); pool = tb_null; } // ok? return (tb_fixed_pool_ref_t)pool; }