/* Remove the given slot from the pool's sorted slot list and release its
 * storage back to the large pool.
 *
 * The slot must currently be present in impl->slot_list: it is located by
 * binary search, the trailing list entries are shifted down to close the
 * gap, the slot count is decremented, and the slot memory is freed.
 */
static tb_void_t tb_fixed_pool_slot_exit(tb_fixed_pool_impl_t* impl, tb_fixed_pool_slot_t* slot)
{
    // check
    tb_assert_and_check_return(impl && impl->large_pool && slot);
    tb_assert_and_check_return(impl->slot_list && impl->slot_count);

    // trace
    tb_trace_d("slot[%lu]: exit: size: %lu", impl->item_size, slot->size);

    // wrap the slot list as a pointer-array iterator for searching
    tb_iterator_t list_iterator = tb_iterator_init_ptr((tb_pointer_t*)impl->slot_list, impl->slot_count);

    // locate this slot inside the sorted slot list
    tb_size_t found = tb_binary_find_all(&list_iterator, (tb_cpointer_t)slot);
    tb_assert_abort(found != tb_iterator_tail(&list_iterator) && found < impl->slot_count && impl->slot_list[found]);
    tb_check_return(found != tb_iterator_tail(&list_iterator) && found < impl->slot_count && impl->slot_list[found]);

    // close the gap left by the removed entry
    tb_size_t tail_count = impl->slot_count - found - 1;
    if (tail_count) tb_memmov_(impl->slot_list + found, impl->slot_list + found + 1, tail_count * sizeof(tb_fixed_pool_slot_t*));

    // one slot less now
    impl->slot_count--;

    // release the slot storage
    tb_large_pool_free(impl->large_pool, slot);
}
/* Remove the given slot from the pool's sorted slot list and release its
 * storage back to the large allocator.
 *
 * The slot must currently be present in pool->slot_list: it is located by
 * binary search, the trailing list entries are shifted down to close the
 * gap, the slot count is decremented, and the slot memory is freed.
 */
static tb_void_t tb_fixed_pool_slot_exit(tb_fixed_pool_t* pool, tb_fixed_pool_slot_t* slot)
{
    // check
    tb_assert_and_check_return(pool && pool->large_allocator && slot);
    tb_assert_and_check_return(pool->slot_list && pool->slot_count);

    // trace
    tb_trace_d("slot[%lu]: exit: size: %lu", pool->item_size, slot->size);

    // wrap the slot list as a pointer-array iterator for searching
    tb_array_iterator_t slot_array_iter;
    tb_iterator_ref_t iter = tb_iterator_make_for_ptr(&slot_array_iter, (tb_pointer_t*)pool->slot_list, pool->slot_count);
    tb_assert(iter);

    // locate this slot inside the sorted slot list
    tb_size_t found = tb_binary_find_all(iter, (tb_cpointer_t)slot);
    tb_assert(found != tb_iterator_tail(iter) && found < pool->slot_count && pool->slot_list[found]);
    tb_check_return(found != tb_iterator_tail(iter) && found < pool->slot_count && pool->slot_list[found]);

    // close the gap left by the removed entry
    tb_size_t tail_count = pool->slot_count - found - 1;
    if (tail_count) tb_memmov_(pool->slot_list + found, pool->slot_list + found + 1, tail_count * sizeof(tb_fixed_pool_slot_t*));

    // one slot less now
    pool->slot_count--;

    // release the slot storage
    tb_allocator_large_free(pool->large_allocator, slot);
}
/* Allocate and initialize a new slot for the fixed pool.
 *
 * A slot is one large-pool allocation holding a tb_fixed_pool_slot_t header
 * followed by a static fixed pool of impl->slot_size items. The new slot is
 * inserted into impl->slot_list, which is kept sorted by slot address so
 * tb_fixed_pool_slot_exit() can binary-search it.
 *
 * Returns the new slot, or tb_null on failure.
 */
static tb_fixed_pool_slot_t* tb_fixed_pool_slot_init(tb_fixed_pool_impl_t* impl)
{
    // check
    tb_assert_and_check_return_val(impl && impl->large_pool && impl->slot_size && impl->item_size, tb_null);

    // done
    tb_bool_t               ok = tb_false;
    tb_fixed_pool_slot_t*   slot = tb_null;
    do
    {
#ifdef __tb_debug__
        // init patch for checking underflow
        tb_size_t patch = 1;
#else
        tb_size_t patch = 0;
#endif

        // the item space: data header + payload (+ underflow patch byte in debug)
        tb_size_t item_space = sizeof(tb_pool_data_head_t) + impl->item_size + patch;

        // the need space: slot header + all items
        tb_size_t need_space = sizeof(tb_fixed_pool_slot_t) + impl->slot_size * item_space;

        // make slot (real_space receives the actual size granted, >= need_space)
        tb_size_t real_space = 0;
        slot = (tb_fixed_pool_slot_t*)tb_large_pool_malloc(impl->large_pool, need_space, &real_space);
        tb_assert_and_check_break(slot);
        tb_assert_and_check_break(real_space > sizeof(tb_fixed_pool_slot_t) + item_space);

#ifdef __tb_debug__
        // remove the debug space size from the large pool's accounting
        tb_large_pool_diff(impl->large_pool, -(tb_long_t)(impl->slot_size * (TB_POOL_DATA_HEAD_DIFF_SIZE + patch)));
#endif

        // init slot: the static fixed pool lives in the space right after the header
        slot->size = real_space;
        slot->pool = tb_static_fixed_pool_init((tb_byte_t*)&slot[1], real_space - sizeof(tb_fixed_pool_slot_t), impl->item_size, impl->for_small_pool);
        tb_assert_and_check_break(slot->pool);

        // no list?
        if (!impl->slot_list)
        {
            // init the slot list with an initial capacity of 64 entries
            tb_size_t size = 0;
            impl->slot_list = (tb_fixed_pool_slot_t**)tb_large_pool_nalloc(impl->large_pool, 64, sizeof(tb_fixed_pool_slot_t*), &size);
            tb_assert_and_check_break(impl->slot_list && size);

            // init the slot count
            impl->slot_count = 0;

            // init the slot space (capacity in entries, derived from the granted size)
            impl->slot_space = size / sizeof(tb_fixed_pool_slot_t*);
            tb_assert_and_check_break(impl->slot_space);
        }
        // no enough space?
        else if (impl->slot_count == impl->slot_space)
        {
            // grow the slot list (double the capacity)
            tb_size_t size = 0;
            impl->slot_list = (tb_fixed_pool_slot_t**)tb_large_pool_ralloc(impl->large_pool, impl->slot_list, (impl->slot_space << 1) * sizeof(tb_fixed_pool_slot_t*), &size);
            tb_assert_and_check_break(impl->slot_list && size);

            // update the slot space
            impl->slot_space = size / sizeof(tb_fixed_pool_slot_t*);
            tb_assert_and_check_break(impl->slot_space);
        }

        // check
        tb_assert_and_check_break(impl->slot_count < impl->slot_space);

        // insert the slot to the slot list in the increasing order (linear scan)
        tb_size_t i = 0;
        tb_size_t n = impl->slot_count;
        for (i = 0; i < n; i++) if (slot <= impl->slot_list[i]) break;
        if (i < n) tb_memmov_(impl->slot_list + i + 1, impl->slot_list + i, (n - i) * sizeof(tb_fixed_pool_slot_t*));
        impl->slot_list[i] = slot;

        // update the slot count
        impl->slot_count++;

        // trace
        tb_trace_d("slot[%lu]: init: size: %lu => %lu, item: %lu => %lu", impl->item_size, need_space, real_space, impl->slot_size, tb_static_fixed_pool_maxn(slot->pool));

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        /* NOTE(review): every break above happens BEFORE the slot is inserted
         * into impl->slot_list, but tb_fixed_pool_slot_exit() requires the slot
         * to be present in that list (its checks bail out otherwise without
         * freeing) — so this failure path may leak the slot allocation; verify
         * against tb_fixed_pool_slot_exit's contract. */
        // exit it
        if (slot) tb_fixed_pool_slot_exit(impl, slot);
        slot = tb_null;
    }

    // ok?
    return slot;
}
/* Allocate and initialize a new slot for the fixed pool.
 *
 * A slot is one large-allocator allocation holding a tb_fixed_pool_slot_t
 * header followed by a static fixed pool of pool->slot_size items. The new
 * slot is inserted into pool->slot_list, which is kept sorted by slot
 * address so tb_fixed_pool_slot_exit() can binary-search it.
 *
 * Returns the new slot, or tb_null on failure.
 */
static tb_fixed_pool_slot_t* tb_fixed_pool_slot_init(tb_fixed_pool_t* pool)
{
    // check
    tb_assert_and_check_return_val(pool && pool->large_allocator && pool->slot_size && pool->item_size, tb_null);

    // done
    tb_bool_t               ok = tb_false;
    tb_fixed_pool_slot_t*   slot = tb_null;
    do
    {
#ifdef __tb_debug__
        // init patch for checking underflow
        tb_size_t patch = 1;
#else
        tb_size_t patch = 0;
#endif

        // the item space: data header + payload (+ underflow patch byte in debug)
        tb_size_t item_space = sizeof(tb_pool_data_head_t) + pool->item_size + patch;

        // the need space: slot header + all items
        tb_size_t need_space = sizeof(tb_fixed_pool_slot_t) + pool->slot_size * item_space;

        // make slot (real_space receives the actual size granted, >= need_space)
        tb_size_t real_space = 0;
        slot = (tb_fixed_pool_slot_t*)tb_allocator_large_malloc(pool->large_allocator, need_space, &real_space);
        tb_assert_and_check_break(slot);
        tb_assert_and_check_break(real_space > sizeof(tb_fixed_pool_slot_t) + item_space);

        // init slot: the static fixed pool lives in the space right after the header
        slot->size = real_space;
        slot->pool = tb_static_fixed_pool_init((tb_byte_t*)&slot[1], real_space - sizeof(tb_fixed_pool_slot_t), pool->item_size, pool->for_small);
        tb_assert_and_check_break(slot->pool);

        // no list?
        if (!pool->slot_list)
        {
            // init the slot list with an initial capacity of 64 entries
            tb_size_t size = 0;
            pool->slot_list = (tb_fixed_pool_slot_t**)tb_allocator_large_nalloc(pool->large_allocator, 64, sizeof(tb_fixed_pool_slot_t*), &size);
            tb_assert_and_check_break(pool->slot_list && size);

            // init the slot count
            pool->slot_count = 0;

            // init the slot space (capacity in entries, derived from the granted size)
            pool->slot_space = size / sizeof(tb_fixed_pool_slot_t*);
            tb_assert_and_check_break(pool->slot_space);
        }
        // no enough space?
        else if (pool->slot_count == pool->slot_space)
        {
            // grow the slot list (double the capacity)
            tb_size_t size = 0;
            pool->slot_list = (tb_fixed_pool_slot_t**)tb_allocator_large_ralloc(pool->large_allocator, pool->slot_list, (pool->slot_space << 1) * sizeof(tb_fixed_pool_slot_t*), &size);
            tb_assert_and_check_break(pool->slot_list && size);

            // update the slot space
            pool->slot_space = size / sizeof(tb_fixed_pool_slot_t*);
            tb_assert_and_check_break(pool->slot_space);
        }

        // check
        tb_assert_and_check_break(pool->slot_count < pool->slot_space);

        // insert the slot to the slot list in the increasing order (TODO binary search)
        tb_size_t i = 0;
        tb_size_t n = pool->slot_count;
        for (i = 0; i < n; i++) if (slot <= pool->slot_list[i]) break;
        if (i < n) tb_memmov_(pool->slot_list + i + 1, pool->slot_list + i, (n - i) * sizeof(tb_fixed_pool_slot_t*));
        pool->slot_list[i] = slot;

        // update the slot count
        pool->slot_count++;

        // trace
        tb_trace_d("slot[%lu]: init: size: %lu => %lu, item: %lu => %lu", pool->item_size, need_space, real_space, pool->slot_size, tb_static_fixed_pool_maxn(slot->pool));

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        /* NOTE(review): every break above happens BEFORE the slot is inserted
         * into pool->slot_list, but tb_fixed_pool_slot_exit() requires the slot
         * to be present in that list (its checks bail out otherwise without
         * freeing) — so this failure path may leak the slot allocation; verify
         * against tb_fixed_pool_slot_exit's contract. */
        // exit it
        if (slot) tb_fixed_pool_slot_exit(pool, slot);
        slot = tb_null;
    }

    // ok?
    return slot;
}