/* free data from the default allocator
 *
 * routes the pointer to the small or large allocator based on the
 * allocation size recorded in the pool-data head stored immediately
 * before the user pointer.
 *
 * @param self      the allocator
 * @param data      the data pointer to free
 *
 * @return          tb_true on success, tb_false otherwise
 */
static tb_bool_t tb_default_allocator_free(tb_allocator_ref_t self, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_default_allocator_ref_t allocator = (tb_default_allocator_ref_t)self;
    tb_assert_and_check_return_val(allocator, tb_false);

    // check
    tb_assert_and_check_return_val(allocator->large_allocator && allocator->small_allocator && data, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // the data head sits immediately before the user pointer
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);

        /* use tb_assertf_break (not plain tb_assertf) so that an invalid
         * magic bails out and returns tb_false instead of freeing a block
         * with a corrupt header — consistent with tb_pool_free_ */
        tb_assertf_break(data_head->debug.magic == TB_POOL_DATA_MAGIC, "free invalid data: %p", data);

        // free it: route by the recorded allocation size
        ok = (data_head->size <= TB_SMALL_ALLOCATOR_DATA_MAXN)? tb_allocator_free_(allocator->small_allocator, data __tb_debug_args__) : tb_allocator_large_free_(allocator->large_allocator, data __tb_debug_args__);

    } while (0);

    // ok?
    return ok;
}
/* free data from the pool
 *
 * delegates to the attached allocator when one is set; otherwise routes
 * the pointer to the small or large pool based on the size recorded in
 * the pool-data head, under the pool spinlock.
 *
 * @param pool      the pool
 * @param data      the data pointer to free
 *
 * @return          tb_true on success, tb_false otherwise
 */
tb_bool_t tb_pool_free_(tb_pool_ref_t pool, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_pool_impl_t* impl = (tb_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_false);

    // uses allocator? bypass the pools (and the lock) entirely
    if (impl->allocator) return tb_allocator_free_(impl->allocator, data __tb_debug_args__);

    // check
    tb_assert_and_check_return_val(impl->large_pool && impl->small_pool && data, tb_false);

    // enter
    tb_spinlock_enter(&impl->lock);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // the data head sits immediately before the user pointer
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(data_head->debug.magic == TB_POOL_DATA_MAGIC, "free invalid data: %p", data);

        // free it: route by the recorded allocation size
        ok = (data_head->size <= TB_SMALL_POOL_DATA_SIZE_MAXN)? tb_small_pool_free_(impl->small_pool, data __tb_debug_args__) : tb_large_pool_free_(impl->large_pool, data __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!ok)
    {
        // trace (func_/line_/file_ come from __tb_debug_decl__)
        tb_trace_e("free(%p) failed! at %s(): %lu, %s", data, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[pool]: [error]: ");

        // abort (NOTE: process terminates here, spinlock intentionally not released)
        tb_abort();
    }
#endif

    // leave
    tb_spinlock_leave(&impl->lock);

    // ok?
    return ok;
}
/* reallocate data from the default allocator
 *
 * handles four cases based on the old and new sizes:
 *   - small => small and large => large delegate to the underlying
 *     allocator's ralloc;
 *   - small => large and large => small allocate from the target
 *     allocator, copy min(old size, new size) bytes, then free the
 *     old block.
 *
 * on failure tb_null is returned and the original data is left
 * untouched (standard realloc semantics).
 *
 * @param self      the allocator
 * @param data      the old data pointer (tb_null to malloc directly)
 * @param size      the new size (must be non-zero)
 *
 * @return          the new data pointer, or tb_null on failure
 */
static tb_pointer_t tb_default_allocator_ralloc(tb_allocator_ref_t self, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_default_allocator_ref_t allocator = (tb_default_allocator_ref_t)self;
    tb_assert_and_check_return_val(allocator, tb_null);

    // check (allocator itself is already validated above)
    tb_assert_and_check_return_val(allocator->large_allocator && allocator->small_allocator && size, tb_null);

    // done
    tb_pointer_t data_new = tb_null;
    do
    {
        // no data? malloc it directly from the allocator matching the size
        if (!data)
        {
            data_new = size <= TB_SMALL_ALLOCATOR_DATA_MAXN? tb_allocator_malloc_(allocator->small_allocator, size __tb_debug_args__) : tb_allocator_large_malloc_(allocator->large_allocator, size, tb_null __tb_debug_args__);
            break;
        }

        // the data head sits immediately before the user pointer
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);

        /* use tb_assertf_break (not plain tb_assertf) so that an invalid
         * magic bails out with tb_null instead of reading a size from a
         * corrupt header — consistent with tb_pool_free_ */
        tb_assertf_break(data_head->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);
        tb_assert_and_check_break(data_head->size);

        // small => small
        if (data_head->size <= TB_SMALL_ALLOCATOR_DATA_MAXN && size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
            data_new = tb_allocator_ralloc_(allocator->small_allocator, data, size __tb_debug_args__);
        // small => large: migrate the block to the large allocator
        else if (data_head->size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
        {
            // make the new data (on failure: break with tb_null, old data kept)
            data_new = tb_allocator_large_malloc_(allocator->large_allocator, size, tb_null __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data (only the bytes both sizes cover)
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_allocator_free_(allocator->small_allocator, data __tb_debug_args__);
        }
        // large => small: migrate the block to the small allocator
        else if (size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
        {
            // make the new data (on failure: break with tb_null, old data kept)
            data_new = tb_allocator_malloc_(allocator->small_allocator, size __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data (only the bytes both sizes cover)
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_allocator_large_free_(allocator->large_allocator, data __tb_debug_args__);
        }
        // large => large
        else data_new = tb_allocator_large_ralloc_(allocator->large_allocator, data, size, tb_null __tb_debug_args__);

    } while (0);

    // ok?
    return data_new;
}