/* Validate the integrity of a single allocated block.
 *
 * Verifies that the block is still marked in-use, that its debug magic
 * value is intact, and that the trailing patch byte (overflow canary)
 * has not been clobbered. On any failure the block is dumped and the
 * process aborts.
 *
 * @param allocator     the static large allocator (must not be tb_null)
 * @param data_head     the data head of the block to check
 */
static tb_void_t tb_static_large_allocator_check_data(tb_static_large_allocator_ref_t allocator, tb_static_large_data_head_t const* data_head)
{
    // check
    tb_assert_and_check_return(allocator && data_head);

    // the user data follows the data head directly
    tb_byte_t const* data  = (tb_byte_t const*)&(data_head[1]);
    tb_bool_t        valid = tb_false;
    do
    {
        // the base head
        tb_pool_data_head_t* base_head = tb_static_large_allocator_data_base(data_head);

        // the block must still be in use
        tb_assertf_pass_break(!data_head->bfree, "data have been freed: %p", data);

        // the debug magic must be intact
        tb_assertf_pass_break(base_head->debug.magic == TB_POOL_DATA_MAGIC, "the invalid data: %p", data);

        // the patch byte just past the user size must not be overwritten
        tb_assertf_pass_break(((tb_byte_t*)data)[base_head->size] == TB_POOL_DATA_PATCH, "data underflow");

        // all checks passed
        valid = tb_true;

    } while (0);

    // corrupted? dump the block and abort
    if (!valid)
    {
        // dump data
        tb_pool_data_dump(data, tb_true, "[static_large_allocator]: [error]: ");

        // abort
        tb_abort();
    }
}
/* Validate the integrity of a single pool item.
 *
 * Verifies that the item is aligned on an item-slot boundary, that its
 * slot is currently marked used, that the debug magic value is intact,
 * and that the trailing patch byte (canary) has not been clobbered.
 * On any failure the item is dumped and the process aborts.
 *
 * @param pool          the fixed pool (must not be tb_null)
 * @param data_head     the data head of the item to check
 */
static tb_void_t tb_static_fixed_pool_check_data(tb_static_fixed_pool_t* pool, tb_pool_data_empty_head_t const* data_head)
{
    // check
    tb_assert_and_check_return(pool && data_head);

    // the user data sits data_head_size bytes past the head
    tb_byte_t const* data  = (tb_byte_t const*)data_head + pool->data_head_size;
    tb_bool_t        valid = tb_false;
    do
    {
        // the slot index of this item within the pool storage
        tb_size_t index = ((tb_byte_t*)data_head - pool->data) / pool->item_space;

        // the item must start exactly on an item-slot boundary
        tb_assertf_pass_break(!(((tb_byte_t*)data_head - pool->data) % pool->item_space), "the invalid data: %p", data);

        // the slot must still be marked as used
        tb_assertf_pass_break(tb_static_fixed_pool_used_bset(pool->used_info, index), "data have been freed: %p", data);

        // the debug magic must be intact (small pools use the full magic)
        tb_assertf_pass_break(data_head->debug.magic == (pool->for_small? TB_POOL_DATA_MAGIC : TB_POOL_DATA_EMPTY_MAGIC), "the invalid data: %p", data);

        // the patch byte just past the item size must not be overwritten
        tb_assertf_pass_break(((tb_byte_t*)data)[pool->item_size] == TB_POOL_DATA_PATCH, "data underflow");

        // all checks passed
        valid = tb_true;

    } while (0);

    // corrupted? dump the item and abort
    if (!valid)
    {
        // dump data
        tb_pool_data_dump(data, tb_true, "[static_fixed_pool]: [error]: ");

        // abort
        tb_abort();
    }
}
/* Free a previously allocated item back to the fixed pool.
 *
 * Validates the pointer (range, alignment, double-free, debug magic,
 * trailing patch byte) before releasing it; in debug builds it also
 * checks the neighbouring items and updates the pool statistics.
 *
 * Fix: the tb_trace_e format string was split by a raw newline, which
 * is not a valid C string literal; it is restored to a single line.
 *
 * @param self          the fixed pool
 * @param data          the user data pointer to free
 *                      (__tb_debug_decl__ adds file_/func_/line_ in debug builds)
 *
 * @return              tb_true if the item was freed, tb_false on any
 *                      validation failure (debug builds abort instead)
 */
tb_bool_t tb_static_fixed_pool_free(tb_static_fixed_pool_ref_t self, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_static_fixed_pool_t* pool = (tb_static_fixed_pool_t*)self;
    tb_assert_and_check_return_val(pool && pool->item_space, tb_false);

    // done
    tb_bool_t                  ok        = tb_false;
    tb_pool_data_empty_head_t* data_head = (tb_pool_data_empty_head_t*)((tb_byte_t*)data - pool->data_head_size);
    do
    {
        // the slot index of this item within the pool storage
        tb_size_t index = ((tb_byte_t*)data_head - pool->data) / pool->item_space;

        // the item must lie inside the pool's storage area
        tb_assertf_pass_and_check_break((tb_byte_t*)data_head >= pool->data && (tb_byte_t*)data_head + pool->item_space <= pool->tail, "the data: %p not belong to pool: %p", data, pool);

        // the item must start exactly on an item-slot boundary
        tb_assertf_pass_break(!(((tb_byte_t*)data_head - pool->data) % pool->item_space), "free the invalid data: %p", data);

        // guard against double free: the pool must hold items and the slot must be used
        tb_assertf_pass_and_check_break(pool->item_count, "double free data: %p", data);
        tb_assertf_pass_and_check_break(tb_static_fixed_pool_used_bset(pool->used_info, index), "double free data: %p", data);

        // the debug magic must be intact (small pools use the full magic)
        tb_assertf_pass_break(data_head->debug.magic == (pool->for_small? TB_POOL_DATA_MAGIC : TB_POOL_DATA_EMPTY_MAGIC), "the invalid data: %p", data);

        // the patch byte just past the item size must not be overwritten
        tb_assertf_pass_break(((tb_byte_t*)data)[pool->item_size] == TB_POOL_DATA_PATCH, "data underflow");

#ifdef __tb_debug__
        // check the prev data
        tb_static_fixed_pool_check_prev(pool, data_head);

        // check the next data
        tb_static_fixed_pool_check_next(pool, data_head);

        // update the total size
        pool->total_size -= pool->item_size;

        // update the free count
        pool->free_count++;
#endif

        // mark the slot as free
        tb_static_fixed_pool_used_set0(pool->used_info, index);

        // predict this slot for the next allocation if no cache yet
        if (!pool->pred_index) tb_static_fixed_pool_cache_pred(pool, index);

        // size--
        pool->item_count--;

        // ok
        ok = tb_true;

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!ok)
    {
        // trace
        tb_trace_e("free(%p) failed! at %s(): %lu, %s", data, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[static_fixed_pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // ok?
    return ok;
}