/* Move n bytes from s2 to s1; the regions may overlap.
 *
 * In debug builds, if either pointer belongs to the memory pool, its recorded
 * allocation size is checked against n; on overflow a trace, backtrace and
 * data dump are emitted and the process aborts.
 *
 * @param s1    the destination data
 * @param s2    the source data
 * @param n     the byte count to move
 *
 * @return      the destination data s1
 */
tb_pointer_t tb_memmov(tb_pointer_t s1, tb_cpointer_t s2, tb_size_t n)
{
    // check
#ifdef __tb_debug__
    {
        // overflow dst?
        tb_size_t n1 = tb_pool_data_size(s1);
        if (n1 && n > n1)
        {
            tb_trace_i("[memmov]: [overflow]: [%p, %lu] => [%p, %lu]", s2, n, s1, n1);
            tb_backtrace_dump("[memmov]: [overflow]: ", tb_null, 10);
            tb_pool_data_dump(s1, tb_true, "\t[malloc]: [from]: ");
            tb_abort();
        }

        // overflow src?
        tb_size_t n2 = tb_pool_data_size(s2);
        if (n2 && n > n2)
        {
            // report the source buffer's real size n2 (the overflowed one),
            // not the destination size n1 which may be 0/unknown here
            tb_trace_i("[memmov]: [overflow]: [%p, %lu] => [%p, %lu]", s2, n2, s1, n);
            tb_backtrace_dump("[memmov]: [overflow]: ", tb_null, 10);
            tb_pool_data_dump(s2, tb_true, "\t[malloc]: [from]: ");
            tb_abort();
        }
    }
#endif

    // done
    return tb_memmov_impl(s1, s2, n);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */

/* Compute the length of the C string s.
 *
 * Debug builds first verify that a pool-allocated string is NUL-terminated
 * inside its recorded allocation; otherwise they trace, dump and abort.
 *
 * @param s     the string
 * @return      the string length
 */
tb_size_t tb_strlen(tb_char_t const* s)
{
#ifdef __tb_debug__
    {
        // known pool allocation? the terminator must live inside it
        tb_size_t maxn = tb_pool_data_size(s);
        if (maxn)
        {
            // scan no further than the allocation; a non-NUL at the stop
            // position means the string runs past its buffer
            tb_size_t stop = tb_strnlen(s, maxn);
            if (s[stop])
            {
                tb_trace_i("[strlen]: [overflow]: [%p, %lu]", s, maxn);
                tb_backtrace_dump("[strlen]: [overflow]: ", tb_null, 10);
                tb_pool_data_dump(s, tb_true, "\t[malloc]: [from]: ");
                tb_abort();
            }
        }
    }
#endif

    // delegate to the underlying implementation
    return tb_strlen_impl(s);
}
/* Validate one allocated item of the static fixed pool.
 *
 * Checks alignment on an item boundary, the used bit, the debug magic and
 * the trailing patch byte; dumps the item and aborts on any violation.
 *
 * @param pool          the pool
 * @param data_head     the item's (empty) data head inside the pool slab
 */
static tb_void_t tb_static_fixed_pool_check_data(tb_static_fixed_pool_t* pool, tb_pool_data_empty_head_t const* data_head)
{
    // check
    tb_assert_and_check_return(pool && data_head);

    tb_bool_t        valid = tb_false;
    tb_byte_t const* body  = (tb_byte_t const*)data_head + pool->data_head_size;
    do
    {
        // the item index inside the slab
        tb_size_t slot = ((tb_byte_t*)data_head - pool->data) / pool->item_space;

        // must sit exactly on an item boundary
        tb_assertf_pass_break(!(((tb_byte_t*)data_head - pool->data) % pool->item_space), "the invalid data: %p", body);

        // must still be marked as used
        tb_assertf_pass_break(tb_static_fixed_pool_used_bset(pool->used_info, slot), "data have been freed: %p", body);

        // the debug magic must match the pool flavor
        tb_assertf_pass_break(data_head->debug.magic == (pool->for_small? TB_POOL_DATA_MAGIC : TB_POOL_DATA_EMPTY_MAGIC), "the invalid data: %p", body);

        // the trailing patch byte must be intact (underflow canary)
        tb_assertf_pass_break(((tb_byte_t*)body)[pool->item_size] == TB_POOL_DATA_PATCH, "data underflow");

        // all checks passed
        valid = tb_true;

    } while (0);

    // broken? dump the item and abort
    if (!valid)
    {
        tb_pool_data_dump(body, tb_true, "[static_fixed_pool]: [error]: ");
        tb_abort();
    }
}
/* Validate one allocated block of the static large allocator.
 *
 * Checks that the block is not marked free, that the base head carries the
 * debug magic, and that the trailing patch byte is intact; dumps the block
 * and aborts on any violation.
 *
 * @param allocator     the allocator
 * @param data_head     the large data head preceding the payload
 */
static tb_void_t tb_static_large_allocator_check_data(tb_static_large_allocator_ref_t allocator, tb_static_large_data_head_t const* data_head)
{
    // check
    tb_assert_and_check_return(allocator && data_head);

    tb_bool_t        valid = tb_false;
    tb_byte_t const* body  = (tb_byte_t const*)&(data_head[1]);
    do
    {
        // the base head shared with the generic pool layer
        tb_pool_data_head_t* base_head = tb_static_large_allocator_data_base(data_head);

        // must not already be freed
        tb_assertf_pass_break(!data_head->bfree, "data have been freed: %p", body);

        // the debug magic must match
        tb_assertf_pass_break(base_head->debug.magic == TB_POOL_DATA_MAGIC, "the invalid data: %p", body);

        // the trailing patch byte must be intact (underflow canary)
        tb_assertf_pass_break(((tb_byte_t*)body)[base_head->size] == TB_POOL_DATA_PATCH, "data underflow");

        // all checks passed
        valid = tb_true;

    } while (0);

    // broken? dump the block and abort
    if (!valid)
    {
        tb_pool_data_dump(body, tb_true, "[static_large_allocator]: [error]: ");
        tb_abort();
    }
}
/* Free data back to the static pool (debug-decorated entry).
 *
 * Thin wrapper over the static large pool; in debug builds a failed free
 * traces the caller, dumps the data and aborts.
 *
 * @param pool  the pool
 * @param data  the data to free
 * @return      tb_true on success
 */
tb_bool_t tb_static_pool_free_(tb_static_pool_ref_t pool, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_assert_and_check_return_val(pool && data, tb_false);

    // hand off to the large pool
    tb_bool_t freed = tb_static_large_pool_free(pool, data __tb_debug_args__);

#ifdef __tb_debug__
    // on failure: report the call site, dump the data, then abort
    if (!freed)
    {
        tb_trace_e("free(%p) failed! at %s(): %lu, %s", data, func_, line_, file_);
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[static_pool]: [error]: ");
        tb_abort();
    }
#endif

    return freed;
}
/* Reallocate data in the static pool (debug-decorated entry).
 *
 * Delegates to the static large pool; in debug builds a failed ralloc
 * traces the caller, dumps the old data and aborts. The returned pointer
 * is asserted to be TB_POOL_DATA_ALIGN-aligned.
 *
 * @param pool  the pool
 * @param data  the old data (must not be tb_null)
 * @param size  the new size (0 < size <= TB_POOL_DATA_SIZE_MAXN)
 * @return      the new data pointer
 */
tb_pointer_t tb_static_pool_ralloc_(tb_static_pool_ref_t pool, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_assert_and_check_return_val(pool && data && size, tb_null);
    tb_assert_and_check_return_val(size <= TB_POOL_DATA_SIZE_MAXN, tb_null);

    // ralloc data
    tb_pointer_t data_new = tb_static_large_pool_ralloc(pool, data, size, tb_null __tb_debug_args__);

    // failed? dump it
#ifdef __tb_debug__
    if (!data_new)
    {
        // trace
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[static_pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // check alignment of the NEW pointer — report data_new (the pointer being
    // tested), not the stale old pointer
    tb_assertf_abort(!(((tb_size_t)data_new) & (TB_POOL_DATA_ALIGN - 1)), "ralloc(%lu): unaligned data: %p", size, data_new);

    // ok
    return data_new;
}
/* Free data back to the small pool (debug-decorated entry).
 *
 * Routes the item to the fixed pool that owns its size class after
 * validating the data head magic and the trailing patch byte.
 *
 * @param pool  the pool
 * @param data  the data to free
 * @return      tb_true on success
 */
tb_bool_t tb_small_pool_free_(tb_small_pool_ref_t pool, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_small_pool_impl_t* impl = (tb_small_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && impl->large_pool && data, tb_false);

    // disable small pool for debug
#ifdef TB_SMALL_POOL_DISABLE
    return tb_large_pool_free(impl->large_pool, data);
#endif

    tb_bool_t freed = tb_false;
    do
    {
        // the data head sits immediately before the payload
        tb_pool_data_head_t* head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(head->debug.magic == TB_POOL_DATA_MAGIC, "free invalid data: %p", data);

        // locate the fixed pool owning this size class
        tb_fixed_pool_ref_t fixed = tb_small_pool_find_fixed(impl, head->size);
        tb_assert_and_check_break(fixed);

        // the item capacity of that fixed pool
        tb_size_t capacity = tb_fixed_pool_item_size(fixed);
        tb_assert_and_check_break(capacity >= head->size);

        // underflow check via the trailing patch byte (only if there is slack)
        tb_assertf_break(capacity == head->size || ((tb_byte_t*)data)[head->size] == TB_POOL_DATA_PATCH, "data underflow");

        // free through the fixed pool
        freed = tb_fixed_pool_free_(fixed, data __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!freed)
    {
        tb_trace_e("free(%p) failed! at %s(): %lu, %s", data, func_, line_, file_);
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[small_pool]: [error]: ");
        tb_abort();
    }
#endif

    return freed;
}
/* Free data back to the pool (debug-decorated entry).
 *
 * If a custom allocator is attached, it handles the free directly;
 * otherwise the item is routed to the small or large pool by the size
 * recorded in its data head, under the pool spinlock.
 *
 * @param pool  the pool
 * @param data  the data to free
 * @return      tb_true on success
 */
tb_bool_t tb_pool_free_(tb_pool_ref_t pool, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_pool_impl_t* impl = (tb_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_false);

    // a custom allocator takes over entirely
    if (impl->allocator) return tb_allocator_free_(impl->allocator, data __tb_debug_args__);

    // check
    tb_assert_and_check_return_val(impl->large_pool && impl->small_pool && data, tb_false);

    // enter
    tb_spinlock_enter(&impl->lock);

    tb_bool_t freed = tb_false;
    do
    {
        // the data head sits immediately before the payload
        tb_pool_data_head_t* head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(head->debug.magic == TB_POOL_DATA_MAGIC, "free invalid data: %p", data);

        // route by the recorded size: small pool vs large pool
        freed = (head->size <= TB_SMALL_POOL_DATA_SIZE_MAXN)? tb_small_pool_free_(impl->small_pool, data __tb_debug_args__) : tb_large_pool_free_(impl->large_pool, data __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!freed)
    {
        tb_trace_e("free(%p) failed! at %s(): %lu, %s", data, func_, line_, file_);
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[pool]: [error]: ");
        tb_abort();
    }
#endif

    // leave
    tb_spinlock_leave(&impl->lock);

    return freed;
}
/* Fill n 24-bit units with the value c starting at s.
 *
 * In debug builds, if s belongs to the memory pool, the fill length (n * 3
 * bytes) is checked against the recorded allocation size; on overflow a
 * trace, backtrace and data dump are emitted and the process aborts.
 *
 * @param s     the destination data
 * @param c     the 24-bit fill value (low 3 bytes of c)
 * @param n     the unit count
 *
 * @return      the destination data s
 */
tb_pointer_t tb_memset_u24(tb_pointer_t s, tb_uint32_t c, tb_size_t n)
{
    // check
#ifdef __tb_debug__
    {
        // overflow?
        tb_size_t size = tb_pool_data_size(s);
        if (size && (n * 3) > size)
        {
            tb_trace_i("[memset_u24]: [overflow]: [%#x x %lu x 3] => [%p, %lu]", c, n, s, size);
            // dump the call stack too, consistent with tb_memset_u16 and tb_memmov
            tb_backtrace_dump("[memset_u24]: [overflow]: ", tb_null, 10);
            tb_pool_data_dump(s, tb_true, "\t[malloc]: [from]: ");
            tb_abort();
        }
    }
#endif

    // done
    return tb_memset_u24_impl(s, c, n);
}
/* Fill n 16-bit units with the value c starting at s.
 *
 * Debug builds verify that the fill (n * 2 bytes) fits inside a
 * pool-tracked allocation; otherwise they trace, dump and abort.
 *
 * @param s     the destination data
 * @param c     the 16-bit fill value
 * @param n     the unit count
 *
 * @return      the destination data s
 */
tb_pointer_t tb_memset_u16(tb_pointer_t s, tb_uint16_t c, tb_size_t n)
{
#ifdef __tb_debug__
    {
        // known pool allocation? then n * 2 bytes must fit in it
        tb_size_t maxn = tb_pool_data_size(s);
        if (maxn && (n << 1) > maxn)
        {
            tb_trace_i("[memset_u16]: [overflow]: [%#x x %lu x 2] => [%p, %lu]", c, n, s, maxn);
            tb_backtrace_dump("[memset_u16]: [overflow]: ", tb_null, 10);
            tb_pool_data_dump(s, tb_true, "\t[malloc]: [from]: ");
            tb_abort();
        }
    }
#endif

    // delegate to the underlying implementation
    return tb_memset_u16_impl(s, c, n);
}
/* Dump the static fixed pool: report every still-used item as a leak and
 * print the pool statistics.
 *
 * Each used item is first validated (boundary, magic, patch byte) via
 * tb_static_fixed_pool_check_data, then traced and dumped.
 *
 * @param self  the pool
 */
tb_void_t tb_static_fixed_pool_dump(tb_static_fixed_pool_ref_t self)
{
    // check
    tb_static_fixed_pool_t* pool = (tb_static_fixed_pool_t*)self;
    tb_assert_and_check_return(pool && pool->used_info);

    // walk every slot; anything still marked used at dump time is a leak
    tb_size_t i = 0;
    for (i = 0; i < pool->item_maxn; ++i)
    {
        if (tb_static_fixed_pool_used_bset(pool->used_info, i))
        {
            // the item head at this slot
            tb_pool_data_empty_head_t* head = (tb_pool_data_empty_head_t*)(pool->data + i * pool->item_space);

            // validate it before reporting
            tb_static_fixed_pool_check_data(pool, head);

            // the payload
            tb_byte_t const* item = (tb_byte_t const*)head + pool->data_head_size;

            // report the leak and dump the payload
            tb_trace_e("leak: %p", item);
            tb_pool_data_dump(item, tb_false, "[static_fixed_pool]: [error]: ");
        }
    }

    // trace debug info
    tb_trace_i("[%lu]: peak_size: %lu, wast_rate: %llu/10000, pred_failed: %lu, item_maxn: %lu, free_count: %lu, malloc_count: %lu"
        , pool->item_size
        , pool->peak_size
        , pool->occupied_size? (((tb_hize_t)pool->occupied_size - pool->real_size) * 10000) / (tb_hize_t)pool->occupied_size : 0
        , pool->pred_failed
        , pool->item_maxn
        , pool->free_count
        , pool->malloc_count);
}
/* Reallocate data through the allocator (debug-decorated entry).
 *
 * Prefers the allocator's ralloc hook, falling back to large_ralloc.
 * Runs under the allocator spinlock; in debug builds a failed ralloc
 * traces the caller, dumps the old data and aborts. The result is
 * asserted to be TB_POOL_DATA_ALIGN-aligned.
 *
 * @param allocator     the allocator
 * @param data          the old data (may be tb_null)
 * @param size          the new size
 * @return              the new data pointer
 */
tb_pointer_t tb_allocator_ralloc_(tb_allocator_ref_t allocator, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_assert_and_check_return_val(allocator, tb_null);

    // enter
    tb_spinlock_enter(&allocator->lock);

    // ralloc it
    tb_pointer_t data_new = tb_null;
    if (allocator->ralloc) data_new = allocator->ralloc(allocator, data, size __tb_debug_args__);
    else if (allocator->large_ralloc) data_new = allocator->large_ralloc(allocator, data, size, tb_null __tb_debug_args__);

    // trace (line_ is printed with %lu, matching every other trace in this file)
    tb_trace_d("ralloc(%p, %lu): %p at %s(): %lu, %s", data, size, data_new __tb_debug_args__);

    // failed? dump it
#ifdef __tb_debug__
    if (!data_new)
    {
        // trace
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[large_allocator]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // check alignment of the NEW pointer — report data_new, the pointer
    // actually being tested, not the old one
    tb_assertf(!(((tb_size_t)data_new) & (TB_POOL_DATA_ALIGN - 1)), "ralloc(%lu): unaligned data: %p", size, data_new);

    // leave
    tb_spinlock_leave(&allocator->lock);

    // ok?
    return data_new;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */

/* Copy at most n characters from s2 to s1.
 *
 * Debug builds first validate the source via tb_strlen (whose own debug
 * check verifies pool-tracked strings are terminated in-bounds), then
 * verify that n + 1 bytes fit in the destination's recorded allocation;
 * on overflow they trace, backtrace, dump the DESTINATION buffer and abort.
 *
 * @param s1    the destination string
 * @param s2    the source string
 * @param n     the maximum character count
 *
 * @return      the destination string s1
 */
tb_char_t* tb_strncpy(tb_char_t* s1, tb_char_t const* s2, tb_size_t n)
{
    // check
#ifdef __tb_debug__
    {
        // validate the source string (terminator in bounds) via strlen's debug hook
        tb_strlen(s2);

        // overflow dst?
        tb_size_t n1 = tb_pool_data_size(s1);
        if (n1 && n + 1 > n1)
        {
            tb_trace_i("[strncpy]: [overflow]: [%p, %lu] => [%p, %lu]", s2, n, s1, n1);
            tb_backtrace_dump("[strncpy]: [overflow]: ", tb_null, 10);
            // dump the destination buffer — that is the allocation being overflowed
            tb_pool_data_dump(s1, tb_true, "\t[malloc]: [from]: ");
            tb_abort();
        }
    }
#endif

    // done
    return tb_strncpy_impl(s1, s2, n);
}
/* Free data through the allocator (debug-decorated entry).
 *
 * Prefers the allocator's free hook, falling back to large_free. Runs
 * under the allocator spinlock; in debug builds a failed free traces
 * the caller, dumps the data and aborts.
 *
 * @param allocator     the allocator
 * @param data          the data to free
 * @return              tb_true on success
 */
tb_bool_t tb_allocator_free_(tb_allocator_ref_t allocator, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_assert_and_check_return_val(allocator, tb_false);

    // enter
    tb_spinlock_enter(&allocator->lock);

    // trace (line_ is printed with %lu, matching every other trace in this file)
    tb_trace_d("free(%p): at %s(): %lu, %s", data __tb_debug_args__);

    // free it
    tb_bool_t ok = tb_false;
    if (allocator->free) ok = allocator->free(allocator, data __tb_debug_args__);
    else if (allocator->large_free) ok = allocator->large_free(allocator, data __tb_debug_args__);

    // failed? dump it
#ifdef __tb_debug__
    if (!ok)
    {
        // trace
        tb_trace_e("free(%p) failed! at %s(): %lu, %s", data, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[large_allocator]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // leave
    tb_spinlock_leave(&allocator->lock);

    // ok?
    return ok;
}
/* Free one item back to the fixed pool (debug-decorated entry).
 *
 * Finds the slot owning the item, runs the user exit callback, frees the
 * item from the slot's static fixed pool, then rebalances the slot lists:
 * a previously-full slot moves back to the partial list, and a slot that
 * became empty is destroyed. The order matters — "full" must be sampled
 * BEFORE the free, and list moves only apply to non-current slots.
 *
 * @param pool  the pool
 * @param data  the item to free
 * @return      tb_true on success
 */
tb_bool_t tb_fixed_pool_free_(tb_fixed_pool_ref_t pool, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_fixed_pool_impl_t* impl = (tb_fixed_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
        // a zero item count means nothing is live — this free is a double free
        tb_assertf_and_check_break(impl->item_count, "double free data: %p", data);

        // find the slot that owns this item
        tb_fixed_pool_slot_t* slot = tb_fixed_pool_slot_find(impl, data);
        tb_assertf_and_check_break(slot, "the data: %p not belong to pool: %p", data, pool);
        tb_assert_and_check_break(slot->pool);

        // sample fullness BEFORE freeing: a full slot will become partial below
        tb_bool_t full = tb_static_fixed_pool_full(slot->pool);

        // run the user-supplied item destructor, if any
        if (impl->func_exit) impl->func_exit(data, impl->func_priv);

        // free the item from the slot's static fixed pool
        if (!tb_static_fixed_pool_free(slot->pool, data __tb_debug_args__)) break;

        // the current slot never migrates between lists or gets destroyed
        if (slot != impl->current_slot)
        {
            // was full? it now has room: move it from the full list to the partial list
            if (full)
            {
                tb_list_entry_remove(&impl->full_slots, &slot->entry);
                tb_list_entry_insert_tail(&impl->partial_slots, &slot->entry);
            }
            // became empty? unlink it and release the slot entirely
            else if (tb_static_fixed_pool_null(slot->pool))
            {
                tb_list_entry_remove(&impl->partial_slots, &slot->entry);
                tb_fixed_pool_slot_exit(impl, slot);
            }
        }

        // update the item count
        impl->item_count--;

        // ok
        ok = tb_true;

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!ok)
    {
        // trace
        tb_trace_e("free(%p) failed! at %s(): %lu, %s", data, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[fixed_pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // ok?
    return ok;
}
/* Free one item back to the static fixed pool (debug-decorated entry).
 *
 * Validates the item (range, boundary, double-free, magic, patch byte),
 * then clears its used bit, seeds the allocation predictor with the freed
 * slot, and decrements the live count. Debug builds additionally verify
 * the neighboring items and update the bookkeeping counters.
 *
 * Note: the error trace string literal was previously split across two
 * physical lines (invalid C); it is re-joined onto one line here.
 *
 * @param self  the pool
 * @param data  the item to free
 * @return      tb_true on success
 */
tb_bool_t tb_static_fixed_pool_free(tb_static_fixed_pool_ref_t self, tb_pointer_t data __tb_debug_decl__)
{
    // check
    tb_static_fixed_pool_t* pool = (tb_static_fixed_pool_t*)self;
    tb_assert_and_check_return_val(pool && pool->item_space, tb_false);

    // done
    tb_bool_t ok = tb_false;
    tb_pool_data_empty_head_t* data_head = (tb_pool_data_empty_head_t*)((tb_byte_t*)data - pool->data_head_size);
    do
    {
        // the index of the item inside the slab
        tb_size_t index = ((tb_byte_t*)data_head - pool->data) / pool->item_space;

        // the item must lie entirely inside this pool's slab
        tb_assertf_pass_and_check_break((tb_byte_t*)data_head >= pool->data && (tb_byte_t*)data_head + pool->item_space <= pool->tail, "the data: %p not belong to pool: %p", data, pool);

        // and must sit exactly on an item boundary
        tb_assertf_pass_break(!(((tb_byte_t*)data_head - pool->data) % pool->item_space), "free the invalid data: %p", data);

        // double free?
        tb_assertf_pass_and_check_break(pool->item_count, "double free data: %p", data);
        tb_assertf_pass_and_check_break(tb_static_fixed_pool_used_bset(pool->used_info, index), "double free data: %p", data);

        // the debug magic must match the pool flavor
        tb_assertf_pass_break(data_head->debug.magic == (pool->for_small? TB_POOL_DATA_MAGIC : TB_POOL_DATA_EMPTY_MAGIC), "the invalid data: %p", data);

        // the trailing patch byte must be intact (underflow canary)
        tb_assertf_pass_break(((tb_byte_t*)data)[pool->item_size] == TB_POOL_DATA_PATCH, "data underflow");

#ifdef __tb_debug__
        // check the prev data
        tb_static_fixed_pool_check_prev(pool, data_head);

        // check the next data
        tb_static_fixed_pool_check_next(pool, data_head);

        // update the total size
        pool->total_size -= pool->item_size;

        // update the free count
        pool->free_count++;
#endif

        // clear the used bit: the item is free now
        tb_static_fixed_pool_used_set0(pool->used_info, index);

        // seed the allocation predictor if it has no cached slot
        if (!pool->pred_index) tb_static_fixed_pool_cache_pred(pool, index);

        // size--
        pool->item_count--;

        // ok
        ok = tb_true;

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!ok)
    {
        // trace
        tb_trace_e("free(%p) failed! at %s(): %lu, %s", data, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[static_fixed_pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // ok?
    return ok;
}
/* Reallocate data in the pool (debug-decorated entry).
 *
 * Routing: a custom allocator, if attached, handles everything. Otherwise,
 * under the pool spinlock, the old and new sizes select one of four paths:
 * small->small (in-place ralloc), small->large and large->small (malloc new
 * + copy min(old, new) bytes + free old), large->large (large ralloc).
 * A tb_null data degenerates to a plain malloc on the matching pool.
 *
 * @param pool  the pool
 * @param data  the old data (may be tb_null)
 * @param size  the new size (must be non-zero)
 * @return      the new data pointer, or tb_null on failure
 */
tb_pointer_t tb_pool_ralloc_(tb_pool_ref_t pool, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_pool_impl_t* impl = (tb_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_null);

    // a custom allocator takes over entirely
    if (impl->allocator) return tb_allocator_ralloc_(impl->allocator, data, size __tb_debug_args__);

    // check
    tb_assert_and_check_return_val(impl && impl->large_pool && impl->small_pool && size, tb_null);

    // enter
    tb_spinlock_enter(&impl->lock);

    // done
    tb_pointer_t data_new = tb_null;
    do
    {
        // no old data? ralloc degenerates to malloc on the matching pool
        if (!data)
        {
            // malloc it directly
            data_new = size <= TB_SMALL_POOL_DATA_SIZE_MAXN? tb_small_pool_malloc_(impl->small_pool, size __tb_debug_args__) : tb_large_pool_malloc_(impl->large_pool, size, tb_null __tb_debug_args__);
            break;
        }

        // the data head sits immediately before the payload; its recorded
        // size decides which pool currently owns the data
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(data_head->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);
        tb_assert_and_check_break(data_head->size);

        // small => small: stays in the small pool, ralloc in place
        if (data_head->size <= TB_SMALL_POOL_DATA_SIZE_MAXN && size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
            data_new = tb_small_pool_ralloc_(impl->small_pool, data, size __tb_debug_args__);
        // small => large: cross-pool move — malloc, copy, then free the old
        else if (data_head->size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
        {
            // make the new data
            data_new = tb_large_pool_malloc_(impl->large_pool, size, tb_null __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy only the bytes that exist in both allocations
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_small_pool_free_(impl->small_pool, data __tb_debug_args__);
        }
        // large => small: cross-pool move in the other direction
        else if (size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
        {
            // make the new data
            data_new = tb_small_pool_malloc_(impl->small_pool, size __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy only the bytes that exist in both allocations
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_large_pool_free_(impl->large_pool, data __tb_debug_args__);
        }
        // large => large: stays in the large pool
        else data_new = tb_large_pool_ralloc_(impl->large_pool, data, size, tb_null __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!data_new)
    {
        // trace
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data (only if there was an old allocation to dump)
        if (data) tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // leave
    tb_spinlock_leave(&impl->lock);

    // ok?
    return data_new;
}
/* Reallocate data in the small pool (debug-decorated entry).
 *
 * Maps the old and new sizes to their fixed pools (size classes). If both
 * land in the same fixed pool the data stays put and only the recorded
 * size (and, in debug, the trailing patch bytes) is updated; otherwise a
 * new item is allocated from the target fixed pool, min(old, new) bytes
 * are copied, and the old item is freed.
 *
 * @param pool  the pool
 * @param data  the old data (must not be tb_null)
 * @param size  the new size (0 < size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
 * @return      the new data pointer, or tb_null on failure
 */
tb_pointer_t tb_small_pool_ralloc_(tb_small_pool_ref_t pool, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_small_pool_impl_t* impl = (tb_small_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && impl->large_pool && data && size, tb_null);
    tb_assert_and_check_return_val(size <= TB_SMALL_POOL_DATA_SIZE_MAXN, tb_null);

    // disable small pool for debug
#ifdef TB_SMALL_POOL_DISABLE
    return tb_large_pool_ralloc(impl->large_pool, data, size, tb_null);
#endif

    // done
    tb_pointer_t data_new = tb_null;
    do
    {
        // the old data head sits immediately before the payload
        tb_pool_data_head_t* data_head_old = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(data_head_old->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);

        // the fixed pool (size class) currently owning the data
        tb_fixed_pool_ref_t fixed_pool_old = tb_small_pool_find_fixed(impl, data_head_old->size);
        tb_assert_and_check_break(fixed_pool_old);

        // the item capacity of that fixed pool
        tb_size_t space_old = tb_fixed_pool_item_size(fixed_pool_old);
        tb_assert_and_check_break(space_old >= data_head_old->size);

        // underflow check via the trailing patch byte (only if there is slack)
        tb_assertf_break(space_old == data_head_old->size || ((tb_byte_t*)data)[data_head_old->size] == TB_POOL_DATA_PATCH, "data underflow");

        // the fixed pool (size class) for the requested size
        tb_fixed_pool_ref_t fixed_pool_new = tb_small_pool_find_fixed(impl, size);
        tb_assert_and_check_break(fixed_pool_new);

        // same size class? the item already has room — no move needed
        if (fixed_pool_old == fixed_pool_new)
        {
#ifdef __tb_debug__
            // shrinking: re-fill the now-unused tail with patch bytes so the
            // underflow canary stays valid for the new, smaller size
            if (data_head_old->size > size) tb_memset_((tb_byte_t*)data + size, TB_POOL_DATA_PATCH, data_head_old->size - size);
#endif

            // only update size
            data_head_old->size = size;

            // ok
            data_new = data;
            break;
        }

        // different size class: allocate from the target fixed pool
        data_new = tb_fixed_pool_malloc_(fixed_pool_new __tb_debug_args__);
        tb_assert_and_check_break(data_new);

        // the new data head
        tb_pool_data_head_t* data_head_new = &(((tb_pool_data_head_t*)data_new)[-1]);
        tb_assert_abort(data_head_new->debug.magic == TB_POOL_DATA_MAGIC);

#ifdef __tb_debug__
        // re-fill the unused tail of the new item with patch bytes so the
        // underflow canary matches the requested size
        if (data_head_new->size > size) tb_memset_((tb_byte_t*)data_new + size, TB_POOL_DATA_PATCH, data_head_new->size - size);
#endif

        // update size
        data_head_new->size = size;

        // copy only the bytes that exist in both allocations
        tb_memcpy_(data_new, data, tb_min(data_head_old->size, size));

        // free the old data
        tb_fixed_pool_free_(fixed_pool_old, data __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!data_new)
    {
        // trace
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[small_pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // ok
    return data_new;
}