/* capture up to nframe return addresses of the current call stack,
 * optionally skipping the innermost nskip frames.
 *
 * note: cannot use assert here, since the assert handler itself may
 * call backtrace.
 *
 * @param frames    the output frame buffer
 * @param nframe    the maximum number of frames to save
 * @param nskip     the number of innermost frames to skip
 *
 * @return          the number of frames actually saved, or 0 on failure
 */
tb_size_t tb_backtrace_frames(tb_pointer_t* frames, tb_size_t nframe, tb_size_t nskip)
{
    // check
    tb_check_return_val(frames && nframe, 0);

    // skip some frames?
    if (nskip)
    {
        // init temp frames buffer for the skipped + wanted frames
        tb_pointer_t temp[256] = {0};

        /* ensure the request fits the temp buffer
         *
         * backtrace() may legally fill the whole buffer, so the bound is
         * <= (the previous `< 256` wrongly rejected the valid boundary
         * case nframe + nskip == 256), and it is derived from the array
         * itself instead of repeating the magic number.
         */
        tb_check_return_val(nframe + nskip <= sizeof(temp) / sizeof(temp[0]), 0);

        // capture the raw backtrace including the frames to be skipped
        tb_size_t size = backtrace(temp, nframe + nskip);
        tb_check_return_val(nskip < size, 0);

        // clamp nframe to the frames actually captured after skipping
        nframe = tb_min(nframe, size - nskip);

        // save the wanted frames to the output buffer
        tb_memcpy_(frames, temp + nskip, nframe * sizeof(tb_pointer_t));
    }
    // nothing to skip? backtrace directly into the output buffer
    else nframe = backtrace(frames, nframe);

    // ok?
    return nframe;
}
/* reallocate data, routing between the small and large sub-allocators.
 *
 * small => small and large => large are delegated directly; when the size
 * class changes, a new block is allocated in the target sub-allocator, the
 * old contents are copied (clamped to the smaller size), and the old block
 * is freed.
 *
 * @param self      the allocator
 * @param data      the old data address (tb_null to allocate fresh)
 * @param size      the new size in bytes (must be non-zero)
 *
 * @return          the new data address, or tb_null on failure
 */
static tb_pointer_t tb_default_allocator_ralloc(tb_allocator_ref_t self, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check (single combined check; the previous separate allocator check was redundant)
    tb_default_allocator_ref_t allocator = (tb_default_allocator_ref_t)self;
    tb_assert_and_check_return_val(allocator && allocator->large_allocator && allocator->small_allocator && size, tb_null);

    // done
    tb_pointer_t data_new = tb_null;
    do
    {
        // no data?
        if (!data)
        {
            // malloc it directly, choosing the sub-allocator by size class
            data_new = size <= TB_SMALL_ALLOCATOR_DATA_MAXN? tb_allocator_malloc_(allocator->small_allocator, size __tb_debug_args__) : tb_allocator_large_malloc_(allocator->large_allocator, size, tb_null __tb_debug_args__);
            break;
        }

        // the data head (stored immediately before the user data)
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);

        /* use tb_assertf_break (not tb_assertf) so an invalid head aborts this
         * operation instead of falling through and using the corrupt head;
         * this also matches the sibling pool ralloc implementations.
         */
        tb_assertf_break(data_head->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);
        tb_assert_and_check_break(data_head->size);

        // small => small
        if (data_head->size <= TB_SMALL_ALLOCATOR_DATA_MAXN && size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
            data_new = tb_allocator_ralloc_(allocator->small_allocator, data, size __tb_debug_args__);
        // small => large: cannot ralloc across sub-allocators, so malloc + copy + free
        else if (data_head->size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
        {
            // make the new data
            data_new = tb_allocator_large_malloc_(allocator->large_allocator, size, tb_null __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data (clamped to the smaller of old/new size)
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_allocator_free_(allocator->small_allocator, data __tb_debug_args__);
        }
        // large => small: same malloc + copy + free dance in the other direction
        else if (size <= TB_SMALL_ALLOCATOR_DATA_MAXN)
        {
            // make the new data
            data_new = tb_allocator_malloc_(allocator->small_allocator, size __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data (clamped to the smaller of old/new size)
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_allocator_large_free_(allocator->large_allocator, data __tb_debug_args__);
        }
        // large => large
        else data_new = tb_allocator_large_ralloc_(allocator->large_allocator, data, size, tb_null __tb_debug_args__);

    } while (0);

    // ok?
    return data_new;
}
/* reallocate data from the pool.
 *
 * delegates to the attached allocator if one is set; otherwise routes
 * between the small pool (size <= TB_SMALL_POOL_DATA_SIZE_MAXN) and the
 * large pool under the pool spinlock, copying and freeing across pools
 * when the size class changes.
 *
 * @param pool      the pool
 * @param data      the old data address (tb_null to allocate fresh)
 * @param size      the new size in bytes (must be non-zero)
 *
 * @return          the new data address, or tb_null on failure
 *                  (in debug builds a failure dumps the data and aborts)
 */
tb_pointer_t tb_pool_ralloc_(tb_pool_ref_t pool, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_pool_impl_t* impl = (tb_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_null);

    // uses allocator? delegate directly — no lock is taken on this path
    if (impl->allocator) return tb_allocator_ralloc_(impl->allocator, data, size __tb_debug_args__);

    // check
    tb_assert_and_check_return_val(impl && impl->large_pool && impl->small_pool && size, tb_null);

    // enter
    tb_spinlock_enter(&impl->lock);

    // done (do/while(0) so the *_break macros below fall through to the unlock)
    tb_pointer_t data_new = tb_null;
    do
    {
        // no data?
        if (!data)
        {
            // malloc it directly, choosing the pool by size class
            data_new = size <= TB_SMALL_POOL_DATA_SIZE_MAXN? tb_small_pool_malloc_(impl->small_pool, size __tb_debug_args__) : tb_large_pool_malloc_(impl->large_pool, size, tb_null __tb_debug_args__);
            break;
        }

        // the data head (stored immediately before the user data)
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(data_head->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);
        tb_assert_and_check_break(data_head->size);

        // small => small
        if (data_head->size <= TB_SMALL_POOL_DATA_SIZE_MAXN && size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
            data_new = tb_small_pool_ralloc_(impl->small_pool, data, size __tb_debug_args__);
        // small => large: cannot ralloc across pools, so malloc + copy + free
        else if (data_head->size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
        {
            // make the new data
            data_new = tb_large_pool_malloc_(impl->large_pool, size, tb_null __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data (clamped to the smaller of old/new size)
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_small_pool_free_(impl->small_pool, data __tb_debug_args__);
        }
        // large => small: same malloc + copy + free dance in the other direction
        else if (size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
        {
            // make the new data
            data_new = tb_small_pool_malloc_(impl->small_pool, size __tb_debug_args__);
            tb_assert_and_check_break(data_new);

            // copy the old data (clamped to the smaller of old/new size)
            tb_memcpy_(data_new, data, tb_min(data_head->size, size));

            // free the old data
            tb_large_pool_free_(impl->large_pool, data __tb_debug_args__);
        }
        // large => large
        else data_new = tb_large_pool_ralloc_(impl->large_pool, data, size, tb_null __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!data_new)
    {
        // trace the caller location captured via __tb_debug_decl__
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data
        if (data) tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[pool]: [error]: ");

        // abort (note: aborts while still holding the lock, acceptable in debug)
        tb_abort();
    }
#endif

    // leave
    tb_spinlock_leave(&impl->lock);

    // ok?
    return data_new;
}
/* reallocate data inside the small pool.
 *
 * if the old and new sizes map to the same fixed pool, only the recorded
 * size (and the debug patch bytes) are updated in place; otherwise a new
 * item is taken from the target fixed pool, the old contents are copied
 * (clamped to the smaller size), and the old item is freed.
 *
 * @param pool      the small pool
 * @param data      the old data address (must not be tb_null)
 * @param size      the new size in bytes (0 < size <= TB_SMALL_POOL_DATA_SIZE_MAXN)
 *
 * @return          the new data address, or tb_null on failure
 *                  (in debug builds a failure dumps the data and aborts)
 */
tb_pointer_t tb_small_pool_ralloc_(tb_small_pool_ref_t pool, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_small_pool_impl_t* impl = (tb_small_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && impl->large_pool && data && size, tb_null);
    tb_assert_and_check_return_val(size <= TB_SMALL_POOL_DATA_SIZE_MAXN, tb_null);

    // disable small pool for debug
#ifdef TB_SMALL_POOL_DISABLE
    return tb_large_pool_ralloc(impl->large_pool, data, size, tb_null);
#endif

    // done
    tb_pointer_t result = tb_null;
    do
    {
        // fetch and validate the old data head (stored just before the user data)
        tb_pool_data_head_t* old_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(old_head->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);

        // locate the fixed pool the old data came from
        tb_fixed_pool_ref_t old_fixed = tb_small_pool_find_fixed(impl, old_head->size);
        tb_assert_and_check_break(old_fixed);

        // the old item space must cover the recorded size
        tb_size_t old_space = tb_fixed_pool_item_size(old_fixed);
        tb_assert_and_check_break(old_space >= old_head->size);

        // verify the patch byte right after the data is intact
        tb_assertf_break(old_space == old_head->size || ((tb_byte_t*)data)[old_head->size] == TB_POOL_DATA_PATCH, "data underflow");

        // locate the fixed pool for the new size
        tb_fixed_pool_ref_t new_fixed = tb_small_pool_find_fixed(impl, size);
        tb_assert_and_check_break(new_fixed);

        // both sizes map to the same fixed pool? adjust in place
        if (old_fixed == new_fixed)
        {
#ifdef __tb_debug__
            // shrinking? re-fill the now-unused tail with patch bytes
            if (old_head->size > size) tb_memset_((tb_byte_t*)data + size, TB_POOL_DATA_PATCH, old_head->size - size);
#endif

            // only the recorded size changes; the data stays where it is
            old_head->size = size;
            result = data;
            break;
        }

        // different fixed pool: grab a fresh item from the target pool
        result = tb_fixed_pool_malloc_(new_fixed __tb_debug_args__);
        tb_assert_and_check_break(result);

        // fetch and validate the new data head
        tb_pool_data_head_t* new_head = &(((tb_pool_data_head_t*)result)[-1]);
        tb_assert_abort(new_head->debug.magic == TB_POOL_DATA_MAGIC);

#ifdef __tb_debug__
        // fill the unused tail of the new item with patch bytes
        if (new_head->size > size) tb_memset_((tb_byte_t*)result + size, TB_POOL_DATA_PATCH, new_head->size - size);
#endif

        // record the requested size
        new_head->size = size;

        // move the payload over, clamped to the smaller of old/new size
        tb_memcpy_(result, data, tb_min(old_head->size, size));

        // return the old item to its fixed pool
        tb_fixed_pool_free_(old_fixed, data __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!result)
    {
        // trace
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[small_pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // ok
    return result;
}