Example #1
tb_void_t tb_static_fixed_pool_clear(tb_static_fixed_pool_ref_t self)
{
    // check 
    tb_static_fixed_pool_t* pool = (tb_static_fixed_pool_t*)self;
    tb_assert_and_check_return(pool);

    // clear used_info
    if (pool->used_info) tb_memset_(pool->used_info, 0, pool->info_size);

    // clear size
    pool->item_count = 0;
   
    // init the predict index
    pool->pred_index = 1;

    // clear info
#ifdef __tb_debug__
    pool->peak_size     = 0;
    pool->total_size    = 0;
    pool->real_size     = 0;
    pool->occupied_size = 0;
    pool->malloc_count  = 0;
    pool->free_count    = 0;
    pool->pred_failed   = 0;
#endif
}
Example #2
tb_void_t tb_static_fixed_pool_exit(tb_static_fixed_pool_ref_t self)
{
    // check 
    tb_static_fixed_pool_t* pool = (tb_static_fixed_pool_t*)self;
    tb_assert_and_check_return(pool);

    // clear it
    tb_static_fixed_pool_clear(self);

    // exit it
    tb_memset_(pool, 0, sizeof(tb_static_fixed_pool_t));
}
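
Examples #1 and #2 illustrate a common teardown discipline: exit() first delegates to clear(), then wipes the control block itself so stale references fail fast. A minimal standalone sketch of the same clear-then-exit pattern, using plain C and hypothetical demo_* names rather than the real tbox types:

#include <string.h>
#include <assert.h>

/* hypothetical pool control block, for illustration only */
typedef struct demo_pool_t
{
    unsigned char*  used_info;   /* usage bitmap             */
    size_t          info_size;   /* bitmap size in bytes     */
    size_t          item_count;  /* number of live items     */
    size_t          pred_index;  /* predicted next free slot */
} demo_pool_t;

/* reset the pool to its freshly initialized state */
static void demo_pool_clear(demo_pool_t* pool)
{
    assert(pool);
    if (pool->used_info) memset(pool->used_info, 0, pool->info_size);
    pool->item_count = 0;
    pool->pred_index = 1;
}

/* clear first, then zero the control block itself */
static void demo_pool_exit(demo_pool_t* pool)
{
    assert(pool);
    demo_pool_clear(pool);
    memset(pool, 0, sizeof(demo_pool_t));
}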
Example #3
tb_pointer_t tb_allocator_malloc0_(tb_allocator_ref_t allocator, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_assert_and_check_return_val(allocator, tb_null);

    // malloc it
    tb_pointer_t data = tb_allocator_malloc_(allocator, size __tb_debug_args__);

    // clear it
    if (data) tb_memset_(data, 0, size);

    // ok?
    return data;
}
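
tb_allocator_malloc0_ is a thin wrapper: allocate, then zero on success. The same shape over the standard C allocator, as a sketch (demo_malloc0 is a hypothetical name, not a tbox API):

#include <stdlib.h>
#include <string.h>

/* allocate size bytes and zero them; returns NULL on failure,
 * mirroring the malloc-then-memset shape used above */
static void* demo_malloc0(size_t size)
{
    void* data = malloc(size);
    if (data) memset(data, 0, size);
    return data;
}

Note that calloc(1, size) has the same observable effect and can be cheaper when the allocator hands out already-zeroed pages.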
Example #4
tb_pointer_t tb_fixed_pool_malloc0_(tb_fixed_pool_ref_t pool __tb_debug_decl__)
{
    // check
    tb_fixed_pool_impl_t* impl = (tb_fixed_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_null);

    // done
    tb_pointer_t data = tb_fixed_pool_malloc_(pool __tb_debug_args__);
    tb_assert_and_check_return_val(data, tb_null);

    // clear it
    tb_memset_(data, 0, impl->item_size);

    // ok
    return data;
}
Example #5
tb_pointer_t tb_static_pool_malloc0_(tb_static_pool_ref_t pool, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_assert_and_check_return_val(pool && size, tb_null);
    tb_assert_and_check_return_val(size <= TB_POOL_DATA_SIZE_MAXN, tb_null);

    // malloc0 data
    tb_pointer_t data = tb_static_large_pool_malloc(pool, size, tb_null __tb_debug_args__);
    tb_assertf_abort(data, "malloc0(%lu) failed!", size);
    tb_assertf_abort(!(((tb_size_t)data) & (TB_POOL_DATA_ALIGN - 1)), "malloc0(%lu): unaligned data: %p", size, data);

    // clear it
    tb_memset_(data, 0, size);

    // ok
    return data;
}
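
Example #5 asserts that the returned pointer is TB_POOL_DATA_ALIGN-aligned by masking its low bits, which only works when the alignment is a power of two. A standalone sketch of that check (DEMO_ALIGN is an assumed power-of-two value):

#include <stdint.h>
#include <stdbool.h>

#define DEMO_ALIGN 16  /* assumed power-of-two alignment */

/* p is aligned iff its low log2(DEMO_ALIGN) bits are all zero */
static bool demo_is_aligned(void const* p)
{
    return ((uintptr_t)p & (DEMO_ALIGN - 1)) == 0;
}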
Example #6
tb_pointer_t tb_small_pool_nalloc0_(tb_small_pool_ref_t pool, tb_size_t item, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_small_pool_impl_t* impl = (tb_small_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && impl->large_pool && size, tb_null);
    tb_assert_and_check_return_val(item * size <= TB_SMALL_POOL_DATA_SIZE_MAXN, tb_null);

    // disable small pool for debug
#ifdef TB_SMALL_POOL_DISABLE
    return tb_large_pool_nalloc0(impl->large_pool, item, size, tb_null);
#endif

    // done
    tb_pointer_t data = tb_null;
    do
    {
        // the fixed pool
        tb_fixed_pool_ref_t fixed_pool = tb_small_pool_find_fixed(impl, item * size);
        tb_assert_and_check_break(fixed_pool);

        // done
        data = tb_fixed_pool_malloc0_(fixed_pool __tb_debug_args__);
        tb_assert_and_check_break(data);

        // the data head
        tb_pool_data_head_t* data_head = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assert_abort(data_head->debug.magic == TB_POOL_DATA_MAGIC);

#ifdef __tb_debug__
        // fill the patch bytes
        if (data_head->size > (item * size)) tb_memset_((tb_byte_t*)data + (item * size), TB_POOL_DATA_PATCH, data_head->size - (item * size));
#endif

        // update size
        data_head->size = item * size;
 
    } while (0);

    // check
    tb_assertf_abort(data, "nalloc(%lu, %lu) failed!", item, size);

    // ok?
    return data;
}
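
Example #6 recovers the allocation header by indexing the user pointer at [-1]: the tb_pool_data_head_t sits immediately before the returned data. A minimal sketch of that header-before-payload layout with hypothetical demo_* names:

#include <stdlib.h>
#include <stdint.h>

#define DEMO_MAGIC 0xdeadu

/* hypothetical per-allocation header stored before the user data */
typedef struct demo_head_t
{
    uint32_t magic;  /* corruption check    */
    uint32_t size;   /* requested user size */
} demo_head_t;

/* allocate header + payload and return the payload pointer */
static void* demo_alloc(uint32_t size)
{
    demo_head_t* head = (demo_head_t*)malloc(sizeof(demo_head_t) + size);
    if (!head) return NULL;
    head->magic = DEMO_MAGIC;
    head->size  = size;
    return head + 1;
}

/* recover the header, as in &((tb_pool_data_head_t*)data)[-1] above */
static demo_head_t* demo_head_of(void* data)
{
    return &((demo_head_t*)data)[-1];
}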
Example #7
tb_pointer_t tb_allocator_malloc0_(tb_allocator_ref_t allocator, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_assert_and_check_return_val(allocator, tb_null);

#ifdef __tb_alloc_trace__
    tb_trace_w("tb_allocator_malloc0_(): %lu", size, func_, line_, file_);
#endif
    // malloc it
    tb_pointer_t data = tb_allocator_malloc_(allocator, size __tb_debug_args__);
#ifdef __tb_alloc_trace__
    tb_trace_w("tb_allocator_malloc0_(%p): %lu", data, size, func_, line_, file_);
#endif

    // clear it
    if (data) tb_memset_(data, 0, size);

    // ok?
    return data;
}
Example #8
tb_void_t tb_pool_data_save_backtrace(tb_pool_data_debug_head_t* debug_head, tb_size_t skip_frames)
{ 
    tb_size_t nframe = tb_backtrace_frames(debug_head->backtrace, tb_arrayn(debug_head->backtrace), skip_frames + 2); 
    if (nframe < tb_arrayn(debug_head->backtrace)) tb_memset_(debug_head->backtrace + nframe, 0, (tb_arrayn(debug_head->backtrace) - nframe) * sizeof(tb_cpointer_t)); 
}
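
Example #8 zeroes the unused tail of the frame array so a later dump stops at the first null entry. A sketch of the same idea with the glibc/BSD backtrace(3) API from <execinfo.h> (a platform assumption; unlike tb_backtrace_frames it has no skip_frames parameter):

#include <execinfo.h>
#include <string.h>

#define DEMO_NFRAME 16

/* capture up to DEMO_NFRAME return addresses and null out the unused slots */
static void demo_save_backtrace(void* frames[DEMO_NFRAME])
{
    int nframe = backtrace(frames, DEMO_NFRAME);
    if (nframe < 0) nframe = 0;
    if (nframe < DEMO_NFRAME)
        memset(frames + nframe, 0, (DEMO_NFRAME - nframe) * sizeof(void*));
}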
Example #9
tb_pointer_t tb_static_fixed_pool_malloc(tb_static_fixed_pool_ref_t self __tb_debug_decl__)
{
    // check 
    tb_static_fixed_pool_t* pool = (tb_static_fixed_pool_t*)self;
    tb_assert_and_check_return_val(pool && pool->item_space, tb_null);

    // done
    tb_pointer_t                data = tb_null;
    tb_pool_data_empty_head_t*  data_head = tb_null;
    do
    {
        // full?
        tb_check_break(pool->item_count < pool->item_maxn);

        // predict it?
        data_head = tb_static_fixed_pool_malloc_pred(pool);

        // find it
        if (!data_head) data_head = tb_static_fixed_pool_malloc_find(pool);
        tb_check_break(data_head);

        // the real data
        data = (tb_byte_t*)data_head + pool->data_head_size;

        // save the real size
        if (pool->for_small) ((tb_pool_data_head_t*)data_head)->size = pool->item_size;

        // count++
        pool->item_count++;

#ifdef __tb_debug__

        // init the debug info
        data_head->debug.magic     = pool->for_small? TB_POOL_DATA_MAGIC : TB_POOL_DATA_EMPTY_MAGIC;
        data_head->debug.file      = file_;
        data_head->debug.func      = func_;
        data_head->debug.line      = (tb_uint16_t)line_;

        // save backtrace
        tb_pool_data_save_backtrace(&data_head->debug, 6);

        // fill the data with dirty 0xcc patch bytes for checking underflow
        tb_memset_(data, TB_POOL_DATA_PATCH, pool->item_space - pool->data_head_size);
 
        // update the real size
        pool->real_size     += pool->item_size;

        // update the occupied size
        pool->occupied_size += pool->item_space - TB_POOL_DATA_HEAD_DIFF_SIZE - 1;

        // update the total size
        pool->total_size    += pool->item_size;

        // update the peak size
        if (pool->total_size > pool->peak_size) pool->peak_size = pool->total_size;

        // update the malloc count
        pool->malloc_count++;
        
        // check the prev data
        tb_static_fixed_pool_check_prev(pool, data_head);

        // check the next data
        tb_static_fixed_pool_check_next(pool, data_head);
#endif

    } while (0);

    // check
    tb_assertf(data, "malloc(%lu) failed!", pool->item_size);
    tb_assertf(!(((tb_size_t)data) & (TB_POOL_DATA_ALIGN - 1)), "malloc(%lu): unaligned data: %p", pool->item_size, data);

    // ok?
    return data;
}
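
Example #9 tries a predicted slot first (tb_static_fixed_pool_malloc_pred) and only falls back to a full search of the usage bitmap. A condensed sketch of that predict-then-find strategy over a plain byte map, with hypothetical demo_* names instead of the real bit-level search:

#include <stddef.h>

#define DEMO_MAXN 64

/* hypothetical pool: one byte per slot, 0 = free, 1 = used */
typedef struct demo_fixed_pool_t
{
    unsigned char used[DEMO_MAXN];
    size_t        pred;  /* predicted free slot index + 1, 0 = none */
} demo_fixed_pool_t;

/* return a free slot index, or -1 if the pool is full */
static int demo_pool_find(demo_fixed_pool_t* pool)
{
    size_t i;

    /* fast path: the predicted slot, typically the one freed last */
    if (pool->pred && !pool->used[pool->pred - 1])
    {
        i = pool->pred - 1;
        pool->used[i] = 1;
        pool->pred = 0;
        return (int)i;
    }

    /* slow path: linear scan of the usage map */
    for (i = 0; i < DEMO_MAXN; i++)
    {
        if (!pool->used[i])
        {
            pool->used[i] = 1;
            pool->pred = (i + 1 < DEMO_MAXN)? i + 2 : 0;  /* predict the neighbor */
            return (int)i;
        }
    }
    return -1;
}

/* free a slot and remember it as the next prediction */
static void demo_pool_free(demo_fixed_pool_t* pool, int i)
{
    pool->used[i] = 0;
    pool->pred = (size_t)i + 1;
}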
Example #10
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
tb_static_fixed_pool_ref_t tb_static_fixed_pool_init(tb_byte_t* data, tb_size_t size, tb_size_t item_size, tb_bool_t for_small)
{
    // check
    tb_assert_and_check_return_val(data && size && item_size, tb_null);

    // align data and size
    tb_size_t diff = tb_align((tb_size_t)data, TB_POOL_DATA_ALIGN) - (tb_size_t)data;
    tb_assert_and_check_return_val(size > diff + sizeof(tb_static_fixed_pool_t), tb_null);
    size -= diff;
    data += diff;

    // init pool
    tb_static_fixed_pool_t* pool = (tb_static_fixed_pool_t*)data;
    tb_memset_(pool, 0, sizeof(tb_static_fixed_pool_t));

    // for small allocator?
    pool->for_small = !!for_small;
    pool->data_head_size = for_small? sizeof(tb_pool_data_head_t) : sizeof(tb_pool_data_empty_head_t);
#ifndef __tb_debug__
    // fix data alignment, because sizeof(tb_pool_data_empty_head_t) == 1 now.
    if (!for_small) pool->data_head_size = 0;
#endif
    tb_assert_and_check_return_val(!(pool->data_head_size & (TB_POOL_DATA_ALIGN - 1)), tb_null);

#ifdef __tb_debug__
    // init patch for checking underflow
    tb_size_t patch = 1;
#else
    tb_size_t patch = 0;
#endif

    // init the item space
    pool->item_space = pool->data_head_size + item_size + patch;
    pool->item_space = tb_align(pool->item_space, TB_POOL_DATA_ALIGN);
    tb_assert_and_check_return_val(pool->item_space > pool->data_head_size, tb_null);

    // init the used info
    pool->used_info = (tb_byte_t*)tb_align((tb_size_t)&pool[1], TB_POOL_DATA_ALIGN);
    tb_assert_and_check_return_val(data + size > pool->used_info, tb_null);

    /* init the item maxn
     *
     * used_info + maxn * item_space < left
     * align8(maxn) / 8 + maxn * item_space < left
     * (maxn + 7) / 8 + maxn * item_space < left
     * (maxn / 8) + (7 / 8) + maxn * item_space < left
     * maxn * (1 / 8 + item_space) < left - (7 / 8)
     * maxn < (left - (7 / 8)) / (1 / 8 + item_space)
     * maxn < (left * 8 - 7) / (1 + item_space * 8)
     */
    pool->item_maxn = (((data + size - pool->used_info) << 3) - 7) / (1 + (pool->item_space << 3));
    tb_assert_and_check_return_val(pool->item_maxn, tb_null);

    // init the used info size
    pool->info_size = tb_align(pool->item_maxn, TB_CPU_BITSIZE) >> 3;
    tb_assert_and_check_return_val(pool->info_size && !(pool->info_size & (TB_CPU_BITBYTE - 1)), tb_null);
 
    // clear the used info
    tb_memset_(pool->used_info, 0, pool->info_size);

    // init data
    pool->data = (tb_byte_t*)tb_align((tb_size_t)pool->used_info + pool->info_size, TB_POOL_DATA_ALIGN);
    tb_assert_and_check_return_val(data + size > pool->data, tb_null);
    tb_assert_and_check_return_val(pool->item_maxn * pool->item_space <= (tb_size_t)(data + size - pool->data + 1), tb_null);

    // init data tail
    pool->tail = pool->data + pool->item_maxn * pool->item_space;

    // init the item size
    pool->item_size = item_size;

    // init the item count
    pool->item_count = 0;

    // init the predict index
    pool->pred_index = 1;

    // trace
    tb_trace_d("init: data: %p, size: %lu, head_size: %lu, item_size: %lu, item_maxn: %lu, item_space: %lu", pool->data, size, pool->data - (tb_byte_t*)pool, item_size, pool->item_maxn, pool->item_space);

    // ok
    return (tb_static_fixed_pool_ref_t)pool;
}
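
The item_maxn bound derived in the init comment can be sanity-checked with concrete numbers. A sketch that reproduces the (left * 8 - 7) / (1 + item_space * 8) computation and verifies the invariant (the sizes are made up for illustration):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* assumed sizes, for illustration only */
    size_t left       = 4096;  /* bytes left after the used_info base */
    size_t item_space = 24;    /* aligned head + item + patch byte    */

    /* maxn < (left * 8 - 7) / (1 + item_space * 8): each item costs
     * item_space bytes plus one bit in the usage bitmap */
    size_t maxn = ((left << 3) - 7) / (1 + (item_space << 3));
    size_t info = (maxn + 7) >> 3;  /* bitmap bytes actually needed */

    /* with left = 4096 and item_space = 24: maxn = 169, info = 22,
     * and 22 + 169 * 24 = 4078 <= 4096 */
    printf("maxn = %zu, bitmap = %zu bytes, items = %zu bytes\n",
           maxn, info, maxn * item_space);
    return (info + maxn * item_space <= left)? 0 : 1;
}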
Example #11
tb_pointer_t tb_small_pool_ralloc_(tb_small_pool_ref_t pool, tb_pointer_t data, tb_size_t size __tb_debug_decl__)
{
    // check
    tb_small_pool_impl_t* impl = (tb_small_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl && impl->large_pool && data && size, tb_null);
    tb_assert_and_check_return_val(size <= TB_SMALL_POOL_DATA_SIZE_MAXN, tb_null);

    // disable small pool for debug
#ifdef TB_SMALL_POOL_DISABLE
    return tb_large_pool_ralloc(impl->large_pool, data, size, tb_null);
#endif

    // done
    tb_pointer_t data_new = tb_null;
    do
    {
        // the old data head
        tb_pool_data_head_t* data_head_old = &(((tb_pool_data_head_t*)data)[-1]);
        tb_assertf_break(data_head_old->debug.magic == TB_POOL_DATA_MAGIC, "ralloc invalid data: %p", data);

        // the old fixed pool
        tb_fixed_pool_ref_t fixed_pool_old = tb_small_pool_find_fixed(impl, data_head_old->size);
        tb_assert_and_check_break(fixed_pool_old);

        // the old data space
        tb_size_t space_old = tb_fixed_pool_item_size(fixed_pool_old);
        tb_assert_and_check_break(space_old >= data_head_old->size);

        // check underflow
        tb_assertf_break(space_old == data_head_old->size || ((tb_byte_t*)data)[data_head_old->size] == TB_POOL_DATA_PATCH, "data underflow");

        // the new fixed pool
        tb_fixed_pool_ref_t fixed_pool_new = tb_small_pool_find_fixed(impl, size);
        tb_assert_and_check_break(fixed_pool_new);

        // same space?
        if (fixed_pool_old == fixed_pool_new) 
        {
#ifdef __tb_debug__
            // fill the patch bytes
            if (data_head_old->size > size) tb_memset_((tb_byte_t*)data + size, TB_POOL_DATA_PATCH, data_head_old->size - size);
#endif
            // only update size
            data_head_old->size = size;

            // ok
            data_new = data;
            break;
        }

        // make the new data
        data_new = tb_fixed_pool_malloc_(fixed_pool_new __tb_debug_args__);
        tb_assert_and_check_break(data_new);

        // the new data head
        tb_pool_data_head_t* data_head_new = &(((tb_pool_data_head_t*)data_new)[-1]);
        tb_assert_abort(data_head_new->debug.magic == TB_POOL_DATA_MAGIC);

#ifdef __tb_debug__
        // fill the patch bytes
        if (data_head_new->size > size) tb_memset_((tb_byte_t*)data_new + size, TB_POOL_DATA_PATCH, data_head_new->size - size);
#endif

        // update size
        data_head_new->size = size;

        // copy the old data
        tb_memcpy_(data_new, data, tb_min(data_head_old->size, size));

        // free the old data
        tb_fixed_pool_free_(fixed_pool_old, data __tb_debug_args__);

    } while (0);

    // failed? dump it
#ifdef __tb_debug__
    if (!data_new) 
    {
        // trace
        tb_trace_e("ralloc(%p, %lu) failed! at %s(): %lu, %s", data, size, func_, line_, file_);

        // dump data
        tb_pool_data_dump((tb_byte_t const*)data, tb_true, "[small_pool]: [error]: ");

        // abort
        tb_abort();
    }
#endif

    // ok
    return data_new;
}
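
Example #11's fast path keeps the block whenever the old and new sizes map to the same fixed pool, only updating the recorded size; otherwise it falls back to malloc + copy + free. A compact sketch of the same size-class logic, where demo_bucket is a hypothetical stand-in for tb_small_pool_find_fixed:

#include <stdlib.h>
#include <string.h>

/* hypothetical size-class mapping: round up to a power-of-two bucket */
static size_t demo_bucket(size_t size)
{
    size_t n = 16;
    while (n < size) n <<= 1;
    return n;
}

/* grow or shrink a block whose current user size is old_size */
static void* demo_ralloc(void* data, size_t old_size, size_t size)
{
    /* fast path: same bucket, so the existing block already fits */
    if (demo_bucket(old_size) == demo_bucket(size))
        return data;

    /* slow path: allocate in the new bucket, copy the common prefix, free */
    void* data_new = malloc(demo_bucket(size));
    if (!data_new) return NULL;
    memcpy(data_new, data, old_size < size? old_size : size);
    free(data);
    return data_new;
}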