Example No. 1
static tb_void_t tb_fixed_pool_slot_exit(tb_fixed_pool_t* pool, tb_fixed_pool_slot_t* slot)
{
    // check
    tb_assert_and_check_return(pool && pool->large_allocator && slot);
    tb_assert_and_check_return(pool->slot_list && pool->slot_count);

    // trace
    tb_trace_d("slot[%lu]: exit: size: %lu", pool->item_size, slot->size);

    // make the iterator
    tb_array_iterator_t array_iterator;
    tb_iterator_ref_t   iterator = tb_iterator_make_for_ptr(&array_iterator, (tb_pointer_t*)pool->slot_list, pool->slot_count);
    tb_assert(iterator);

    // find the slot from the slot list
    tb_size_t itor = tb_binary_find_all(iterator, (tb_cpointer_t)slot);
    tb_assert(itor != tb_iterator_tail(iterator) && itor < pool->slot_count && pool->slot_list[itor]);
    tb_check_return(itor != tb_iterator_tail(iterator) && itor < pool->slot_count && pool->slot_list[itor]);
    
    // remove the slot
    if (itor + 1 < pool->slot_count) tb_memmov_(pool->slot_list + itor, pool->slot_list + itor + 1, (pool->slot_count - itor - 1) * sizeof(tb_fixed_pool_slot_t*));

    // update the slot count
    pool->slot_count--;

    // exit slot
    tb_allocator_large_free(pool->large_allocator, slot);
}
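
The slot removal above boils down to a binary search over the sorted slot list followed by a memmove that closes the gap and a decrement of the count. A minimal sketch of that removal pattern in plain C (not TBOX code; array_remove_at is a hypothetical helper introduced here only for illustration):

#include <string.h>
#include <assert.h>

/* Remove the element at index `itor` from a pointer array of `*count` items:
 * shift the trailing slots left by one with memmove, then shrink the count.
 * This mirrors the tb_memmov_ step in tb_fixed_pool_slot_exit above. */
static void array_remove_at(void** list, size_t* count, size_t itor)
{
    assert(list && count && itor < *count);

    // shift the tail left by one slot to close the gap
    if (itor + 1 < *count)
        memmove(list + itor, list + itor + 1, (*count - itor - 1) * sizeof(void*));

    // update the element count
    (*count)--;
}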
Example No. 2
tb_void_t tb_fixed_pool_exit(tb_fixed_pool_ref_t self)
{
    // check
    tb_fixed_pool_t* pool = (tb_fixed_pool_t*)self;
    tb_assert_and_check_return(pool);

    // clear it
    tb_fixed_pool_clear(self);

    // exit the current slot
    if (pool->current_slot) tb_fixed_pool_slot_exit(pool, pool->current_slot);
    pool->current_slot = tb_null;

    // exit the slot list
    if (pool->slot_list) tb_allocator_large_free(pool->large_allocator, pool->slot_list);
    pool->slot_list = tb_null;
    pool->slot_count = 0;
    pool->slot_space = 0;

    // exit it
    tb_allocator_large_free(pool->large_allocator, pool);
}
Example No. 3
/* //////////////////////////////////////////////////////////////////////////////////////
 * private implementation
 */
static tb_void_t tb_default_allocator_exit(tb_allocator_ref_t self)
{
    // check
    tb_default_allocator_ref_t allocator = (tb_default_allocator_ref_t)self;
    tb_assert_and_check_return(allocator);

    // enter
    tb_spinlock_enter(&allocator->base.lock);

    // exit small allocator
    if (allocator->small_allocator) tb_allocator_exit(allocator->small_allocator);
    allocator->small_allocator = tb_null;

    // leave
    tb_spinlock_leave(&allocator->base.lock);

    // exit lock
    tb_spinlock_exit(&allocator->base.lock);

    // exit allocator
    if (allocator->large_allocator) tb_allocator_large_free(allocator->large_allocator, allocator);
}
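
The exit routine above follows a strict ordering: take the allocator's lock while the inner small allocator is released, leave the lock, destroy the lock itself, and only then free the owning allocator object. A minimal sketch of the same ordering, assuming a POSIX spinlock as a stand-in for tb_spinlock_t (demo_allocator_t and demo_allocator_exit are hypothetical names used only for illustration, not TBOX API):

#include <pthread.h>
#include <stdlib.h>

/* Stand-in for the allocator object: a lock plus some inner state. */
typedef struct demo_allocator_t
{
    pthread_spinlock_t  lock;
    void*               inner;      /* stand-in for allocator->small_allocator */
} demo_allocator_t;

static void demo_allocator_exit(demo_allocator_t* allocator)
{
    if (!allocator) return;

    // enter: protect the inner state while it is being released
    pthread_spin_lock(&allocator->lock);

    if (allocator->inner) free(allocator->inner);
    allocator->inner = NULL;

    // leave before destroying the lock itself
    pthread_spin_unlock(&allocator->lock);
    pthread_spin_destroy(&allocator->lock);

    // finally free the owning object
    free(allocator);
}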
Example No. 4
tb_void_t tb_demo_small_allocator_perf()
{
    // done
    tb_allocator_ref_t small_allocator = tb_null;
    tb_allocator_ref_t large_allocator = tb_null;
    do
    {
        // init small allocator
        small_allocator = tb_small_allocator_init(tb_null);
        tb_assert_and_check_break(small_allocator);

        // init large allocator
        large_allocator = tb_large_allocator_init(tb_null, 0);
        tb_assert_and_check_break(large_allocator);

        // make data list
        tb_size_t       maxn = 100000;
        tb_pointer_t*   list = (tb_pointer_t*)tb_allocator_large_nalloc0(large_allocator, maxn, sizeof(tb_pointer_t), tb_null);
        tb_assert_and_check_break(list);

        // done 
        __tb_volatile__ tb_size_t indx = 0;
        __tb_volatile__ tb_hong_t time = tb_mclock();
        __tb_volatile__ tb_size_t rand = 0xbeaf;
        for (indx = 0; indx < maxn; indx++)
        {
            // make data
            list[indx] = tb_allocator_malloc0(small_allocator, (rand & 3071) + 1);
            tb_assert_and_check_break(list[indx]);

            // make rand
            rand = (rand * 10807 + 1) & 0xffffffff;

            // re-make data
            if (!(indx & 31)) 
            {
                list[indx] = tb_allocator_ralloc(small_allocator, list[indx], (rand & 3071) + 1);
                tb_assert_and_check_break(list[indx]);
            }

            // free data
            __tb_volatile__ tb_size_t size = rand & 15;
            if (size > 5 && indx)
            {
                size -= 5;
                while (size--) 
                {
                    // the free index
                    tb_size_t free_indx = rand % indx;

                    // free it
                    if (list[free_indx]) tb_allocator_free(small_allocator, list[free_indx]);
                    list[free_indx] = tb_null;
                }
            }
        }
        time = tb_mclock() - time;

#ifdef __tb_debug__
        // dump small_allocator
        tb_allocator_dump(small_allocator);
#endif

        // trace
        tb_trace_i("time: %lld ms", time);

        // clear small_allocator
        tb_allocator_clear(small_allocator);

        // exit list
        tb_allocator_large_free(large_allocator, list);

    } while (0);

    // exit small allocator
    if (small_allocator) tb_allocator_exit(small_allocator);
    small_allocator = tb_null;

    // exit large allocator
    if (large_allocator) tb_allocator_exit(large_allocator);
    large_allocator = tb_null;
}
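
The benchmark above stresses the small allocator with randomized malloc0/ralloc/free cycles and times the result. Stripped of the timing and the random workload, the basic lifecycle it relies on looks roughly like the sketch below, which uses only the TBOX calls already present in the example (the demo_small_allocator_usage wrapper and the tbox/tbox.h include path are assumptions):

#include "tbox/tbox.h"

static tb_void_t demo_small_allocator_usage()
{
    // init a small allocator backed by the default large allocator
    tb_allocator_ref_t small_allocator = tb_small_allocator_init(tb_null);
    tb_assert_and_check_return(small_allocator);

    // allocate, grow, and free a single block
    tb_pointer_t data = tb_allocator_malloc0(small_allocator, 256);
    if (data) data = tb_allocator_ralloc(small_allocator, data, 512);
    if (data) tb_allocator_free(small_allocator, data);

    // release everything still held by the allocator, then exit it
    tb_allocator_clear(small_allocator);
    tb_allocator_exit(small_allocator);
}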