/* create a transfer pool bound to the given aicp
 *
 * falls back to the global aicp when @aicp is tb_null; returns tb_null on
 * failure (allocation, spinlock init, or an aicp with zero capacity)
 */
tb_transfer_pool_ref_t tb_transfer_pool_init(tb_aicp_ref_t aicp)
{
    tb_transfer_pool_impl_t*    pool   = tb_null;
    tb_bool_t                   inited = tb_false;
    do
    {
        // no aicp supplied? fall back to the default one
        if (!aicp) aicp = tb_aicp();
        tb_assert_and_check_break(aicp);

        // allocate the zero-filled pool structure
        pool = tb_malloc0_type(tb_transfer_pool_impl_t);
        tb_assert_and_check_break(pool);

        // the spinlock guards the task lists below
        if (!tb_spinlock_init(&pool->lock)) break;

        // bind the aicp and adopt its capacity; a zero capacity is rejected
        pool->aicp  = aicp;
        pool->maxn  = tb_aicp_maxn(aicp);
        pool->state = TB_STATE_OK;
        tb_assert_and_check_break(pool->maxn);

        // both task lists (cached/idle and running/work) start empty
        tb_list_entry_init(&pool->idle, tb_transfer_task_t, entry, tb_null);
        tb_list_entry_init(&pool->work, tb_transfer_task_t, entry, tb_null);

        // expose the spinlock to the lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&pool->lock, TB_TRACE_MODULE_NAME);
#endif

        // fully initialized
        inited = tb_true;

    } while (0);

    // success?
    if (inited) return (tb_transfer_pool_ref_t)pool;

    // roll back the partially constructed pool
    if (pool) tb_transfer_pool_exit((tb_transfer_pool_ref_t)pool);
    return tb_null;
}
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_fixed_pool_ref_t tb_fixed_pool_init_(tb_large_pool_ref_t large_pool, tb_size_t slot_size, tb_size_t item_size, tb_bool_t for_small_pool, tb_fixed_pool_item_init_func_t item_init, tb_fixed_pool_item_exit_func_t item_exit, tb_cpointer_t priv) { // check tb_assert_and_check_return_val(item_size, tb_null); // done tb_bool_t ok = tb_false; tb_fixed_pool_impl_t* impl = tb_null; do { // using the default large pool if (!large_pool) large_pool = tb_large_pool(); tb_assert_and_check_break(large_pool); // make pool impl = (tb_fixed_pool_impl_t*)tb_large_pool_malloc0(large_pool, sizeof(tb_fixed_pool_impl_t), tb_null); tb_assert_and_check_break(impl); // init pool impl->large_pool = large_pool; impl->slot_size = slot_size? slot_size : (tb_page_size() >> 4); impl->item_size = item_size; impl->func_init = item_init; impl->func_exit = item_exit; impl->func_priv = priv; impl->for_small_pool = for_small_pool; tb_assert_and_check_break(impl->slot_size); // init partial slots tb_list_entry_init(&impl->partial_slots, tb_fixed_pool_slot_t, entry, tb_null); // init full slots tb_list_entry_init(&impl->full_slots, tb_fixed_pool_slot_t, entry, tb_null); // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (impl) tb_fixed_pool_exit((tb_fixed_pool_ref_t)impl); impl = tb_null; } // ok? return (tb_fixed_pool_ref_t)impl; }
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_fixed_pool_ref_t tb_fixed_pool_init_(tb_allocator_ref_t large_allocator, tb_size_t slot_size, tb_size_t item_size, tb_bool_t for_small, tb_fixed_pool_item_init_func_t item_init, tb_fixed_pool_item_exit_func_t item_exit, tb_cpointer_t priv) { // check tb_assert_and_check_return_val(item_size, tb_null); // done tb_bool_t ok = tb_false; tb_fixed_pool_t* pool = tb_null; do { // no allocator? uses the global allocator if (!large_allocator) large_allocator = tb_allocator(); tb_assert_and_check_break(large_allocator); // make pool pool = (tb_fixed_pool_t*)tb_allocator_large_malloc0(large_allocator, sizeof(tb_fixed_pool_t), tb_null); tb_assert_and_check_break(pool); // init pool pool->large_allocator = large_allocator; pool->slot_size = slot_size? slot_size : (tb_page_size() >> 4); pool->item_size = item_size; pool->func_init = item_init; pool->func_exit = item_exit; pool->func_priv = priv; pool->for_small = for_small; tb_assert_and_check_break(pool->slot_size); // init partial slots tb_list_entry_init(&pool->partial_slots, tb_fixed_pool_slot_t, entry, tb_null); // init full slots tb_list_entry_init(&pool->full_slots, tb_fixed_pool_slot_t, entry, tb_null); // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (pool) tb_fixed_pool_exit((tb_fixed_pool_ref_t)pool); pool = tb_null; } // ok? return (tb_fixed_pool_ref_t)pool; }
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_co_scheduler_ref_t tb_co_scheduler_init() { // done tb_bool_t ok = tb_false; tb_co_scheduler_t* scheduler = tb_null; do { // make scheduler scheduler = tb_malloc0_type(tb_co_scheduler_t); tb_assert_and_check_break(scheduler); // init dead coroutines tb_list_entry_init(&scheduler->coroutines_dead, tb_coroutine_t, entry, tb_null); // init ready coroutines tb_list_entry_init(&scheduler->coroutines_ready, tb_coroutine_t, entry, tb_null); // init suspend coroutines tb_list_entry_init(&scheduler->coroutines_suspend, tb_coroutine_t, entry, tb_null); // init original coroutine scheduler->original.scheduler = (tb_co_scheduler_ref_t)scheduler; // init running scheduler->running = &scheduler->original; // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (scheduler) tb_co_scheduler_exit((tb_co_scheduler_ref_t)scheduler); scheduler = tb_null; } // ok? return (tb_co_scheduler_ref_t)scheduler; }
/* //////////////////////////////////////////////////////////////////////////////////////
 * main
 */
// demo: exercise the intrusive tb_list_entry container with stack-allocated entries
// NOTE(review): this block appears truncated here — the function body continues beyond this view
tb_int_t tb_demo_container_list_entry_main(tb_int_t argc, tb_char_t** argv)
{
    // init the entries
    // each tb_demo_entry_t embeds a list link ({0}) followed by a data value 0..11
    tb_demo_entry_t entries[12] =
    {
        {{0}, 0}
    ,   {{0}, 1}
    ,   {{0}, 2}
    ,   {{0}, 3}
    ,   {{0}, 4}
    ,   {{0}, 5}
    ,   {{0}, 6}
    ,   {{0}, 7}
    ,   {{0}, 8}
    ,   {{0}, 9}
    ,   {{0}, 10}
    ,   {{0}, 11}
    };

    // init the list
    // tb_demo_entry_copy is presumably the element copy callback — confirm against its definition
    tb_list_entry_init(&list, tb_demo_entry_t, entry, tb_demo_entry_copy);

    // insert entries
    // append 5..9 at the tail, then prepend 4..0 at the head, yielding order 0..9
    tb_list_entry_insert_tail(&list, &entries[5].entry);
    tb_list_entry_insert_tail(&list, &entries[6].entry);
    tb_list_entry_insert_tail(&list, &entries[7].entry);
    tb_list_entry_insert_tail(&list, &entries[8].entry);
    tb_list_entry_insert_tail(&list, &entries[9].entry);
    tb_list_entry_insert_head(&list, &entries[4].entry);
    tb_list_entry_insert_head(&list, &entries[3].entry);
    tb_list_entry_insert_head(&list, &entries[2].entry);
    tb_list_entry_insert_head(&list, &entries[1].entry);
    tb_list_entry_insert_head(&list, &entries[0].entry);

    // the entry
    // recover the containing tb_demo_entry_t from its embedded link
    tb_demo_entry_t* entry = (tb_demo_entry_t*)tb_list_entry(&list, &entries[5].entry);
    tb_trace_i("entry: %lu", entry->data);
    tb_trace_i("");

    // walk it
    tb_trace_i("insert: %lu", tb_list_entry_size(&list));
    tb_for_all_if(tb_demo_entry_t*, item0, tb_list_entry_itor(&list), item0)
    {
        tb_trace_i("%lu", item0->data);
    }
/* create a thread pool
 *
 * @worker_maxn  maximum worker count; 0 selects 4x the processor count
 * @stack        stack size for worker threads
 *
 * returns tb_null on failure (allocation, lock, jobs pool, or semaphore init)
 */
tb_thread_pool_ref_t tb_thread_pool_init(tb_size_t worker_maxn, tb_size_t stack)
{
    tb_thread_pool_impl_t*  pool   = tb_null;
    tb_bool_t               inited = tb_false;
    do
    {
        // allocate the zero-filled pool structure
        pool = tb_malloc0_type(tb_thread_pool_impl_t);
        tb_assert_and_check_break(pool);

        // the spinlock guards the pool's shared state
        if (!tb_spinlock_init(&pool->lock)) break;

        // zero means "pick for me": four workers per processor
        if (!worker_maxn) worker_maxn = tb_processor_count() << 2;
        tb_assert_and_check_break(worker_maxn);

        // thread stack size and worker bookkeeping
        pool->stack       = stack;
        pool->worker_size = 0;
        pool->worker_maxn = worker_maxn;

        // fixed pool that hands out job nodes
        pool->jobs_pool = tb_fixed_pool_init(tb_null, TB_THREAD_POOL_JOBS_POOL_GROW, sizeof(tb_thread_pool_job_t), tb_null, tb_null, tb_null);
        tb_assert_and_check_break(pool->jobs_pool);

        // the urgent, waiting and pending job lists all start empty
        tb_list_entry_init(&pool->jobs_urgent, tb_thread_pool_job_t, entry, tb_null);
        tb_list_entry_init(&pool->jobs_waiting, tb_thread_pool_job_t, entry, tb_null);
        tb_list_entry_init(&pool->jobs_pending, tb_thread_pool_job_t, entry, tb_null);

        // semaphore, initially 0 — presumably signalled to wake workers; see worker loop
        pool->semaphore = tb_semaphore_init(0);
        tb_assert_and_check_break(pool->semaphore);

        // expose the spinlock to the lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&pool->lock, TB_TRACE_MODULE_NAME);
#endif

        // fully initialized
        inited = tb_true;

    } while (0);

    // success?
    if (inited) return (tb_thread_pool_ref_t)pool;

    // undo the partial construction
    if (pool) tb_thread_pool_exit((tb_thread_pool_ref_t)pool);
    return tb_null;
}