/* create the select()-based reactor for the given aiop
 *
 * allocates the reactor, wires up its operation table, clears all fd sets,
 * and builds the locks and the fd hash. returns tb_null on any failure,
 * after tearing down whatever was already constructed.
 */
static tb_aiop_rtor_impl_t* tb_aiop_rtor_select_init(tb_aiop_impl_t* aiop)
{
    // check
    tb_assert_and_check_return_val(aiop && aiop->maxn, tb_null);

    // attempt to build the reactor
    tb_bool_t                   done = tb_false;
    tb_aiop_rtor_select_impl_t* impl = tb_null;
    do
    {
        // allocate the reactor
        impl = tb_malloc0_type(tb_aiop_rtor_select_impl_t);
        tb_assert_and_check_break(impl);

        // wire up the base operation table
        impl->base.aiop = aiop;
        impl->base.code = TB_AIOE_CODE_EALL | TB_AIOE_CODE_ONESHOT;
        impl->base.exit = tb_aiop_rtor_select_exit;
        impl->base.cler = tb_aiop_rtor_select_cler;
        impl->base.addo = tb_aiop_rtor_select_addo;
        impl->base.delo = tb_aiop_rtor_select_delo;
        impl->base.post = tb_aiop_rtor_select_post;
        impl->base.wait = tb_aiop_rtor_select_wait;

        // clear the input and output fd sets
        FD_ZERO(&impl->rfdi);
        FD_ZERO(&impl->wfdi);
        FD_ZERO(&impl->efdi);
        FD_ZERO(&impl->rfdo);
        FD_ZERO(&impl->wfdo);
        FD_ZERO(&impl->efdo);

        // init the locks guarding the fd sets and the hash
        if (!tb_spinlock_init(&impl->lock.pfds)) break;
        if (!tb_spinlock_init(&impl->lock.hash)) break;

        // init the object hash, sized from the aiop capacity
        impl->hash = tb_hash_init(tb_align8(tb_isqrti(aiop->maxn) + 1), tb_item_func_ptr(tb_null, tb_null), tb_item_func_ptr(tb_null, tb_null));
        tb_assert_and_check_break(impl->hash);

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial reactor on failure
    if (!done)
    {
        if (impl) tb_aiop_rtor_select_exit((tb_aiop_rtor_impl_t*)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_aiop_rtor_impl_t*)impl;
}
/* create the poll()-based reactor for the given aiop
 *
 * allocates the reactor, wires up its operation table, and builds the
 * locks, the two pollfd vectors, and the fd hash. returns tb_null on any
 * failure, after tearing down whatever was already constructed.
 */
static tb_aiop_rtor_impl_t* tb_aiop_rtor_poll_init(tb_aiop_impl_t* aiop)
{
    // check
    tb_assert_and_check_return_val(aiop && aiop->maxn, tb_null);

    // attempt to build the reactor
    tb_bool_t                 done = tb_false;
    tb_aiop_rtor_poll_impl_t* impl = tb_null;
    do
    {
        // allocate the reactor
        impl = tb_malloc0_type(tb_aiop_rtor_poll_impl_t);
        tb_assert_and_check_break(impl);

        // wire up the base operation table
        impl->base.aiop = aiop;
        impl->base.code = TB_AIOE_CODE_EALL | TB_AIOE_CODE_ONESHOT;
        impl->base.exit = tb_aiop_rtor_poll_exit;
        impl->base.cler = tb_aiop_rtor_poll_cler;
        impl->base.addo = tb_aiop_rtor_poll_addo;
        impl->base.delo = tb_aiop_rtor_poll_delo;
        impl->base.post = tb_aiop_rtor_poll_post;
        impl->base.wait = tb_aiop_rtor_poll_wait;

        // init the locks guarding the pollfd vectors and the hash
        if (!tb_spinlock_init(&impl->lock.pfds)) break;
        if (!tb_spinlock_init(&impl->lock.hash)) break;

        /* init the two pollfd vectors
         * (cfds presumably serves as a working copy of pfds for the wait
         * call — confirm against tb_aiop_rtor_poll_wait)
         */
        impl->pfds = tb_vector_init(tb_align8((aiop->maxn >> 3) + 1), tb_item_func_mem(sizeof(struct pollfd), tb_null, tb_null));
        tb_assert_and_check_break(impl->pfds);

        impl->cfds = tb_vector_init(tb_align8((aiop->maxn >> 3) + 1), tb_item_func_mem(sizeof(struct pollfd), tb_null, tb_null));
        tb_assert_and_check_break(impl->cfds);

        // init the object hash, sized from the aiop capacity
        impl->hash = tb_hash_init(tb_align8(tb_isqrti(aiop->maxn) + 1), tb_item_func_ptr(tb_null, tb_null), tb_item_func_ptr(tb_null, tb_null));
        tb_assert_and_check_break(impl->hash);

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial reactor on failure
    if (!done)
    {
        if (impl) tb_aiop_rtor_poll_exit((tb_aiop_rtor_impl_t*)impl);
        impl = tb_null;
    }

    // ok
    return (tb_aiop_rtor_impl_t*)impl;
}
tb_mutex_ref_t tb_mutex_init() { // done tb_bool_t ok = tb_false; tb_spinlock_ref_t lock = tb_null; do { // make lock lock = tb_malloc0_type(tb_spinlock_t); tb_assert_and_check_break(lock); // init lock if (!tb_spinlock_init(lock)) break; // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it tb_free(lock); lock = tb_null; } // ok? return (tb_mutex_ref_t)lock; }
/* create a pool
 *
 * when an external allocator is supplied, the pool is a thin wrapper that
 * forwards to it. otherwise the pool is carved out of the given (or
 * default) large pool, with its own lock and small pool. returns tb_null
 * on failure, after tearing down whatever was already constructed.
 */
tb_pool_ref_t tb_pool_init(tb_allocator_ref_t allocator, tb_large_pool_ref_t large_pool)
{
    // attempt to build the pool
    tb_bool_t       done = tb_false;
    tb_pool_impl_t* impl = tb_null;
    do
    {
        // an external allocator replaces the pool machinery entirely
        if (allocator)
        {
            // allocate the wrapper from the allocator itself
            impl = (tb_pool_impl_t*)tb_allocator_malloc0(allocator, sizeof(tb_pool_impl_t));
            tb_assert_and_check_break(impl);

            // save allocator
            impl->allocator = allocator;

            // ok
            done = tb_true;
            break;
        }

        // fall back to the default large pool
        if (!large_pool) large_pool = tb_large_pool();
        tb_assert_and_check_break(large_pool);

        // allocate the pool from the large pool
        impl = (tb_pool_impl_t*)tb_large_pool_malloc0(large_pool, sizeof(tb_pool_impl_t), tb_null);
        tb_assert_and_check_break(impl);

        // init lock
        if (!tb_spinlock_init(&impl->lock)) break;

        // keep the large pool and build the small pool on top of it
        impl->large_pool = large_pool;
        impl->small_pool = tb_small_pool_init(large_pool);
        tb_assert_and_check_break(impl->small_pool);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->lock, TB_TRACE_MODULE_NAME);
#endif

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial pool on failure
    if (!done)
    {
        if (impl) tb_pool_exit((tb_pool_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_pool_ref_t)impl;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/* create an aiop with capacity for maxn objects
 *
 * builds the lock, the aioo pool, the spak socket pair (the pair's read
 * side is registered for RECV — presumably so waits can be interrupted;
 * confirm against the spak handling in tb_aiop_wait), and the platform
 * reactor. returns tb_null on failure, after tearing everything down.
 */
tb_aiop_ref_t tb_aiop_init(tb_size_t maxn)
{
    // check
    tb_assert_and_check_return_val(maxn, tb_null);

    // attempt to build the aiop
    tb_bool_t       done = tb_false;
    tb_aiop_impl_t* impl = tb_null;
    do
    {
        // allocate the aiop
        impl = tb_malloc0_type(tb_aiop_impl_t);
        tb_assert_and_check_break(impl);

        // save the capacity
        impl->maxn = maxn;

        // init lock
        if (!tb_spinlock_init(&impl->lock)) break;

        // init the aioo pool
        impl->pool = tb_fixed_pool_init(tb_null, (maxn >> 4) + 16, sizeof(tb_aioo_impl_t), tb_null, tb_null, tb_null);
        tb_assert_and_check_break(impl->pool);

        // init the spak socket pair
        if (!tb_socket_pair(TB_SOCKET_TYPE_TCP, impl->spak)) break;

        // init the platform reactor
        impl->rtor = tb_aiop_rtor_impl_init(impl);
        tb_assert_and_check_break(impl->rtor);

        // watch the read side of the spak pair
        if (!tb_aiop_addo((tb_aiop_ref_t)impl, impl->spak[1], TB_AIOE_CODE_RECV, tb_null)) break;

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->lock, TB_TRACE_MODULE_NAME);
#endif

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial aiop on failure
    if (!done)
    {
        if (impl) tb_aiop_exit((tb_aiop_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_aiop_ref_t)impl;
}
/* create the default allocator on top of a large allocator
 *
 * allocates the allocator object from the large allocator itself, wires
 * up the allocator vtable, and builds the small allocator used for small
 * requests. returns tb_null on failure, after tearing down whatever was
 * already constructed.
 */
tb_allocator_ref_t tb_default_allocator_init(tb_allocator_ref_t large_allocator)
{
    // check
    tb_assert_and_check_return_val(large_allocator, tb_null);

    // attempt to build the allocator
    tb_bool_t                  done      = tb_false;
    tb_default_allocator_ref_t allocator = tb_null;
    do
    {
        // allocate the allocator object from the large allocator
        allocator = (tb_default_allocator_ref_t)tb_allocator_large_malloc0(large_allocator, sizeof(tb_default_allocator_t), tb_null);
        tb_assert_and_check_break(allocator);

        // wire up the base vtable
        allocator->base.type   = TB_ALLOCATOR_TYPE_DEFAULT;
        allocator->base.flag   = TB_ALLOCATOR_FLAG_NONE;
        allocator->base.malloc = tb_default_allocator_malloc;
        allocator->base.ralloc = tb_default_allocator_ralloc;
        allocator->base.free   = tb_default_allocator_free;
        allocator->base.exit   = tb_default_allocator_exit;
#ifdef __tb_debug__
        allocator->base.dump = tb_default_allocator_dump;
        allocator->base.have = tb_default_allocator_have;
#endif

        // init lock
        if (!tb_spinlock_init(&allocator->base.lock)) break;

        // keep the large allocator and build the small allocator on top of it
        allocator->large_allocator = large_allocator;
        allocator->small_allocator = tb_small_allocator_init(large_allocator);
        tb_assert_and_check_break(allocator->small_allocator);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&allocator->base.lock, TB_TRACE_MODULE_NAME);
#endif

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial allocator on failure
    if (!done)
    {
        if (allocator) tb_default_allocator_exit((tb_allocator_ref_t)allocator);
        allocator = tb_null;
    }

    // ok?
    return (tb_allocator_ref_t)allocator;
}
/* create a transfer pool bound to the given aicp
 *
 * falls back to the default aicp when none is supplied. builds the lock
 * and the idle/work task lists. returns tb_null on failure, after tearing
 * down whatever was already constructed.
 */
tb_transfer_pool_ref_t tb_transfer_pool_init(tb_aicp_ref_t aicp)
{
    // attempt to build the pool
    tb_bool_t                done = tb_false;
    tb_transfer_pool_impl_t* impl = tb_null;
    do
    {
        // fall back to the default aicp
        if (!aicp) aicp = tb_aicp();
        tb_assert_and_check_break(aicp);

        // allocate the pool
        impl = tb_malloc0_type(tb_transfer_pool_impl_t);
        tb_assert_and_check_break(impl);

        // init lock
        if (!tb_spinlock_init(&impl->lock)) break;

        // bind the aicp and take its capacity
        impl->aicp  = aicp;
        impl->maxn  = tb_aicp_maxn(aicp);
        impl->state = TB_STATE_OK;
        tb_assert_and_check_break(impl->maxn);

        // init the idle task list
        tb_list_entry_init(&impl->idle, tb_transfer_task_t, entry, tb_null);

        // init the work task list
        tb_list_entry_init(&impl->work, tb_transfer_task_t, entry, tb_null);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->lock, TB_TRACE_MODULE_NAME);
#endif

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial pool on failure
    if (!done)
    {
        if (impl) tb_transfer_pool_exit((tb_transfer_pool_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_transfer_pool_ref_t)impl;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */

/* create a timer
 *
 * @param maxn      the expected maximum task count (clamped to at least 16)
 * @param ctime     use the cached time?
 *
 * builds the lock, the task pool, and the min-heap that orders tasks by
 * their due time (via tb_timer_comp_by_when). returns tb_null on failure,
 * after tearing down whatever was already constructed.
 */
tb_timer_ref_t tb_timer_init(tb_size_t maxn, tb_bool_t ctime)
{
    // attempt to build the timer
    tb_bool_t        done = tb_false;
    tb_timer_impl_t* impl = tb_null;
    do
    {
        // allocate the timer
        impl = tb_malloc0_type(tb_timer_impl_t);
        tb_assert_and_check_break(impl);

        // the heap compares tasks by their "when" field
        tb_item_func_t func = tb_item_func_ptr(tb_null, tb_null);
        func.comp = tb_timer_comp_by_when;

        // save the (clamped) capacity and the cached-time flag
        impl->maxn  = tb_max(maxn, 16);
        impl->ctime = ctime;

        // init lock
        if (!tb_spinlock_init(&impl->lock)) break;

        // init the task pool
        impl->pool = tb_fixed_pool_init(tb_null, (maxn >> 4) + 16, sizeof(tb_timer_task_impl_t), tb_null, tb_null, tb_null);
        tb_assert_and_check_break(impl->pool);

        // init the task heap
        impl->heap = tb_heap_init((maxn >> 2) + 16, func);
        tb_assert_and_check_break(impl->heap);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->lock, TB_TRACE_MODULE_NAME);
#endif

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial timer on failure
    if (!done)
    {
        if (impl) tb_timer_exit((tb_timer_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_timer_ref_t)impl;
}
/* create an aicp with capacity for maxn concurrent objects
 *
 * first statically verifies that the data/size members of the aice i/o
 * structs overlay tb_iovec_t exactly (so aice buffers can be handed
 * straight to vectored i/o), and that all i/o aice variants share the
 * offset of the "real" member. then builds the lock, the platform
 * proactor, and the aico pool. returns tb_null on failure, after tearing
 * down whatever was already constructed.
 */
tb_aicp_ref_t tb_aicp_init(tb_size_t maxn)
{
    // check: each aice buffer must alias tb_iovec_t member-for-member
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, data, tb_iovec_t, data), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, size, tb_iovec_t, size), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_send_t, data, tb_iovec_t, data), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_send_t, size, tb_iovec_t, size), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_read_t, data, tb_iovec_t, data), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_read_t, size, tb_iovec_t, size), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_writ_t, data, tb_iovec_t, data), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_writ_t, size, tb_iovec_t, size), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_urecv_t, data, tb_iovec_t, data), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_urecv_t, size, tb_iovec_t, size), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_usend_t, data, tb_iovec_t, data), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_usend_t, size, tb_iovec_t, size), tb_null);

    // check: the "real" member must sit at the same offset in every i/o aice
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_send_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_read_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_writ_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_sendf_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_sendv_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_recvv_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_readv_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_writv_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_urecv_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_usend_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_urecvv_t, real), tb_null);
    tb_assert_and_check_return_val(tb_memberof_eq(tb_aice_recv_t, real, tb_aice_usendv_t, real), tb_null);

    // attempt to build the aicp
    tb_bool_t       done = tb_false;
    tb_aicp_impl_t* impl = tb_null;
    do
    {
        // allocate the aicp
        impl = tb_malloc0_type(tb_aicp_impl_t);
        tb_assert_and_check_break(impl);

        // save the capacity, with a build-dependent default when zero
#ifdef __tb_small__
        impl->maxn = maxn? maxn : (1 << 4);
#else
        impl->maxn = maxn? maxn : (1 << 8);
#endif

        // init lock
        if (!tb_spinlock_init(&impl->lock)) break;

        // init the platform proactor; its step must fit at least one aico
        impl->ptor = tb_aicp_ptor_impl_init(impl);
        tb_assert_and_check_break(impl->ptor && impl->ptor->step >= sizeof(tb_aico_impl_t));

        // init the aico pool, one slot per proactor step
        impl->pool = tb_fixed_pool_init(tb_null, (impl->maxn >> 4) + 16, impl->ptor->step, tb_null, tb_null, tb_null);
        tb_assert_and_check_break(impl->pool);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->lock, TB_TRACE_MODULE_NAME);
#endif

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial aicp on failure
    if (!done)
    {
        if (impl) tb_aicp_exit((tb_aicp_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_aicp_ref_t)impl;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/*! init the trace module
 *
 * only initializes the global spinlock that serializes trace output;
 * no other state is set up here.
 *
 * @return tb_true if the lock was initialized, tb_false otherwise
 */
tb_bool_t tb_trace_init()
{
    // init lock
    return tb_spinlock_init(&g_lock);
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */

/* create the aiop-backed proactor for the given aicp
 *
 * builds, in order: the lock, the wait semaphore, the inner aiop (which
 * must support all event codes plus oneshot), the two spak queues, the
 * file subsystem, the aioe list, the two timers, and finally the spak
 * loop thread. returns tb_null on any failure, after tearing down
 * whatever was already constructed.
 */
static tb_aicp_ptor_impl_t* tb_aiop_ptor_init(tb_aicp_impl_t* aicp)
{
    // check
    tb_assert_and_check_return_val(aicp && aicp->maxn, tb_null);

    // attempt to build the proactor
    tb_bool_t            done = tb_false;
    tb_aiop_ptor_impl_t* impl = tb_null;
    do
    {
        // allocate the proactor
        impl = tb_malloc0_type(tb_aiop_ptor_impl_t);
        tb_assert_and_check_break(impl);

        // wire up the base operation table
        impl->base.aicp      = aicp;
        impl->base.step      = sizeof(tb_aiop_aico_t);
        impl->base.kill      = tb_aiop_ptor_kill;
        impl->base.exit      = tb_aiop_ptor_exit;
        impl->base.addo      = tb_aiop_ptor_addo;
        impl->base.kilo      = tb_aiop_ptor_kilo;
        impl->base.post      = tb_aiop_ptor_post;
        impl->base.loop_spak = tb_aiop_ptor_spak;

        // init lock
        if (!tb_spinlock_init(&impl->lock)) break;

        // init the wait semaphore
        impl->wait = tb_semaphore_init(0);
        tb_assert_and_check_break(impl->wait);

        // init the inner aiop
        impl->aiop = tb_aiop_init(aicp->maxn);
        tb_assert_and_check_break(impl->aiop);

        // the aiop must support all event codes and oneshot mode
        tb_assert_and_check_break(tb_aiop_have(impl->aiop, TB_AIOE_CODE_EALL | TB_AIOE_CODE_ONESHOT));

        // init the two spak queues
        impl->spak[0] = tb_queue_init((aicp->maxn >> 4) + 16, tb_item_func_mem(sizeof(tb_aice_t), tb_null, tb_null));
        impl->spak[1] = tb_queue_init((aicp->maxn >> 4) + 16, tb_item_func_mem(sizeof(tb_aice_t), tb_null, tb_null));
        tb_assert_and_check_break(impl->spak[0] && impl->spak[1]);

        // init the file subsystem
        if (!tb_aicp_file_init(impl)) break;

        // init the aioe list
        impl->maxn = (aicp->maxn >> 4) + 16;
        impl->list = tb_nalloc0(impl->maxn, sizeof(tb_aioe_t));
        tb_assert_and_check_break(impl->list);

        // init the timer, using the cached time
        impl->timer = tb_timer_init((aicp->maxn >> 4) + 16, tb_true);
        tb_assert_and_check_break(impl->timer);

        // init the low-precision timer, using the cached time
        impl->ltimer = tb_ltimer_init(aicp->maxn, TB_LTIMER_TICK_S, tb_true);
        tb_assert_and_check_break(impl->ltimer);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->lock, "aicp_aiop");
#endif

        // start the spak loop thread last, once everything it uses exists
        impl->loop = tb_thread_init(tb_null, tb_aiop_spak_loop, impl, 0);
        tb_assert_and_check_break(impl->loop);

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial proactor on failure
    if (!done)
    {
        if (impl) tb_aiop_ptor_exit((tb_aicp_ptor_impl_t*)impl);
        return tb_null;
    }

    // ok?
    return (tb_aicp_ptor_impl_t*)impl;
}
/* init the spider from the command line
 *
 * parses the arguments (via the option module when available, otherwise
 * positionally: argv[1] = home url, argv[2] = root directory), fills in
 * defaults for the root directory, user agent, timeout and rate, and
 * builds the lock, task pool and url bloom filter.
 *
 * @param spider    the spider (zero-initialized by the caller)
 * @param argc      the argument count
 * @param argv      the argument list
 *
 * @return          tb_true on success; on failure the usage help is
 *                  printed when the option module is available
 */
static tb_bool_t tb_demo_spider_init(tb_demo_spider_t* spider, tb_int_t argc, tb_char_t** argv)
{
    // check
    tb_assert_and_check_return_val(spider && argc && argv, tb_false);

    // done
    tb_bool_t ok = tb_false;
    do
    {
#ifdef TB_CONFIG_MODULE_HAVE_OBJECT
        // init option
        spider->option = tb_option_init("spider", "the spider demo", g_options);
        tb_assert_and_check_break(spider->option);

        // parse the command line
        if (!tb_option_done(spider->option, argc - 1, &argv[1])) break;

        // the home url is mandatory
        tb_assert_and_check_break(tb_option_find(spider->option, "home"));

        // init home
        if (!tb_url_init(&spider->home)) break;
        tb_url_set(&spider->home, tb_option_item_cstr(spider->option, "home"));
        tb_trace_d("home: %s", tb_url_get(&spider->home));

        // crawl only the home site?
        if (tb_option_find(spider->option, "only")) spider->home_only = tb_option_item_bool(spider->option, "only");

        // init root
        tb_char_t const* root = tb_option_item_cstr(spider->option, "directory");

        // init user agent
        spider->user_agent = tb_option_item_cstr(spider->option, "agent");

        // init timeout
        if (tb_option_find(spider->option, "timeout")) spider->timeout = tb_option_item_sint32(spider->option, "timeout");

        // init limited rate
        if (tb_option_find(spider->option, "rate")) spider->limited_rate = tb_option_item_uint32(spider->option, "rate");
#else
        // check
        tb_assert_and_check_break(argv[1]);

        /* init home
         *
         * fix: the url object must be filled via tb_url_set() as in the
         * option branch above; the original assigned the raw argv pointer
         * over the tb_url_t that tb_url_init() had just initialized, so
         * tb_url_host_get() below would never see the given url.
         */
        if (!tb_url_init(&spider->home)) break;
        tb_url_set(&spider->home, argv[1]);
        tb_trace_d("home: %s", tb_url_get(&spider->home));

        // init root
        tb_char_t const* root = argv[2];
#endif

        // the home host
        tb_char_t const* host = tb_url_host_get(&spider->home);
        tb_assert_and_check_break(host);

        // init home domain: everything from the first '.' of the host
        tb_char_t const* domain = tb_strchr(host, '.');
        if (domain)
        {
            tb_strlcpy(spider->home_domain, domain, sizeof(spider->home_domain) - 1);
            spider->home_domain[sizeof(spider->home_domain) - 1] = '\0';
        }

        // using the given root, or a "spider" directory under the temporary root
        if (root) tb_strlcpy(spider->root, root, sizeof(spider->root) - 1);
        else
        {
            // the temporary root
            tb_directory_temp(spider->root, sizeof(spider->root) - 1);

            // append spider
            tb_strcat(spider->root, "/spider");
        }
        tb_trace_d("root: %s", spider->root);

        // using the default user agent
        if (!spider->user_agent) spider->user_agent = TB_DEMO_SPIDER_USER_AGENT;

        // using the default timeout
        if (!spider->timeout) spider->timeout = TB_DEMO_SPIDER_TASK_TIMEOUT;

        // using the default rate
        if (!spider->limited_rate) spider->limited_rate = TB_DEMO_SPIDER_TASK_RATE;

        // strip root tail: '/' or '\\'
        tb_size_t size = tb_strlen(spider->root);
        if (size && (spider->root[size - 1] == '/' || spider->root[size - 1] == '\\')) spider->root[size - 1] = '\0';

        // init state
        spider->state = TB_STATE_OK;

        // init lock
        if (!tb_spinlock_init(&spider->lock)) break;

        // init the task pool
        spider->pool = tb_fixed_pool_init(tb_null, TB_DEMO_SPIDER_TASK_MAXN >> 2, sizeof(tb_demo_spider_task_t), tb_null, tb_null, tb_null);
        tb_assert_and_check_break(spider->pool);

        // init the url filter
        spider->filter = tb_bloom_filter_init(TB_BLOOM_FILTER_PROBABILITY_0_001, 3, TB_DEMO_SPIDER_FILTER_MAXN, tb_item_func_str(tb_true));
        tb_assert_and_check_break(spider->filter);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&spider->lock, "spider");
#endif

        // ok
        ok = tb_true;

    } while (0);

    // failed? help it
#ifdef TB_CONFIG_MODULE_HAVE_OBJECT
    if (!ok && spider->option) tb_option_help(spider->option);
#endif

    // ok?
    return ok;
}
/* create a thread pool
 *
 * @param worker_maxn   the maximum worker count (0 = four per processor)
 * @param stack         the worker thread stack size
 *
 * builds the lock, the job pool, the urgent/waiting/pending job lists and
 * the wake-up semaphore. workers are not started here (worker_size starts
 * at 0). returns tb_null on failure, after tearing down whatever was
 * already constructed.
 */
tb_thread_pool_ref_t tb_thread_pool_init(tb_size_t worker_maxn, tb_size_t stack)
{
    // attempt to build the pool
    tb_bool_t              done = tb_false;
    tb_thread_pool_impl_t* impl = tb_null;
    do
    {
        // allocate the pool
        impl = tb_malloc0_type(tb_thread_pool_impl_t);
        tb_assert_and_check_break(impl);

        // init lock
        if (!tb_spinlock_init(&impl->lock)) break;

        // default worker cap: four workers per processor
        if (!worker_maxn) worker_maxn = tb_processor_count() << 2;
        tb_assert_and_check_break(worker_maxn);

        // save the worker stack size
        impl->stack = stack;

        // no workers yet; they are spawned on demand
        impl->worker_size = 0;
        impl->worker_maxn = worker_maxn;

        // init the job pool
        impl->jobs_pool = tb_fixed_pool_init(tb_null, TB_THREAD_POOL_JOBS_POOL_GROW, sizeof(tb_thread_pool_job_t), tb_null, tb_null, tb_null);
        tb_assert_and_check_break(impl->jobs_pool);

        // init the urgent, waiting and pending job lists
        tb_list_entry_init(&impl->jobs_urgent, tb_thread_pool_job_t, entry, tb_null);
        tb_list_entry_init(&impl->jobs_waiting, tb_thread_pool_job_t, entry, tb_null);
        tb_list_entry_init(&impl->jobs_pending, tb_thread_pool_job_t, entry, tb_null);

        // init the wake-up semaphore
        impl->semaphore = tb_semaphore_init(0);
        tb_assert_and_check_break(impl->semaphore);

        // register lock profiler
#ifdef TB_LOCK_PROFILER_ENABLE
        tb_lock_profiler_register(tb_lock_profiler(), (tb_pointer_t)&impl->lock, TB_TRACE_MODULE_NAME);
#endif

        // ok
        done = tb_true;

    } while (0);

    // tear down the partial pool on failure
    if (!done)
    {
        if (impl) tb_thread_pool_exit((tb_thread_pool_ref_t)impl);
        impl = tb_null;
    }

    // ok?
    return (tb_thread_pool_ref_t)impl;
}