/* Exit the aiop: tear down the reactor, the spak socket pair and the
 * object pool, then release the lock and the impl itself.
 *
 * @param aiop      the aiop reference (may be tb_null; checked)
 */
tb_void_t tb_aiop_exit(tb_aiop_ref_t aiop)
{
    // check
    tb_aiop_impl_t* aiop_impl = (tb_aiop_impl_t*)aiop;
    tb_assert_and_check_return(aiop_impl);

    // exit the reactor first
    if (aiop_impl->rtor && aiop_impl->rtor->exit) aiop_impl->rtor->exit(aiop_impl->rtor);

    // exit both spak sockets
    tb_size_t i;
    for (i = 0; i < 2; i++)
    {
        if (aiop_impl->spak[i]) tb_socket_exit(aiop_impl->spak[i]);
        aiop_impl->spak[i] = tb_null;
    }

    // exit the pool under the lock
    tb_spinlock_enter(&aiop_impl->lock);
    if (aiop_impl->pool) tb_fixed_pool_exit(aiop_impl->pool);
    aiop_impl->pool = tb_null;
    tb_spinlock_leave(&aiop_impl->lock);

    // exit the lock itself
    tb_spinlock_exit(&aiop_impl->lock);

    // free the impl
    tb_free(aiop_impl);
}
/* Exit the transfer pool.
 *
 * Kills all tasks first, then waits up to 5s for them to finish; on
 * timeout nothing is freed and tb_false is returned so the caller can
 * retry. Otherwise the lists, the task pool, the lock and the impl are
 * all released.
 *
 * @param pool      the transfer pool reference
 * @return          tb_true on success, tb_false if waiting timed out
 */
tb_bool_t tb_transfer_pool_exit(tb_transfer_pool_ref_t pool)
{
    // check
    tb_transfer_pool_impl_t* pool_impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(pool_impl, tb_false);

    // trace
    tb_trace_d("exit: ..");

    // kill all tasks first
    tb_transfer_pool_kill(pool);

    // wait for them; give up without freeing anything on timeout
    if (tb_transfer_pool_wait_all(pool, 5000) <= 0)
    {
        // trace
        tb_trace_e("exit: wait failed!");
        return tb_false;
    }

    // enter
    tb_spinlock_enter(&pool_impl->lock);

    // the work list must be empty after a successful wait
    tb_assert(!tb_list_entry_size(&pool_impl->work));

    // exit the work list
    tb_list_entry_exit(&pool_impl->work);

    // exit the idle list
    tb_list_entry_exit(&pool_impl->idle);

    // exit the task pool
    if (pool_impl->pool)
    {
        // exit every remaining task first
        tb_fixed_pool_walk(pool_impl->pool, tb_transfer_pool_walk_exit, tb_null);

        // then the pool itself
        tb_fixed_pool_exit(pool_impl->pool);
        pool_impl->pool = tb_null;
    }

    // leave
    tb_spinlock_leave(&pool_impl->lock);

    // exit the lock
    tb_spinlock_exit(&pool_impl->lock);

    // free it
    tb_free(pool);

    // trace
    tb_trace_d("exit: ok");

    // ok
    return tb_true;
}
/* Exit the spider: mark it as killing, stop and drain the global transfer
 * and thread pools, then free the spider's own resources.
 *
 * NOTE(review): the teardown order matters — all outstanding tasks are
 * killed and waited on BEFORE the filter/pool are destroyed under the
 * lock, since running tasks presumably touch them; confirm against the
 * task callbacks.
 *
 * @param spider    the spider instance (checked for tb_null)
 */
static tb_void_t tb_demo_spider_exit(tb_demo_spider_t* spider)
{
    // check
    tb_assert_and_check_return(spider);

    // trace
    tb_trace_d("exit: ..");

    // flag the killing state so in-flight work can observe it
    tb_atomic_set(&spider->state, TB_STATE_KILLING);

    // kill all transfer tasks
    tb_transfer_pool_kill_all(tb_transfer_pool());

    // kill all parser tasks
    tb_thread_pool_task_kill_all(tb_thread_pool());

    // wait all transfer tasks exiting (-1: wait forever)
    tb_transfer_pool_wait_all(tb_transfer_pool(), -1);

    // wait all parser tasks exiting (-1: wait forever)
    tb_thread_pool_task_wait_all(tb_thread_pool(), -1);

    // enter
    tb_spinlock_enter(&spider->lock);

    // exit filter
    if (spider->filter) tb_bloom_filter_exit(spider->filter);
    spider->filter = tb_null;

    // exit pool
    if (spider->pool) tb_fixed_pool_exit(spider->pool);
    spider->pool = tb_null;

    // leave
    tb_spinlock_leave(&spider->lock);

    // exit lock (only after no one can hold it any more)
    tb_spinlock_exit(&spider->lock);

    // exit home
    tb_url_exit(&spider->home);

    // exit option
#ifdef TB_CONFIG_MODULE_HAVE_OBJECT
    if (spider->option) tb_option_exit(spider->option);
    spider->option = tb_null;
#endif

    // trace
    tb_trace_d("exit: ok");
}
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_fixed_pool_ref_t tb_fixed_pool_init_(tb_large_pool_ref_t large_pool, tb_size_t slot_size, tb_size_t item_size, tb_bool_t for_small_pool, tb_fixed_pool_item_init_func_t item_init, tb_fixed_pool_item_exit_func_t item_exit, tb_cpointer_t priv) { // check tb_assert_and_check_return_val(item_size, tb_null); // done tb_bool_t ok = tb_false; tb_fixed_pool_impl_t* impl = tb_null; do { // using the default large pool if (!large_pool) large_pool = tb_large_pool(); tb_assert_and_check_break(large_pool); // make pool impl = (tb_fixed_pool_impl_t*)tb_large_pool_malloc0(large_pool, sizeof(tb_fixed_pool_impl_t), tb_null); tb_assert_and_check_break(impl); // init pool impl->large_pool = large_pool; impl->slot_size = slot_size? slot_size : (tb_page_size() >> 4); impl->item_size = item_size; impl->func_init = item_init; impl->func_exit = item_exit; impl->func_priv = priv; impl->for_small_pool = for_small_pool; tb_assert_and_check_break(impl->slot_size); // init partial slots tb_list_entry_init(&impl->partial_slots, tb_fixed_pool_slot_t, entry, tb_null); // init full slots tb_list_entry_init(&impl->full_slots, tb_fixed_pool_slot_t, entry, tb_null); // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (impl) tb_fixed_pool_exit((tb_fixed_pool_ref_t)impl); impl = tb_null; } // ok? return (tb_fixed_pool_ref_t)impl; }
/* ////////////////////////////////////////////////////////////////////////////////////// * implementation */ tb_fixed_pool_ref_t tb_fixed_pool_init_(tb_allocator_ref_t large_allocator, tb_size_t slot_size, tb_size_t item_size, tb_bool_t for_small, tb_fixed_pool_item_init_func_t item_init, tb_fixed_pool_item_exit_func_t item_exit, tb_cpointer_t priv) { // check tb_assert_and_check_return_val(item_size, tb_null); // done tb_bool_t ok = tb_false; tb_fixed_pool_t* pool = tb_null; do { // no allocator? uses the global allocator if (!large_allocator) large_allocator = tb_allocator(); tb_assert_and_check_break(large_allocator); // make pool pool = (tb_fixed_pool_t*)tb_allocator_large_malloc0(large_allocator, sizeof(tb_fixed_pool_t), tb_null); tb_assert_and_check_break(pool); // init pool pool->large_allocator = large_allocator; pool->slot_size = slot_size? slot_size : (tb_page_size() >> 4); pool->item_size = item_size; pool->func_init = item_init; pool->func_exit = item_exit; pool->func_priv = priv; pool->for_small = for_small; tb_assert_and_check_break(pool->slot_size); // init partial slots tb_list_entry_init(&pool->partial_slots, tb_fixed_pool_slot_t, entry, tb_null); // init full slots tb_list_entry_init(&pool->full_slots, tb_fixed_pool_slot_t, entry, tb_null); // ok ok = tb_true; } while (0); // failed? if (!ok) { // exit it if (pool) tb_fixed_pool_exit((tb_fixed_pool_ref_t)pool); pool = tb_null; } // ok? return (tb_fixed_pool_ref_t)pool; }
/* Exit the aicp.
 *
 * Phases, in order:
 *   1. kill all aicos and wait up to 5s for them to exit; on timeout the
 *      remaining aicos are dumped via tb_aicp_walk_wait and tb_false is
 *      returned (nothing is freed, caller may retry),
 *   2. kill the loop and poll up to ~5s for all workers to stop,
 *   3. exit the proactor, the aico pool, the lock and the impl itself.
 *
 * @param aicp      the aicp reference
 * @return          tb_true on success, tb_false if waiting timed out
 */
tb_bool_t tb_aicp_exit(tb_aicp_ref_t aicp)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return_val(impl, tb_false);

    // kill all first
    tb_aicp_kill_all((tb_aicp_ref_t)impl);

    // wait all exiting
    if (tb_aicp_wait_all((tb_aicp_ref_t)impl, 5000) <= 0)
    {
        // wait failed, trace left aicos
        tb_spinlock_enter(&impl->lock);
        if (impl->pool) tb_fixed_pool_walk(impl->pool, tb_aicp_walk_wait, tb_null);
        tb_spinlock_leave(&impl->lock);
        return tb_false;
    }

    // kill loop
    tb_aicp_kill((tb_aicp_ref_t)impl);

    // wait workers exiting: poll every 500ms for at most ~5s
    tb_hong_t time = tb_mclock();
    while (tb_atomic_get(&impl->work) && (tb_mclock() < time + 5000)) tb_msleep(500);

    // exit proactor
    if (impl->ptor)
    {
        tb_assert(impl->ptor && impl->ptor->exit);
        impl->ptor->exit(impl->ptor);
        impl->ptor = tb_null;
    }

    // exit aico pool (under the lock)
    tb_spinlock_enter(&impl->lock);
    if (impl->pool) tb_fixed_pool_exit(impl->pool);
    impl->pool = tb_null;
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // free impl
    tb_free(impl);

    // ok
    return tb_true;
}
/* Exit the list: clear all items, destroy the item pool and free the
 * list header.
 *
 * @param self      the list reference
 */
tb_void_t tb_list_exit(tb_list_ref_t self)
{
    // check
    tb_list_t* list = (tb_list_t*)self;
    tb_assert_and_check_return(list);

    // remove all items first
    tb_list_clear(self);

    // destroy the item pool
    if (list->pool) tb_fixed_pool_exit(list->pool);

    // free the header
    tb_free(list);
}
/* Exit the face list: clear all faces, destroy the backing pool and free
 * the list.
 *
 * @param list      the face list reference
 */
tb_void_t gb_mesh_face_list_exit(gb_mesh_face_list_ref_t list)
{
    // check
    gb_mesh_face_list_impl_t* list_impl = (gb_mesh_face_list_impl_t*)list;
    tb_assert_and_check_return(list_impl);

    // remove all faces first
    gb_mesh_face_list_clear(list);

    // destroy the backing pool
    if (list_impl->pool) tb_fixed_pool_exit(list_impl->pool);
    list_impl->pool = tb_null;

    // free the list
    tb_free(list_impl);
}
/* Exit the timer.
 *
 * Stops the loop, waits for the worker to leave (up to ~5s), wakes any
 * waiter via the event, then destroys the heap, pool and event under the
 * lock and frees the impl.
 *
 * @param timer     the timer reference
 */
tb_void_t tb_timer_exit(tb_timer_ref_t timer)
{
    // check
    tb_timer_impl_t* impl = (tb_timer_impl_t*)timer;
    tb_assert_and_check_return(impl);

    // stop it
    tb_atomic_set(&impl->stop, 1);

    // wait loop exit: poll at most 10 times, 500ms apart
    tb_size_t tryn = 10;
    while (tb_atomic_get(&impl->work) && tryn--) tb_msleep(500);

    /* warning
     *
     * FIX: the original tested `!tryn && work`, but when the tries are
     * exhausted the post-decrement leaves tryn == (tb_size_t)-1, so the
     * warning could never fire; testing `work` alone is both necessary
     * and sufficient here.
     */
    if (tb_atomic_get(&impl->work)) tb_trace_w("[timer]: the loop has been not exited now!");

    // post event to wake a loop blocked on it
    tb_spinlock_enter(&impl->lock);
    tb_event_ref_t event = impl->event;
    tb_spinlock_leave(&impl->lock);
    if (event) tb_event_post(event);

    // enter
    tb_spinlock_enter(&impl->lock);

    // exit heap
    if (impl->heap) tb_heap_exit(impl->heap);
    impl->heap = tb_null;

    // exit pool
    if (impl->pool) tb_fixed_pool_exit(impl->pool);
    impl->pool = tb_null;

    // exit event
    if (impl->event) tb_event_exit(impl->event);
    impl->event = tb_null;

    // leave
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // exit it
    tb_free(impl);
}
/* Exit the small pool: destroy every fixed sub-pool, then return the
 * impl itself to the large pool it was allocated from.
 *
 * @param pool      the small pool reference
 */
tb_void_t tb_small_pool_exit(tb_small_pool_ref_t pool)
{
    // check
    tb_small_pool_impl_t* impl = (tb_small_pool_impl_t*)pool;
    tb_assert_and_check_return(impl && impl->large_pool);

    // destroy every fixed sub-pool
    tb_size_t index;
    for (index = 0; index < tb_arrayn(impl->fixed_pool); index++)
    {
        if (impl->fixed_pool[index]) tb_fixed_pool_exit(impl->fixed_pool[index]);
        impl->fixed_pool[index] = tb_null;
    }

    // return the impl to its large pool
    tb_large_pool_free(impl->large_pool, impl);
}
/* Exit the thread pool.
 *
 * Phases, in order:
 *   1. kill all tasks and wait up to 5s; on timeout nothing is freed and
 *      tb_false is returned (caller may retry),
 *   2. join every worker thread (5s each),
 *   3. tear down the job lists and pool under the lock, then the lock,
 *      the semaphore and the impl itself.
 *
 * @param pool      the thread pool reference
 * @return          tb_true on success, tb_false if waiting timed out
 */
tb_bool_t tb_thread_pool_exit(tb_thread_pool_ref_t pool)
{
    // check
    tb_thread_pool_impl_t* impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, tb_false);

    // trace
    tb_trace_d("exit: ..");

    // kill it first
    tb_thread_pool_kill(pool);

    // wait all
    if (tb_thread_pool_task_wait_all(pool, 5000) <= 0)
    {
        // trace
        tb_trace_e("exit: wait failed!");
        return tb_false;
    }

    /* exit all workers
     * need not lock it because the worker size will not be increased
     */
    tb_size_t i = 0;
    tb_size_t n = impl->worker_size;
    for (i = 0; i < n; i++)
    {
        // the worker
        tb_thread_pool_worker_t* worker = &impl->worker_list[i];

        // exit loop
        if (worker->loop)
        {
            // wait it
            tb_long_t wait = 0;
            if ((wait = tb_thread_wait(worker->loop, 5000)) <= 0)
            {
                // trace
                tb_trace_e("worker[%lu]: wait failed: %ld!", i, wait);
            }

            // exit it (even if the wait failed — original behavior)
            tb_thread_exit(worker->loop);
            worker->loop = tb_null;
        }
    }
    impl->worker_size = 0;

    // enter
    tb_spinlock_enter(&impl->lock);

    // exit pending jobs
    tb_list_entry_exit(&impl->jobs_pending);

    // exit waiting jobs
    tb_list_entry_exit(&impl->jobs_waiting);

    // exit urgent jobs
    tb_list_entry_exit(&impl->jobs_urgent);

    // exit jobs pool
    if (impl->jobs_pool) tb_fixed_pool_exit(impl->jobs_pool);
    impl->jobs_pool = tb_null;

    // leave
    tb_spinlock_leave(&impl->lock);

    // exit lock
    tb_spinlock_exit(&impl->lock);

    // exit semaphore
    if (impl->semaphore) tb_semaphore_exit(impl->semaphore);
    impl->semaphore = tb_null;

    // exit it
    tb_free(impl);

    // trace
    tb_trace_d("exit: ok");

    // ok
    return tb_true;
}