/* Stop a transfer pool and release every resource it owns.
 *
 * Kills outstanding transfers, waits up to 5 seconds for them to
 * drain, then tears down the work/idle lists, the fixed task pool,
 * the spinlock, and finally the pool structure itself.
 *
 * @param pool  the transfer pool handle
 *
 * @return      tb_true on success; tb_false if the handle is null or
 *              the pending transfers did not finish within the timeout
 *              (in which case the pool is NOT freed)
 */
tb_bool_t tb_transfer_pool_exit(tb_transfer_pool_ref_t pool)
{
    // cast and validate the handle
    tb_transfer_pool_impl_t* pool_impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(pool_impl, tb_false);

    // trace
    tb_trace_d("exit: ..");

    // ask every transfer to stop before tearing anything down
    tb_transfer_pool_kill(pool);

    // give the transfers up to 5 seconds to drain; bail out if they don't
    tb_long_t drained = tb_transfer_pool_wait_all(pool, 5000);
    if (drained <= 0)
    {
        tb_trace_e("exit: wait failed!");
        return tb_false;
    }

    // serialize against any stragglers while destroying internals
    tb_spinlock_enter(&pool_impl->lock);

    // the work list must be empty once the wait succeeded
    tb_assert(!tb_list_entry_size(&pool_impl->work));

    // tear down both entry lists
    tb_list_entry_exit(&pool_impl->work);
    tb_list_entry_exit(&pool_impl->idle);

    // free every cached task, then the fixed pool backing them
    if (pool_impl->pool)
    {
        tb_fixed_pool_walk(pool_impl->pool, tb_transfer_pool_walk_exit, tb_null);
        tb_fixed_pool_exit(pool_impl->pool);
        pool_impl->pool = tb_null;
    }

    // release, then destroy, the lock
    tb_spinlock_leave(&pool_impl->lock);
    tb_spinlock_exit(&pool_impl->lock);

    // free the pool structure itself
    tb_free(pool);

    // trace
    tb_trace_d("exit: ok");

    // ok
    return tb_true;
}
/* Destroy a stopped coroutine scheduler and free everything it owns.
 *
 * The scheduler must already have been stopped; the io scheduler is
 * torn down first, then every coroutine queue is freed and destroyed,
 * and finally the scheduler structure itself is released.
 *
 * @param self  the coroutine scheduler handle
 */
tb_void_t tb_co_scheduler_exit(tb_co_scheduler_ref_t self)
{
    // cast and validate the handle
    tb_co_scheduler_t* sched = (tb_co_scheduler_t*)self;
    tb_assert_and_check_return(sched);

    // teardown requires a stopped scheduler
    tb_assert(sched->stopped);

    // the io scheduler goes first: it may still reference coroutines
    if (sched->scheduler_io) tb_co_scheduler_io_exit(sched->scheduler_io);
    sched->scheduler_io = tb_null;

    // no coroutine is running any more
    sched->running = tb_null;

    // nothing should be left runnable or suspended at this point
    tb_assert(!tb_list_entry_size(&sched->coroutines_ready));
    tb_assert(!tb_list_entry_size(&sched->coroutines_suspend));

    /* free coroutines from every queue (dead, ready, suspend);
     * the ready/suspend frees are a safety net for release builds
     * where the asserts above compile out
     */
    tb_co_scheduler_free(&sched->coroutines_dead);
    tb_co_scheduler_free(&sched->coroutines_ready);
    tb_co_scheduler_free(&sched->coroutines_suspend);

    // tear down the queues themselves
    tb_list_entry_exit(&sched->coroutines_dead);
    tb_list_entry_exit(&sched->coroutines_ready);
    tb_list_entry_exit(&sched->coroutines_suspend);

    // release the scheduler structure
    tb_free(sched);
}
/* Stop a thread pool and release every resource it owns.
 *
 * Kills the pool, waits up to 5 seconds for all tasks to finish,
 * joins and destroys every worker thread, then tears down the job
 * queues, the job pool, the lock, the semaphore, and the pool
 * structure itself.
 *
 * @param pool  the thread pool handle
 *
 * @return      tb_true on success; tb_false if the handle is null or
 *              pending tasks did not finish within the timeout
 *              (in which case the pool is NOT freed)
 */
tb_bool_t tb_thread_pool_exit(tb_thread_pool_ref_t pool)
{
    // cast and validate the handle
    tb_thread_pool_impl_t* pool_impl = (tb_thread_pool_impl_t*)pool;
    tb_assert_and_check_return_val(pool_impl, tb_false);

    // trace
    tb_trace_d("exit: ..");

    // stop accepting and processing work first
    tb_thread_pool_kill(pool);

    // give running tasks up to 5 seconds to finish; bail out if they don't
    if (tb_thread_pool_task_wait_all(pool, 5000) <= 0)
    {
        tb_trace_e("exit: wait failed!");
        return tb_false;
    }

    /* join and destroy all workers
     *
     * no lock is needed here: the worker count can no longer grow
     * once the pool has been killed
     */
    tb_size_t count = pool_impl->worker_size;
    tb_size_t index;
    for (index = 0; index < count; index++)
    {
        // the worker slot
        tb_thread_pool_worker_t* worker = &pool_impl->worker_list[index];
        if (worker->loop)
        {
            // join the worker thread; log a failure but keep tearing down
            tb_long_t result = tb_thread_wait(worker->loop, 5000);
            if (result <= 0)
            {
                tb_trace_e("worker[%lu]: wait failed: %ld!", index, result);
            }

            // destroy the thread handle
            tb_thread_exit(worker->loop);
            worker->loop = tb_null;
        }
    }
    pool_impl->worker_size = 0;

    // destroy the job queues and their backing pool under the lock
    tb_spinlock_enter(&pool_impl->lock);
    tb_list_entry_exit(&pool_impl->jobs_pending);
    tb_list_entry_exit(&pool_impl->jobs_waiting);
    tb_list_entry_exit(&pool_impl->jobs_urgent);
    if (pool_impl->jobs_pool) tb_fixed_pool_exit(pool_impl->jobs_pool);
    pool_impl->jobs_pool = tb_null;
    tb_spinlock_leave(&pool_impl->lock);

    // destroy the lock itself
    tb_spinlock_exit(&pool_impl->lock);

    // release the semaphore used to wake workers
    if (pool_impl->semaphore) tb_semaphore_exit(pool_impl->semaphore);
    pool_impl->semaphore = tb_null;

    // free the pool structure itself
    tb_free(pool_impl);

    // trace
    tb_trace_d("exit: ok");

    // ok
    return tb_true;
}