tb_void_t tb_transfer_pool_kill(tb_transfer_pool_ref_t pool)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return(impl);

    // trace
    tb_trace_d("kill: ..");

    // kill it
    if (TB_STATE_OK == tb_atomic_fetch_and_set(&impl->state, TB_STATE_KILLING))
        tb_transfer_pool_kill_all(pool);
}
tb_void_t tb_aicp_kill_all(tb_aicp_ref_t aicp)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return(impl);

    // trace
    tb_trace_d("kill: all: ..");

    // kill all
    if (!tb_atomic_fetch_and_set(&impl->kill_all, 1))
    {
        tb_spinlock_enter(&impl->lock);
        if (impl->pool) tb_fixed_pool_walk(impl->pool, tb_aicp_walk_kill, impl);
        tb_spinlock_leave(&impl->lock);
    }
}
tb_bool_t tb_async_transfer_resume(tb_async_transfer_ref_t transfer)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return_val(impl && impl->aicp, tb_false);

    // done
    tb_bool_t ok = tb_false;
    tb_size_t state_pause = TB_STATE_OK;
    do
    {
        // must be opened?
        tb_check_break(TB_STATE_OPENED == tb_atomic_get(&impl->state));

        // resume it
        state_pause = tb_atomic_fetch_and_set(&impl->state_pause, TB_STATE_OK);

        // pausing or ok? return ok directly
        tb_check_return_val(state_pause == TB_STATE_PAUSED, tb_true);

        // check
        tb_assert_and_check_break(impl->istream);
        tb_assert_and_check_break(impl->ostream);

        // init some rate info
        impl->done.base_time    = tb_aicp_time(impl->aicp);
        impl->done.base_time1s  = impl->done.base_time;
        impl->done.saved_size1s = 0;
        impl->done.current_rate = 0;

        // read it
        if (!tb_async_stream_read(impl->istream, (tb_size_t)tb_atomic_get(&impl->limited_rate), tb_async_transfer_istream_read_func, impl)) break;

        // ok
        ok = tb_true;

    } while (0);

    // failed? restore state
    if (!ok && state_pause != TB_STATE_OK) tb_atomic_pset(&impl->state_pause, TB_STATE_OK, state_pause);

    // ok?
    return ok;
}
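// The resume path above follows a swap-then-restore idiom: atomically swap the pause
// state to TB_STATE_OK, and if a later step fails, restore the previous value only if
// the slot still holds TB_STATE_OK (tb_atomic_pset is a compare-and-set style store).
// A minimal sketch of the same idiom in plain C11 atomics follows; the names
// (pause_state, try_resume, STATE_*) are hypothetical and not part of tbox.
#include <stdatomic.h>
#include <stdbool.h>

enum { STATE_OK = 0, STATE_PAUSED = 1, STATE_PAUSING = 2 };

static _Atomic int pause_state = STATE_PAUSED;

static bool try_resume(bool (*start_reading)(void))
{
    // swap the pause state to OK and remember what it was
    int prev = atomic_exchange(&pause_state, STATE_OK);

    // not actually paused? nothing to resume
    if (prev != STATE_PAUSED) return true;

    // try to restart the work; on failure, restore the old state,
    // but only if it is still OK (i.e. nobody changed it concurrently)
    if (!start_reading())
    {
        int expected = STATE_OK;
        atomic_compare_exchange_strong(&pause_state, &expected, prev);
        return false;
    }
    return true;
}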
tb_void_t tb_aicp_kill(tb_aicp_ref_t aicp)
{
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return(impl);

    // trace
    tb_trace_d("kill: ..");

    // kill all
    tb_aicp_kill_all(aicp);

    // kill it
    if (!tb_atomic_fetch_and_set(&impl->kill, 1))
    {
        // kill proactor
        if (impl->ptor && impl->ptor->kill) impl->ptor->kill(impl->ptor);
    }
}
tb_void_t tb_async_transfer_kill(tb_async_transfer_ref_t transfer)
{
    // check
    tb_async_transfer_impl_t* impl = (tb_async_transfer_impl_t*)transfer;
    tb_assert_and_check_return(impl);

    // kill it
    tb_size_t state = tb_atomic_fetch_and_set(&impl->state, TB_STATE_KILLING);
    tb_check_return(state != TB_STATE_KILLING);

    // trace
    tb_trace_d("kill: ..");

    // kill istream
    if (impl->istream) tb_async_stream_kill(impl->istream);

    // kill ostream
    if (impl->ostream) tb_async_stream_kill(impl->ostream);
}
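// All of the kill paths above share one idiom: atomically exchange the state (or a
// kill flag) first, and only the caller that observes the old "not yet killed" value
// performs the actual teardown, so concurrent or repeated kills are harmless no-ops.
// Below is a minimal sketch of that idiom in plain C11 atomics; the names
// (killed, kill_once, do_teardown) are hypothetical and not part of tbox.
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool killed = false;

static void kill_once(void (*do_teardown)(void))
{
    // only the first caller sees "false" here; every later call is a no-op
    if (!atomic_exchange(&killed, true))
        do_teardown();
}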