static tb_bool_t tb_aiop_rtor_poll_post(tb_aiop_rtor_impl_t* rtor, tb_aioe_t const* aioe)
{
    // check the poll reactor and the posted event
    tb_aiop_rtor_poll_impl_t* poll_rtor = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_assert_and_check_return_val(poll_rtor && poll_rtor->pfds && poll_rtor->cfds && aioe, tb_false);

    // check the owning aiop
    tb_aiop_impl_t* aiop_impl = rtor->aiop;
    tb_assert_and_check_return_val(aiop_impl, tb_false);

    // check the aioo carried by the event
    tb_aioo_impl_t* aioo_impl = (tb_aioo_impl_t*)aioe->aioo;
    tb_assert_and_check_return_val(aioo_impl, tb_false);

    // record the waited event code and user private data on the aioo
    aioo_impl->code = aioe->code;
    aioo_impl->priv = aioe->priv;

    // apply the new events to the pollfd list under the lock, TODO: sete by binary search
    tb_spinlock_enter(&poll_rtor->lock.pfds);
    tb_walk_all(poll_rtor->pfds, tb_poll_walk_sete, (tb_pointer_t)aioe);
    tb_spinlock_leave(&poll_rtor->lock.pfds);

    // wake the waiting poll loop through the spak socket pair
    if (aiop_impl->spak[0]) tb_socket_send(aiop_impl->spak[0], (tb_byte_t const*)"p", 1);

    // ok
    return tb_true;
}
tb_void_t tb_transfer_pool_kill_all(tb_transfer_pool_ref_t pool)
{
    // check the pool handle
    tb_transfer_pool_impl_t* pool_impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return(pool_impl);

    // nothing to do unless the pool is in the ok state
    tb_check_return(TB_STATE_OK == tb_atomic_get(&pool_impl->state));

    // kill every pending work item while holding the pool lock
    tb_spinlock_enter(&pool_impl->lock);

    // trace
    tb_trace_d("kill_all: %lu, ..", tb_list_entry_size(&pool_impl->work));

    // kill it
    tb_walk_all(tb_list_entry_itor(&pool_impl->work), tb_transfer_pool_work_kill, tb_null);

    // leave
    tb_spinlock_leave(&pool_impl->lock);
}
tb_bool_t tb_poller_modify(tb_poller_ref_t self, tb_socket_ref_t sock, tb_size_t events, tb_cpointer_t priv)
{
    // check
    tb_poller_poll_ref_t poller = (tb_poller_poll_ref_t)self;
    tb_assert_and_check_return_val(poller && poller->pfds && sock, tb_false);

    // oneshot is not supported now
    // FIX: the message previously said "insert" — copy-pasted from tb_poller_insert
    tb_assertf(!(events & TB_POLLER_EVENT_ONESHOT), "cannot modify events with oneshot, not supported!");

    // modify the waited events for this socket's pollfd, TODO uses binary search
    tb_value_t tuple[2];
    tuple[0].l = tb_sock2fd(sock);
    tuple[1].ul = events;
    tb_walk_all(poller->pfds, tb_poller_walk_modify, tuple);

    // modify user private data to socket
    tb_poller_hash_set(poller, sock, priv);

    // ok
    return tb_true;
}
tb_long_t tb_transfer_pool_wait_all(tb_transfer_pool_ref_t pool, tb_long_t timeout)
{
    // check
    tb_transfer_pool_impl_t* impl = (tb_transfer_pool_impl_t*)pool;
    tb_assert_and_check_return_val(impl, -1);

    /* wait until the work list drains or the timeout (in ms) elapses;
     * a negative timeout waits forever.
     *
     * returns: 1 if all work finished, 0 if the wait timed out with work pending
     */
    tb_size_t size = 0;
    tb_hong_t time = tb_cache_time_spak();
    while ((timeout < 0 || tb_cache_time_spak() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock);

        /* FIX: was `tb_size_t size = ...`, which shadowed the outer `size`;
         * the outer variable stayed 0, so the function always returned 1
         * even when the timeout expired with work still pending.
         */
        size = tb_list_entry_size(&impl->work);

        // trace
        tb_trace_d("wait: %lu: ..", size);

        // trace the still-pending work items in debug builds
#ifdef __tb_debug__
        if (size) tb_walk_all(tb_list_entry_itor(&impl->work), tb_transfer_pool_work_wait, tb_null);
#endif

        // leave
        tb_spinlock_leave(&impl->lock);

        // done? stop polling
        tb_check_break(size);

        // wait some time
        tb_msleep(200);
    }

    // ok?
    return !size? 1 : 0;
}