static tb_bool_t tb_aiop_rtor_epoll_post(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t aioe)
{
    // check
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->epfd > 0 && aioe, tb_false);

    // the code
    tb_size_t code = aioe->code;

    // the priv
    tb_cpointer_t priv = aioe->priv;

    // the aioo
    tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)aioe->aioo;
    tb_assert_and_check_return_val(aioo && aioo->sock, tb_false);

    // init event
    struct epoll_event e = {0};
    if (code & TB_AIOE_CODE_RECV || code & TB_AIOE_CODE_ACPT) e.events |= EPOLLIN;
    if (code & TB_AIOE_CODE_SEND || code & TB_AIOE_CODE_CONN) e.events |= EPOLLOUT;
    if (code & TB_AIOE_CODE_CLEAR) e.events |= EPOLLET;
#ifdef EPOLLONESHOT
    if (code & TB_AIOE_CODE_ONESHOT) e.events |= EPOLLONESHOT;
#endif
    e.data.u64 = tb_p2u64(aioo);

    // save aioo
    tb_aioo_impl_t prev = *aioo;
    aioo->code = code;
    aioo->priv = priv;

    // modify the event
    if (epoll_ctl(impl->epfd, EPOLL_CTL_MOD, tb_sock2fd(aioo->sock), &e) < 0)
    {
        // re-add it if it has not been added yet
#ifndef EPOLLONESHOT
        if (errno == ENOENT && epoll_ctl(impl->epfd, EPOLL_CTL_ADD, tb_sock2fd(aioo->sock), &e) >= 0)
            return tb_true;
#endif

        // trace
        tb_trace_e("post aioe code: %lu failed, errno: %d", code, errno);

        // restore aioo
        *aioo = prev;
        return tb_false;
    }

    // ok
    return tb_true;
}
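/* Note: EPOLL_CTL_MOD fails with ENOENT when the fd is no longer registered
 * with the epoll instance. In the non-EPOLLONESHOT build, oneshot fds are
 * deleted manually in the wait loop (see tb_aiop_rtor_epoll_wait below), so
 * the fallback above retries with EPOLL_CTL_ADD before reporting failure.
 */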
static tb_bool_t tb_aiop_rtor_select_delo(tb_aiop_rtor_impl_t* rtor, tb_aioo_impl_t const* aioo)
{
    // check
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && aioo && aioo->sock, tb_false);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    // fd
    tb_long_t fd = tb_sock2fd(aioo->sock);

    // enter
    tb_spinlock_enter(&impl->lock.pfds);

    // del fds
    FD_CLR(fd, &impl->rfdi);
    FD_CLR(fd, &impl->wfdi);

    // leave
    tb_spinlock_leave(&impl->lock.pfds);

    // del sock => aioo
    tb_spinlock_enter(&impl->lock.hash);
    if (impl->hash) tb_hash_map_remove(impl->hash, aioo->sock);
    tb_spinlock_leave(&impl->lock.hash);

    // spak it
    if (aiop->spak[0]) tb_socket_send(aiop->spak[0], (tb_byte_t const*)"p", 1);

    // ok
    return tb_true;
}
static tb_bool_t tb_aiop_rtor_kqueue_delo(tb_aiop_rtor_impl_t* rtor, tb_aioo_impl_t const* aioo)
{
    // check
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->kqfd >= 0 && aioo && aioo->sock, tb_false);

    // the code
    tb_size_t code = aioo->code;

    // delete event
    struct kevent   e[2];
    tb_size_t       n = 0;
    tb_int_t        fd = tb_sock2fd(aioo->sock);
    if ((code & TB_AIOE_CODE_RECV) || (code & TB_AIOE_CODE_ACPT))
    {
        EV_SET(&e[n], fd, EVFILT_READ, EV_DELETE, 0, 0, (tb_pointer_t)aioo);
        n++;
    }
    if ((code & TB_AIOE_CODE_SEND) || (code & TB_AIOE_CODE_CONN))
    {
        EV_SET(&e[n], fd, EVFILT_WRITE, EV_DELETE, 0, 0, (tb_pointer_t)aioo);
        n++;
    }

    // ok?
    return n? tb_aiop_rtor_kqueue_sync(rtor, e, n) : tb_true;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
static tb_bool_t tb_aiop_rtor_epoll_addo(tb_aiop_rtor_impl_t* rtor, tb_aioo_impl_t const* aioo)
{
    // check
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->epfd > 0 && aioo && aioo->sock, tb_false);

    // the code
    tb_size_t code = aioo->code;

    // init event
    struct epoll_event e = {0};
    if (code & TB_AIOE_CODE_RECV || code & TB_AIOE_CODE_ACPT) e.events |= EPOLLIN;
    if (code & TB_AIOE_CODE_SEND || code & TB_AIOE_CODE_CONN) e.events |= EPOLLOUT;
    if (code & TB_AIOE_CODE_CLEAR) e.events |= EPOLLET;
#ifdef EPOLLONESHOT
    if (code & TB_AIOE_CODE_ONESHOT) e.events |= EPOLLONESHOT;
#endif
    e.data.u64 = tb_p2u64(aioo);

    // add aioo
    if (epoll_ctl(impl->epfd, EPOLL_CTL_ADD, tb_sock2fd(aioo->sock), &e) < 0)
    {
        // trace
        tb_trace_e("addo aioo[%p], code: %lu failed, errno: %d", aioo, code, errno);
        return tb_false;
    }

    // ok
    return tb_true;
}
static tb_bool_t tb_aiop_rtor_kqueue_addo(tb_aiop_rtor_impl_t* rtor, tb_aioo_impl_t const* aioo)
{
    // check
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->kqfd >= 0 && aioo && aioo->sock, tb_false);

    // the code
    tb_size_t code = aioo->code;

    // init the add event
    tb_size_t add_event = EV_ADD | EV_ENABLE;
    if (code & TB_AIOE_CODE_CLEAR) add_event |= EV_CLEAR;
    if (code & TB_AIOE_CODE_ONESHOT) add_event |= EV_ONESHOT;

    // add event
    struct kevent   e[2];
    tb_size_t       n = 0;
    tb_int_t        fd = tb_sock2fd(aioo->sock);
    if ((code & TB_AIOE_CODE_RECV) || (code & TB_AIOE_CODE_ACPT))
    {
        EV_SET(&e[n], fd, EVFILT_READ, add_event, NOTE_EOF, 0, (tb_pointer_t)aioo);
        n++;
    }
    if ((code & TB_AIOE_CODE_SEND) || (code & TB_AIOE_CODE_CONN))
    {
        EV_SET(&e[n], fd, EVFILT_WRITE, add_event, NOTE_EOF, 0, (tb_pointer_t)aioo);
        n++;
    }

    // ok?
    return n? tb_aiop_rtor_kqueue_sync(rtor, e, n) : tb_true;
}
tb_void_t tb_sockdata_remove(tb_sockdata_ref_t sockdata, tb_socket_ref_t sock)
{
    // check
    tb_long_t fd = tb_sock2fd(sock);
    tb_assert(sockdata && sockdata->data);
    tb_assert(fd > 0 && fd < TB_MAXS32);

    // remove the socket private data
    if (fd < sockdata->maxn) sockdata->data[fd] = tb_null;
}
static __tb_inline__ tb_cpointer_t tb_poller_hash_get(tb_poller_poll_ref_t poller, tb_socket_ref_t sock)
{
    // check
    tb_assert(poller && poller->hash && sock);

    // the socket fd
    tb_long_t fd = tb_sock2fd(sock);
    tb_assert(fd > 0 && fd < TB_MAXS32);

    // get the user private data
    return fd < poller->hash_size? poller->hash[fd] : tb_null;
}
static __tb_inline__ tb_void_t tb_poller_hash_del(tb_poller_poll_ref_t poller, tb_socket_ref_t sock)
{
    // check
    tb_assert(poller && poller->hash && sock);

    // the socket fd
    tb_long_t fd = tb_sock2fd(sock);
    tb_assert(fd > 0 && fd < TB_MAXS32);

    // remove the user private data
    if (fd < poller->hash_size) poller->hash[fd] = tb_null;
}
static tb_bool_t tb_aiop_rtor_kqueue_post(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t aioe)
{
    // check
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && aioe, tb_false);

    // the aioo
    tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)aioe->aioo;
    tb_assert_and_check_return_val(aioo && aioo->sock, tb_false);

    // change
    tb_size_t adde = aioe->code & ~aioo->code;
    tb_size_t dele = ~aioe->code & aioo->code;

    // init the add event
    tb_size_t add_event = EV_ADD | EV_ENABLE;
    if (aioe->code & TB_AIOE_CODE_CLEAR) add_event |= EV_CLEAR;
    if (aioe->code & TB_AIOE_CODE_ONESHOT) add_event |= EV_ONESHOT;

    // save aioo
    aioo->code = aioe->code;
    aioo->priv = aioe->priv;

    // add event
    struct kevent   e[2];
    tb_size_t       n = 0;
    tb_int_t        fd = tb_sock2fd(aioo->sock);
    if (adde & TB_AIOE_CODE_RECV || adde & TB_AIOE_CODE_ACPT)
    {
        EV_SET(&e[n], fd, EVFILT_READ, add_event, NOTE_EOF, 0, (tb_pointer_t)aioo);
        n++;
    }
    else if (dele & TB_AIOE_CODE_RECV || dele & TB_AIOE_CODE_ACPT)
    {
        EV_SET(&e[n], fd, EVFILT_READ, EV_DELETE, 0, 0, (tb_pointer_t)aioo);
        n++;
    }
    if (adde & TB_AIOE_CODE_SEND || adde & TB_AIOE_CODE_CONN)
    {
        EV_SET(&e[n], fd, EVFILT_WRITE, add_event, NOTE_EOF, 0, (tb_pointer_t)aioo);
        n++;
    }
    else if (dele & TB_AIOE_CODE_SEND || dele & TB_AIOE_CODE_CONN)
    {
        EV_SET(&e[n], fd, EVFILT_WRITE, EV_DELETE, 0, 0, (tb_pointer_t)aioo);
        n++;
    }

    // ok?
    return n? tb_aiop_rtor_kqueue_sync(rtor, e, n) : tb_true;
}
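/* Note: tb_aiop_rtor_kqueue_post only submits the *delta* between the newly
 * requested event code and the one already registered, so unchanged filters
 * are not re-added:
 *
 *     adde = aioe->code & ~aioo->code;   // events to register
 *     dele = ~aioe->code & aioo->code;   // events to deregister
 *
 * e.g. old = RECV, new = RECV | SEND gives adde = SEND (only EVFILT_WRITE is
 * added) and dele = 0 (EVFILT_READ is left untouched).
 */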
static tb_bool_t tb_aiop_rtor_select_post(tb_aiop_rtor_impl_t* rtor, tb_aioe_t const* aioe)
{
    // check
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && aioe, tb_false);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    // the aioo
    tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)aioe->aioo;
    tb_assert_and_check_return_val(aioo && aioo->sock, tb_false);

    // save aioo
    aioo->code = aioe->code;
    aioo->priv = aioe->priv;

    // fd
    tb_long_t fd = tb_sock2fd(aioo->sock);

    // enter
    tb_spinlock_enter(&impl->lock.pfds);

    // set fds
    if (aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_ACPT)) FD_SET(fd, &impl->rfdi);
    else FD_CLR(fd, &impl->rfdi);
    if (aioe->code & (TB_AIOE_CODE_SEND | TB_AIOE_CODE_CONN)) FD_SET(fd, &impl->wfdi);
    else FD_CLR(fd, &impl->wfdi);
    if (    (aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_ACPT))
        ||  (aioe->code & (TB_AIOE_CODE_SEND | TB_AIOE_CODE_CONN)))
    {
        FD_SET(fd, &impl->efdi);
    }
    else
    {
        FD_CLR(fd, &impl->efdi);
    }

    // leave
    tb_spinlock_leave(&impl->lock.pfds);

    // spak it
    if (aiop->spak[0]) tb_socket_send(aiop->spak[0], (tb_byte_t const*)"p", 1);

    // ok
    return tb_true;
}
tb_bool_t tb_poller_modify(tb_poller_ref_t self, tb_socket_ref_t sock, tb_size_t events, tb_cpointer_t priv)
{
    // check
    tb_poller_poll_ref_t poller = (tb_poller_poll_ref_t)self;
    tb_assert_and_check_return_val(poller && poller->pfds && sock, tb_false);

    // oneshot is not supported now
    tb_assertf(!(events & TB_POLLER_EVENT_ONESHOT), "cannot insert events with oneshot, not supported!");

    // modify events (TODO: use binary search)
    tb_value_t tuple[2];
    tuple[0].l  = tb_sock2fd(sock);
    tuple[1].ul = events;
    tb_walk_all(poller->pfds, tb_poller_walk_modify, tuple);

    // modify the user private data of this socket
    tb_poller_hash_set(poller, sock, priv);

    // ok
    return tb_true;
}
static tb_void_t tb_poller_hash_set(tb_poller_poll_ref_t poller, tb_socket_ref_t sock, tb_cpointer_t priv)
{
    // check
    tb_assert(poller && sock);

    // the socket fd
    tb_long_t fd = tb_sock2fd(sock);
    tb_assert(fd > 0 && fd < TB_MAXS32);

    // not null?
    if (priv)
    {
        // no hash? init it first
        tb_size_t need = fd + 1;
        if (!poller->hash)
        {
            // init hash
            poller->hash = tb_nalloc0_type(need, tb_cpointer_t);
            tb_assert_and_check_return(poller->hash);

            // init hash size
            poller->hash_size = need;
        }
        else if (need > poller->hash_size)
        {
            // grow hash
            poller->hash = (tb_cpointer_t*)tb_ralloc(poller->hash, need * sizeof(tb_cpointer_t));
            tb_assert_and_check_return(poller->hash);

            // init the grown space
            tb_memset(poller->hash + poller->hash_size, 0, (need - poller->hash_size) * sizeof(tb_cpointer_t));

            // grow the hash size
            poller->hash_size = need;
        }

        // save the user private data
        poller->hash[fd] = priv;
    }
}
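/* Note: despite its name, poller->hash is not a hash table but a flat array
 * indexed directly by the socket fd and grown on demand (the same scheme as
 * tb_sockdata_insert/tb_sockdata_remove in this listing): O(1) lookup and
 * removal at the cost of one tb_cpointer_t slot per fd up to the largest fd
 * seen so far.
 */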
tb_void_t tb_sockdata_insert(tb_sockdata_ref_t sockdata, tb_socket_ref_t sock, tb_cpointer_t priv)
{
    // check
    tb_long_t fd = tb_sock2fd(sock);
    tb_assert(sockdata && fd > 0 && fd < TB_MAXS32);

    // not null?
    if (priv)
    {
        // no data? init it first
        tb_size_t need = fd + 1;
        if (!sockdata->data)
        {
            // init data
            need += TB_SOCKDATA_GROW;
            sockdata->data = tb_nalloc0_type(need, tb_cpointer_t);
            tb_assert_and_check_return(sockdata->data);

            // init data size
            sockdata->maxn = need;
        }
        else if (need > sockdata->maxn)
        {
            // grow data
            need += TB_SOCKDATA_GROW;
            sockdata->data = (tb_cpointer_t*)tb_ralloc(sockdata->data, need * sizeof(tb_cpointer_t));
            tb_assert_and_check_return(sockdata->data);

            // init the grown space
            tb_memset(sockdata->data + sockdata->maxn, 0, (need - sockdata->maxn) * sizeof(tb_cpointer_t));

            // grow the data size
            sockdata->maxn = need;
        }

        // save the socket private data
        sockdata->data[fd] = priv;
    }
}
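/* Usage sketch (illustrative; assumes a tb_sockdata_t value initialized
 * elsewhere, e.g. via tb_sockdata_init(), which is not part of this listing):
 *
 *     tb_sockdata_t sockdata;
 *     tb_sockdata_init(&sockdata);
 *
 *     // bind some private data to the socket; the fd-indexed table grows
 *     // on demand in TB_SOCKDATA_GROW steps
 *     tb_sockdata_insert(&sockdata, sock, (tb_cpointer_t)session);
 *
 *     // drop the binding again before closing the socket
 *     tb_sockdata_remove(&sockdata, sock);
 */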
tb_bool_t tb_poller_insert(tb_poller_ref_t self, tb_socket_ref_t sock, tb_size_t events, tb_cpointer_t priv)
{
    // check
    tb_poller_poll_ref_t poller = (tb_poller_poll_ref_t)self;
    tb_assert_and_check_return_val(poller && poller->pfds && sock, tb_false);

    // oneshot is not supported now
    tb_assertf(!(events & TB_POLLER_EVENT_ONESHOT), "cannot insert events with oneshot, not supported!");

    // init events
    struct pollfd pfd = {0};
    if (events & TB_POLLER_EVENT_RECV) pfd.events |= POLLIN;
    if (events & TB_POLLER_EVENT_SEND) pfd.events |= POLLOUT;

    // save fd (TODO: use binary search)
    pfd.fd = tb_sock2fd(sock);
    tb_vector_insert_tail(poller->pfds, &pfd);

    // bind the user private data to this socket
    tb_poller_hash_set(poller, sock, priv);

    // ok
    return tb_true;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
static tb_long_t tb_aioo_rtor_select_wait(tb_socket_ref_t sock, tb_size_t code, tb_long_t timeout)
{
    // check
    tb_assert_and_check_return_val(sock, -1);

    // fd
    tb_long_t fd = tb_sock2fd(sock);
    tb_assert_and_check_return_val(fd >= 0, -1);

    // init time
    struct timeval t = {0};
    if (timeout > 0)
    {
#ifdef TB_CONFIG_OS_WINDOWS
        t.tv_sec = (LONG)(timeout / 1000);
#else
        t.tv_sec = (timeout / 1000);
#endif
        t.tv_usec = (timeout % 1000) * 1000;
    }

    // init fds
    fd_set  rfds;
    fd_set  wfds;
    fd_set* prfds = (code & TB_AIOE_CODE_RECV || code & TB_AIOE_CODE_ACPT)? &rfds : tb_null;
    fd_set* pwfds = (code & TB_AIOE_CODE_SEND || code & TB_AIOE_CODE_CONN)? &wfds : tb_null;
    if (prfds)
    {
        FD_ZERO(prfds);
        FD_SET(fd, prfds);
    }
    if (pwfds)
    {
        FD_ZERO(pwfds);
        FD_SET(fd, pwfds);
    }

    // select
#ifdef TB_CONFIG_OS_WINDOWS
    tb_long_t r = tb_ws2_32()->select((tb_int_t)fd + 1, prfds, pwfds, tb_null, timeout >= 0? &t : tb_null);
#else
    tb_long_t r = select(fd + 1, prfds, pwfds, tb_null, timeout >= 0? &t : tb_null);
#endif
    tb_assert_and_check_return_val(r >= 0, -1);

    // timeout?
    tb_check_return_val(r, 0);

    // error?
    tb_int_t o = 0;
#ifdef TB_CONFIG_OS_WINDOWS
    tb_int_t n = sizeof(o);
    tb_ws2_32()->getsockopt(fd, SOL_SOCKET, SO_ERROR, (tb_char_t*)&o, &n);
#else
    socklen_t n = sizeof(o);
    getsockopt(fd, SOL_SOCKET, SO_ERROR, (tb_char_t*)&o, &n);
#endif
    if (o) return -1;

    // ok
    tb_long_t e = 0;
    if (prfds && FD_ISSET(fd, &rfds))
    {
        e |= TB_AIOE_CODE_RECV;
        if (code & TB_AIOE_CODE_ACPT) e |= TB_AIOE_CODE_ACPT;
    }
    if (pwfds && FD_ISSET(fd, &wfds))
    {
        e |= TB_AIOE_CODE_SEND;
        if (code & TB_AIOE_CODE_CONN) e |= TB_AIOE_CODE_CONN;
    }
    return e;
}
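/* Call sketch: wait up to one second for the socket to become readable. Per
 * the implementation above, the return value is the set of ready event codes,
 * 0 on timeout, or -1 on failure:
 *
 *     tb_long_t events = tb_aioo_rtor_select_wait(sock, TB_AIOE_CODE_RECV, 1000);
 *     if (events > 0 && (events & TB_AIOE_CODE_RECV))
 *     {
 *         // readable now
 *     }
 *     else if (!events)
 *     {
 *         // timed out
 *     }
 */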
tb_bool_t tb_poller_remove(tb_poller_ref_t self, tb_socket_ref_t sock)
{
    // check
    tb_poller_poll_ref_t poller = (tb_poller_poll_ref_t)self;
    tb_assert_and_check_return_val(poller && poller->pfds && sock, tb_false);

    // remove this socket and its events (TODO: use binary search)
    tb_remove_first_if(poller->pfds, tb_poller_walk_remove, (tb_cpointer_t)(tb_long_t)tb_sock2fd(sock));

    // remove the user private data of this socket
    tb_poller_hash_del(poller, sock);

    // ok
    return tb_true;
}
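/* Usage sketch for the poll-based poller above (illustrative; the poller
 * construction, e.g. tb_poller_init(), and the wait loop belong to the
 * surrounding poller API and are not shown in this listing):
 *
 *     tb_poller_ref_t poller = tb_poller_init(tb_null);
 *     tb_poller_insert(poller, sock, TB_POLLER_EVENT_RECV, priv);
 *     tb_poller_modify(poller, sock, TB_POLLER_EVENT_RECV | TB_POLLER_EVENT_SEND, priv);
 *     tb_poller_remove(poller, sock);
 */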
static tb_long_t tb_aiop_rtor_epoll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->epfd > 0, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init grow
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events
    tb_long_t evtn = epoll_wait(impl->epfd, impl->evts, impl->evtn, timeout);

    // interrupted? (e.g. by gdb) continue it
    if (evtn < 0 && errno == EINTR) return 0;

    // check error?
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // grow it if the events buffer is full
    if (evtn == impl->evtn)
    {
        // grow size
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit
    evtn = tb_min(evtn, maxn);

    // sync
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the aioo
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)tb_u2p(impl->evts[i].data.u64);
        tb_assert_and_check_return_val(aioo, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;
        tb_assert_and_check_return_val(sock, -1);

        // the events
        tb_size_t events = impl->evts[i].events;

        // spak?
        if (sock == aiop->spak[1] && (events & EPOLLIN))
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // save aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->priv = aioo->priv;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        if (events & EPOLLIN)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (events & EPOLLOUT)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        if (events & (EPOLLHUP | EPOLLERR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            // clear code
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;

            // clear events manually if no epoll oneshot
#ifndef EPOLLONESHOT
            struct epoll_event e = {0};
            if (epoll_ctl(impl->epfd, EPOLL_CTL_DEL, tb_sock2fd(aioo->sock), &e) < 0)
            {
                // trace
                tb_trace_e("clear aioo[%p] manually for oneshot failed, errno: %d", aioo, errno);
            }
#endif
        }
    }

    // ok
    return wait;
}
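/* Note: aiop->spak appears to be a connected socket pair used to interrupt the
 * waiting thread: other threads send 'p' on spak[0] to wake the loop (see the
 * select post/delo functions above), while 'k' means kill, which is why the
 * wait loop above returns -1 after reading 'k' from spak[1].
 */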