/* Block until exactly `size` bytes have been received into `data`.
 *
 * Uses the non-blocking recv + wait pattern: a zero-byte recv triggers one
 * wait (infinite timeout) for readability; a zero-byte recv immediately
 * after a successful wait means the peer closed the connection.
 *
 * @param sock  the socket to read from
 * @param data  destination buffer (at least `size` bytes)
 * @param size  the exact byte count required
 * @return      tb_true only if all `size` bytes were received
 */
tb_bool_t tb_socket_brecv(tb_socket_ref_t sock, tb_byte_t* data, tb_size_t size)
{
    tb_size_t done   = 0; // bytes accumulated so far
    tb_long_t waited = 0; // result of the most recent tb_socket_wait()
    while (done < size)
    {
        // try a non-blocking read of whatever remains
        tb_long_t n = tb_socket_recv(sock, data + done, size - done);
        if (n > 0)
        {
            // progress: accumulate and re-arm the wait flag
            done += n;
            waited = 0;
        }
        else if (n == 0 && waited == 0)
        {
            // no data and we have not waited since the last read: block for RECV
            waited = tb_socket_wait(sock, TB_SOCKET_EVENT_RECV, -1);
            tb_check_break(waited > 0);
        }
        else
        {
            // recv failed, or returned 0 right after a wait event (closed)
            break;
        }
    }
    return done == size;
}
/* SSL transport read callback: forwards the read to the underlying socket.
 *
 * @param priv  opaque private data carrying the tb_socket_ref_t
 * @param data  destination buffer
 * @param size  maximum bytes to read
 * @return      the tb_socket_recv() result (>0 bytes read, 0 no data,
 *              <0 failed), or -1 if priv is null
 */
static tb_long_t tb_ssl_sock_read(tb_cpointer_t priv, tb_byte_t* data, tb_size_t size)
{
    // the socket handle must have been attached as the private data
    tb_assert_and_check_return_val(priv, -1);

    // delegate to the plain socket layer
    tb_socket_ref_t sock = (tb_socket_ref_t)priv;
    return tb_socket_recv(sock, data, size);
}
/* Complete (or re-queue) a pending TB_AICE_CODE_RECV operation.
 *
 * Drains as much data as is currently readable from the socket, then:
 *   - some data read  -> aice completed with TB_STATE_OK and the byte count
 *   - nothing read, recv returned 0 and not already waiting
 *                     -> re-queue a wait (return 0 = still pending),
 *                        or mark TB_STATE_FAILED if the wait setup fails
 *   - otherwise       -> TB_STATE_CLOSED (recv < 0, or 0 after a wait)
 *
 * @param impl  the aiop proactor implementation
 * @param aice  the recv aice; u.recv.data/size must be valid
 * @return      1 if the aice reached a final state, 0 if waiting, -1 on
 *              invalid arguments
 */
static tb_long_t tb_aiop_spak_recv(tb_aiop_ptor_impl_t* impl, tb_aice_ref_t aice)
{
    // check
    tb_assert_and_check_return_val(impl && aice, -1);
    tb_assert_and_check_return_val(aice->code == TB_AICE_CODE_RECV, -1);
    tb_assert_and_check_return_val(aice->u.recv.data && aice->u.recv.size, -1);

    // the aico
    tb_aiop_aico_t* aico = (tb_aiop_aico_t*)aice->aico;
    tb_assert_and_check_return_val(aico && aico->base.handle, -1);

    // try to recv as much as is available right now (non-blocking loop)
    tb_size_t recv = 0;
    tb_long_t real = 0;
    while (recv < aice->u.recv.size)
    {
        // recv it
        real = tb_socket_recv(aico->base.handle, aice->u.recv.data + recv, aice->u.recv.size - recv);

        // save recv; stop at the first non-positive result
        if (real > 0) recv += real;
        else break;
    }

    // trace
    tb_trace_d("recv[%p]: %lu", aico, recv);

    // no recv?
    if (!recv)
    {
        // recv returned 0 (no data yet) and no wait is pending: queue one
        if (!real && !aico->waiting)
        {
            // wait ok? keep the aice pending
            if (tb_aiop_spak_wait(impl, aice)) return 0;
            // wait failed
            else aice->state = TB_STATE_FAILED;
        }
        // recv failed, or returned 0 while a wait was already signalled: closed
        else aice->state = TB_STATE_CLOSED;
    }
    else
    {
        // ok or closed?
        aice->state = TB_STATE_OK;

        // save the recv size
        aice->u.recv.real = recv;
    }

    // reset wait state so the aico can be reused
    aico->waiting = 0;
    aico->aice.code = TB_AICE_CODE_NONE;

    // ok: the aice was resolved
    return 1;
}
/* Receive data for this session until a complete HTTP header line set has
 * been parsed by tb_demo_http_session_head_line().
 *
 * Fix: the previous code only asserted `wait >= 0` after tb_socket_wait().
 * On a timeout the wait result is 0, so the next iteration re-entered the
 * wait branch (`!real && !wait` still true) and a silent peer made the loop
 * wait forever — TB_DEMO_TIMEOUT never terminated the session. A timeout or
 * wait failure now breaks out of the loop (matching tb_socket_brecv()).
 *
 * @param session  the http session; session->sock must be valid
 * @return         tb_true if the header was fully received and parsed
 */
static tb_bool_t tb_demo_http_session_head_recv(tb_demo_http_session_ref_t session)
{
    // check
    tb_assert_and_check_return_val(session && session->sock, tb_false);

    // read data
    tb_long_t wait = 0;
    tb_long_t ok = 0;
    while (!ok)
    {
        // read it
        tb_long_t real = tb_socket_recv(session->sock, session->data, sizeof(session->data));

        // has data?
        if (real > 0)
        {
            // get the header line
            ok = tb_demo_http_session_head_line(session, session->data, real);

            // clear wait events
            wait = 0;
        }
        // no data? wait it
        else if (!real && !wait)
        {
            // wait it
            wait = tb_socket_wait(session->sock, TB_SOCKET_EVENT_RECV, TB_DEMO_TIMEOUT);

            // break on timeout (wait == 0) or failure (wait < 0)
            tb_check_break(wait > 0);
        }
        // failed or end?
        else break;
    }

    // ok?
    return ok > 0;
}
/* Wait for I/O events using kqueue(2) and translate them into aioe entries.
 *
 * The internal kevent buffer grows lazily (by `grow` entries) and is
 * re-grown whenever a wait fills it completely, capped at aiop->maxn.
 * Events on the internal wakeup socket (aiop->spak[1]) are consumed here:
 * a 'k' byte aborts the wait with -1 (aiop killed), any other byte is a
 * plain wakeup and is skipped.
 *
 * @param rtor     the reactor implementation
 * @param list     output array of aioe entries
 * @param maxn     capacity of `list`
 * @param timeout  wait timeout in ms; < 0 waits forever
 * @return         number of aioe entries written, 0 on timeout, -1 on error
 */
static tb_long_t tb_aiop_rtor_kqueue_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->kqfd >= 0 && rtor->aiop && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init time: convert ms to a timespec for kevent()
    struct timespec t = {0};
    if (timeout > 0)
    {
        t.tv_sec = timeout / 1000;
        t.tv_nsec = (timeout % 1000) * 1000000;
    }

    // init grow: one eighth of the aiop capacity, rounded up to 8
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events buffer on first use
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events; a null timespec means wait forever
    tb_long_t evtn = kevent(impl->kqfd, tb_null, 0, impl->evts, impl->evtn, timeout >= 0? &t : tb_null);
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // grow it if events is full, so the next wait can report more at once
    if (evtn == impl->evtn)
    {
        // grow size, capped at the aiop capacity
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit to the caller's list capacity
    evtn = tb_min(evtn, maxn);

    // sync kernel events into the aioe list
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the kevents
        struct kevent* e = impl->evts + i;

        // the aioo was stored in udata when the event was registered
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)e->udata;
        tb_assert_and_check_return_val(aioo && aioo->sock, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;

        // spak? internal wakeup socket became readable
        if (sock == aiop->spak[1] && e->filter == EVFILT_READ)
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak events with other filters
        tb_check_continue(sock != aiop->spak[1]);

        // init the aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        aioe->priv = aioo->priv;
        if (e->filter == EVFILT_READ)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (e->filter == EVFILT_WRITE)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        // report EV_ERROR as both recv+send if no other code was set
        if ((e->flags & EV_ERROR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear the registered code/priv so it only fires once
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;
        }
    }

    // ok
    return wait;
}
/* Wait for I/O events using epoll(7) and translate them into aioe entries.
 *
 * The internal epoll_event buffer grows lazily and is re-grown whenever a
 * wait fills it, capped at aiop->maxn. EINTR is treated as a benign wakeup
 * (returns 0). Events on the internal wakeup socket (aiop->spak[1]) are
 * consumed here: a 'k' byte aborts the wait with -1 (aiop killed).
 *
 * @param rtor     the reactor implementation
 * @param list     output array of aioe entries
 * @param maxn     capacity of `list`
 * @param timeout  wait timeout in ms; < 0 waits forever
 * @return         number of aioe entries written, 0 on timeout/EINTR,
 *                 -1 on error
 */
static tb_long_t tb_aiop_rtor_epoll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->epfd > 0, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init grow: one eighth of the aiop capacity, rounded up to 8
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events buffer on first use
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events
    tb_long_t evtn = epoll_wait(impl->epfd, impl->evts, impl->evtn, timeout);

    // interrupted?(for gdb?) continue it
    if (evtn < 0 && errno == EINTR) return 0;

    // check error?
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);

    // timeout?
    tb_check_return_val(evtn, 0);

    // grow it if events is full, so the next wait can report more at once
    if (evtn == impl->evtn)
    {
        // grow size, capped at the aiop capacity
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit to the caller's list capacity
    evtn = tb_min(evtn, maxn);

    // sync kernel events into the aioe list
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the aioo pointer was packed into the 64-bit user data at registration
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)tb_u2p(impl->evts[i].data.u64);
        tb_assert_and_check_return_val(aioo, -1);

        // the sock
        tb_socket_ref_t sock = aioo->sock;
        tb_assert_and_check_return_val(sock, -1);

        // the events
        tb_size_t events = impl->evts[i].events;

        // spak? internal wakeup socket became readable
        if (sock == aiop->spak[1] && (events & EPOLLIN))
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak events with other flags
        tb_check_continue(sock != aiop->spak[1]);

        // save aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->priv = aioo->priv;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        if (events & EPOLLIN)
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (events & EPOLLOUT)
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        // report hangup/error as both recv+send if no other code was set
        if (events & (EPOLLHUP | EPOLLERR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            // clear code
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;

            // clear events manually if no epoll oneshot
#ifndef EPOLLONESHOT
            struct epoll_event e = {0};
            if (epoll_ctl(impl->epfd, EPOLL_CTL_DEL, tb_sock2fd(aioo->sock), &e) < 0)
            {
                // trace
                tb_trace_e("clear aioo[%p] failed manually for oneshot, error: %d", aioo, errno);
            }
#endif
        }
    }

    // ok
    return wait;
}
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */

/* Demo coroutine: connect to the local server on TB_DEMO_PORT and pull data
 * until the peer closes, an error occurs, or a wait times out.
 *
 * Fix: the receive loop only asserted `wait >= 0` after tb_socket_wait().
 * On a timeout the wait result is 0, so the next iteration re-entered the
 * wait branch (`!real && !wait` still true) and a silent peer made the loop
 * wait forever — TB_DEMO_TIMEOUT never terminated the transfer. A timeout
 * or wait failure now breaks out of the loop (matching tb_socket_brecv()).
 *
 * @param priv  coroutine argument (unused)
 */
static tb_void_t tb_demo_coroutine_pull(tb_cpointer_t priv)
{
    // done
    tb_socket_ref_t sock = tb_null;
    do
    {
        // init socket
        sock = tb_socket_init(TB_SOCKET_TYPE_TCP, TB_IPADDR_FAMILY_IPV4);
        tb_assert_and_check_break(sock);

        // init address
        tb_ipaddr_t addr;
        tb_ipaddr_set(&addr, "127.0.0.1", TB_DEMO_PORT, TB_IPADDR_FAMILY_IPV4);

        // trace
        tb_trace_d("[%p]: connecting %{ipaddr} ..", sock, &addr);

        // connect socket; 0 means in progress, so wait for the CONN event
        tb_long_t ok;
        while (!(ok = tb_socket_connect(sock, &addr)))
        {
            // wait it
            if (tb_socket_wait(sock, TB_SOCKET_EVENT_CONN, TB_DEMO_TIMEOUT) <= 0) break;
        }

        // connect ok?
        tb_check_break(ok > 0);

        // trace
        tb_trace_d("[%p]: recving ..", sock);

        // recv data
        tb_byte_t data[8192];
        tb_hize_t recv = 0;
        tb_long_t wait = 0;
        tb_hong_t time = tb_mclock();
        while (1)
        {
            // read it
            tb_long_t real = tb_socket_recv(sock, data, sizeof(data));

            // trace
            tb_trace_d("[%p]: recv: %ld, total: %lu", sock, real, recv + (real > 0? real : 0));

            // has data?
            if (real > 0)
            {
                recv += real;
                wait = 0;
            }
            // no data? wait it
            else if (!real && !wait)
            {
                // wait it
                wait = tb_socket_wait(sock, TB_SOCKET_EVENT_RECV, TB_DEMO_TIMEOUT);

                // break on timeout (wait == 0) or failure (wait < 0)
                tb_check_break(wait > 0);
            }
            // failed or end?
            else break;
        }

        // trace
        tb_trace_i("[%p]: recv %llu bytes %lld ms", sock, recv, tb_mclock() - time);

    } while (0);

    // exit socket
    if (sock) tb_socket_exit(sock);
    sock = tb_null;
}
/* Wait for I/O events using select(2) and translate them into aioe entries.
 *
 * The registered fd_sets (rfdi/wfdi) are copied under the pfds spinlock
 * into working sets (rfdo/wfdo) for each select call, then the registered
 * sockets are walked under the hash spinlock to match ready fds. The outer
 * loop retries until at least one aioe is produced, a stop is requested via
 * the wakeup socket, or the overall timeout elapses. A 'k' byte on the
 * wakeup socket (aiop->spak[1]) aborts the wait with -1 (aiop killed).
 *
 * @param rtor     the reactor implementation
 * @param list     output array of aioe entries
 * @param maxn     capacity of `list`
 * @param timeout  wait timeout in ms; < 0 waits forever
 * @return         number of aioe entries written, 0 on timeout, -1 on error
 */
static tb_long_t tb_aiop_rtor_select_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && rtor->aiop && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    // init time: convert ms to a timeval for select()
    struct timeval t = {0};
    if (timeout > 0)
    {
#ifdef TB_CONFIG_OS_WINDOWS
        t.tv_sec = (LONG)(timeout / 1000);
#else
        t.tv_sec = (timeout / 1000);
#endif
        t.tv_usec = (timeout % 1000) * 1000;
    }

    // loop until events were produced, stop was requested, or timeout elapsed
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock.pfds);

        // init fdo: snapshot the registered sets, since select() mutates them
        tb_size_t sfdm = impl->sfdm;
        tb_memcpy(&impl->rfdo, &impl->rfdi, sizeof(fd_set));
        tb_memcpy(&impl->wfdo, &impl->wfdi, sizeof(fd_set));

        // leave
        tb_spinlock_leave(&impl->lock.pfds);

        // wait; a null timeval means wait forever
#ifdef TB_CONFIG_OS_WINDOWS
        tb_long_t sfdn = tb_ws2_32()->select((tb_int_t)sfdm + 1, &impl->rfdo, &impl->wfdo, tb_null, timeout >= 0? &t : tb_null);
#else
        tb_long_t sfdn = select(sfdm + 1, &impl->rfdo, &impl->wfdo, tb_null, timeout >= 0? &t : tb_null);
#endif
        tb_assert_and_check_return_val(sfdn >= 0, -1);

        // timeout?
        tb_check_return_val(sfdn, 0);

        // enter
        tb_spinlock_enter(&impl->lock.hash);

        // sync: walk all registered sockets and match them against the ready sets
        tb_size_t itor = tb_iterator_head(impl->hash);
        tb_size_t tail = tb_iterator_tail(impl->hash);
        for (; itor != tail && wait >= 0 && (tb_size_t)wait < maxn; itor = tb_iterator_next(impl->hash, itor))
        {
            tb_hash_map_item_ref_t item = (tb_hash_map_item_ref_t)tb_iterator_item(impl->hash, itor);
            if (item)
            {
                // the sock
                tb_socket_ref_t sock = (tb_socket_ref_t)item->name;
                tb_assert_and_check_return_val(sock, -1);

                // spak? internal wakeup socket became readable
                // (socket refs appear to encode fd + 1, hence the - 1)
                if (sock == aiop->spak[1] && FD_ISSET(((tb_long_t)aiop->spak[1] - 1), &impl->rfdo))
                {
                    // read spak; mark failure in `wait` so the lock is still released below
                    tb_char_t spak = '\0';
                    if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) wait = -1;

                    // killed?
                    if (spak == 'k') wait = -1;
                    tb_check_break(wait >= 0);

                    // stop to wait
                    stop = tb_true;

                    // continue it
                    continue ;
                }

                // filter spak
                tb_check_continue(sock != aiop->spak[1]);

                // the fd (socket ref encodes fd + 1)
                tb_long_t fd = (tb_long_t)item->name - 1;

                // the aioo
                tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)item->data;
                tb_assert_and_check_return_val(aioo && aioo->sock == sock, -1);

                // init aioe
                tb_aioe_t aioe = {0};
                aioe.priv = aioo->priv;
                aioe.aioo = (tb_aioo_ref_t)aioo;
                if (FD_ISSET(fd, &impl->rfdo))
                {
                    aioe.code |= TB_AIOE_CODE_RECV;
                    if (aioo->code & TB_AIOE_CODE_ACPT) aioe.code |= TB_AIOE_CODE_ACPT;
                }
                if (FD_ISSET(fd, &impl->wfdo))
                {
                    aioe.code |= TB_AIOE_CODE_SEND;
                    if (aioo->code & TB_AIOE_CODE_CONN) aioe.code |= TB_AIOE_CODE_CONN;
                }

                // ok?
                if (aioe.code)
                {
                    // save aioe
                    list[wait++] = aioe;

                    // oneshot? clear it
                    if (aioo->code & TB_AIOE_CODE_ONESHOT)
                    {
                        // clear aioo
                        aioo->code = TB_AIOE_CODE_NONE;
                        aioo->priv = tb_null;

                        // clear events from the registered sets as well
                        tb_spinlock_enter(&impl->lock.pfds);
                        FD_CLR(fd, &impl->rfdi);
                        FD_CLR(fd, &impl->wfdi);
                        tb_spinlock_leave(&impl->lock.pfds);
                    }
                }
            }
        }

        // leave
        tb_spinlock_leave(&impl->lock.hash);
    }

    // ok (or -1 if the wakeup socket reported a kill/failure)
    return wait;
}
/* Wait for I/O events using poll(2) and translate them into aioe entries.
 *
 * The registered pollfd vector (pfds) is copied under the pfds spinlock to a
 * working copy (cfds) for each poll call so registration can proceed
 * concurrently. The outer loop retries until at least one aioe is produced,
 * a stop is requested via the wakeup socket, or the overall timeout elapses.
 * A 'k' byte on the wakeup socket (aiop->spak[1]) aborts the wait with -1
 * (aiop killed).
 *
 * @param rtor     the reactor implementation
 * @param list     output array of aioe entries
 * @param maxn     capacity of `list`
 * @param timeout  wait timeout in ms; < 0 waits forever
 * @return         number of aioe entries written, 0 on timeout, -1 on error
 */
static tb_long_t tb_aiop_rtor_poll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_t* list, tb_size_t maxn, tb_long_t timeout)
{
    // check
    tb_aiop_rtor_poll_impl_t* impl = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->pfds && impl->cfds && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, tb_false);

    // loop until events were produced, stop was requested, or timeout elapsed
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // copy pfds to a working vector, so poll() does not race registration
        tb_spinlock_enter(&impl->lock.pfds);
        tb_vector_copy(impl->cfds, impl->pfds);
        tb_spinlock_leave(&impl->lock.pfds);

        // cfds
        struct pollfd* cfds = (struct pollfd*)tb_vector_data(impl->cfds);
        tb_size_t cfdm = tb_vector_size(impl->cfds);
        tb_assert_and_check_return_val(cfds && cfdm, -1);

        // wait
        tb_long_t cfdn = poll(cfds, cfdm, timeout);
        tb_assert_and_check_return_val(cfdn >= 0, -1);

        // timeout?
        tb_check_return_val(cfdn, 0);

        // sync ready pollfds into the aioe list
        tb_size_t i = 0;
        for (i = 0; i < cfdm && wait < maxn; i++)
        {
            // the sock
            tb_socket_ref_t sock = tb_fd2sock(cfds[i].fd);
            tb_assert_and_check_return_val(sock, -1);

            // the events; skip fds with nothing reported
            tb_size_t events = cfds[i].revents;
            tb_check_continue(events);

            // spak? internal wakeup socket became readable
            if (sock == aiop->spak[1] && (events & POLLIN))
            {
                // read spak
                tb_char_t spak = '\0';
                if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

                // killed?
                if (spak == 'k') return -1;

                // stop to wait
                stop = tb_true;

                // continue it
                continue ;
            }

            // skip spak
            tb_check_continue(sock != aiop->spak[1]);

            // look up the aioo under the hash lock and snapshot code/priv
            tb_size_t code = TB_AIOE_CODE_NONE;
            tb_cpointer_t priv = tb_null;
            tb_aioo_impl_t* aioo = tb_null;
            tb_spinlock_enter(&impl->lock.hash);
            if (impl->hash)
            {
                aioo = (tb_aioo_impl_t*)tb_hash_get(impl->hash, sock);
                if (aioo)
                {
                    // save code & data
                    code = aioo->code;
                    priv = aioo->priv;

                    // oneshot? clear it while still under the lock
                    if (aioo->code & TB_AIOE_CODE_ONESHOT)
                    {
                        aioo->code = TB_AIOE_CODE_NONE;
                        aioo->priv = tb_null;
                    }
                }
            }
            tb_spinlock_leave(&impl->lock.hash);
            tb_check_continue(aioo && code);

            // init aioe from the snapshot
            tb_aioe_t aioe = {0};
            aioe.priv = priv;
            aioe.aioo = (tb_aioo_ref_t)aioo;
            if (events & POLLIN)
            {
                aioe.code |= TB_AIOE_CODE_RECV;
                if (code & TB_AIOE_CODE_ACPT) aioe.code |= TB_AIOE_CODE_ACPT;
            }
            if (events & POLLOUT)
            {
                aioe.code |= TB_AIOE_CODE_SEND;
                if (code & TB_AIOE_CODE_CONN) aioe.code |= TB_AIOE_CODE_CONN;
            }
            // report hangup as both recv+send if no other code was set
            if ((events & POLLHUP) && !(code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND)))
                aioe.code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

            // save aioe
            list[wait++] = aioe;

            // oneshot? also disable the entry in the registered pollfd vector
            if (code & TB_AIOE_CODE_ONESHOT)
            {
                tb_spinlock_enter(&impl->lock.pfds);
                struct pollfd* pfds = (struct pollfd*)tb_vector_data(impl->pfds);
                if (pfds) pfds[i].events = 0;
                tb_spinlock_leave(&impl->lock.pfds);
            }
        }
    }

    // ok
    return wait;
}
/* Wait for I/O events on all registered sockets (poll(2) backend) and
 * dispatch each one to the given event callback.
 *
 * The registered pollfd vector is polled in place, then copied to cfds and
 * the copy is walked, so the callback may safely modify registrations. The
 * outer loop retries until at least one event was dispatched, a stop was
 * requested via the wakeup pair, or the overall timeout elapsed. A 'k' byte
 * on the wakeup socket (poller->pair[1]) aborts the wait with -1 (killed).
 *
 * @param self     the poller
 * @param func     callback invoked as func(poller, sock, events, priv)
 * @param timeout  wait timeout in ms; < 0 waits forever
 * @return         number of events dispatched, 0 on timeout, -1 on error
 */
tb_long_t tb_poller_wait(tb_poller_ref_t self, tb_poller_event_func_t func, tb_long_t timeout)
{
    // check
    tb_poller_poll_ref_t poller = (tb_poller_poll_ref_t)self;
    tb_assert_and_check_return_val(poller && poller->pfds && poller->cfds && func, -1);

    // loop until events were dispatched, stop was requested, or timeout elapsed
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // pfds
        struct pollfd* pfds = (struct pollfd*)tb_vector_data(poller->pfds);
        tb_size_t pfdm = tb_vector_size(poller->pfds);
        tb_assert_and_check_return_val(pfds && pfdm, -1);

        // wait
        tb_long_t pfdn = poll(pfds, pfdm, timeout);
        tb_assert_and_check_return_val(pfdn >= 0, -1);

        // timeout?
        tb_check_return_val(pfdn, 0);

        // copy fds, so the callback may modify registrations while we walk
        tb_vector_copy(poller->cfds, poller->pfds);

        // walk the copied fds
        pfds = (struct pollfd*)tb_vector_data(poller->cfds);
        pfdm = tb_vector_size(poller->cfds);

        // sync
        tb_size_t i = 0;
        for (i = 0; i < pfdm; i++)
        {
            // the sock
            tb_socket_ref_t sock = tb_fd2sock(pfds[i].fd);
            tb_assert_and_check_return_val(sock, -1);

            // the poll events; skip fds with nothing reported
            tb_size_t poll_events = pfds[i].revents;
            tb_check_continue(poll_events);

            // spak? internal wakeup socket became readable
            if (sock == poller->pair[1] && (poll_events & POLLIN))
            {
                // read spak
                tb_char_t spak = '\0';
                if (1 != tb_socket_recv(poller->pair[1], (tb_byte_t*)&spak, 1)) return -1;

                // killed?
                if (spak == 'k') return -1;

                // stop to wait
                stop = tb_true;

                // continue it
                continue ;
            }

            // skip spak
            tb_check_continue(sock != poller->pair[1]);

            // map poll events to poller events
            tb_size_t events = TB_POLLER_EVENT_NONE;
            if (poll_events & POLLIN) events |= TB_POLLER_EVENT_RECV;
            if (poll_events & POLLOUT) events |= TB_POLLER_EVENT_SEND;
            // report hangup as both recv+send if no other event was set
            if ((poll_events & POLLHUP) && !(events & (TB_POLLER_EVENT_RECV | TB_POLLER_EVENT_SEND)))
                events |= TB_POLLER_EVENT_RECV | TB_POLLER_EVENT_SEND;

            // call event function with the user data registered for this sock
            func(self, sock, events, tb_poller_hash_get(poller, sock));

            // update the events count
            wait++;
        }
    }

    // ok
    return wait;
}