Example #1
static tb_long_t tb_aiop_rtor_poll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_t* list, tb_size_t maxn, tb_long_t timeout)
{   
    // check
    tb_aiop_rtor_poll_impl_t* impl = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->pfds && impl->cfds && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // loop
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // copy pfds
        tb_spinlock_enter(&impl->lock.pfds);
        tb_vector_copy(impl->cfds, impl->pfds);
        tb_spinlock_leave(&impl->lock.pfds);

        // cfds
        struct pollfd*  cfds = (struct pollfd*)tb_vector_data(impl->cfds);
        tb_size_t       cfdm = tb_vector_size(impl->cfds);
        tb_assert_and_check_return_val(cfds && cfdm, -1);

        // wait
        tb_long_t cfdn = poll(cfds, cfdm, timeout);
        tb_assert_and_check_return_val(cfdn >= 0, -1);

        // timeout?
        tb_check_return_val(cfdn, 0);

        // sync
        tb_size_t i = 0;
        for (i = 0; i < cfdm && wait < maxn; i++)
        {
            // the sock
            tb_socket_ref_t sock = tb_fd2sock(cfds[i].fd);
            tb_assert_and_check_return_val(sock, -1);

            // the events
            tb_size_t events = cfds[i].revents;
            tb_check_continue(events);

            // spak?
            if (sock == aiop->spak[1] && (events & POLLIN))
            {
                // read spak
                tb_char_t spak = '\0';
                if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

                // killed?
                if (spak == 'k') return -1;

                // stop to wait
                stop = tb_true;

                // continue it
                continue ;
            }

            // skip spak
            tb_check_continue(sock != aiop->spak[1]);

            // the aioo
            tb_size_t       code = TB_AIOE_CODE_NONE;
            tb_cpointer_t   priv = tb_null;
            tb_aioo_impl_t* aioo = tb_null;
            tb_spinlock_enter(&impl->lock.hash);
            if (impl->hash)
            {
                aioo = (tb_aioo_impl_t*)tb_hash_get(impl->hash, sock);
                if (aioo) 
                {
                    // save code & data
                    code = aioo->code;
                    priv = aioo->priv;

                    // oneshot? clear it
                    if (aioo->code & TB_AIOE_CODE_ONESHOT)
                    {
                        aioo->code = TB_AIOE_CODE_NONE;
                        aioo->priv = tb_null;
                    }
                }
            }
            tb_spinlock_leave(&impl->lock.hash);
            tb_check_continue(aioo && code);
            
            // init aioe
            tb_aioe_t   aioe = {0};
            aioe.priv   = priv;
            aioe.aioo   = (tb_aioo_ref_t)aioo;
            if (events & POLLIN)
            {
                aioe.code |= TB_AIOE_CODE_RECV;
                if (code & TB_AIOE_CODE_ACPT) aioe.code |= TB_AIOE_CODE_ACPT;
            }
            if (events & POLLOUT) 
            {
                aioe.code |= TB_AIOE_CODE_SEND;
                if (code & TB_AIOE_CODE_CONN) aioe.code |= TB_AIOE_CODE_CONN;
            }
            if ((events & POLLHUP) && !(code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND))) 
                aioe.code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

            // save aioe
            list[wait++] = aioe;

            // oneshot?
            if (code & TB_AIOE_CODE_ONESHOT)
            {
                tb_spinlock_enter(&impl->lock.pfds);
                struct pollfd* pfds = (struct pollfd*)tb_vector_data(impl->pfds);
                if (pfds) pfds[i].events = 0;
                tb_spinlock_leave(&impl->lock.pfds);
            }
        }
    }

    // ok
    return wait;
}
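
For context, here is a minimal consumer sketch for the tb_aioe_t list that tb_aiop_rtor_poll_wait() fills. The buffer size, the timeout value and the example_drain_events helper are assumptions made for illustration; only the return-value convention (-1 on error or kill, 0 on timeout, >0 events written) and the aioe fields are taken from the code above.

// hypothetical consumer: drain one round of events from the rtor
static tb_void_t example_drain_events(tb_aiop_rtor_impl_t* rtor)
{
    // preallocated output buffer (size chosen arbitrarily for this sketch)
    tb_aioe_t list[256];

    // wait up to 1000 ms for events
    tb_long_t wait = tb_aiop_rtor_poll_wait(rtor, list, 256, 1000);
    if (wait < 0) return;   // error or killed ('k' spak byte)
    if (!wait) return;      // timeout, nothing ready

    // each entry carries the triggered event codes and the user data saved in the aioo
    tb_long_t i = 0;
    for (i = 0; i < wait; i++)
    {
        if (list[i].code & TB_AIOE_CODE_RECV) { /* the aioo's socket is readable */ }
        if (list[i].code & TB_AIOE_CODE_SEND) { /* the aioo's socket is writable */ }
    }
}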
Example #2
tb_long_t tb_poller_wait(tb_poller_ref_t self, tb_poller_event_func_t func, tb_long_t timeout)
{
    // check
    tb_poller_poll_ref_t poller = (tb_poller_poll_ref_t)self;
    tb_assert_and_check_return_val(poller && poller->pfds && poller->cfds && func, -1);

    // loop
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // pfds
        struct pollfd*  pfds = (struct pollfd*)tb_vector_data(poller->pfds);
        tb_size_t       pfdm = tb_vector_size(poller->pfds);
        tb_assert_and_check_return_val(pfds && pfdm, -1);

        // wait
        tb_long_t pfdn = poll(pfds, pfdm, timeout);
        tb_assert_and_check_return_val(pfdn >= 0, -1);

        // timeout?
        tb_check_return_val(pfdn, 0);

        // copy fds
        tb_vector_copy(poller->cfds, poller->pfds);

        // walk the copied fds
        pfds = (struct pollfd*)tb_vector_data(poller->cfds);
        pfdm = tb_vector_size(poller->cfds);

        // sync
        tb_size_t i = 0;
        for (i = 0; i < pfdm; i++)
        {
            // the sock
            tb_socket_ref_t sock = tb_fd2sock(pfds[i].fd);
            tb_assert_and_check_return_val(sock, -1);

            // the poll events
            tb_size_t poll_events = pfds[i].revents;
            tb_check_continue(poll_events);

            // spak?
            if (sock == poller->pair[1] && (poll_events & POLLIN))
            {
                // read spak
                tb_char_t spak = '\0';
                if (1 != tb_socket_recv(poller->pair[1], (tb_byte_t*)&spak, 1)) return -1;

                // killed?
                if (spak == 'k') return -1;

                // stop to wait
                stop = tb_true;

                // continue it
                continue ;
            }

            // skip spak
            tb_check_continue(sock != poller->pair[1]);

            // init events
            tb_size_t events = TB_POLLER_EVENT_NONE;
            if (poll_events & POLLIN) events |= TB_POLLER_EVENT_RECV;
            if (poll_events & POLLOUT) events |= TB_POLLER_EVENT_SEND;
            if ((poll_events & POLLHUP) && !(events & (TB_POLLER_EVENT_RECV | TB_POLLER_EVENT_SEND))) 
                events |= TB_POLLER_EVENT_RECV | TB_POLLER_EVENT_SEND;

            // call event function
            func(self, sock, events, tb_poller_hash_get(poller, sock));

            // update the events count
            wait++;
        }
    }

    // ok
    return wait;
}
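
A hedged caller sketch for the callback-based variant: the callback signature is inferred from the invocation `func(self, sock, events, tb_poller_hash_get(poller, sock))` in the code above and may differ from the real tb_poller_event_func_t typedef; the example_on_event and example_wait_loop names, and the 1000 ms timeout, are hypothetical.

// hypothetical event handler matching the call site shown above
static tb_void_t example_on_event(tb_poller_ref_t poller, tb_socket_ref_t sock, tb_size_t events, tb_cpointer_t priv)
{
    // dispatch on the event mask built by tb_poller_wait()
    if (events & TB_POLLER_EVENT_RECV) { /* socket is readable */ }
    if (events & TB_POLLER_EVENT_SEND) { /* socket is writable */ }
}

// hypothetical wait loop: <0 means error/kill, 0 means timeout, >0 is the
// number of sockets dispatched to the callback in this round
static tb_void_t example_wait_loop(tb_poller_ref_t poller)
{
    while (tb_poller_wait(poller, example_on_event, 1000) >= 0) ;
}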