Example #1: tb_poller_init (poll backend)
/* //////////////////////////////////////////////////////////////////////////////////////
 * implementation
 */
tb_poller_ref_t tb_poller_init(tb_cpointer_t priv)
{
    // done
    tb_bool_t               ok = tb_false;
    tb_poller_poll_ref_t    poller = tb_null;
    do
    {
        // make poller
        poller = tb_malloc0_type(tb_poller_poll_t);
        tb_assert_and_check_break(poller);

        // init poll fds
        poller->pfds = tb_vector_init(0, tb_element_mem(sizeof(struct pollfd), tb_null, tb_null));
        tb_assert_and_check_break(poller->pfds);

        // init the copied poll fds (working copy used while polling/dispatching events)
        poller->cfds = tb_vector_init(0, tb_element_mem(sizeof(struct pollfd), tb_null, tb_null));
        tb_assert_and_check_break(poller->cfds);

        // init user private data
        poller->priv = priv;

        // init pair sockets
        if (!tb_socket_pair(TB_SOCKET_TYPE_TCP, poller->pair)) break;

        // insert the receiving end of the socket pair first, used to wake the poller up externally
        if (!tb_poller_insert((tb_poller_ref_t)poller, poller->pair[1], TB_POLLER_EVENT_RECV, tb_null)) break;  

        // ok
        ok = tb_true;

    } while (0);

    // failed?
    if (!ok)
    {
        // exit it
        if (poller) tb_poller_exit((tb_poller_ref_t)poller);
        poller = tb_null;
    }

    // ok?
    return (tb_poller_ref_t)poller;
}
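
For context, here is a minimal lifecycle sketch of how a poller created by this function might be used. It relies only on interfaces that appear in the example above (tb_poller_init, tb_poller_insert, tb_poller_exit) plus the usual "tbox/tbox.h" header; the example_poller_usage helper and the assumption that sock is an already-connected tb_socket_ref_t are illustrative, and the event dispatch loop (tb_poller_wait in tbox) is only indicated by a comment.

#include "tbox/tbox.h"

/* a minimal usage sketch; error handling is abbreviated and the event loop is elided */
static tb_void_t example_poller_usage(tb_socket_ref_t sock)
{
    // create the poller; the user private-data pointer is optional
    tb_poller_ref_t poller = tb_poller_init(tb_null);
    if (poller)
    {
        // register the socket for readability events
        if (tb_poller_insert(poller, sock, TB_POLLER_EVENT_RECV, tb_null))
        {
            // ... an event loop (tb_poller_wait in tbox) would dispatch events here ...
        }

        // exit the poller and release its resources
        tb_poller_exit(poller);
    }
}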
Example #2: tb_lo_scheduler_io_wait
tb_bool_t tb_lo_scheduler_io_wait(tb_lo_scheduler_io_ref_t scheduler_io, tb_socket_ref_t sock, tb_size_t events, tb_long_t timeout)
{
    // check
    tb_assert(scheduler_io && sock && scheduler_io->poller && scheduler_io->scheduler && events);

    // get the current coroutine
    tb_lo_coroutine_t* coroutine = tb_lo_scheduler_running(scheduler_io->scheduler);
    tb_assert(coroutine);

    // get the poller
    tb_poller_ref_t poller = scheduler_io->poller;
    tb_assert(poller);

    // trace
    tb_trace_d("coroutine(%p): wait events(%lu) with %ld ms for socket(%p) ..", coroutine, events, timeout, sock);

    // enable edge-trigger mode if it is supported
    if (tb_poller_support(poller, TB_POLLER_EVENT_CLEAR))
        events |= TB_POLLER_EVENT_CLEAR;

    // does this socket already exist in the poller? only modify its events
    tb_socket_ref_t sock_prev = coroutine->rs.wait.sock;
    if (sock_prev == sock)
    {
        // return the cached events directly if they cover the requested waiting events
        tb_size_t events_prev   = coroutine->rs.wait.events;
        tb_size_t events_cache  = coroutine->rs.wait.events_cache;
        if (events_cache && (events_prev & events))
        {
            // error event cached? report the failure without suspending
            if (events_cache & TB_POLLER_EVENT_ERROR)
            {
                coroutine->rs.wait.events_cache  = 0;
                coroutine->rs.wait.events_result = -1;
                return tb_false;
            }

            // clear the cached events we are about to consume
            coroutine->rs.wait.events_cache &= ~events;

            // store the cached events as the result and return without suspending
            coroutine->rs.wait.events_result = events_cache & events;
            return tb_false;
        }

        // modify the socket in the poller if the waiting events have changed
        if (events_prev != events && !tb_poller_modify(poller, sock, events, coroutine))
        {
            // trace
            tb_trace_e("failed to modify sock(%p) in poller on coroutine(%p)!", sock, coroutine);

            // failed
            coroutine->rs.wait.events_result = -1;
            return tb_false;
        }
    }
    else
    {
        // remove the previously waited socket first if it exists
        if (sock_prev && !tb_poller_remove(poller, sock_prev))
        {
            // trace
            tb_trace_e("failed to remove sock(%p) from poller on coroutine(%p)!", sock_prev, coroutine);

            // failed
            coroutine->rs.wait.events_result = -1;
            return tb_false;
        }

        // insert socket to poller for waiting events
        if (!tb_poller_insert(poller, sock, events, coroutine))
        {
            // trace
            tb_trace_e("failed to insert sock(%p) to poller on coroutine(%p)!", sock, coroutine);

            // failed
            coroutine->rs.wait.events_result = -1;
            return tb_false;
        }
    }

#ifndef TB_CONFIG_MICRO_ENABLE
    // timeout given? register a timer task to wake the coroutine
    tb_cpointer_t   task = tb_null;
    tb_bool_t       is_ltimer = tb_false;
    if (timeout >= 0)
    {
        // high-precision interval (not a multiple of 1000 ms)?
        if (timeout % 1000)
        {
            // init task for timer
            task = tb_timer_task_init(scheduler_io->timer, timeout, tb_false, tb_lo_scheduler_io_timeout, coroutine);
            tb_assert_and_check_return_val(task, tb_false);
        }
        // low-precision interval (a multiple of 1000 ms)?
        else
        {
            // init task for ltimer (faster)
            task = tb_ltimer_task_init(scheduler_io->ltimer, timeout, tb_false, tb_lo_scheduler_io_timeout, coroutine);
            tb_assert_and_check_return_val(task, tb_false);

            // mark as low-precision timer
            is_ltimer = tb_true;
        }
    }

    // the task pointer must be at least 2-byte aligned, so its lowest bit is free for tagging
    tb_assert(!((tb_size_t)(task) & 0x1));

    // save the timer task to the coroutine, tagging the lowest bit to mark a high-precision timer task
    coroutine->rs.wait.task = (is_ltimer || !task)? task : (tb_cpointer_t)((tb_size_t)(task) | 0x1);
#endif

    // save the socket to coroutine for the timer function
    coroutine->rs.wait.sock = sock;

    // save waiting events to coroutine
    coroutine->rs.wait.events        = (tb_sint32_t)events;
    coroutine->rs.wait.events_cache  = 0;
    coroutine->rs.wait.events_result = 0;

    // mark as waiting state
    coroutine->rs.wait.waiting       = 1;

    // return tb_true so that the caller suspends the coroutine until the poller or timer resumes it
    return tb_true;
}
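
The return value encodes the waiting contract: tb_true means the socket (and, with a timeout, a timer task) has been registered and the caller should suspend the coroutine until the poller or a timer resumes it; tb_false means no suspension is needed and the outcome is already in coroutine->rs.wait.events_result (-1 on failure, otherwise the cached events). The pointer-tagging step above packs "which timer owns the task" into the low bit of the task pointer, avoiding an extra flag in the coroutine's wait state. A hedged sketch of a caller consuming this contract follows; everything in it except tb_lo_scheduler_io_wait itself and the events_result field is a hypothetical illustration, not tbox API.

/* hypothetical caller sketch: example_wait_events, its parameters, and the
 * suspension step are illustrative assumptions, not part of tbox */
static tb_long_t example_wait_events(tb_lo_scheduler_io_ref_t scheduler_io, tb_lo_coroutine_t* coroutine, tb_socket_ref_t sock, tb_size_t events, tb_long_t timeout)
{
    // register the socket (and an optional timer task) for the waiting events
    if (tb_lo_scheduler_io_wait(scheduler_io, sock, events, timeout))
    {
        // tb_true: the scheduler would suspend the coroutine here and resume it
        // from the poller event or the timeout callback before reading the result
        /* ... suspend the coroutine ... */
    }

    // tb_false, or after resumption: -1 on failure, otherwise the triggered events
    return (tb_long_t)coroutine->rs.wait.events_result;
}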