Example #1
static tb_bool_t xm_semver_select_from_branches(lua_State* lua, tb_int_t fromidx, tb_char_t const* range_str, tb_size_t range_len)
{
    lua_Integer i = 0;
    luaL_checktype(lua, fromidx, LUA_TTABLE);
    for (i = lua_objlen(lua, fromidx); i > 0; --i) 
    {
        lua_pushinteger(lua, i);
        lua_gettable(lua, fromidx);

        tb_char_t const* source_str = luaL_checkstring(lua, -1);
        if (!source_str)
        {
            // pop the value before skipping it to keep the stack balanced
            lua_pop(lua, 1);
            continue;
        }

        tb_size_t source_len = tb_strlen(source_str);
        if (source_len == range_len && tb_memcmp(source_str, range_str, source_len) == 0) 
        {
            lua_createtable(lua, 0, 2);

            lua_pushlstring(lua, source_str, source_len);
            lua_setfield(lua, -2, "version");

            lua_pushstring(lua, "branches");
            lua_setfield(lua, -2, "source");

            // drop the matched source string so only the result table remains on top
            lua_remove(lua, -2);

            // ok
            return tb_true;
        }

        // pop the non-matching value to keep the stack balanced
        lua_pop(lua, 1);
    }

    // no matches
    return tb_false;
}
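
/* A minimal companion sketch (the wrapper name and argument layout are
 * hypothetical): how a lua_CFunction could drive the helper above, returning
 * either the result table it builds or nil when no branch matches.
 */
static int xm_semver_select_demo(lua_State* lua)
{
    // branches table at index 1, range string at index 2
    size_t range_len = 0;
    char const* range_str = luaL_checklstring(lua, 2, &range_len);

    // on a match the helper leaves the result table on the stack top
    if (xm_semver_select_from_branches(lua, 1, range_str, range_len))
        return 1;

    // no match: return nil
    lua_pushnil(lua);
    return 1;
}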
Example #2
/* //////////////////////////////////////////////////////////////////////////////////////
 * interfaces
 */
tb_bool_t tb_dns_init()
{
    // done
    tb_size_t count = 0;
    if (tb_file_info("/etc/resolv.conf", tb_null)) 
    {
        /* try get list from "/etc/resolv.conf"
         *
         * # Generated by NetworkManager
         * nameserver 10.1.20.10
         * nameserver 8.8.8.8
         *
         */
        tb_stream_ref_t stream = tb_stream_init_from_url("/etc/resolv.conf");
        if (stream)
        {
            // open
            if (tb_stream_open(stream)) 
            {
                // read
                tb_long_t size = 0;
                tb_char_t line[8192];
                while ((size = tb_stream_bread_line(stream, line, sizeof(line))) >= 0)
                {
                    if (size && !tb_strnicmp(line, "nameserver", 10))
                    {
                        // seek to server
                        tb_char_t const* p = line + 10;
                        while (*p && !tb_isdigit(*p)) p++;
                        tb_check_continue(*p);

                        // add server
                        tb_dns_server_add(p);

                        // count++
                        count++;
                    }
                }
            }
    
            // exit
            tb_stream_exit(stream);
        }
    }

    // no server? add the default server
    if (!count) 
    {
        tb_dns_server_add("8.8.8.8");
        tb_dns_server_add("8.8.8.4");
    }

    // ok
    return tb_true;
}
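
/* A hedged usage sketch relying only on the functions shown above: load the
 * nameservers once at startup, then append an extra resolver (the address
 * here is purely illustrative).
 */
static tb_bool_t demo_dns_setup()
{
    // fill the list from /etc/resolv.conf, or fall back to the defaults
    if (!tb_dns_init()) return tb_false;

    // append one more server of our own
    tb_dns_server_add("1.1.1.1");
    return tb_true;
}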
Example #3
static tb_pointer_t tb_demo_loop(tb_cpointer_t priv)
{
    // check
    tb_demo_loop_t* loop = (tb_demo_loop_t*)priv;

    // done
    do
    {
        // check
        tb_assert_and_check_break(loop);

        // trace
        tb_trace_i("[thread: %lu]: init", loop->index);

        // loop
        while (!tb_atomic_get(&loop->bstoped))
        {
            // wait
            tb_long_t wait = tb_semaphore_wait(loop->semaphore, -1);
            tb_assert_and_check_break(wait >= 0);

            // timeout?
            tb_check_continue(wait);

            // trace
            tb_trace_i("[semaphore: %lu]: wait: ok", loop->index);
        }

    } while (0);

    // trace
    tb_trace_i("[thread: %lu]: exit", loop? loop->index : 0);

    // end
    tb_thread_return(tb_null);
    return tb_null;
}
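
/* A hedged companion sketch for the loop above: the controlling side sets the
 * stop flag and then posts the semaphore, so the waiting thread wakes up and
 * re-checks the flag. Assumes tbox's tb_atomic_set/tb_semaphore_post and the
 * tb_demo_loop_t layout used by tb_demo_loop.
 */
static tb_void_t tb_demo_loop_stop(tb_demo_loop_t* loop)
{
    // mark the loop as stopped ...
    tb_atomic_set(&loop->bstoped, 1);

    // ... and wake the waiter so it can observe the flag
    tb_semaphore_post(loop->semaphore, 1);
}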
Example #4
static tb_long_t tb_aiop_rtor_kqueue_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{   
    // check
    tb_aiop_rtor_kqueue_impl_t* impl = (tb_aiop_rtor_kqueue_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->kqfd >= 0 && rtor->aiop && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init time
    struct timespec t = {0};
    if (timeout > 0)
    {
        t.tv_sec = timeout / 1000;
        t.tv_nsec = (timeout % 1000) * 1000000;
    }

    // init grow
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }

    // wait events
    tb_long_t evtn = kevent(impl->kqfd, tb_null, 0, impl->evts, impl->evtn, timeout >= 0? &t : tb_null);

    // interrupted? (e.g. by gdb) treat it as a timeout, as the epoll variant does
    if (evtn < 0 && errno == EINTR) return 0;

    // check error?
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);
    
    // timeout?
    tb_check_return_val(evtn, 0);

    // grow the event buffer if it is full
    if (evtn == impl->evtn)
    {
        // grow size
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct kevent));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit 
    evtn = tb_min(evtn, maxn);

    // sync
    tb_size_t i = 0;
    tb_size_t wait = 0;
    for (i = 0; i < evtn; i++)
    {
        // the kevents 
        struct kevent* e = impl->evts + i;

        // the aioo
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)e->udata;
        tb_assert_and_check_return_val(aioo && aioo->sock, -1);
        
        // the sock 
        tb_socket_ref_t sock = aioo->sock;

        // spak?
        if (sock == aiop->spak[1] && e->filter == EVFILT_READ) 
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // init the aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        aioe->priv = aioo->priv;
        if (e->filter == EVFILT_READ) 
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (e->filter == EVFILT_WRITE) 
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        if ((e->flags & EV_ERROR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND))) 
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT) 
        {
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;
        }
    }

    // ok
    return wait;
}
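
/* The buffer-growth policy used above (and by the epoll variant below), as a
 * standalone sketch: the event array starts at roughly maxn/8 entries
 * (8-aligned) and grows by that same step whenever a wait fills it
 * completely, capped at maxn. The helper name is illustrative only.
 */
static size_t demo_grow_evtn(size_t evtn, size_t maxn)
{
    // tb_align8((maxn >> 3) + 1), spelled out
    size_t grow = (((maxn >> 3) + 1) + 7) & ~(size_t)7;
    evtn += grow;
    return evtn < maxn? evtn : maxn;
}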
Example #5
static tb_long_t tb_aiop_rtor_epoll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{   
    // check
    tb_aiop_rtor_epoll_impl_t* impl = (tb_aiop_rtor_epoll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->epfd > 0, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init grow
    tb_size_t grow = tb_align8((rtor->aiop->maxn >> 3) + 1);

    // init events
    if (!impl->evts)
    {
        impl->evtn = grow;
        impl->evts = tb_nalloc0(impl->evtn, sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    
    // wait events
    tb_long_t evtn = epoll_wait(impl->epfd, impl->evts, impl->evtn, timeout);

    // interrupted? (e.g. by gdb) treat it as a timeout and continue
    if (evtn < 0 && errno == EINTR) return 0;

    // check error?
    tb_assert_and_check_return_val(evtn >= 0 && evtn <= impl->evtn, -1);
    
    // timeout?
    tb_check_return_val(evtn, 0);

    // grow the event buffer if it is full
    if (evtn == impl->evtn)
    {
        // grow size
        impl->evtn += grow;
        if (impl->evtn > rtor->aiop->maxn) impl->evtn = rtor->aiop->maxn;

        // grow data
        impl->evts = tb_ralloc(impl->evts, impl->evtn * sizeof(struct epoll_event));
        tb_assert_and_check_return_val(impl->evts, -1);
    }
    tb_assert(evtn <= impl->evtn);

    // limit 
    evtn = tb_min(evtn, maxn);

    // sync
    tb_size_t i = 0;
    tb_size_t wait = 0; 
    for (i = 0; i < evtn; i++)
    {
        // the aioo
        tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)tb_u2p(impl->evts[i].data.u64);
        tb_assert_and_check_return_val(aioo, -1);

        // the sock 
        tb_socket_ref_t sock = aioo->sock;
        tb_assert_and_check_return_val(sock, -1);

        // the events
        tb_size_t events = impl->evts[i].events;

        // spak?
        if (sock == aiop->spak[1] && (events & EPOLLIN)) 
        {
            // read spak
            tb_char_t spak = '\0';
            if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

            // killed?
            if (spak == 'k') return -1;

            // continue it
            continue ;
        }

        // skip spak
        tb_check_continue(sock != aiop->spak[1]);

        // save aioe
        tb_aioe_ref_t aioe = &list[wait++];
        aioe->code = TB_AIOE_CODE_NONE;
        aioe->priv = aioo->priv;
        aioe->aioo = (tb_aioo_ref_t)aioo;
        if (events & EPOLLIN) 
        {
            aioe->code |= TB_AIOE_CODE_RECV;
            if (aioo->code & TB_AIOE_CODE_ACPT) aioe->code |= TB_AIOE_CODE_ACPT;
        }
        if (events & EPOLLOUT) 
        {
            aioe->code |= TB_AIOE_CODE_SEND;
            if (aioo->code & TB_AIOE_CODE_CONN) aioe->code |= TB_AIOE_CODE_CONN;
        }
        if (events & (EPOLLHUP | EPOLLERR) && !(aioe->code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND))) 
            aioe->code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

        // oneshot? clear it
        if (aioo->code & TB_AIOE_CODE_ONESHOT)
        {
            // clear code
            aioo->code = TB_AIOE_CODE_NONE;
            aioo->priv = tb_null;

            // clear the events manually if EPOLLONESHOT is unavailable
#ifndef EPOLLONESHOT
            struct epoll_event e = {0};
            if (epoll_ctl(impl->epfd, EPOLL_CTL_DEL, tb_sock2fd(aioo->sock), &e) < 0) 
            {
                // trace
                tb_trace_e("clear aioo[%p] failed manually for oneshot, error: %d", aioo, errno);
            }
#endif
        }
    }

    // ok
    return wait;
}
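
/* A sketch of the registration side implied by the oneshot handling above:
 * with EPOLLONESHOT the kernel disarms the descriptor after one event,
 * otherwise the wait loop has to delete it manually (as shown). Plain epoll
 * API; the helper name is illustrative.
 */
static int demo_epoll_add_oneshot(int epfd, int fd, void* udata)
{
    struct epoll_event e = {0};
    e.events = EPOLLIN
#ifdef EPOLLONESHOT
             | EPOLLONESHOT
#endif
             ;
    e.data.ptr = udata;
    return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &e);
}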
Example #6
tb_void_t tb_aicp_loop_util(tb_aicp_ref_t aicp, tb_bool_t (*stop)(tb_cpointer_t priv), tb_cpointer_t priv)
{   
    // check
    tb_aicp_impl_t* impl = (tb_aicp_impl_t*)aicp;
    tb_assert_and_check_return(impl);
   
    // the ptor 
    tb_aicp_ptor_impl_t* ptor = impl->ptor;
    tb_assert_and_check_return(ptor && ptor->loop_spak);

    // the loop spak
    tb_long_t (*loop_spak)(tb_aicp_ptor_impl_t*, tb_handle_t, tb_aice_ref_t, tb_long_t) = ptor->loop_spak;

    // worker++
    tb_atomic_fetch_and_inc(&impl->work);

    // init loop
    tb_handle_t loop = ptor->loop_init? ptor->loop_init(ptor) : tb_null;
 
    // trace
    tb_trace_d("loop[%p]: init", loop);

    // spak ctime
    tb_cache_time_spak();

    // loop
    while (1)
    {
        // spak
        tb_aice_t   resp = {0};
        tb_long_t   ok = loop_spak(ptor, loop, &resp, -1);

        // spak ctime
        tb_cache_time_spak();

        // failed?
        tb_check_break(ok >= 0);

        // timeout?
        tb_check_continue(ok);

        // check aico
        tb_aico_impl_t* aico = (tb_aico_impl_t*)resp.aico;
        tb_assert_and_check_continue(aico);

        // trace
        tb_trace_d("loop[%p]: spak: code: %lu, aico: %p, state: %s: %ld", loop, resp.code, aico, aico? tb_state_cstr(tb_atomic_get(&aico->state)) : "null", ok);

        // pending? clear the state if this is not an accept aice, or the accept failed
        tb_size_t state = TB_STATE_OPENED;
        state = (resp.code != TB_AICE_CODE_ACPT || resp.state != TB_STATE_OK)? tb_atomic_fetch_and_pset(&aico->state, TB_STATE_PENDING, state) : tb_atomic_get(&aico->state);

        // killed or killing?
        if (state == TB_STATE_KILLED || state == TB_STATE_KILLING)
        {
            // update the aice state 
            resp.state = TB_STATE_KILLED;

            // killing? update to the killed state
            tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);
        }

        // call the done func, @note the aico may be exited inside it
        if (resp.func && !resp.func(&resp)) 
        {
            // trace
#ifdef __tb_debug__
            tb_trace_e("loop[%p]: done aice func failed with code: %lu at line: %lu, func: %s, file: %s!", loop, resp.code, aico->line, aico->func, aico->file);
#else
            tb_trace_e("loop[%p]: done aice func failed with code: %lu!", loop, resp.code);
#endif
        }

        // killing? update to the killed state
        tb_atomic_fetch_and_pset(&aico->state, TB_STATE_KILLING, TB_STATE_KILLED);

        // stop it?
        if (stop && stop(priv)) tb_aicp_kill(aicp);
    }

    // exit loop
    if (ptor->loop_exit) ptor->loop_exit(ptor, loop);

    // worker--
    tb_atomic_fetch_and_dec(&impl->work);

    // trace
    tb_trace_d("loop[%p]: exit", loop);
}
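
/* A hedged usage sketch for the loop driver above: run the aicp loop on the
 * current thread until an external atomic flag asks it to stop. Only the
 * stop-predicate signature from the declaration above is assumed.
 */
static tb_bool_t demo_should_stop(tb_cpointer_t priv)
{
    return tb_atomic_get((tb_atomic_t*)priv)? tb_true : tb_false;
}

// on a worker thread:
//     static tb_atomic_t g_stop = 0;
//     tb_aicp_loop_util(aicp, demo_should_stop, &g_stop);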
Example #7
static tb_pointer_t tb_aiop_spak_loop(tb_cpointer_t priv)
{
    // check
    tb_aiop_ptor_impl_t*    impl = (tb_aiop_ptor_impl_t*)priv;
    tb_aicp_impl_t*         aicp = impl? impl->base.aicp : tb_null;

    // done
    do
    {
        // check
        tb_assert_and_check_break(impl && impl->aiop && impl->list && impl->timer && impl->ltimer && aicp);

        // trace
        tb_trace_d("loop: init");

        // loop
        while (!tb_atomic_get(&aicp->kill))
        {
            // the delay
            tb_size_t delay = tb_timer_delay(impl->timer);

            // the ldelay
            tb_size_t ldelay = tb_ltimer_delay(impl->ltimer);
            tb_assert_and_check_break(ldelay != -1);

            // trace
            tb_trace_d("loop: wait: ..");

            // wait aioe
            tb_long_t real = tb_aiop_wait(impl->aiop, impl->list, impl->maxn, tb_min(delay, ldelay));

            // trace
            tb_trace_d("loop: wait: %ld", real);

            // spak ctime
            tb_cache_time_spak();

            // spak timer
            if (!tb_timer_spak(impl->timer)) break;

            // spak ltimer
            if (!tb_ltimer_spak(impl->ltimer)) break;

            // killed?
            tb_check_break(real >= 0);

            // error? out of range
            tb_assert_and_check_break(real <= impl->maxn);

            // timeout?
            tb_check_continue(real);

            // grow the aioe list if it is full
            if (real == impl->maxn)
            {
                // grow size
                impl->maxn += (aicp->maxn >> 4) + 16;
                if (impl->maxn > aicp->maxn) impl->maxn = aicp->maxn;

                // grow list
                impl->list = tb_ralloc(impl->list, impl->maxn * sizeof(tb_aioe_t));
                tb_assert_and_check_break(impl->list);
            }

            // walk aioe list
            tb_size_t i = 0;
            tb_bool_t end = tb_false;
            for (i = 0; i < real && !end; i++)
            {
                // the aioe
                tb_aioe_ref_t aioe = &impl->list[i];
                tb_assert_and_check_break_state(aioe, end, tb_true);

                // the aice
                tb_aice_ref_t aice = (tb_aice_ref_t)aioe->priv;
                tb_assert_and_check_break_state(aice, end, tb_true);

                // the aico
                tb_aiop_aico_t* aico = (tb_aiop_aico_t*)aice->aico;
                tb_assert_and_check_break_state(aico, end, tb_true);

                // have wait?
                tb_check_continue(aice->code);

                // already woken by the timer timeout/killed func? no need to spak it again
                tb_check_continue(!aico->wait_ok);

                // sock?
                if (aico->base.type == TB_AICO_TYPE_SOCK)
                {
                    // push the acpt aice
                    if (aice->code == TB_AICE_CODE_ACPT) end = tb_aiop_push_acpt(impl, aice)? tb_false : tb_true;
                    // push the sock aice
                    else end = tb_aiop_push_sock(impl, aice)? tb_false : tb_true;
                }
                else if (aico->base.type == TB_AICO_TYPE_FILE)
                {
                    // poll file
                    tb_aicp_file_poll(impl);
                }
                else tb_assert(0);
            }

            // end?
            tb_check_break(!end);

            // work it
            tb_aiop_spak_work(impl);
        }

    } while (0);

    // trace
    tb_trace_d("loop: exit");

    // kill
    tb_aicp_kill((tb_aicp_ref_t)aicp);

    // exit
    tb_thread_return(tb_null);
    return tb_null;
}
Example #8
static tb_long_t tb_aiop_rtor_select_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_ref_t list, tb_size_t maxn, tb_long_t timeout)
{   
    // check
    tb_aiop_rtor_select_impl_t* impl = (tb_aiop_rtor_select_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && rtor->aiop && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // init time
    struct timeval t = {0};
    if (timeout > 0)
    {
#ifdef TB_CONFIG_OS_WINDOWS
        t.tv_sec = (LONG)(timeout / 1000);
#else
        t.tv_sec = (timeout / 1000);
#endif
        t.tv_usec = (timeout % 1000) * 1000;
    }

    // loop
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // enter
        tb_spinlock_enter(&impl->lock.pfds);

        // init fdo
        tb_size_t sfdm = impl->sfdm;
        tb_memcpy(&impl->rfdo, &impl->rfdi, sizeof(fd_set));
        tb_memcpy(&impl->wfdo, &impl->wfdi, sizeof(fd_set));

        // leave
        tb_spinlock_leave(&impl->lock.pfds);

        // wait
#ifdef TB_CONFIG_OS_WINDOWS
        tb_long_t sfdn = tb_ws2_32()->select((tb_int_t)sfdm + 1, &impl->rfdo, &impl->wfdo, tb_null, timeout >= 0? &t : tb_null);
#else
        tb_long_t sfdn = select(sfdm + 1, &impl->rfdo, &impl->wfdo, tb_null, timeout >= 0? &t : tb_null);
#endif
        tb_assert_and_check_return_val(sfdn >= 0, -1);

        // timeout?
        tb_check_return_val(sfdn, 0);
        
        // enter
        tb_spinlock_enter(&impl->lock.hash);

        // sync
        tb_size_t itor = tb_iterator_head(impl->hash);
        tb_size_t tail = tb_iterator_tail(impl->hash);
        for (; itor != tail && wait >= 0 && (tb_size_t)wait < maxn; itor = tb_iterator_next(impl->hash, itor))
        {
            tb_hash_map_item_ref_t item = (tb_hash_map_item_ref_t)tb_iterator_item(impl->hash, itor);
            if (item)
            {
                // the sock
                tb_socket_ref_t sock = (tb_socket_ref_t)item->name;
                tb_assert_and_check_return_val(sock, -1);

                // spak?
                if (sock == aiop->spak[1] && FD_ISSET(((tb_long_t)aiop->spak[1] - 1), &impl->rfdo))
                {
                    // read spak
                    tb_char_t spak = '\0';
                    if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) wait = -1;

                    // killed?
                    if (spak == 'k') wait = -1;
                    tb_check_break(wait >= 0);

                    // stop waiting
                    stop = tb_true;

                    // continue it
                    continue ;
                }

                // filter spak
                tb_check_continue(sock != aiop->spak[1]);

                // the fd
                tb_long_t fd = (tb_long_t)item->name - 1;

                // the aioo
                tb_aioo_impl_t* aioo = (tb_aioo_impl_t*)item->data;
                tb_assert_and_check_return_val(aioo && aioo->sock == sock, -1);

                // init aioe
                tb_aioe_t aioe = {0};
                aioe.priv   = aioo->priv;
                aioe.aioo   = (tb_aioo_ref_t)aioo;
                if (FD_ISSET(fd, &impl->rfdo)) 
                {
                    aioe.code |= TB_AIOE_CODE_RECV;
                    if (aioo->code & TB_AIOE_CODE_ACPT) aioe.code |= TB_AIOE_CODE_ACPT;
                }
                if (FD_ISSET(fd, &impl->wfdo)) 
                {
                    aioe.code |= TB_AIOE_CODE_SEND;
                    if (aioo->code & TB_AIOE_CODE_CONN) aioe.code |= TB_AIOE_CODE_CONN;
                }
                    
                // ok?
                if (aioe.code) 
                {
                    // save aioe
                    list[wait++] = aioe;

                    // oneshot? clear it
                    if (aioo->code & TB_AIOE_CODE_ONESHOT)
                    {
                        // clear aioo
                        aioo->code = TB_AIOE_CODE_NONE;
                        aioo->priv = tb_null;

                        // clear events
                        tb_spinlock_enter(&impl->lock.pfds);
                        FD_CLR(fd, &impl->rfdi);
                        FD_CLR(fd, &impl->wfdi);
                        tb_spinlock_leave(&impl->lock.pfds);
                    }
                }
            }
        }

        // leave
        tb_spinlock_leave(&impl->lock.hash);
    }

    // ok
    return wait;
}
Example #9
tb_iterator_ref_t tb_ifaddrs_itor(tb_ifaddrs_ref_t ifaddrs, tb_bool_t reload)
{
    // check
    tb_list_ref_t interfaces = (tb_list_ref_t)ifaddrs;
    tb_assert_and_check_return_val(interfaces, tb_null);

    // use the cached interfaces?
    tb_check_return_val(reload, (tb_iterator_ref_t)interfaces); 

    // clear interfaces first
    tb_list_clear(interfaces);

    // query the list of interfaces.
    struct ifaddrs* list = tb_null;
    if (!getifaddrs(&list) && list)
    {
#if 0
        // init sock
        tb_long_t sock = socket(AF_INET, SOCK_DGRAM, 0);
#endif

        // done
        struct ifaddrs* item = tb_null;
        for (item = list; item; item = item->ifa_next)
        {
            // check
            tb_check_continue(item->ifa_addr && item->ifa_name);

            /* attempt to get the interface from the cached interfaces,
             * creating a new one if it is not cached yet
             */
            tb_ifaddrs_interface_t      interface_new = {0};
            tb_ifaddrs_interface_ref_t  interface = tb_ifaddrs_interface_find((tb_iterator_ref_t)interfaces, item->ifa_name);
            if (!interface) interface = &interface_new;

            // check
            tb_assert(interface == &interface_new || interface->name);

            // done
            switch (item->ifa_addr->sa_family)
            {
            case AF_INET:
                {
                    // the address
                    struct sockaddr_storage const* addr = (struct sockaddr_storage const*)item->ifa_addr;

                    // save ipaddr4
                    tb_ipaddr_t ipaddr4;
                    if (!tb_sockaddr_save(&ipaddr4, addr)) break;
                    interface->ipaddr4 = ipaddr4.u.ipv4;

                    // save flags
                    interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR4;
                    if ((item->ifa_flags & IFF_LOOPBACK) || tb_ipaddr_ip_is_loopback(&ipaddr4)) 
                        interface->flags |= TB_IFADDRS_INTERFACE_FLAG_IS_LOOPBACK;

#if 0
                    // no hwaddr? get it
                    if (!(interface->flags & TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR))
                    {
                        // attempt get the hwaddr
                        struct ifreq ifr;
                        tb_memset(&ifr, 0, sizeof(ifr));
                        tb_strcpy(ifr.ifr_name, item->ifa_name);
                        if (!ioctl(sock, SIOCGIFHWADDR, &ifr))
                        {
                            // have hwaddr
                            interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR;

                            // save hwaddr
                            tb_memcpy(interface->hwaddr.u8, ifr.ifr_hwaddr.sa_data, sizeof(interface->hwaddr.u8));
                        }
                    }
#endif

                    // new interface? save it
                    if (interface == &interface_new)
                    {
                        // save interface name
                        interface->name = tb_strdup(item->ifa_name);
                        tb_assert(interface->name);

                        // save interface
                        tb_list_insert_tail(interfaces, interface);
                    }
                }
                break;
            case AF_INET6:
                {
                    // the address
                    struct sockaddr_storage const* addr = (struct sockaddr_storage const*)item->ifa_addr;

                    // save ipaddr6
                    tb_ipaddr_t ipaddr6;
                    if (!tb_sockaddr_save(&ipaddr6, addr)) break;
                    interface->ipaddr6 = ipaddr6.u.ipv6;

                    // save flags
                    interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_IPADDR6;
                    if ((item->ifa_flags & IFF_LOOPBACK) || tb_ipaddr_ip_is_loopback(&ipaddr6))
                        interface->flags |= TB_IFADDRS_INTERFACE_FLAG_IS_LOOPBACK;

#if 0
                    // no hwaddr? get it
                    if (!(interface->flags & TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR))
                    {
                        // attempt get the hwaddr
                        struct ifreq ifr;
                        tb_memset(&ifr, 0, sizeof(ifr));
                        tb_strcpy(ifr.ifr_name, item->ifa_name);
                        if (!ioctl(sock, SIOCGIFHWADDR, &ifr))
                        {
                            // have hwaddr
                            interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR;

                            // save hwaddr
                            tb_memcpy(interface->hwaddr.u8, ifr.ifr_hwaddr.sa_data, sizeof(interface->hwaddr.u8));
                        }
                    }
#endif

                    // new interface? save it
                    if (interface == &interface_new)
                    {
                        // save interface name
                        interface->name = tb_strdup(item->ifa_name);
                        tb_assert(interface->name);

                        // save interface
                        tb_list_insert_tail(interfaces, interface);
                    }
                }
                break;
            case AF_PACKET:
                {
                    // the address
                    struct sockaddr_ll const* addr = (struct sockaddr_ll const*)item->ifa_addr;

                    // check
                    tb_check_break(addr->sll_halen == sizeof(interface->hwaddr.u8));

                    // no hwaddr? get it
                    if (!(interface->flags & TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR))
                    {
                        // have hwaddr
                        interface->flags |= TB_IFADDRS_INTERFACE_FLAG_HAVE_HWADDR;

                        // save hwaddr
                        tb_memcpy(interface->hwaddr.u8, addr->sll_addr, sizeof(interface->hwaddr.u8));

                        // new interface? save it
                        if (interface == &interface_new)
                        {
                            // save interface name
                            interface->name = tb_strdup(item->ifa_name);
                            tb_assert(interface->name);

                            // save interface
                            tb_list_insert_tail(interfaces, interface);
                        }
                    }
                }
                break;
            default:
                {
                    // trace
                    tb_trace_d("unknown family: %d", item->ifa_addr->sa_family);
                }
                break;
            }
        }

#if 0
        // exit socket
        if (sock) close(sock);
        sock = 0;
#endif

        // exit the interface list
        freeifaddrs(list);
    }

    // ok?
    return (tb_iterator_ref_t)interfaces;
}
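
/* A hedged traversal sketch for the iterator returned above, assuming tbox's
 * tb_for_all_if iteration macro: reload the interface list and trace every
 * entry that carries a name.
 */
static tb_void_t demo_dump_interfaces(tb_ifaddrs_ref_t ifaddrs)
{
    tb_for_all_if (tb_ifaddrs_interface_ref_t, iface, tb_ifaddrs_itor(ifaddrs, tb_true), iface)
    {
        tb_trace_i("interface: %s", iface->name);
    }
}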
Example #10
static tb_void_t tb_hash_map_itor_remove_range(tb_iterator_ref_t iterator, tb_size_t prev, tb_size_t next, tb_size_t size)
{
    // check
    tb_hash_map_impl_t* impl = (tb_hash_map_impl_t*)iterator;
    tb_assert_return(impl && impl->hash_list && impl->hash_size);

    // no size
    tb_check_return(size);

    // the step
    tb_size_t step = impl->element_name.size + impl->element_data.size;
    tb_assert_return(step);

    // the first itor
    tb_size_t itor = prev? tb_hash_map_itor_next(iterator, prev) : tb_hash_map_itor_head(iterator);

    // the head buck and item
    tb_size_t buck_head = tb_hash_map_index_buck(itor);
    tb_size_t item_head = tb_hash_map_index_item(itor);
    tb_assert_return(buck_head && item_head);

    // compute index
    buck_head--;
    item_head--;
    tb_assert_return(buck_head < impl->hash_size && item_head < TB_HASH_MAP_BUCKET_ITEM_MAXN);

    // the last buck and the tail item
    tb_size_t buck_last;
    tb_size_t item_tail;
    if (next)
    {
        // next => buck and item
        buck_last = tb_hash_map_index_buck(next);
        item_tail = tb_hash_map_index_item(next);
        tb_assert_return(buck_last && item_tail);

        // compute index
        buck_last--;
        item_tail--;
        tb_assert_return(buck_last < impl->hash_size && item_tail < TB_HASH_MAP_BUCKET_ITEM_MAXN);
    }
    else 
    {
        buck_last = impl->hash_size - 1;
        item_tail = -1;
    }

    // remove items: [itor, next)
    tb_size_t buck;
    tb_size_t item;
    tb_element_free_func_t name_free = impl->element_name.free;
    tb_element_free_func_t data_free = impl->element_data.free;
    for (buck = buck_head, item = item_head; buck <= buck_last; buck++, item = 0)
    {
        // the list
        tb_hash_map_item_list_t* list = impl->hash_list[buck];
        tb_check_continue(list && list->size);

        // the tail
        tb_size_t tail = (buck == buck_last && next)? item_tail : list->size;
        tb_assert_abort(tail != -1);
        tb_check_continue(item < tail);

        // the data
        tb_byte_t* data = (tb_byte_t*)&list[1];

        // free items
        tb_size_t i = 0;
        for (i = item; i < tail; i++)
        {
            if (name_free) name_free(&impl->element_name, data + i * step);
            if (data_free) data_free(&impl->element_data, data + i * step + impl->element_name.size);
        }

        // move items
        if (buck == buck_last && tail < list->size) tb_memmov(data + item * step, data + tail * step, (list->size - tail) * step);

        // update the list size
        list->size -= tail - item;

        // update the item size
        impl->item_size -= tail - item;
    }
}
Example #11
static tb_long_t tb_aiop_rtor_poll_wait(tb_aiop_rtor_impl_t* rtor, tb_aioe_t* list, tb_size_t maxn, tb_long_t timeout)
{   
    // check
    tb_aiop_rtor_poll_impl_t* impl = (tb_aiop_rtor_poll_impl_t*)rtor;
    tb_assert_and_check_return_val(impl && impl->pfds && impl->cfds && list && maxn, -1);

    // the aiop
    tb_aiop_impl_t* aiop = rtor->aiop;
    tb_assert_and_check_return_val(aiop, -1);

    // loop
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // copy pfds
        tb_spinlock_enter(&impl->lock.pfds);
        tb_vector_copy(impl->cfds, impl->pfds);
        tb_spinlock_leave(&impl->lock.pfds);

        // cfds
        struct pollfd*  cfds = (struct pollfd*)tb_vector_data(impl->cfds);
        tb_size_t       cfdm = tb_vector_size(impl->cfds);
        tb_assert_and_check_return_val(cfds && cfdm, -1);

        // wait
        tb_long_t cfdn = poll(cfds, cfdm, timeout);
        tb_assert_and_check_return_val(cfdn >= 0, -1);

        // timeout?
        tb_check_return_val(cfdn, 0);

        // sync
        tb_size_t i = 0;
        for (i = 0; i < cfdm && wait < maxn; i++)
        {
            // the sock
            tb_socket_ref_t sock = tb_fd2sock(cfds[i].fd);
            tb_assert_and_check_return_val(sock, -1);

            // the events
            tb_size_t events = cfds[i].revents;
            tb_check_continue(events);

            // spak?
            if (sock == aiop->spak[1] && (events & POLLIN))
            {
                // read spak
                tb_char_t spak = '\0';
                if (1 != tb_socket_recv(aiop->spak[1], (tb_byte_t*)&spak, 1)) return -1;

                // killed?
                if (spak == 'k') return -1;

                // stop waiting
                stop = tb_true;

                // continue it
                continue ;
            }

            // skip spak
            tb_check_continue(sock != aiop->spak[1]);

            // the aioo
            tb_size_t       code = TB_AIOE_CODE_NONE;
            tb_cpointer_t   priv = tb_null;
            tb_aioo_impl_t* aioo = tb_null;
            tb_spinlock_enter(&impl->lock.hash);
            if (impl->hash)
            {
                aioo = (tb_aioo_impl_t*)tb_hash_get(impl->hash, sock);
                if (aioo) 
                {
                    // save code & data
                    code = aioo->code;
                    priv = aioo->priv;

                    // oneshot? clear it
                    if (aioo->code & TB_AIOE_CODE_ONESHOT)
                    {
                        aioo->code = TB_AIOE_CODE_NONE;
                        aioo->priv = tb_null;
                    }
                }
            }
            tb_spinlock_leave(&impl->lock.hash);
            tb_check_continue(aioo && code);
            
            // init aioe
            tb_aioe_t   aioe = {0};
            aioe.priv   = priv;
            aioe.aioo   = (tb_aioo_ref_t)aioo;
            if (events & POLLIN)
            {
                aioe.code |= TB_AIOE_CODE_RECV;
                if (code & TB_AIOE_CODE_ACPT) aioe.code |= TB_AIOE_CODE_ACPT;
            }
            if (events & POLLOUT) 
            {
                aioe.code |= TB_AIOE_CODE_SEND;
                if (code & TB_AIOE_CODE_CONN) aioe.code |= TB_AIOE_CODE_CONN;
            }
            if ((events & POLLHUP) && !(code & (TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND))) 
                aioe.code |= TB_AIOE_CODE_RECV | TB_AIOE_CODE_SEND;

            // save aioe
            list[wait++] = aioe;

            // oneshot?
            if (code & TB_AIOE_CODE_ONESHOT)
            {
                tb_spinlock_enter(&impl->lock.pfds);
                struct pollfd* pfds = (struct pollfd*)tb_vector_data(impl->pfds);
                if (pfds) pfds[i].events = 0;
                tb_spinlock_leave(&impl->lock.pfds);
            }
        }
    }

    // ok
    return wait;
}
Example #12
tb_long_t tb_poller_wait(tb_poller_ref_t self, tb_poller_event_func_t func, tb_long_t timeout)
{
    // check
    tb_poller_poll_ref_t poller = (tb_poller_poll_ref_t)self;
    tb_assert_and_check_return_val(poller && poller->pfds && poller->cfds && func, -1);

    // loop
    tb_long_t wait = 0;
    tb_bool_t stop = tb_false;
    tb_hong_t time = tb_mclock();
    while (!wait && !stop && (timeout < 0 || tb_mclock() < time + timeout))
    {
        // pfds
        struct pollfd*  pfds = (struct pollfd*)tb_vector_data(poller->pfds);
        tb_size_t       pfdm = tb_vector_size(poller->pfds);
        tb_assert_and_check_return_val(pfds && pfdm, -1);

        // wait
        tb_long_t pfdn = poll(pfds, pfdm, timeout);
        tb_assert_and_check_return_val(pfdn >= 0, -1);

        // timeout?
        tb_check_return_val(pfdn, 0);

        // copy fds
        tb_vector_copy(poller->cfds, poller->pfds);

        // walk the copied fds
        pfds = (struct pollfd*)tb_vector_data(poller->cfds);
        pfdm = tb_vector_size(poller->cfds);

        // sync
        tb_size_t i = 0;
        for (i = 0; i < pfdm; i++)
        {
            // the sock
            tb_socket_ref_t sock = tb_fd2sock(pfds[i].fd);
            tb_assert_and_check_return_val(sock, -1);

            // the poll events
            tb_size_t poll_events = pfds[i].revents;
            tb_check_continue(poll_events);

            // spak?
            if (sock == poller->pair[1] && (poll_events & POLLIN))
            {
                // read spak
                tb_char_t spak = '\0';
                if (1 != tb_socket_recv(poller->pair[1], (tb_byte_t*)&spak, 1)) return -1;

                // killed?
                if (spak == 'k') return -1;

                // stop waiting
                stop = tb_true;

                // continue it
                continue ;
            }

            // skip spak
            tb_check_continue(sock != poller->pair[1]);

            // init events
            tb_size_t events = TB_POLLER_EVENT_NONE;
            if (poll_events & POLLIN) events |= TB_POLLER_EVENT_RECV;
            if (poll_events & POLLOUT) events |= TB_POLLER_EVENT_SEND;
            if ((poll_events & POLLHUP) && !(events & (TB_POLLER_EVENT_RECV | TB_POLLER_EVENT_SEND))) 
                events |= TB_POLLER_EVENT_RECV | TB_POLLER_EVENT_SEND;

            // call event function
            func(self, sock, events, tb_poller_hash_get(poller, sock));

            // update the events count
            wait++;
        }
    }

    // ok
    return wait;
}
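
/* A hedged usage sketch for tb_poller_wait: an event callback whose signature
 * matches the call site above (poller, socket, event mask, user data from
 * tb_poller_hash_get), driven until the poller is killed or fails.
 */
static tb_void_t demo_poller_event(tb_poller_ref_t poller, tb_socket_ref_t sock, tb_size_t events, tb_cpointer_t priv)
{
    if (events & TB_POLLER_EVENT_RECV) { /* sock is readable */ }
    if (events & TB_POLLER_EVENT_SEND) { /* sock is writable */ }
}

// on the owner thread: block between batches of events until an error/kill
//     while (tb_poller_wait(poller, demo_poller_event, -1) >= 0) ;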