Example #1
File: ctx.cpp Project: 5igm4/libzmq
zmq::ctx_t::~ctx_t ()
{
    //  Check that there are no remaining sockets.
    zmq_assert (sockets.empty ());

    //  Ask I/O threads to terminate. If the stop signal wasn't sent to an I/O
    //  thread, a subsequent invocation of the destructor would hang up.
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
        io_threads [i]->stop ();
    }

    //  Wait till I/O threads actually terminate.
    for (io_threads_t::size_type i = 0; i != io_threads.size (); i++) {
        LIBZMQ_DELETE(io_threads [i]);
    }

    //  Deallocate the reaper thread object.
    LIBZMQ_DELETE(reaper);

    //  Deallocate the array of mailboxes. No special work is
    //  needed as mailboxes themselves were deallocated with their
    //  corresponding io_thread/socket objects.
    free (slots);

    //  If we've done any Curve encryption, we may have a file handle
    //  to /dev/urandom open that needs to be cleaned up.
#ifdef ZMQ_HAVE_CURVE
    randombytes_close ();
#endif

    //  Remove the tag, so that the object is considered dead.
    tag = ZMQ_CTX_TAG_VALUE_BAD;
}
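
Every example on this page relies on the LIBZMQ_DELETE helper, which deletes an object and then nulls the pointer so a stray double delete becomes a no-op. The sketch below shows the general shape; the exact definition lives in libzmq's src/macros.hpp and may differ slightly between versions.

//  Sketch of the LIBZMQ_DELETE helper used throughout these examples
//  (check src/macros.hpp in your libzmq version for the exact text).
#define LIBZMQ_DELETE(p_object)                                                \
    {                                                                          \
        delete p_object;                                                       \
        p_object = NULL;                                                       \
    }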
Example #2
zmq::stream_engine_t::~stream_engine_t ()
{
    zmq_assert (!plugged);

    if (s != retired_fd) {
#ifdef ZMQ_HAVE_WINDOWS
        int rc = closesocket (s);
        wsa_assert (rc != SOCKET_ERROR);
#else
        int rc = close (s);
        errno_assert (rc == 0);
#endif
        s = retired_fd;
    }

    int rc = tx_msg.close ();
    errno_assert (rc == 0);

    //  Drop reference to metadata and destroy it if we are
    //  the only user.
    if (metadata != NULL) {
        if (metadata->drop_ref ()) {
            LIBZMQ_DELETE(metadata);
        }
    }

    LIBZMQ_DELETE(encoder);
    LIBZMQ_DELETE(decoder);
    LIBZMQ_DELETE(mechanism);
}
Example #3
zmq::ctx_t::~ctx_t ()
{
    //  Check that there are no remaining _sockets.
    zmq_assert (_sockets.empty ());

    //  Ask I/O threads to terminate. If the stop signal wasn't sent to an I/O
    //  thread, a subsequent invocation of the destructor would hang up.
    for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) {
        _io_threads[i]->stop ();
    }

    //  Wait till I/O threads actually terminate.
    for (io_threads_t::size_type i = 0; i != _io_threads.size (); i++) {
        LIBZMQ_DELETE (_io_threads[i]);
    }

    //  Deallocate the reaper thread object.
    LIBZMQ_DELETE (_reaper);

    //  The mailboxes in _slots themselves were deallocated with their
    //  corresponding io_thread/socket objects.

    //  De-initialise crypto library, if needed.
    zmq::random_close ();

    //  Remove the tag, so that the object is considered dead.
    _tag = ZMQ_CTX_TAG_VALUE_BAD;
}
Example #4
zmq::address_t::~address_t ()
{
    if (protocol == "tcp") {
        if (resolved.tcp_addr) {
            LIBZMQ_DELETE(resolved.tcp_addr);
        }
    }
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    else
    if (protocol == "ipc") {
        if (resolved.ipc_addr) {
            LIBZMQ_DELETE(resolved.ipc_addr);
        }
    }
#endif
#if defined ZMQ_HAVE_TIPC
    else
    if (protocol == "tipc") {
        if (resolved.tipc_addr) {
            LIBZMQ_DELETE(resolved.tipc_addr);
        }
    }
#endif
#if defined ZMQ_HAVE_VMCI
    else
    if (protocol == "vmci") {
        if (resolved.vmci_addr) {
            LIBZMQ_DELETE(resolved.vmci_addr);
        }
    }
#endif
}
Example #5
zmq::socket_base_t::~socket_base_t ()
{
    LIBZMQ_DELETE(mailbox);
    
    if (reaper_signaler) {
        LIBZMQ_DELETE(reaper_signaler);
    }
    
    stop_monitor ();
    zmq_assert (destroyed);
}
Example #6
zmq::socket_base_t::~socket_base_t ()
{
    if (mailbox)
        LIBZMQ_DELETE(mailbox);

    if (reaper_signaler)
        LIBZMQ_DELETE(reaper_signaler);

    scoped_lock_t lock(monitor_sync);
    stop_monitor ();

    zmq_assert (destroyed);
}
Example #7
zmq::trie_t::~trie_t ()
{
    if (count == 1) {
        zmq_assert (next.node);
        LIBZMQ_DELETE(next.node);
    }
    else if (count > 1) {
        for (unsigned short i = 0; i != count; ++i) {
            LIBZMQ_DELETE(next.table[i]);
        }
        free (next.table);
    }
}
Example #8
template <typename T> zmq::generic_mtrie_t<T>::~generic_mtrie_t ()
{
    LIBZMQ_DELETE (pipes);

    if (count == 1) {
        zmq_assert (next.node);
        LIBZMQ_DELETE (next.node);
    } else if (count > 1) {
        for (unsigned short i = 0; i != count; ++i) {
            LIBZMQ_DELETE (next.table[i]);
        }
        free (next.table);
    }
}
Example #9
zmq::socket_base_t::socket_base_t (ctx_t *parent_, uint32_t tid_, int sid_, bool thread_safe_) :
    own_t (parent_, tid_),
    tag (0xbaddecaf),
    ctx_terminated (false),
    destroyed (false),
    poller(NULL),
    handle((poller_t::handle_t)NULL),
    last_tsc (0),
    ticks (0),
    rcvmore (false),
    monitor_socket (NULL),
    monitor_events (0),
    thread_safe (thread_safe_),
    reaper_signaler (NULL),
    sync(),
    monitor_sync()
{
    options.socket_id = sid_;
    options.ipv6 = (parent_->get (ZMQ_IPV6) != 0);
    options.linger = parent_->get (ZMQ_BLOCKY)? -1: 0;

    if (thread_safe)
        mailbox = new mailbox_safe_t(&sync);
    else {
        mailbox_t *m = new mailbox_t();
        if (m->get_fd () != retired_fd)
            mailbox = m;
        else {
            LIBZMQ_DELETE (m);
            mailbox = NULL;
        }
    }
}
Example #10
void zmq::pipe_t::process_pipe_term_ack ()
{
    //  Notify the user that all the references to the pipe should be dropped.
    zmq_assert (_sink);
    _sink->pipe_terminated (this);

    //  In term_ack_sent and term_req_sent2 states there's nothing to do.
    //  Simply deallocate the pipe. In term_req_sent1 state we have to ack
    //  the peer before deallocating this side of the pipe.
    //  All the other states are invalid.
    if (_state == term_req_sent1) {
        _out_pipe = NULL;
        send_pipe_term_ack (_peer);
    } else
        zmq_assert (_state == term_ack_sent || _state == term_req_sent2);

    //  We'll deallocate the inbound pipe, the peer will deallocate the outbound
    //  pipe (which is an inbound pipe from its point of view).
    //  First, delete all the unread messages in the pipe. We have to do it by
    //  hand because msg_t doesn't have automatic destructor. Then deallocate
    //  the ypipe itself.

    if (!_conflate) {
        msg_t msg;
        while (_in_pipe->read (&msg)) {
            const int rc = msg.close ();
            errno_assert (rc == 0);
        }
    }

    LIBZMQ_DELETE (_in_pipe);

    //  Deallocate the pipe object
    delete this;
}
Example #11
void zmq::msg_t::reset_metadata ()
{
    if (u.base.metadata) {
        if (u.base.metadata->drop_ref ()) {
            LIBZMQ_DELETE(u.base.metadata);
        }
        u.base.metadata = NULL;
    }
}
Example #12
zmq::pollset_t::~pollset_t ()
{
    //  Wait till the worker thread exits.
    worker.stop ();

    pollset_destroy (pollset_fd);
    for (retired_t::iterator it = retired.begin (); it != retired.end (); ++it)
        LIBZMQ_DELETE(*it);
}
Example #13
int zmq::msg_t::close ()
{
    //  Check the validity of the message.
    if (unlikely (!check ())) {
        errno = EFAULT;
        return -1;
    }

    if (u.base.type == type_lmsg) {

        //  If the content is not shared, or if it is shared and the reference
        //  count has dropped to zero, deallocate it.
        if (!(u.lmsg.flags & msg_t::shared) ||
              !u.lmsg.content->refcnt.sub (1)) {

            //  We used "placement new" operator to initialize the reference
            //  counter so we call the destructor explicitly now.
            u.lmsg.content->refcnt.~atomic_counter_t ();

            if (u.lmsg.content->ffn)
                u.lmsg.content->ffn (u.lmsg.content->data,
                    u.lmsg.content->hint);
            free (u.lmsg.content);
        }
    }

    if (is_zcmsg())
    {
        zmq_assert( u.zclmsg.content->ffn );

        //  If the content is not shared, or if it is shared and the reference
        //  count has dropped to zero, deallocate it.
        if (!(u.zclmsg.flags & msg_t::shared) ||
            !u.zclmsg.content->refcnt.sub (1)) {

            //  We used "placement new" operator to initialize the reference
            //  counter so we call the destructor explicitly now.
            u.zclmsg.content->refcnt.~atomic_counter_t ();

            u.zclmsg.content->ffn (u.zclmsg.content->data,
                          u.zclmsg.content->hint);
        }
    }

    if (u.base.metadata != NULL) {
        if (u.base.metadata->drop_ref ()) {
            LIBZMQ_DELETE(u.base.metadata);
        }
        u.base.metadata = NULL;
    }

    //  Make the message invalid.
    u.base.type = 0;

    return 0;
}
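
The ffn/hint pair released in the type_lmsg branch above comes from the public zmq_msg_init_data () call, which hands application-owned data to libzmq together with a deallocation callback. A minimal sketch using the standard C API (error handling trimmed):

#include <stdlib.h>
#include <string.h>
#include <zmq.h>

//  Deallocation callback; msg_t::close () invokes it through content->ffn
//  once the last reference to the buffer is dropped.
static void my_free (void *data_, void *hint_)
{
    (void) hint_;
    free (data_);
}

//  Hand a heap buffer to libzmq as a zero-copy message.
void send_zero_copy (void *socket_)
{
    const char *text = "hello";
    const size_t size = strlen (text);
    char *data = (char *) malloc (size);
    memcpy (data, text, size);

    zmq_msg_t msg;
    zmq_msg_init_data (&msg, data, size, my_free, NULL);
    zmq_msg_send (&msg, socket_, 0);
}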
Example #14
zmq::epoll_t::~epoll_t ()
{
    //  Wait till the worker thread exits.
    worker.stop ();

    close (epoll_fd);
    for (retired_t::iterator it = retired.begin (); it != retired.end (); ++it) {
        LIBZMQ_DELETE(*it);
    }
}
Example #15
zmq::stream_engine_t::~stream_engine_t ()
{
    zmq_assert (!plugged);

    if (s != retired_fd) {
#ifdef ZMQ_HAVE_WINDOWS
        int rc = closesocket (s);
        wsa_assert (rc != SOCKET_ERROR);
#else
        int rc = close (s);
#ifdef __FreeBSD_kernel__
        // FreeBSD may return ECONNRESET on close() under load but this is not
        // an error.
        if (rc == -1 && errno == ECONNRESET)
            rc = 0;
#endif
        errno_assert (rc == 0);
#endif
        s = retired_fd;
    }

    int rc = tx_msg.close ();
    errno_assert (rc == 0);

    //  Drop reference to metadata and destroy it if we are
    //  the only user.
    if (metadata != NULL) {
        if (metadata->drop_ref ()) {
            LIBZMQ_DELETE(metadata);
        }
    }

    LIBZMQ_DELETE(encoder);
    LIBZMQ_DELETE(decoder);
    LIBZMQ_DELETE(mechanism);
}
Example #16
zmq::session_base_t::~session_base_t ()
{
    zmq_assert (!pipe);
    zmq_assert (!zap_pipe);

    //  If there's still a pending linger timer, remove it.
    if (has_linger_timer) {
        cancel_timer (linger_timer_id);
        has_linger_timer = false;
    }

    //  Close the engine.
    if (engine)
        engine->terminate ();

    LIBZMQ_DELETE(addr);
}
Example #17
void zmq::epoll_t::loop ()
{
    epoll_event ev_buf [max_io_events];

    while (!stopping) {

        //  Execute any due timers.
        int timeout = (int) execute_timers ();

        //  Wait for events.
        int n = epoll_wait (epoll_fd, &ev_buf [0], max_io_events,
            timeout ? timeout : -1);
        if (n == -1) {
            errno_assert (errno == EINTR);
            continue;
        }

        for (int i = 0; i < n; i ++) {
            poll_entry_t *pe = ((poll_entry_t*) ev_buf [i].data.ptr);

            if (pe->fd == retired_fd)
                continue;
            if (ev_buf [i].events & (EPOLLERR | EPOLLHUP))
                pe->events->in_event ();
            if (pe->fd == retired_fd)
               continue;
            if (ev_buf [i].events & EPOLLOUT)
                pe->events->out_event ();
            if (pe->fd == retired_fd)
                continue;
            if (ev_buf [i].events & EPOLLIN)
                pe->events->in_event ();
        }

        //  Destroy retired event sources.
        retired_sync.lock ();
        for (retired_t::iterator it = retired.begin (); it != retired.end (); ++it) {
            LIBZMQ_DELETE(*it);
        }
        retired.clear ();
        retired_sync.unlock ();
    }
}
Example #18
void zmq::pollset_t::loop ()
{
    struct pollfd polldata_array[max_io_events];

    while (!stopping) {

        //  Execute any due timers.
        int timeout = (int) execute_timers ();

        //  Wait for events.
        int n = pollset_poll(pollset_fd, polldata_array, max_io_events, 
            timeout ? timeout : -1);
        if (n == -1) {
            errno_assert (errno == EINTR);
            continue;
        }

        for (int i = 0; i < n; i ++) {
            poll_entry_t *pe = fd_table [polldata_array [i].fd];
            if (!pe)
                continue;

            if (pe->fd == retired_fd)
                continue;
            if (polldata_array [i].revents & (POLLERR | POLLHUP))
                pe->events->in_event ();
            if (pe->fd == retired_fd)
               continue;
            if (polldata_array [i].revents & POLLOUT)
                pe->events->out_event ();
            if (pe->fd == retired_fd)
                continue;
            if (polldata_array [i].revents & POLLIN)
                pe->events->in_event ();
        }

        //  Destroy retired event sources.
        for (retired_t::iterator it = retired.begin (); it != retired.end ();
              ++it)
            LIBZMQ_DELETE(*it);
        retired.clear ();
    }
}
Example #19
void zmq::pgm_receiver_t::unplug ()
{
    //  Delete decoders.
    for (peers_t::iterator it = peers.begin (); it != peers.end (); ++it) {
        if (it->second.decoder != NULL) {
            LIBZMQ_DELETE(it->second.decoder);
        }
    }
    peers.clear ();
    active_tsi = NULL;

    if (has_rx_timer) {
        cancel_timer (rx_timer_id);
        has_rx_timer = false;
    }

    rm_fd (socket_handle);
    rm_fd (pipe_handle);

    session = NULL;
}
Example #20
void zmq::pipe_t::process_hiccup (void *pipe_)
{
    //  Destroy old outpipe. Note that the read end of the pipe was already
    //  migrated to this thread.
    zmq_assert (outpipe);
    outpipe->flush ();
    msg_t msg;
    while (outpipe->read (&msg)) {
       if (!(msg.flags () & msg_t::more))
            msgs_written--;
       int rc = msg.close ();
       errno_assert (rc == 0);
    }
    LIBZMQ_DELETE(outpipe);

    //  Plug in the new outpipe.
    zmq_assert (pipe_);
    outpipe = (upipe_t*) pipe_;
    out_active = true;

    //  If appropriate, notify the user about the hiccup.
    if (state == active)
        sink->hiccuped (this);
}
Example #21
void zmq::pgm_receiver_t::restart_input ()
{
    zmq_assert (session != NULL);
    zmq_assert (active_tsi != NULL);

    const peers_t::iterator it = peers.find (*active_tsi);
    zmq_assert (it != peers.end ());
    zmq_assert (it->second.joined);

    //  Push the pending message into the session.
    int rc = session->push_msg (it->second.decoder->msg ());
    errno_assert (rc == 0);

    if (insize > 0) {
        rc = process_input (it->second.decoder);
        if (rc == -1) {
            //  HWM reached; we will try later.
            if (errno == EAGAIN) {
                session->flush ();
                return;
            }
            //  Data error. Delete message decoder, mark the
            //  peer as not joined and drop remaining data.
            it->second.joined = false;
            LIBZMQ_DELETE(it->second.decoder);
            insize = 0;
        }
    }

    //  Resume polling.
    set_pollin (pipe_handle);
    set_pollin (socket_handle);

    active_tsi = NULL;
    in_event ();
}
Example #22
zmq::socket_base_t *zmq::socket_base_t::create (int type_, class ctx_t *parent_,
    uint32_t tid_, int sid_)
{
    socket_base_t *s = NULL;
    switch (type_) {
        case ZMQ_PAIR:
            s = new (std::nothrow) pair_t (parent_, tid_, sid_);
            break;
        case ZMQ_PUB:
            s = new (std::nothrow) pub_t (parent_, tid_, sid_);
            break;
        case ZMQ_SUB:
            s = new (std::nothrow) sub_t (parent_, tid_, sid_);
            break;
        case ZMQ_REQ:
            s = new (std::nothrow) req_t (parent_, tid_, sid_);
            break;
        case ZMQ_REP:
            s = new (std::nothrow) rep_t (parent_, tid_, sid_);
            break;
        case ZMQ_DEALER:
            s = new (std::nothrow) dealer_t (parent_, tid_, sid_);
            break;
        case ZMQ_ROUTER:
            s = new (std::nothrow) router_t (parent_, tid_, sid_);
            break;
        case ZMQ_PULL:
            s = new (std::nothrow) pull_t (parent_, tid_, sid_);
            break;
        case ZMQ_PUSH:
            s = new (std::nothrow) push_t (parent_, tid_, sid_);
            break;
        case ZMQ_XPUB:
            s = new (std::nothrow) xpub_t (parent_, tid_, sid_);
            break;
        case ZMQ_XSUB:
            s = new (std::nothrow) xsub_t (parent_, tid_, sid_);
            break;
        case ZMQ_STREAM:
            s = new (std::nothrow) stream_t (parent_, tid_, sid_);
            break;
        case ZMQ_SERVER:
            s = new (std::nothrow) server_t (parent_, tid_, sid_);
            break;
        case ZMQ_CLIENT:
            s = new (std::nothrow) client_t (parent_, tid_, sid_);
            break;
        default:
            errno = EINVAL;
            return NULL;
    }

    alloc_assert (s);

    mailbox_t *mailbox = dynamic_cast<mailbox_t*> (s->mailbox);

    if (mailbox != NULL && mailbox->get_fd () == retired_fd) {
        s->destroyed = true;
        LIBZMQ_DELETE(s);
        return NULL;
    }

    return s;
}
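
socket_base_t::create () is the internal factory behind the public zmq_socket () call; if the mailbox file descriptor could not be allocated, the half-constructed socket is deleted and NULL is returned, which the application sees as a failed zmq_socket (). A minimal public-API round trip (standard libzmq calls):

#include <assert.h>
#include <zmq.h>

int main (void)
{
    void *ctx = zmq_ctx_new ();             //  allocates the ctx_t
    assert (ctx);

    void *pub = zmq_socket (ctx, ZMQ_PUB);  //  routed to socket_base_t::create ()
    assert (pub);                           //  NULL means the factory failed

    zmq_close (pub);                        //  hands the socket to the reaper
    zmq_ctx_term (ctx);                     //  eventually runs ctx_t::~ctx_t ()
    return 0;
}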
Example #23
bool zmq::trie_t::rm (unsigned char *prefix_, size_t size_)
{
    //  TODO: Shouldn't an error be reported if the key does not exist?
    if (!size_) {
        if (!refcnt)
            return false;
        refcnt--;
        return refcnt == 0;
    }
    unsigned char c = *prefix_;
    if (!count || c < min || c >= min + count)
        return false;

    trie_t *next_node =
        count == 1 ? next.node : next.table [c - min];

    if (!next_node)
        return false;

    bool ret = next_node->rm (prefix_ + 1, size_ - 1);

    //  Prune redundant nodes
    if (next_node->is_redundant ()) {
        LIBZMQ_DELETE(next_node);
        zmq_assert (count > 0);

        if (count == 1) {
            //  The just-pruned node was the only live node
            next.node = 0;
            count = 0;
            --live_nodes;
            zmq_assert (live_nodes == 0);
        }
        else {
            next.table [c - min] = 0;
            zmq_assert (live_nodes > 1);
            --live_nodes;

            //  Compact the table if possible
            if (live_nodes == 1) {
                //  We can switch to using the more compact single-node
                //  representation since the table only contains one live node
                trie_t *node = 0;
                //  Since we always compact the table the pruned node must
                //  either be the left-most or right-most ptr in the node
                //  table
                if (c == min) {
                    //  The pruned node is the left-most node ptr in the
                    //  node table => keep the right-most node
                    node = next.table [count - 1];
                    min += count - 1;
                }
                else
                if (c == min + count - 1) {
                    //  The pruned node is the right-most node ptr in the
                    //  node table => keep the left-most node
                    node = next.table [0];
                }
                zmq_assert (node);
                free (next.table);
                next.node = node;
                count = 1;
            }
            else
            if (c == min) {
                //  We can compact the table "from the left".
                //  Find the left-most non-null node ptr, which we'll use as
                //  our new min
                unsigned char new_min = min;
                for (unsigned short i = 1; i < count; ++i) {
                    if (next.table [i]) {
                        new_min = i + min;
                        break;
                    }
                }
                zmq_assert (new_min != min);

                trie_t **old_table = next.table;
                zmq_assert (new_min > min);
                zmq_assert (count > new_min - min);

                count = count - (new_min - min);
                next.table = (trie_t**) malloc (sizeof (trie_t*) * count);
                alloc_assert (next.table);

                memmove (next.table, old_table + (new_min - min),
                        sizeof (trie_t*) * count);
                free (old_table);

                min = new_min;
            }
            else
            if (c == min + count - 1) {
                //  We can compact the table "from the right".
                //  Find the right-most non-null node ptr, which we'll use to
                //  determine the new table size
                unsigned short new_count = count;
                for (unsigned short i = 1; i < count; ++i) {
                    if (next.table [count - 1 - i]) {
                        new_count = count - i;
                        break;
                    }
                }
                zmq_assert (new_count != count);
                count = new_count;

                trie_t **old_table = next.table;
                next.table = (trie_t**) malloc (sizeof (trie_t*) * count);
                alloc_assert (next.table);

                memmove (next.table, old_table, sizeof (trie_t*) * count);
                free (old_table);
            }
        }
    }
    return ret;
}
Example #24
void zmq::mtrie_t::rm_helper (pipe_t *pipe_,
                              unsigned char **buff_,
                              size_t buffsize_,
                              size_t maxbuffsize_,
                              void (*func_) (unsigned char *data_,
                                             size_t size_,
                                             void *arg_),
                              void *arg_,
                              bool call_on_uniq_)
{
    //  Remove the subscription from this node.
    if (pipes && pipes->erase (pipe_)) {
        if (!call_on_uniq_ || pipes->empty ()) {
            func_ (*buff_, buffsize_, arg_);
        }

        if (pipes->empty ()) {
            LIBZMQ_DELETE (pipes);
        }
    }

    //  Adjust the buffer.
    if (buffsize_ >= maxbuffsize_) {
        maxbuffsize_ = buffsize_ + 256;
        *buff_ = (unsigned char *) realloc (*buff_, maxbuffsize_);
        alloc_assert (*buff_);
    }

    //  If there are no subnodes in the trie, return.
    if (count == 0)
        return;

    //  If there's one subnode (optimisation).
    if (count == 1) {
        (*buff_)[buffsize_] = min;
        buffsize_++;
        next.node->rm_helper (pipe_, buff_, buffsize_, maxbuffsize_, func_,
                              arg_, call_on_uniq_);

        //  Prune the node if it was made redundant by the removal
        if (next.node->is_redundant ()) {
            LIBZMQ_DELETE (next.node);
            count = 0;
            --live_nodes;
            zmq_assert (live_nodes == 0);
        }
        return;
    }

    //  If there are multiple subnodes.
    //
    //  New min non-null character in the node table after the removal
    unsigned char new_min = min + count - 1;
    //  New max non-null character in the node table after the removal
    unsigned char new_max = min;
    for (unsigned short c = 0; c != count; c++) {
        (*buff_)[buffsize_] = min + c;
        if (next.table[c]) {
            next.table[c]->rm_helper (pipe_, buff_, buffsize_ + 1, maxbuffsize_,
                                      func_, arg_, call_on_uniq_);

            //  Prune redundant nodes from the mtrie
            if (next.table[c]->is_redundant ()) {
                LIBZMQ_DELETE (next.table[c]);

                zmq_assert (live_nodes > 0);
                --live_nodes;
            } else {
                //  The node is not redundant, so it's a candidate for being
                //  the new min/max node.
                //
                //  We loop through the node array from left to right, so the
                //  first non-null, non-redundant node encountered is the new
                //  minimum index. Conversely, the last non-redundant, non-null
                //  node encountered is the new maximum index.
                if (c + min < new_min)
                    new_min = c + min;
                if (c + min > new_max)
                    new_max = c + min;
            }
        }
    }

    zmq_assert (count > 1);

    //  Free the node table if it's no longer used.
    if (live_nodes == 0) {
        free (next.table);
        next.table = NULL;
        count = 0;
    }
    //  Compact the node table if possible
    else if (live_nodes == 1) {
        //  If there's only one live node in the table we can
        //  switch to using the more compact single-node
        //  representation
        zmq_assert (new_min == new_max);
        zmq_assert (new_min >= min && new_min < min + count);
        mtrie_t *node = next.table[new_min - min];
        zmq_assert (node);
        free (next.table);
        next.node = node;
        count = 1;
        min = new_min;
    } else if (new_min > min || new_max < min + count - 1) {
        zmq_assert (new_max - new_min + 1 > 1);

        mtrie_t **old_table = next.table;
        zmq_assert (new_min > min || new_max < min + count - 1);
        zmq_assert (new_min >= min);
        zmq_assert (new_max <= min + count - 1);
        zmq_assert (new_max - new_min + 1 < count);

        count = new_max - new_min + 1;
        next.table = (mtrie_t **) malloc (sizeof (mtrie_t *) * count);
        alloc_assert (next.table);

        memmove (next.table, old_table + (new_min - min),
                 sizeof (mtrie_t *) * count);
        free (old_table);

        min = new_min;
    }
}
Example #25
bool zmq::mtrie_t::rm_helper (unsigned char *prefix_,
                              size_t size_,
                              pipe_t *pipe_)
{
    if (!size_) {
        if (pipes) {
            pipes_t::size_type erased = pipes->erase (pipe_);
            zmq_assert (erased == 1);
            if (pipes->empty ()) {
                LIBZMQ_DELETE (pipes);
            }
        }
        return !pipes;
    }

    unsigned char c = *prefix_;
    if (!count || c < min || c >= min + count)
        return false;

    mtrie_t *next_node = count == 1 ? next.node : next.table[c - min];

    if (!next_node)
        return false;

    bool ret = next_node->rm_helper (prefix_ + 1, size_ - 1, pipe_);

    if (next_node->is_redundant ()) {
        LIBZMQ_DELETE (next_node);
        zmq_assert (count > 0);

        if (count == 1) {
            next.node = 0;
            count = 0;
            --live_nodes;
            zmq_assert (live_nodes == 0);
        } else {
            next.table[c - min] = 0;
            zmq_assert (live_nodes > 1);
            --live_nodes;

            //  Compact the table if possible
            if (live_nodes == 1) {
                //  If there's only one live node in the table we can
                //  switch to using the more compact single-node
                //  representation
                unsigned short i;
                for (i = 0; i < count; ++i)
                    if (next.table[i])
                        break;

                zmq_assert (i < count);
                min += i;
                count = 1;
                mtrie_t *oldp = next.table[i];
                free (next.table);
                next.node = oldp;
            } else if (c == min) {
                //  We can compact the table "from the left"
                unsigned short i;
                for (i = 1; i < count; ++i)
                    if (next.table[i])
                        break;

                zmq_assert (i < count);
                min += i;
                count -= i;
                mtrie_t **old_table = next.table;
                next.table = (mtrie_t **) malloc (sizeof (mtrie_t *) * count);
                alloc_assert (next.table);
                memmove (next.table, old_table + i, sizeof (mtrie_t *) * count);
                free (old_table);
            } else if (c == min + count - 1) {
                //  We can compact the table "from the right"
                unsigned short i;
                for (i = 1; i < count; ++i)
                    if (next.table[count - 1 - i])
                        break;

                zmq_assert (i < count);
                count -= i;
                mtrie_t **old_table = next.table;
                next.table = (mtrie_t **) malloc (sizeof (mtrie_t *) * count);
                alloc_assert (next.table);
                memmove (next.table, old_table, sizeof (mtrie_t *) * count);
                free (old_table);
            }
        }
    }

    return ret;
}
Example #26
void zmq_threadclose(void* thread)
{
    zmq::thread_t* pThread = static_cast<zmq::thread_t*>(thread);
    pThread->stop();
    LIBZMQ_DELETE(pThread);
}
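
zmq_threadclose () pairs with zmq_threadstart () from the zmq_utils helpers: the opaque handle wraps a zmq::thread_t, which is stopped and then deleted here. A short usage sketch, assuming those helpers are available in your build:

#include <zmq_utils.h>

static void worker (void *arg_)
{
    (void) arg_;
    //  ... background work ...
}

void run_worker (void)
{
    void *thread = zmq_threadstart (&worker, NULL);  //  allocates a zmq::thread_t
    //  ... foreground work ...
    zmq_threadclose (thread);                        //  stop () + LIBZMQ_DELETE
}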
Example #27
zmq::socks_connecter_t::~socks_connecter_t ()
{
    zmq_assert (s == retired_fd);
    LIBZMQ_DELETE(proxy_addr);
}
Example #28
int zmq::socks_connecter_t::connect_to_proxy ()
{
    zmq_assert (s == retired_fd);

    //  Resolve the address
    LIBZMQ_DELETE(proxy_addr->resolved.tcp_addr);
    proxy_addr->resolved.tcp_addr = new (std::nothrow) tcp_address_t ();
    alloc_assert (proxy_addr->resolved.tcp_addr);

    int rc = proxy_addr->resolved.tcp_addr->resolve (
        proxy_addr->address.c_str (), false, options.ipv6);
    if (rc != 0) {
        LIBZMQ_DELETE(proxy_addr->resolved.tcp_addr);
        return -1;
    }
    zmq_assert (proxy_addr->resolved.tcp_addr != NULL);
    const tcp_address_t *tcp_addr = proxy_addr->resolved.tcp_addr;

    //  Create the socket.
    s = open_socket (tcp_addr->family (), SOCK_STREAM, IPPROTO_TCP);
#ifdef ZMQ_HAVE_WINDOWS
    if (s == INVALID_SOCKET)
        return -1;
#else
    if (s == -1)
        return -1;
#endif

    //  On some systems, IPv4 mapping in IPv6 sockets is disabled by default.
    //  Switch it on in such cases.
    if (tcp_addr->family () == AF_INET6)
        enable_ipv4_mapping (s);

    // Set the IP Type-Of-Service priority for this socket
    if (options.tos != 0)
        set_ip_type_of_service (s, options.tos);

    // Set the socket to non-blocking mode so that we get async connect().
    unblock_socket (s);

    //  Set the socket buffer limits for the underlying socket.
    if (options.sndbuf >= 0)
        set_tcp_send_buffer (s, options.sndbuf);
    if (options.rcvbuf >= 0)
        set_tcp_receive_buffer (s, options.rcvbuf);

    // Set the IP Type-Of-Service for the underlying socket
    if (options.tos != 0)
        set_ip_type_of_service (s, options.tos);

    // Set a source address for conversations
    if (tcp_addr->has_src_addr ()) {
        rc = ::bind (s, tcp_addr->src_addr (), tcp_addr->src_addrlen ());
        if (rc == -1) {
            close ();
            return -1;
        }
    }

    //  Connect to the remote peer.
    rc = ::connect (s, tcp_addr->addr (), tcp_addr->addrlen ());

    //  Connect was successful immediately.
    if (rc == 0)
        return 0;

    //  Translate error codes indicating asynchronous connect has been
    //  launched to a uniform EINPROGRESS.
#ifdef ZMQ_HAVE_WINDOWS
    const int last_error = WSAGetLastError();
    if (last_error == WSAEINPROGRESS || last_error == WSAEWOULDBLOCK)
        errno = EINPROGRESS;
    else {
        errno = wsa_error_to_errno (last_error);
        close ();
    }
#else
    if (errno == EINTR)
        errno = EINPROGRESS;
#endif
    return -1;
}
Example #29
int zmq::socket_base_t::connect (const char *addr_)
{
    ENTER_MUTEX();

    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0)) {
        EXIT_MUTEX();
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol)) {
        EXIT_MUTEX();
        return -1;
    }

    if (protocol == "inproc") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);

        // The total HWM for an inproc connection should be the sum of
        // the binder's HWM and the connector's HWM.
        int sndhwm = 0;
        if (peer.socket == NULL)
            sndhwm = options.sndhwm;
        else if (options.sndhwm != 0 && peer.options.rcvhwm != 0)
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        int rcvhwm = 0;
        if (peer.socket == NULL)
            rcvhwm = options.rcvhwm;
        else
        if (options.rcvhwm != 0 && peer.options.sndhwm != 0)
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket == NULL ? this : peer.socket};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : sndhwm, conflate? -1 : rcvhwm};
        bool conflates [2] = {conflate, conflate};
        int rc = pipepair (parents, new_pipes, hwms, conflates);
        if (!conflate) {
            new_pipes[0]->set_hwms_boost(peer.options.sndhwm, peer.options.rcvhwm);
            new_pipes[1]->set_hwms_boost(options.sndhwm, options.rcvhwm);
        }

        errno_assert (rc == 0);

        if (!peer.socket) {
            //  The peer doesn't exist yet so we don't know whether
            //  to send the identity message or not. To resolve this,
            //  we always send our identity and drop it later if
            //  the peer doesn't expect it.
            msg_t id;
            rc = id.init_size (options.identity_size);
            errno_assert (rc == 0);
            memcpy (id.data (), options.identity, options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = new_pipes [0]->write (&id);
            zmq_assert (written);
            new_pipes [0]->flush ();

            const endpoint_t endpoint = {this, options};
            pend_connection (std::string (addr_), endpoint, new_pipes);
        }
        else {
            //  If required, send the identity of the local socket to the peer.
            if (peer.options.recv_identity) {
                msg_t id;
                rc = id.init_size (options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), options.identity, options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [0]->write (&id);
                zmq_assert (written);
                new_pipes [0]->flush ();
            }

            //  If required, send the identity of the peer to the local socket.
            if (options.recv_identity) {
                msg_t id;
                rc = id.init_size (peer.options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), peer.options.identity, peer.options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [1]->write (&id);
                zmq_assert (written);
                new_pipes [1]->flush ();
            }

            //  Attach remote end of the pipe to the peer socket. Note that peer's
            //  seqnum was incremented in find_endpoint function. We don't need it
            //  increased here.
            send_bind (peer.socket, new_pipes [1], false);
        }

        //  Attach local end of the pipe to this socket object.
        attach_pipe (new_pipes [0]);

        // Save last endpoint URI
        last_endpoint.assign (addr_);

        // remember inproc connections for disconnect
        inprocs.insert (inprocs_t::value_type (std::string (addr_), new_pipes [0]));

        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }
    bool is_single_connect = (options.type == ZMQ_DEALER ||
                              options.type == ZMQ_SUB ||
                              options.type == ZMQ_REQ);
    if (unlikely (is_single_connect)) {
        const endpoints_t::iterator it = endpoints.find (addr_);
        if (it != endpoints.end ()) {
            // There is no valid use for multiple connects for SUB-PUB,
            // DEALER-ROUTER, or REQ-REP. Multiple connects produce
            // nonsensical results.
            EXIT_MUTEX();
            return 0;
        }
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        EXIT_MUTEX();
        return -1;
    }

    address_t *paddr = new (std::nothrow) address_t (protocol, address);
    alloc_assert (paddr);

    //  Resolve address (if needed by the protocol)
    if (protocol == "tcp") {
        //  Do some basic sanity checks on tcp:// address syntax
        //  - hostname starts with digit or letter, with embedded '-' or '.'
        //  - IPv6 address may contain hex chars and colons.
        //  - IPv4 address may contain decimal digits and dots.
        //  - Address must end in ":port" where port is *, or numeric
        //  - Address may contain two parts separated by ':'
        //  Following code is quick and dirty check to catch obvious errors,
        //  without trying to be fully accurate.
        const char *check = address.c_str ();
        if (isalnum (*check) || isxdigit (*check) || *check == '[') {
            check++;
            while (isalnum  (*check)
                || isxdigit (*check)
                || *check == '.' || *check == '-' || *check == ':'|| *check == ';'
                || *check == ']')
                check++;
        }
        //  Assume the worst, now look for success
        rc = -1;
        //  Did we reach the end of the address safely?
        if (*check == 0) {
            //  Do we have a valid port string? (it cannot be '*' in connect)
            check = strrchr (address.c_str (), ':');
            if (check) {
                check++;
                if (*check && (isdigit (*check)))
                    rc = 0;     //  Valid
            }
        }
        if (rc == -1) {
            errno = EINVAL;
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
        //  Defer resolution until a socket is opened
        paddr->resolved.tcp_addr = NULL;
    }
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    else
    if (protocol == "ipc") {
        paddr->resolved.ipc_addr = new (std::nothrow) ipc_address_t ();
        alloc_assert (paddr->resolved.ipc_addr);
        int rc = paddr->resolved.ipc_addr->resolve (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
    }
#endif

// TBD - Should we check address for ZMQ_HAVE_NORM???

#ifdef ZMQ_HAVE_OPENPGM
    if (protocol == "pgm" || protocol == "epgm") {
        struct pgm_addrinfo_t *res = NULL;
        uint16_t port_number = 0;
        int rc = pgm_socket_t::init_address(address.c_str(), &res, &port_number);
        if (res != NULL)
            pgm_freeaddrinfo (res);
        if (rc != 0 || port_number == 0) {
          EXIT_MUTEX();
          return -1;
        }
    }
#endif
#if defined ZMQ_HAVE_TIPC
    else
    if (protocol == "tipc") {
        paddr->resolved.tipc_addr = new (std::nothrow) tipc_address_t ();
        alloc_assert (paddr->resolved.tipc_addr);
        int rc = paddr->resolved.tipc_addr->resolve (address.c_str());
        if (rc != 0) {
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
    }
#endif

    //  Create session.
    session_base_t *session = session_base_t::create (io_thread, true, this,
        options, paddr);
    errno_assert (session);

    //  PGM does not support subscription forwarding; ask for all data to be
    //  sent to this pipe. (same for NORM, currently?)
    bool subscribe_to_all = protocol == "pgm" || protocol == "epgm" || protocol == "norm";
    pipe_t *newpipe = NULL;

    if (options.immediate != 1 || subscribe_to_all) {
        //  Create a bi-directional pipe.
        object_t *parents [2] = {this, session};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : options.sndhwm,
            conflate? -1 : options.rcvhwm};
        bool conflates [2] = {conflate, conflate};
        rc = pipepair (parents, new_pipes, hwms, conflates);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to the socket object.
        attach_pipe (new_pipes [0], subscribe_to_all);
        newpipe = new_pipes [0];

        //  Attach remote end of the pipe to the session object later on.
        session->attach_pipe (new_pipes [1]);
    }

    //  Save last endpoint URI
    paddr->to_string (last_endpoint);

    add_endpoint (addr_, (own_t *) session, newpipe);
    EXIT_MUTEX();
    return 0;
}
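
The inproc branch above runs when two sockets inside the same context connect to each other: the pipe pair is created immediately, and if the binder does not exist yet the connection is pended until it registers the endpoint. A minimal public-API sketch of that pattern (standard libzmq calls, error handling trimmed):

#include <assert.h>
#include <zmq.h>

void inproc_pair (void *ctx_)
{
    void *push = zmq_socket (ctx_, ZMQ_PUSH);
    void *pull = zmq_socket (ctx_, ZMQ_PULL);

    //  connect () before bind () is fine for inproc: the connection is
    //  pended and completed once the binder registers the endpoint.
    int rc = zmq_connect (push, "inproc://example");
    assert (rc == 0);
    rc = zmq_bind (pull, "inproc://example");
    assert (rc == 0);

    zmq_send (push, "hi", 2, 0);

    char buf [2];
    zmq_recv (pull, buf, sizeof buf, 0);

    zmq_close (push);
    zmq_close (pull);
}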
Example #30
int zmq::socket_base_t::bind (const char *addr_)
{
    ENTER_MUTEX();

    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0)) {
        EXIT_MUTEX();
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol)) {
        EXIT_MUTEX();
        return -1;
    }

    if (protocol == "inproc") {
        const endpoint_t endpoint = { this, options };
        const int rc = register_endpoint (addr_, endpoint);
        if (rc == 0) {
            connect_pending (addr_, this);
            last_endpoint.assign (addr_);
            options.connected = true;
        }
        EXIT_MUTEX();
        return rc;
    }

    if (protocol == "pgm" || protocol == "epgm" || protocol == "norm") {
        //  For convenience's sake, bind can be used interchangeably with
        //  connect for PGM, EPGM and NORM transports.
        EXIT_MUTEX();
        rc = connect (addr_);
        if (rc != -1)
            options.connected = true;
        return rc;
    }

    //  Remaining transports require to be run in an I/O thread, so at this
    //  point we'll choose one.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        EXIT_MUTEX();
        return -1;
    }

    if (protocol == "tcp") {
        tcp_listener_t *listener = new (std::nothrow) tcp_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(listener);
            event_bind_failed (address, zmq_errno());
            EXIT_MUTEX();
            return -1;
        }

        // Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    if (protocol == "ipc") {
        ipc_listener_t *listener = new (std::nothrow) ipc_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(listener);
            event_bind_failed (address, zmq_errno());
            EXIT_MUTEX();
            return -1;
        }

        // Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }
#endif
#if defined ZMQ_HAVE_TIPC
    if (protocol == "tipc") {
         tipc_listener_t *listener = new (std::nothrow) tipc_listener_t (
              io_thread, this, options);
         alloc_assert (listener);
         int rc = listener->set_address (address.c_str ());
         if (rc != 0) {
             LIBZMQ_DELETE(listener);
             event_bind_failed (address, zmq_errno());
             EXIT_MUTEX();
             return -1;
         }

        // Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (addr_, (own_t *) listener, NULL);
        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }
#endif

    EXIT_MUTEX();
    zmq_assert (false);
    return -1;
}
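
From the application side, the branches above are selected purely by the transport prefix of the endpoint string passed to zmq_bind (). A short sketch (standard libzmq calls; the TCP port is arbitrary):

#include <assert.h>
#include <zmq.h>

void bind_examples (void *ctx_)
{
    void *rep = zmq_socket (ctx_, ZMQ_REP);

    //  "tcp" branch: creates a tcp_listener_t on an I/O thread.
    int rc = zmq_bind (rep, "tcp://127.0.0.1:5555");
    assert (rc == 0);

    //  "inproc" branch: only registers the endpoint with the context.
    rc = zmq_bind (rep, "inproc://service");
    assert (rc == 0);

    zmq_close (rep);
}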