Example #1
int zmq::socket_base_t::setsockopt (int option_, const void *optval_,
    size_t optvallen_)
{
    ENTER_MUTEX();

    if (!options.is_valid(option_)) {
        errno = EINVAL;
        EXIT_MUTEX();
        return -1;
    }

    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  First, check whether the specific socket type overloads the option.
    int rc = xsetsockopt (option_, optval_, optvallen_);
    if (rc == 0 || errno != EINVAL) {
        EXIT_MUTEX();
        return rc;
    }

    //  If the socket type doesn't support the option, pass it to
    //  the generic option parser.
    rc = options.setsockopt (option_, optval_, optvallen_);
    update_pipe_options(option_);

    EXIT_MUTEX();
    return rc;
}
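
Every zmq::socket_base_t example here brackets its body with ENTER_MUTEX()/EXIT_MUTEX(). In libzmq of this vintage these are macros in socket_base.cpp that lock a member mutex only when the socket was created as thread-safe, so ordinary sockets pay no locking cost. A minimal self-contained sketch of that idea, using std::mutex in place of libzmq's internal mutex_t (the member names thread_safe and sync follow the source; the demo class is hypothetical):

#include <mutex>

//  Lock only when the socket is thread-safe; the single-threaded
//  fast path skips synchronisation entirely.
#define ENTER_MUTEX()                                                          \
    do {                                                                       \
        if (thread_safe)                                                       \
            sync.lock ();                                                      \
    } while (0)

#define EXIT_MUTEX()                                                           \
    do {                                                                       \
        if (thread_safe)                                                       \
            sync.unlock ();                                                    \
    } while (0)

struct demo_socket_t {
    bool thread_safe;
    std::mutex sync;

    int op ()
    {
        ENTER_MUTEX ();
        //  ... critical section ...
        EXIT_MUTEX ();
        return 0;
    }
};

Because the unlock is manual rather than RAII, every early-return path in the examples must call EXIT_MUTEX() explicitly, which is why the "EXIT_MUTEX(); return -1;" pair repeats so often.
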
Example #2
void zmq::socket_base_t::start_reaping (poller_t *poller_)
{
    //  Plug the socket to the reaper thread.
    poller = poller_;

    fd_t fd;

    if (!thread_safe)
        fd = ((mailbox_t*)mailbox)->get_fd();
    else {
        ENTER_MUTEX();

        reaper_signaler = new signaler_t();

        //  Add signaler to the safe mailbox
        fd = reaper_signaler->get_fd();
        ((mailbox_safe_t*)mailbox)->add_signaler(reaper_signaler);

        //  Send a signal to make sure the reaper handles existing commands
        reaper_signaler->send();

        EXIT_MUTEX();
    }

    handle = poller->add_fd (fd, this);
    poller->set_pollin (handle);

    //  Initialise the termination and check whether it can be deallocated
    //  immediately.
    terminate ();
    check_destroy ();
}
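
The thread-safe branch above relies on a signaler: an fd-backed primitive whose send() makes get_fd() readable, so a poller in another thread wakes up; in_event() (Example #6) later drains it with recv(). A minimal POSIX sketch of the same contract using a pipe (zmq::signaler_t itself picks eventfd or a socket pair depending on platform; this class is a hypothetical stand-in):

#include <unistd.h>

//  Pipe-based signaler sketch: send() writes one byte, recv() consumes
//  it, and get_fd() returns the end a poller should watch for POLLIN.
class mini_signaler_t
{
public:
    mini_signaler_t () { if (::pipe (fds) != 0) fds[0] = fds[1] = -1; }
    ~mini_signaler_t () { ::close (fds[0]); ::close (fds[1]); }

    int get_fd () const { return fds[0]; }

    void send ()
    {
        const char dummy = 1;
        const ssize_t rc = ::write (fds[1], &dummy, 1);
        (void) rc;
    }

    void recv ()
    {
        char dummy;
        const ssize_t rc = ::read (fds[0], &dummy, 1);
        (void) rc;
    }

private:
    int fds[2];
};
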
Example #3
NTSTATUS
NpfsServerWriteFile(
    PNPFS_CCB pSCB,
    PNPFS_IRP_CONTEXT pIrpContext
    )
{
    NTSTATUS ntStatus = 0;
    PNPFS_PIPE pPipe = NULL;

    pPipe = pSCB->pPipe;
    ENTER_MUTEX(&pPipe->PipeMutex);

    switch(pPipe->PipeClientState)
    {
    case PIPE_CLIENT_CONNECTED:
        ntStatus = NpfsServerWriteFile_Connected(pSCB, pIrpContext);
        pthread_cond_signal(&pPipe->PipeCondition);
        BAIL_ON_NT_STATUS(ntStatus);
        break;
    case PIPE_CLIENT_INIT_STATE:
    case PIPE_CLIENT_CLOSED:
        ntStatus = STATUS_PIPE_DISCONNECTED;
        BAIL_ON_NT_STATUS(ntStatus);
        break;
    }

error:

    pIrpContext->pIrp->IoStatusBlock.Status = ntStatus;
    LEAVE_MUTEX(&pPipe->PipeMutex);

    return ntStatus;
}
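
The NPFS examples share the BAIL_ON_NT_STATUS/error-label idiom: any failure jumps to a single label that runs on success and failure alike, so the mutex taken at the top is released exactly once. A self-contained sketch of the control flow, with a simplified macro (the real Likewise definition may differ; do_step and demo are hypothetical):

#include <pthread.h>

typedef long NTSTATUS;
#define STATUS_SUCCESS 0L

//  Simplified bail macro: on any non-success status, jump to cleanup.
#define BAIL_ON_NT_STATUS(status)                                              \
    do {                                                                       \
        if ((status) != STATUS_SUCCESS)                                        \
            goto error;                                                        \
    } while (0)

static NTSTATUS do_step (int step) { return step < 3 ? STATUS_SUCCESS : -1L; }

NTSTATUS demo (pthread_mutex_t *pMutex)
{
    NTSTATUS ntStatus = STATUS_SUCCESS;

    pthread_mutex_lock (pMutex);

    ntStatus = do_step (1);
    BAIL_ON_NT_STATUS (ntStatus);

    ntStatus = do_step (2);
    BAIL_ON_NT_STATUS (ntStatus);

error:

    //  Control falls through here on success too, exactly as in the
    //  examples; the unlock therefore runs on every path.
    pthread_mutex_unlock (pMutex);

    return ntStatus;
}
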
Example #4
int zmq::socket_base_t::leave (const char* group_)
{
    ENTER_MUTEX ();

    int rc = xleave (group_);

    EXIT_MUTEX();

    return rc;
}
Example #5
int zmq::socket_base_t::remove_signaler(signaler_t *s_)
{
    ENTER_MUTEX();

    if (!thread_safe) {
        errno = EINVAL;
        EXIT_MUTEX();
        return -1;
    }

    ((mailbox_safe_t*)mailbox)->remove_signaler(s_);

    EXIT_MUTEX();
    return 0;
}
Example #6
void zmq::socket_base_t::in_event ()
{
    //  This function is invoked only once the socket is running in the context
    //  of the reaper thread. Process any commands from other threads/sockets
    //  that may be available at the moment. Ultimately, the socket will
    //  be destroyed.
    ENTER_MUTEX ();

    //  If the socket is thread-safe, we need to unsignal the reaper signaler
    if (thread_safe)
        reaper_signaler->recv();

    process_commands (0, false);
    EXIT_MUTEX ();
    check_destroy ();
}
Example #7
NTSTATUS
NpfsClientCloseHandle(
    PNPFS_CCB pCCB
    )
{
    NTSTATUS ntStatus = 0;
    PNPFS_PIPE pPipe = NULL;
    PNPFS_CCB pSCB = NULL;
    PLW_LIST_LINKS pLink = NULL;
    PNPFS_IRP_CONTEXT pReadContext = NULL;

    pPipe = pCCB->pPipe;

    ENTER_MUTEX(&pPipe->PipeMutex);

    pSCB = pPipe->pSCB;

    pPipe->PipeClientState = PIPE_CLIENT_CLOSED;

    while (pSCB && !LwListIsEmpty(&pSCB->ReadIrpList))
    {
        pLink = pSCB->ReadIrpList.Next;
        LwListRemove(pLink);

        pReadContext = LW_STRUCT_FROM_FIELD(pLink, NPFS_IRP_CONTEXT, Link);

        NpfsServerCompleteReadFile(pSCB, pReadContext);
    }

    pthread_cond_signal(&pPipe->PipeCondition);

    if (pPipe->PipeServerState == PIPE_SERVER_CLOSED)
    {
        ntStatus = NpfsFreePipeContext(pPipe);
        BAIL_ON_NT_STATUS(ntStatus);
    }

error:

    pPipe->pCCB = NULL;

    LEAVE_MUTEX(&pPipe->PipeMutex);

    NpfsReleaseCCB(pCCB);

    return(ntStatus);
}
Example #8
int zmq::socket_base_t::send (msg_t *msg_, int flags_)
{
    ENTER_MUTEX();

    //  Ensure the library hasn't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  Check whether message passed to the function is valid.
    if (unlikely (!msg_ || !msg_->check ())) {
        errno = EFAULT;
        EXIT_MUTEX();
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, true);
    if (unlikely (rc != 0)) {
        EXIT_MUTEX();
        return -1;
    }

    //  Clear any user-visible flags that are set on the message.
    msg_->reset_flags (msg_t::more);

    //  At this point we impose the flags on the message.
    if (flags_ & ZMQ_SNDMORE)
        msg_->set_flags (msg_t::more);

    msg_->reset_metadata ();

    //  Try to send the message.
    rc = xsend (msg_);
    if (rc == 0) {
        EXIT_MUTEX();
        return 0;
    }
    if (unlikely (errno != EAGAIN)) {
        EXIT_MUTEX();
        return -1;
    }

    //  In case of non-blocking send we'll simply propagate
    //  the error - including EAGAIN - up the stack.
    if (flags_ & ZMQ_DONTWAIT || options.sndtimeo == 0) {
        EXIT_MUTEX();
        return -1;
    }

    //  Compute the time when the timeout should occur.
    //  If the timeout is infinite, the deadline is never used.
    int timeout = options.sndtimeo;
    uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout);

    //  Oops, we couldn't send the message. Wait for the next
    //  command, process it and try to send the message again.
    //  If timeout is reached in the meantime, return EAGAIN.
    while (true) {
        if (unlikely (process_commands (timeout, false) != 0)) {
            EXIT_MUTEX();
            return -1;
        }
        rc = xsend (msg_);
        if (rc == 0)
            break;
        if (unlikely (errno != EAGAIN)) {
            EXIT_MUTEX();
            return -1;
        }
        if (timeout > 0) {
            timeout = (int) (end - clock.now_ms ());
            if (timeout <= 0) {
                errno = EAGAIN;
                EXIT_MUTEX();
                return -1;
            }
        }
    }

    EXIT_MUTEX();
    return 0;
}
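
The retry loop above converts the relative sndtimeo into an absolute deadline once (end), then recomputes the remaining budget on every pass, so repeated wake-ups cannot stretch the total wait. The same pattern in isolation, as a sketch with std::chrono (try_once and wait_for_event are hypothetical stand-ins for xsend() and process_commands()):

#include <chrono>

//  Bound a retry loop by an absolute deadline; timeout_ms < 0 means
//  wait forever, mirroring the sndtimeo convention above.
template <typename TryOnce, typename Wait>
bool retry_until_deadline (int timeout_ms, TryOnce try_once, Wait wait_for_event)
{
    using clock = std::chrono::steady_clock;
    const bool infinite = timeout_ms < 0;
    const clock::time_point end =
        clock::now () + std::chrono::milliseconds (infinite ? 0 : timeout_ms);

    while (true) {
        if (try_once ())
            return true;                  //  Operation succeeded.
        if (!infinite) {
            const long long left = std::chrono::duration_cast<
                std::chrono::milliseconds> (end - clock::now ()).count ();
            if (left <= 0)
                return false;             //  Deadline hit: caller sets EAGAIN.
            timeout_ms = static_cast<int> (left);
        }
        wait_for_event (infinite ? -1 : timeout_ms);  //  Block for new commands.
    }
}
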
Example #9
int zmq::socket_base_t::term_endpoint (const char *addr_)
{
    ENTER_MUTEX();

    //  Ensure the library hasn't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  Check whether endpoint address passed to the function is valid.
    if (unlikely (!addr_)) {
        errno = EINVAL;
        EXIT_MUTEX();
        return -1;
    }

    //  Process pending commands, if any, since there could still be
    //  unprocessed process_own() commands (from launch_child(), for example)
    //  for the very endpoint we're now being asked to terminate.
    int rc = process_commands (0, false);
    if (unlikely(rc != 0)) {
        EXIT_MUTEX();
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri(addr_, protocol, address) || check_protocol(protocol)) {
        EXIT_MUTEX();
        return -1;
    }

    // Disconnect an inproc socket
    if (protocol == "inproc") {
        if (unregister_endpoint (std::string(addr_), this) == 0) {
            EXIT_MUTEX();
            return 0;
        }
        std::pair <inprocs_t::iterator, inprocs_t::iterator> range = inprocs.equal_range (std::string (addr_));
        if (range.first == range.second) {
            errno = ENOENT;
            EXIT_MUTEX();
            return -1;
        }

        for (inprocs_t::iterator it = range.first; it != range.second; ++it)
            it->second->terminate (true);
        inprocs.erase (range.first, range.second);
        EXIT_MUTEX();
        return 0;
    }

    //  Find the endpoints range (if any) corresponding to the addr_ string.
    std::pair <endpoints_t::iterator, endpoints_t::iterator> range = endpoints.equal_range (std::string (addr_));
    if (range.first == range.second) {
        errno = ENOENT;
        EXIT_MUTEX();
        return -1;
    }

    for (endpoints_t::iterator it = range.first; it != range.second; ++it) {
        //  If we have an associated pipe, terminate it.
        if (it->second.second != NULL)
            it->second.second->terminate (false);
        term_child (it->second.first);
    }
    endpoints.erase (range.first, range.second);
    EXIT_MUTEX();
    return 0;
}
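
Both inprocs and endpoints are multimaps keyed by endpoint URI, because one address can legitimately map to several pipes or sessions; equal_range selects the whole run and a ranged erase drops it. A self-contained sketch of that container pattern (the map contents are made up):

#include <iostream>
#include <map>
#include <string>
#include <utility>

int main ()
{
    //  Several connections can share one endpoint URI.
    std::multimap<std::string, int> endpoints;
    endpoints.insert (std::make_pair ("tcp://127.0.0.1:5555", 1));
    endpoints.insert (std::make_pair ("tcp://127.0.0.1:5555", 2));
    endpoints.insert (std::make_pair ("inproc://a", 3));

    typedef std::multimap<std::string, int>::iterator iter_t;
    std::pair<iter_t, iter_t> range =
        endpoints.equal_range ("tcp://127.0.0.1:5555");
    if (range.first == range.second)
        return 1;                          //  ENOENT in the real code.

    //  Terminate every entry for the address, then drop the whole run.
    for (iter_t it = range.first; it != range.second; ++it)
        std::cout << "terminating entry " << it->second << '\n';
    endpoints.erase (range.first, range.second);

    return 0;
}
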
Example #10
int zmq::socket_base_t::connect (const char *addr_)
{
    ENTER_MUTEX();

    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0)) {
        EXIT_MUTEX();
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol)) {
        EXIT_MUTEX();
        return -1;
    }

    if (protocol == "inproc") {

        //  TODO: inproc connect is specific with respect to creating pipes
        //  as there's no 'reconnect' functionality implemented. Once that
        //  is in place we should follow generic pipe creation algorithm.

        //  Find the peer endpoint.
        endpoint_t peer = find_endpoint (addr_);

        // The total HWM for an inproc connection should be the sum of
        // the binder's HWM and the connector's HWM.
        int sndhwm = 0;
        if (peer.socket == NULL)
            sndhwm = options.sndhwm;
        else if (options.sndhwm != 0 && peer.options.rcvhwm != 0)
            sndhwm = options.sndhwm + peer.options.rcvhwm;
        int rcvhwm = 0;
        if (peer.socket == NULL)
            rcvhwm = options.rcvhwm;
        else
        if (options.rcvhwm != 0 && peer.options.sndhwm != 0)
            rcvhwm = options.rcvhwm + peer.options.sndhwm;

        //  Create a bi-directional pipe to connect the peers.
        object_t *parents [2] = {this, peer.socket == NULL ? this : peer.socket};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : sndhwm, conflate? -1 : rcvhwm};
        bool conflates [2] = {conflate, conflate};
        int rc = pipepair (parents, new_pipes, hwms, conflates);
        //  Check pipepair's result before dereferencing the new pipes.
        errno_assert (rc == 0);

        if (!conflate) {
            new_pipes[0]->set_hwms_boost(peer.options.sndhwm, peer.options.rcvhwm);
            new_pipes[1]->set_hwms_boost(options.sndhwm, options.rcvhwm);
        }

        if (!peer.socket) {
            //  The peer doesn't exist yet so we don't know whether
            //  to send the identity message or not. To resolve this,
            //  we always send our identity and drop it later if
            //  the peer doesn't expect it.
            msg_t id;
            rc = id.init_size (options.identity_size);
            errno_assert (rc == 0);
            memcpy (id.data (), options.identity, options.identity_size);
            id.set_flags (msg_t::identity);
            bool written = new_pipes [0]->write (&id);
            zmq_assert (written);
            new_pipes [0]->flush ();

            const endpoint_t endpoint = {this, options};
            pend_connection (std::string (addr_), endpoint, new_pipes);
        }
        else {
            //  If required, send the identity of the local socket to the peer.
            if (peer.options.recv_identity) {
                msg_t id;
                rc = id.init_size (options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), options.identity, options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [0]->write (&id);
                zmq_assert (written);
                new_pipes [0]->flush ();
            }

            //  If required, send the identity of the peer to the local socket.
            if (options.recv_identity) {
                msg_t id;
                rc = id.init_size (peer.options.identity_size);
                errno_assert (rc == 0);
                memcpy (id.data (), peer.options.identity, peer.options.identity_size);
                id.set_flags (msg_t::identity);
                bool written = new_pipes [1]->write (&id);
                zmq_assert (written);
                new_pipes [1]->flush ();
            }

            //  Attach remote end of the pipe to the peer socket. Note that peer's
            //  seqnum was incremented in find_endpoint function. We don't need it
            //  increased here.
            send_bind (peer.socket, new_pipes [1], false);
        }

        //  Attach local end of the pipe to this socket object.
        attach_pipe (new_pipes [0]);

        // Save last endpoint URI
        last_endpoint.assign (addr_);

        // remember inproc connections for disconnect
        inprocs.insert (inprocs_t::value_type (std::string (addr_), new_pipes [0]));

        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }
    bool is_single_connect = (options.type == ZMQ_DEALER ||
                              options.type == ZMQ_SUB ||
                              options.type == ZMQ_REQ);
    if (unlikely (is_single_connect)) {
        const endpoints_t::iterator it = endpoints.find (addr_);
        if (it != endpoints.end ()) {
            // There is no valid use for multiple connects on SUB-PUB,
            // DEALER-ROUTER or REQ-REP. Multiple connects produce
            // nonsensical results.
            EXIT_MUTEX();
            return 0;
        }
    }

    //  Choose the I/O thread to run the session in.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        EXIT_MUTEX();
        return -1;
    }

    address_t *paddr = new (std::nothrow) address_t (protocol, address);
    alloc_assert (paddr);

    //  Resolve address (if needed by the protocol)
    if (protocol == "tcp") {
        //  Do some basic sanity checks on tcp:// address syntax
        //  - hostname starts with digit or letter, with embedded '-' or '.'
        //  - IPv6 address may contain hex chars and colons.
        //  - IPv4 address may contain decimal digits and dots.
        //  - Address must end in ":port" where port is *, or numeric
        //  - Address may contain two parts separated by ':'
        //  Following code is quick and dirty check to catch obvious errors,
        //  without trying to be fully accurate.
        const char *check = address.c_str ();
        if (isalnum (*check) || isxdigit (*check) || *check == '[') {
            check++;
            while (isalnum  (*check)
                || isxdigit (*check)
                || *check == '.' || *check == '-' || *check == ':'|| *check == ';'
                || *check == ']')
                check++;
        }
        //  Assume the worst, now look for success
        rc = -1;
        //  Did we reach the end of the address safely?
        if (*check == 0) {
            //  Do we have a valid port string? (it cannot be '*' in connect)
            check = strrchr (address.c_str (), ':');
            if (check) {
                check++;
                if (*check && (isdigit (*check)))
                    rc = 0;     //  Valid
            }
        }
        if (rc == -1) {
            errno = EINVAL;
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
        //  Defer resolution until a socket is opened
        paddr->resolved.tcp_addr = NULL;
    }
#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    else
    if (protocol == "ipc") {
        paddr->resolved.ipc_addr = new (std::nothrow) ipc_address_t ();
        alloc_assert (paddr->resolved.ipc_addr);
        int rc = paddr->resolved.ipc_addr->resolve (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
    }
#endif

// TBD - Should we check address for ZMQ_HAVE_NORM???

#ifdef ZMQ_HAVE_OPENPGM
    if (protocol == "pgm" || protocol == "epgm") {
        struct pgm_addrinfo_t *res = NULL;
        uint16_t port_number = 0;
        int rc = pgm_socket_t::init_address(address.c_str(), &res, &port_number);
        if (res != NULL)
            pgm_freeaddrinfo (res);
        if (rc != 0 || port_number == 0) {
          EXIT_MUTEX();
          return -1;
        }
    }
#endif
#if defined ZMQ_HAVE_TIPC
    else
    if (protocol == "tipc") {
        paddr->resolved.tipc_addr = new (std::nothrow) tipc_address_t ();
        alloc_assert (paddr->resolved.tipc_addr);
        int rc = paddr->resolved.tipc_addr->resolve (address.c_str());
        if (rc != 0) {
            LIBZMQ_DELETE(paddr);
            EXIT_MUTEX();
            return -1;
        }
    }
#endif

    //  Create session.
    session_base_t *session = session_base_t::create (io_thread, true, this,
        options, paddr);
    errno_assert (session);

    //  PGM does not support subscription forwarding; ask for all data to be
    //  sent to this pipe. (same for NORM, currently?)
    bool subscribe_to_all = protocol == "pgm" || protocol == "epgm" || protocol == "norm";
    pipe_t *newpipe = NULL;

    if (options.immediate != 1 || subscribe_to_all) {
        //  Create a bi-directional pipe.
        object_t *parents [2] = {this, session};
        pipe_t *new_pipes [2] = {NULL, NULL};

        bool conflate = options.conflate &&
            (options.type == ZMQ_DEALER ||
             options.type == ZMQ_PULL ||
             options.type == ZMQ_PUSH ||
             options.type == ZMQ_PUB ||
             options.type == ZMQ_SUB);

        int hwms [2] = {conflate? -1 : options.sndhwm,
            conflate? -1 : options.rcvhwm};
        bool conflates [2] = {conflate, conflate};
        rc = pipepair (parents, new_pipes, hwms, conflates);
        errno_assert (rc == 0);

        //  Attach local end of the pipe to the socket object.
        attach_pipe (new_pipes [0], subscribe_to_all);
        newpipe = new_pipes [0];

        //  Attach remote end of the pipe to the session object later on.
        session->attach_pipe (new_pipes [1]);
    }

    //  Save last endpoint URI
    paddr->to_string (last_endpoint);

    add_endpoint (addr_, (own_t *) session, newpipe);
    EXIT_MUTEX();
    return 0;
}
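
The inproc HWM arithmetic near the top of connect() is easy to misread: a high-water mark of 0 means "no limit", and an unlimited side makes that whole direction unlimited; only when both peers set limits are they summed (binder's plus connector's), and with no peer yet only the local limit is known. The same rule in isolation (function name hypothetical):

//  Effective inproc HWM for one direction, per the logic in connect():
//  - peer unknown yet: use the local limit alone;
//  - both limits non-zero: sum them;
//  - either side unlimited (0): the whole direction is unlimited.
int effective_hwm (bool peer_known, int local_hwm, int peer_hwm)
{
    if (!peer_known)
        return local_hwm;
    if (local_hwm != 0 && peer_hwm != 0)
        return local_hwm + peer_hwm;
    return 0;
}

For the send direction this corresponds to (options.sndhwm, peer.options.rcvhwm), and for the receive direction to (options.rcvhwm, peer.options.sndhwm).
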
Example #11
int zmq::socket_base_t::bind (const char *addr_)
{
    ENTER_MUTEX();

    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  Process pending commands, if any.
    int rc = process_commands (0, false);
    if (unlikely (rc != 0)) {
        EXIT_MUTEX();
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri (addr_, protocol, address) || check_protocol (protocol)) {
        EXIT_MUTEX();
        return -1;
    }

    if (protocol == "inproc") {
        const endpoint_t endpoint = { this, options };
        const int rc = register_endpoint (addr_, endpoint);
        if (rc == 0) {
            connect_pending (addr_, this);
            last_endpoint.assign (addr_);
            options.connected = true;
        }
        EXIT_MUTEX();
        return rc;
    }

    if (protocol == "pgm" || protocol == "epgm" || protocol == "norm") {
        //  For convenience's sake, bind can be used interchangeably with
        //  connect for the PGM, EPGM and NORM transports.
        EXIT_MUTEX();
        rc = connect (addr_);
        if (rc != -1)
            options.connected = true;
        return rc;
    }

    //  The remaining transports must be run in an I/O thread, so at this
    //  point we'll choose one.
    io_thread_t *io_thread = choose_io_thread (options.affinity);
    if (!io_thread) {
        errno = EMTHREAD;
        EXIT_MUTEX();
        return -1;
    }

    if (protocol == "tcp") {
        tcp_listener_t *listener = new (std::nothrow) tcp_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(listener);
            event_bind_failed (address, zmq_errno());
            EXIT_MUTEX();
            return -1;
        }

        // Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }

#if !defined ZMQ_HAVE_WINDOWS && !defined ZMQ_HAVE_OPENVMS
    if (protocol == "ipc") {
        ipc_listener_t *listener = new (std::nothrow) ipc_listener_t (
            io_thread, this, options);
        alloc_assert (listener);
        int rc = listener->set_address (address.c_str ());
        if (rc != 0) {
            LIBZMQ_DELETE(listener);
            event_bind_failed (address, zmq_errno());
            EXIT_MUTEX();
            return -1;
        }

        // Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (last_endpoint.c_str (), (own_t *) listener, NULL);
        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }
#endif
#if defined ZMQ_HAVE_TIPC
    if (protocol == "tipc") {
         tipc_listener_t *listener = new (std::nothrow) tipc_listener_t (
              io_thread, this, options);
         alloc_assert (listener);
         int rc = listener->set_address (address.c_str ());
         if (rc != 0) {
             LIBZMQ_DELETE(listener);
             event_bind_failed (address, zmq_errno());
             EXIT_MUTEX();
             return -1;
         }

        // Save last endpoint URI
        listener->get_address (last_endpoint);

        add_endpoint (addr_, (own_t *) listener, NULL);
        options.connected = true;
        EXIT_MUTEX();
        return 0;
    }
#endif

    EXIT_MUTEX();
    zmq_assert (false);
    return -1;
}
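
Each listener branch writes the concretely resolved address back into last_endpoint before registering the endpoint, which is what makes wildcard-port binds usable: the caller binds to '*' and reads back the port the OS picked. A usage sketch with the public C API (error handling abbreviated):

#include <stdio.h>
#include <zmq.h>

int main (void)
{
    void *ctx = zmq_ctx_new ();
    void *sock = zmq_socket (ctx, ZMQ_REP);

    //  A '*' port is accepted by bind (connect rejects it, as the port
    //  check in Example #10 shows) and asks the OS for an ephemeral port.
    if (zmq_bind (sock, "tcp://127.0.0.1:*") != 0)
        return 1;

    //  Read back the concrete endpoint the listener resolved.
    char endpoint[256];
    size_t len = sizeof endpoint;
    if (zmq_getsockopt (sock, ZMQ_LAST_ENDPOINT, endpoint, &len) == 0)
        printf ("bound at %s\n", endpoint);

    zmq_close (sock);
    zmq_ctx_term (ctx);
    return 0;
}
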
Example #12
int zmq::socket_base_t::getsockopt (int option_, void *optval_,
    size_t *optvallen_)
{
    ENTER_MUTEX();

    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    if (option_ == ZMQ_RCVMORE) {
        if (*optvallen_ < sizeof (int)) {
            errno = EINVAL;
            EXIT_MUTEX();
            return -1;
        }
        memset(optval_, 0, *optvallen_);
        *((int*) optval_) = rcvmore ? 1 : 0;
        *optvallen_ = sizeof (int);
        EXIT_MUTEX();
        return 0;
    }

    if (option_ == ZMQ_FD) {
        if (*optvallen_ < sizeof (fd_t)) {
            errno = EINVAL;
            EXIT_MUTEX();
            return -1;
        }

        if (thread_safe) {
            // A thread-safe socket doesn't provide a file descriptor
            errno = EINVAL;
            EXIT_MUTEX();
            return -1;
        }

        *((fd_t*)optval_) = ((mailbox_t*)mailbox)->get_fd();
        *optvallen_ = sizeof(fd_t);

        EXIT_MUTEX();
        return 0;
    }

    if (option_ == ZMQ_EVENTS) {
        if (*optvallen_ < sizeof (int)) {
            errno = EINVAL;
            EXIT_MUTEX();
            return -1;
        }
        int rc = process_commands (0, false);
        if (rc != 0 && (errno == EINTR || errno == ETERM)) {
            EXIT_MUTEX();
            return -1;
        }
        errno_assert (rc == 0);
        *((int*) optval_) = 0;
        if (has_out ())
            *((int*) optval_) |= ZMQ_POLLOUT;
        if (has_in ())
            *((int*) optval_) |= ZMQ_POLLIN;
        *optvallen_ = sizeof (int);
        EXIT_MUTEX();
        return 0;
    }

    if (option_ == ZMQ_LAST_ENDPOINT) {
        if (*optvallen_ < last_endpoint.size () + 1) {
            errno = EINVAL;
            EXIT_MUTEX();
            return -1;
        }
        strcpy (static_cast <char *> (optval_), last_endpoint.c_str ());
        *optvallen_ = last_endpoint.size () + 1;
        EXIT_MUTEX();
        return 0;
    }

    if (option_ == ZMQ_THREAD_SAFE) {
        if (*optvallen_ < sizeof (int)) {
            errno = EINVAL;
            EXIT_MUTEX();
            return -1;
        }
        memset(optval_, 0, *optvallen_);
        *((int*) optval_) = thread_safe ? 1 : 0;
        *optvallen_ = sizeof (int);
        EXIT_MUTEX();
        return 0;
    }  

    int rc = options.getsockopt (option_, optval_, optvallen_);
    EXIT_MUTEX();
    return rc;
}
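
Note what ZMQ_FD hands back: the mailbox descriptor, not anything messages can be read from. It becomes readable when commands arrive, so after an external poll() wakes up, the caller must query ZMQ_EVENTS (which, as shown above, runs process_commands and then reports has_in/has_out) to learn the socket's actual state. A usage sketch of that loop, assuming a POSIX platform where fd_t is an int:

#include <poll.h>
#include <zmq.h>

//  Wait until a zmq socket is actually readable, integrating it into an
//  external poll loop via ZMQ_FD + ZMQ_EVENTS. Returns 0 when readable.
int wait_readable (void *sock)
{
    int fd;
    size_t fd_len = sizeof fd;
    if (zmq_getsockopt (sock, ZMQ_FD, &fd, &fd_len) != 0)
        return -1;              //  EINVAL for thread-safe sockets (see above).

    while (1) {
        //  Always consult ZMQ_EVENTS first: the mailbox fd only signals
        //  "commands pending", and its edge may already have fired.
        int events;
        size_t ev_len = sizeof events;
        if (zmq_getsockopt (sock, ZMQ_EVENTS, &events, &ev_len) != 0)
            return -1;
        if (events & ZMQ_POLLIN)
            return 0;

        struct pollfd pfd = { fd, POLLIN, 0 };
        if (poll (&pfd, 1, -1) < 0)
            return -1;
    }
}
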
Example #13
void zmq::socket_base_t::flush_commands ()
{
    ENTER_MUTEX();
    process_commands (0, false);
    EXIT_MUTEX();
}
Example #14
int zmq::socket_base_t::recv (msg_t *msg_, int flags_)
{
    ENTER_MUTEX();

    //  Ensure the library hasn't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX();
        return -1;
    }

    //  Check whether message passed to the function is valid.
    if (unlikely (!msg_ || !msg_->check ())) {
        errno = EFAULT;
        EXIT_MUTEX();
        return -1;
    }

    //  Once every inbound_poll_rate messages, check for signals and process
    //  incoming commands. This happens only when we are not polling at all,
    //  i.e. when messages are available all the time. If a poll does occur,
    //  ticks is set to zero and this code is skipped.
    //
    //  Note that 'recv' uses a different command-throttling algorithm (the
    //  one described above) from the one used by 'send'. This is because
    //  counting ticks is more efficient than doing RDTSC all the time.
    if (++ticks == inbound_poll_rate) {
        if (unlikely (process_commands (0, false) != 0)) {
            EXIT_MUTEX();
            return -1;
        }
        ticks = 0;
    }

    //  Get the message.
    int rc = xrecv (msg_);
    if (unlikely (rc != 0 && errno != EAGAIN)) {
        EXIT_MUTEX();
        return -1;
    }

    //  If we have the message, return immediately.
    if (rc == 0) {
        if (file_desc != retired_fd)
            msg_->set_fd(file_desc);
        extract_flags (msg_);
        EXIT_MUTEX();
        return 0;
    }

    //  If the message cannot be fetched immediately, there are two scenarios.
    //  For a non-blocking recv, commands are processed in case there's an
    //  activate_reader command already waiting in a command pipe.
    //  If there is none, return EAGAIN.
    if (flags_ & ZMQ_DONTWAIT || options.rcvtimeo == 0) {
        if (unlikely (process_commands (0, false) != 0)) {
            EXIT_MUTEX();
            return -1;
        }
        ticks = 0;

        rc = xrecv (msg_);
        if (rc < 0) {
            EXIT_MUTEX();
            return rc;
        }
        if (file_desc != retired_fd)
            msg_->set_fd(file_desc);
        extract_flags (msg_);

        EXIT_MUTEX();
        return 0;
    }

    //  Compute the time when the timeout should occur.
    //  If the timeout is infinite, the deadline is never used.
    int timeout = options.rcvtimeo;
    uint64_t end = timeout < 0 ? 0 : (clock.now_ms () + timeout);

    //  In blocking scenario, commands are processed over and over again until
    //  we are able to fetch a message.
    bool block = (ticks != 0);
    while (true) {
        if (unlikely (process_commands (block ? timeout : 0, false) != 0)) {
            EXIT_MUTEX();
            return -1;
        }
        rc = xrecv (msg_);
        if (rc == 0) {
            ticks = 0;
            break;
        }
        if (unlikely (errno != EAGAIN)) {
            EXIT_MUTEX();
            return -1;
        }
        block = true;
        if (timeout > 0) {
            timeout = (int) (end - clock.now_ms ());
            if (timeout <= 0) {
                errno = EAGAIN;
                EXIT_MUTEX();
                return -1;
            }
        }
    }

    if (file_desc != retired_fd)
        msg_->set_fd(file_desc);
    extract_flags (msg_);
    EXIT_MUTEX();
    return 0;
}
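
From the caller's side, the ZMQ_DONTWAIT path above surfaces as a -1 with errno set to EAGAIN, which means "no message yet" rather than a failure. A usage sketch with the public C API (function name hypothetical):

#include <errno.h>
#include <stdio.h>
#include <zmq.h>

//  Try to receive one message without blocking.
//  Returns 1 on message, 0 when none is available, -1 on real error.
int try_recv_one (void *sock)
{
    zmq_msg_t msg;
    zmq_msg_init (&msg);

    const int rc = zmq_msg_recv (&msg, sock, ZMQ_DONTWAIT);
    if (rc == -1) {
        zmq_msg_close (&msg);
        if (errno == EAGAIN)
            return 0;           //  Nothing there right now; try again later.
        return -1;              //  Real failure (ETERM, ENOTSOCK, ...).
    }

    printf ("got %d bytes\n", rc);
    zmq_msg_close (&msg);
    return 1;
}
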
Example #15
NTSTATUS
NpfsCommonCreate(
    PNPFS_IRP_CONTEXT pIrpContext,
    PIRP pIrp
    )
{
    NTSTATUS ntStatus = 0;
    PUNICODE_STRING pPipeName = &pIrpContext->pIrp->Args.Create.FileName.Name;
    PNPFS_FCB pFCB = NULL;
    PNPFS_PIPE pPipe = NULL;
    PNPFS_CCB pCCB = NULL;
    BOOLEAN bReleaseLock = FALSE;
    PNPFS_IRP_CONTEXT pConnectContext = NULL;

    ntStatus = NpfsValidateCreate(pIrpContext);
    BAIL_ON_NT_STATUS(ntStatus);

    ENTER_READER_RW_LOCK(&gServerLock);
    ntStatus = NpfsFindFCB(pPipeName, &pFCB);
    LEAVE_READER_RW_LOCK(&gServerLock);
    BAIL_ON_NT_STATUS(ntStatus);

    ntStatus = NpfsFindAvailablePipe(pFCB, &pPipe);
    BAIL_ON_NT_STATUS(ntStatus);

    ENTER_MUTEX(&pPipe->PipeMutex);
    bReleaseLock = TRUE;

    if (pPipe->PipeServerState != PIPE_SERVER_WAITING_FOR_CONNECTION)
    {
        ntStatus = STATUS_INVALID_SERVER_STATE;
        BAIL_ON_NT_STATUS(ntStatus);
    }

    ntStatus = NpfsCreateCCB(pIrpContext, pPipe, &pCCB);
    BAIL_ON_NT_STATUS(ntStatus);

    pPipe->PipeClientState = PIPE_CLIENT_CONNECTED;

    pPipe->pClientAccessToken = IoSecurityGetAccessToken(pIrp->Args.Create.SecurityContext);
    RtlReferenceAccessToken(pPipe->pClientAccessToken);

    ntStatus = NpfsCommonProcessCreateEcp(pIrpContext, pIrp, pCCB);
    BAIL_ON_NT_STATUS(ntStatus);

    /* Wake up blocking pipe waiters */
    pthread_cond_signal(&pPipe->PipeCondition);

    /* If there is a pending connect IRP, grab it
       to complete once we leave the pipe mutex */
    if (pPipe->pPendingServerConnect)
    {
        pConnectContext = pPipe->pPendingServerConnect;
        pPipe->pPendingServerConnect = NULL;
        pPipe->PipeServerState = PIPE_SERVER_CONNECTED;
    }

    LEAVE_MUTEX(&pPipe->PipeMutex);
    bReleaseLock = FALSE;

    if (pConnectContext)
    {
        pConnectContext->pIrp->IoStatusBlock.Status = STATUS_SUCCESS;
        IoIrpComplete(pConnectContext->pIrp);
        IO_FREE(&pConnectContext);
    }

    ntStatus = NpfsSetCCB(pIrpContext->pIrp->FileHandle, pCCB);
    BAIL_ON_NT_STATUS(ntStatus);

    pIrpContext->pIrp->IoStatusBlock.CreateResult = FILE_OPENED;

cleanup:

    if (bReleaseLock)
    {
        LEAVE_MUTEX(&pPipe->PipeMutex);
    }

    if (pFCB)
    {
        NpfsReleaseFCB(pFCB);
    }

    if (pPipe)
    {
        NpfsReleasePipe(pPipe);
    }

    if (pCCB)
    {
        NpfsReleaseCCB(pCCB);
    }

    pIrpContext->pIrp->IoStatusBlock.Status = ntStatus;

    return ntStatus;

error:

    pIrpContext->pIrp->IoStatusBlock.CreateResult = FILE_DOES_NOT_EXIST;

    goto cleanup;
}
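
Note the handling of the pending connect IRP: it is detached from the pipe while the mutex is held, but completed only after LEAVE_MUTEX, since IoIrpComplete may do arbitrary work and must not run under the pipe lock. That "detach under the lock, act outside it" shape in isolation (all names hypothetical):

#include <pthread.h>
#include <stddef.h>

struct work_t { int payload; };

struct pipe_t {
    pthread_mutex_t mutex;
    struct work_t *pending;     //  At most one queued item, like
                                //  pPendingServerConnect above.
};

static void complete (struct work_t *w)
{
    (void) w;                   //  Potentially slow or re-entrant work.
}

void handle_event (struct pipe_t *p)
{
    struct work_t *grabbed = NULL;

    pthread_mutex_lock (&p->mutex);
    //  Detach the pending item while the state is protected...
    if (p->pending != NULL) {
        grabbed = p->pending;
        p->pending = NULL;
    }
    pthread_mutex_unlock (&p->mutex);

    //  ...then run the completion with no lock held.
    if (grabbed != NULL)
        complete (grabbed);
}
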
Example #16
int zmq::socket_base_t::term_endpoint (const char *addr_)
{
    ENTER_MUTEX ();

    //  Ensure the library hasn't been shut down yet.
    if (unlikely (ctx_terminated)) {
        errno = ETERM;
        EXIT_MUTEX ();
        return -1;
    }

    //  Check whether endpoint address passed to the function is valid.
    if (unlikely (!addr_)) {
        errno = EINVAL;
        EXIT_MUTEX ();
        return -1;
    }

    //  Process pending commands, if any, since there could still be
    //  unprocessed process_own() commands (from launch_child(), for example)
    //  for the very endpoint we're now being asked to terminate.
    int rc = process_commands (0, false);
    if (unlikely(rc != 0)) {
        EXIT_MUTEX ();
        return -1;
    }

    //  Parse addr_ string.
    std::string protocol;
    std::string address;
    if (parse_uri(addr_, protocol, address) || check_protocol(protocol)) {
        EXIT_MUTEX ();
        return -1;
    }

    // Disconnect an inproc socket
    if (protocol == "inproc") {
        if (unregister_endpoint (std::string(addr_), this) == 0) {
            EXIT_MUTEX ();
            return 0;
        }
        std::pair <inprocs_t::iterator, inprocs_t::iterator> range = inprocs.equal_range (std::string (addr_));
        if (range.first == range.second) {
            errno = ENOENT;
            EXIT_MUTEX ();
            return -1;
        }

        for (inprocs_t::iterator it = range.first; it != range.second; ++it)
            it->second->terminate (true);
        inprocs.erase (range.first, range.second);
        EXIT_MUTEX ();
        return 0;
    }

    std::string resolved_addr = std::string (addr_);
    std::pair <endpoints_t::iterator, endpoints_t::iterator> range;

    // The resolved last_endpoint is used as a key in the endpoints map.
    // The address passed by the user might not match in the TCP case due to
    // IPv4-in-IPv6 mapping (e.g. tcp://[::ffff:127.0.0.1]:9999), so try to
    // resolve it before giving up. Since at this stage we don't know whether
    // the socket is connected or bound, try both.
    if (protocol == "tcp") {
        range = endpoints.equal_range (resolved_addr);
        if (range.first == range.second) {
            tcp_address_t *tcp_addr = new (std::nothrow) tcp_address_t ();
            alloc_assert (tcp_addr);
            rc = tcp_addr->resolve (address.c_str (), false, options.ipv6);

            if (rc == 0) {
                tcp_addr->to_string (resolved_addr);
                range = endpoints.equal_range (resolved_addr);

                if (range.first == range.second) {
                    rc = tcp_addr->resolve (address.c_str (), true, options.ipv6);
                    if (rc == 0) {
                        tcp_addr->to_string (resolved_addr);
                    }
                }
            }
            LIBZMQ_DELETE(tcp_addr);
        }
    }

    //  Find the endpoints range (if any) corresponding to the addr_ string.
    range = endpoints.equal_range (resolved_addr);
    if (range.first == range.second) {
        errno = ENOENT;
        EXIT_MUTEX ();
        return -1;
    }

    for (endpoints_t::iterator it = range.first; it != range.second; ++it) {
        //  If we have an associated pipe, terminate it.
        if (it->second.second != NULL)
            it->second.second->terminate (false);
        term_child (it->second.first);
    }
    endpoints.erase (range.first, range.second);
    EXIT_MUTEX ();
    return 0;
}