Example #1
static void nn_global_term (void)
{
#if defined NN_HAVE_WINDOWS
    int rc;
#endif
    struct nn_list_item *it;
    struct nn_transport *tp;

    /*  If there are no sockets remaining, uninitialise the global context. */
    nn_assert (self.socks);
    if (self.nsocks > 0)
        return;

    /*  Stop the FSM  */
    nn_ctx_enter (&self.ctx);
    nn_fsm_stop (&self.fsm);
    nn_ctx_leave (&self.ctx);

    /*  Shut down the worker threads. */
    nn_pool_term (&self.pool);

    /* Terminate ctx mutex */
    nn_ctx_term (&self.ctx);

    /*  Ask all the transports to deallocate their global resources. */
    while (!nn_list_empty (&self.transports)) {
        it = nn_list_begin (&self.transports);
        tp = nn_cont (it, struct nn_transport, item);
        if (tp->term)
            tp->term ();
        nn_list_erase (&self.transports, it);
    }

    /*  For now there's nothing to deallocate about socket types, however,
        let's remove them from the list anyway. */
    while (!nn_list_empty (&self.socktypes))
        nn_list_erase (&self.socktypes, nn_list_begin (&self.socktypes));

    /*  Final deallocation of the nn_global object itself. */
    nn_list_term (&self.socktypes);
    nn_list_term (&self.transports);
    nn_free (self.socks);

    /*  This marks the global state as uninitialised. */
    self.socks = NULL;

    /*  Shut down the memory allocation subsystem. */
    nn_alloc_term ();

    /*  On Windows, uninitialise the socket library. */
#if defined NN_HAVE_WINDOWS
    rc = WSACleanup ();
    nn_assert (rc == 0);
#endif
}
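The loops at the end of nn_global_term — pop the first item, recover the enclosing struct with nn_cont, erase it, and finally terminate the now-empty list — form an idiom that recurs throughout these examples. Below is a minimal sketch of that drain-and-terminate pattern; my_elem and my_list_drain are illustrative names, not part of nanomsg, and the include paths assume nanomsg's internal headers under its src/ tree.

/*  Sketch only: drains an intrusive nn_list whose elements were heap
    allocated with nn_alloc, then terminates the list itself. */
#include "utils/list.h"
#include "utils/cont.h"
#include "utils/alloc.h"

struct my_elem {
    struct nn_list_item item;   /*  Intrusive list hook. */
    int value;
};

static void my_list_drain (struct nn_list *list)
{
    struct nn_list_item *it;
    struct my_elem *el;

    /*  Keep taking the first item until the list is empty. */
    while (!nn_list_empty (list)) {
        it = nn_list_begin (list);
        el = nn_cont (it, struct my_elem, item);
        nn_list_erase (list, it);          /*  Unlink before freeing. */
        nn_list_item_term (&el->item);
        nn_free (el);
    }

    /*  Only an empty list may be terminated. */
    nn_list_term (list);
}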
Example #2
static int nn_inproc_bind (const char *addr, void *hint,
    struct nn_epbase **epbase)
{
    struct nn_list_item *it;
    struct nn_binproc *binproc;
    struct nn_cinproc *cinproc;

    nn_mutex_lock (&self.sync);

    /*  Check whether the endpoint isn't already bound. */
    /*  TODO:  This is an O(n) algorithm! */
    for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        binproc = nn_cont (it, struct nn_binproc, item);
        if (strncmp (addr, nn_binproc_getaddr (binproc),
              NN_SOCKADDR_MAX) == 0) {
            nn_mutex_unlock (&self.sync);
            return -EADDRINUSE;
        }
    }

    /*  Insert the entry into the endpoint repository. */
    binproc = nn_binproc_create (hint);
    nn_list_insert (&self.bound, &binproc->item, nn_list_end (&self.bound));

    /*  During this process new pipes may be created. */
    for (it = nn_list_begin (&self.connected);
          it != nn_list_end (&self.connected);
          it = nn_list_next (&self.connected, it)) {
        cinproc = nn_cont (it, struct nn_cinproc, item);
        if (strncmp (addr, nn_cinproc_getaddr (cinproc),
              NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_epbase_ispeer (&binproc->epbase, cinproc->protocol))
                continue;

            nn_assert (cinproc->connects == 0);
            cinproc->connects = 1;
            nn_binproc_connect (binproc, cinproc);
        }
    }

    nn_assert (epbase);
    *epbase = &binproc->epbase;
    nn_mutex_unlock (&self.sync);

    return 0;
}
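The inproc bind functions above all use the same linear lookup: iterate with nn_list_begin/nn_list_next until nn_list_end, recover the element with nn_cont, and compare addresses. A minimal sketch of that idiom follows; my_endpoint and my_find are illustrative names outside nanomsg, and the include paths again assume the nanomsg src/ tree.

/*  Sketch only: O(n) lookup over an intrusive list, mirroring the
    "TODO: This is an O(n) algorithm!" note in the examples. */
#include <string.h>
#include "utils/list.h"
#include "utils/cont.h"

struct my_endpoint {
    struct nn_list_item item;
    char addr [128];
};

static struct my_endpoint *my_find (struct nn_list *list, const char *addr)
{
    struct nn_list_item *it;
    struct my_endpoint *ep;

    for (it = nn_list_begin (list); it != nn_list_end (list);
          it = nn_list_next (list, it)) {
        ep = nn_cont (it, struct my_endpoint, item);
        if (strcmp (ep->addr, addr) == 0)
            return ep;
    }
    return NULL;   /*  Not found. */
}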
Example #3
static int nn_inproc_ctx_connect (const char *addr, void *hint,
    struct nn_epbase **epbase)
{
    int rc;
    struct nn_list_item *it;
    struct nn_inprocc *inprocc;
    struct nn_inprocb *inprocb;
    struct nn_msgpipe *pipe;

    /*  Insert the entry into the endpoint repository. */
    inprocc = nn_alloc (sizeof (struct nn_inprocc), "inprocc");
    alloc_assert (inprocc);
    rc = nn_inprocc_init (inprocc, addr, hint);
    if (nn_slow (rc != 0))
        return rc;
    nn_list_insert (&self.connected, &inprocc->list,
        nn_list_end (&self.connected));

    /*  During this process a pipe may be created. */
    for (it = nn_list_begin (&self.bound);
          it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        inprocb = nn_cont (it, struct nn_inprocb, list);
        if (strcmp (addr, nn_inprocb_getaddr (inprocb)) == 0) {
            pipe = nn_alloc (sizeof (struct nn_msgpipe), "msgpipe");
            alloc_assert (pipe);
            nn_msgpipe_init (pipe, inprocb, inprocc);
            break;
        }
    }

    nn_assert (epbase);
    *epbase = &inprocc->epbase;
    return 0;
}
Example #4
static void nn_cinproc_shutdown (struct nn_fsm *self, int src, int type,
    NN_UNUSED void *srcptr)
{
    struct nn_cinproc *cinproc;
    struct nn_sinproc *sinproc;
    struct nn_list_item *it;

    cinproc = nn_cont (self, struct nn_cinproc, fsm);

    if (src == NN_FSM_ACTION && type == NN_FSM_STOP) {

        /*  First, unregister the endpoint from the global repository of inproc
            endpoints. This way, new connections cannot be created anymore. */
        nn_ins_disconnect (&cinproc->item);

        /*  Stop the existing connection. */
        for (it = nn_list_begin (&cinproc->sinprocs);
              it != nn_list_end (&cinproc->sinprocs);
              it = nn_list_next (&cinproc->sinprocs, it)) {
            sinproc = nn_cont (it, struct nn_sinproc, item);
            nn_sinproc_stop (sinproc);
        }
        cinproc->state = NN_CINPROC_STATE_STOPPING;
        goto finish;
    }
Example #5
void nn_priolist_advance (struct nn_priolist *self, int release)
{
    struct nn_priolist_slot *slot;
    struct nn_list_item *it;

    nn_assert (self->current > 0);
    slot = &self->slots [self->current - 1];

    /*  Move slot's current pointer to the next pipe. */
    if (release)
        it = nn_list_erase (&slot->pipes, &slot->current->item);
    else
        it = nn_list_next (&slot->pipes, &slot->current->item);
    if (!it)
        it = nn_list_begin (&slot->pipes);
    slot->current = nn_cont (it, struct nn_priolist_data, item);

    /* If there are no more pipes in this slot, find a non-empty slot with
       lower priority. */
    while (nn_list_empty (&slot->pipes)) {
        ++self->current;
        if (self->current > NN_PRIOLIST_SLOTS) {
            self->current = -1;
            return;
        }
        slot = &self->slots [self->current - 1];
    }
}
Example #6
static void nn_binproc_handler (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_binproc *binproc;
    struct nn_list_item *it;
    struct nn_sinproc *sinproc;
    struct nn_sinproc *peer;

    binproc = nn_cont (self, struct nn_binproc, fsm);

/******************************************************************************/
/*  STOP procedure.                                                           */
/******************************************************************************/
    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {

        /*  First, unregister the endpoint from the global repository of inproc
            endpoints. This way, new connections cannot be created anymore. */
        nn_inproc_unbind (binproc);

        /*  Stop the existing connections. */
        for (it = nn_list_begin (&binproc->sinprocs);
              it != nn_list_end (&binproc->sinprocs);
              it = nn_list_next (&binproc->sinprocs, it)) {
            sinproc = nn_cont (it, struct nn_sinproc, item);
            nn_sinproc_stop (sinproc);
        }

        binproc->state = NN_BINPROC_STATE_STOPPING;
        goto finish;
    }
Example #7
File: ins.c Project: 4ker/nanomsg
void nn_ins_connect (struct nn_ins_item *item, nn_ins_fn fn)
{
    struct nn_list_item *it;
    struct nn_ins_item *bitem;

    nn_mutex_lock (&self.sync);

    /*  Insert the entry into the endpoint repository. */
    nn_list_insert (&self.connected, &item->item,
        nn_list_end (&self.connected));

    /*  During this process a pipe may be created. */
    for (it = nn_list_begin (&self.bound);
          it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        bitem = nn_cont (it, struct nn_ins_item, item);
        if (strncmp (nn_epbase_getaddr (&item->epbase),
              nn_epbase_getaddr (&bitem->epbase), NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_epbase_ispeer (&item->epbase, bitem->protocol))
                break;

            /*  Call back to cinproc to create actual connection. */
            fn (item, bitem);

            break;
        }
    }

    nn_mutex_unlock (&self.sync);
}
Example #8
void nn_priolist_rm (struct nn_priolist *self, struct nn_pipe *pipe,
    struct nn_priolist_data *data)
{
    struct nn_priolist_slot *slot;
    struct nn_list_item *it;

    /*  Non-active pipes don't need any special processing. */
    if (!nn_list_item_isinlist (&data->item)) {
        nn_list_item_term (&data->item);
        return;
    }

    /*  If the pipe being removed is not current, we can simply erase it
        from the list. */
    slot = &self->slots [data->priority - 1];
    if (slot->current != data) {
        nn_list_erase (&slot->pipes, &data->item);
        nn_list_item_term (&data->item);
        return;
    }

    /*  Advance the current pointer (with wrap-over). */
    it = nn_list_erase (&slot->pipes, &data->item);
    slot->current = nn_cont (it, struct nn_priolist_data, item);
    nn_list_item_term (&data->item);
    if (!slot->current) {
        it = nn_list_begin (&slot->pipes);
        slot->current = nn_cont (it, struct nn_priolist_data, item);
    }
Example #9
int nn_ins_bind (struct nn_ins_item *item, nn_ins_fn fn)
{
    struct nn_list_item *it;
    struct nn_ins_item *bitem;
    struct nn_ins_item *citem;

    nn_mutex_lock (&self.sync);

    /*  Check whether the endpoint isn't already bound. */
    /*  TODO:  This is an O(n) algorithm! */
    for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        bitem = nn_cont (it, struct nn_ins_item, item);

        if (strncmp (nn_ep_getaddr(bitem->ep), nn_ep_getaddr(item->ep),
            NN_SOCKADDR_MAX) == 0) {

            nn_mutex_unlock (&self.sync);
            return -EADDRINUSE;
        }
    }

    /*  Insert the entry into the endpoint repository. */
    nn_list_insert (&self.bound, &item->item,
        nn_list_end (&self.bound));

    /*  During this process new pipes may be created. */
    for (it = nn_list_begin (&self.connected);
          it != nn_list_end (&self.connected);
          it = nn_list_next (&self.connected, it)) {
        citem = nn_cont (it, struct nn_ins_item, item);
        if (strncmp (nn_ep_getaddr(item->ep), nn_ep_getaddr(citem->ep),
            NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_ep_ispeer_ep (item->ep, citem->ep))
                continue;

            fn (item, citem);
        }
    }

    nn_mutex_unlock (&self.sync);

    return 0;
}
Example #10
static void nn_ctx_term (void)
{
#if defined NN_HAVE_WINDOWS
    int rc;
#endif
    struct nn_list_item *it;

    /*  If there are no sockets remaining, uninitialise the global context. */
    nn_assert (self.socks);
    if (self.nsocks > 0)
        return;

#if defined NN_LATENCY_MONITOR
    nn_latmon_term ();
#endif

    /*  Ask all the transports to deallocate their global resources. */
    while (!nn_list_empty (&self.transports)) {
        it = nn_list_begin (&self.transports);
        nn_cont (it, struct nn_transport, list)->term ();
        nn_list_erase (&self.transports, it);
    }

    /*  For now there's nothing to deallocate about socket types, however,
        let's remove them from the list anyway. */
    while (!nn_list_empty (&self.socktypes))
        nn_list_erase (&self.socktypes, nn_list_begin (&self.socktypes));

    /*  Final deallocation of the nn_ctx object itself. */
    nn_list_term (&self.socktypes);
    nn_list_term (&self.transports);
    nn_free (self.socks);

    /*  This marks the global state as uninitialised. */
    self.socks = NULL;

    /*  Shut down the memory allocation subsystem. */
    nn_alloc_term ();

    /*  On Windows, uninitialise the socket library. */
#if defined NN_HAVE_WINDOWS
    rc = WSACleanup ();
    nn_assert (rc == 0);
#endif
}
Example #11
static void nn_bipc_shutdown (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_bipc *bipc;
    struct nn_list_item *it;
    struct nn_aipc *aipc;

    bipc = nn_cont (self, struct nn_bipc, fsm);

    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        nn_aipc_stop (bipc->aipc);
        bipc->state = NN_BIPC_STATE_STOPPING_AIPC;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPC)) {
        if (!nn_aipc_isidle (bipc->aipc))
            return;
        nn_aipc_term (bipc->aipc);
        nn_free (bipc->aipc);
        bipc->aipc = NULL;
        nn_usock_stop (&bipc->usock);
        bipc->state = NN_BIPC_STATE_STOPPING_USOCK;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_USOCK)) {
       if (!nn_usock_isidle (&bipc->usock))
            return;
        for (it = nn_list_begin (&bipc->aipcs);
              it != nn_list_end (&bipc->aipcs);
              it = nn_list_next (&bipc->aipcs, it)) {
            aipc = nn_cont (it, struct nn_aipc, item);
            nn_aipc_stop (aipc);
        }
        bipc->state = NN_BIPC_STATE_STOPPING_AIPCS;
        goto aipcs_stopping;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPCS)) {
        nn_assert (src == NN_BIPC_SRC_AIPC && type == NN_AIPC_STOPPED);
        aipc = (struct nn_aipc *) srcptr;
        nn_list_erase (&bipc->aipcs, &aipc->item);
        nn_aipc_term (aipc);
        nn_free (aipc);

        /*  If there are no more aipc state machines, we can stop the whole
            bipc object. */
aipcs_stopping:
        if (nn_list_empty (&bipc->aipcs)) {
            bipc->state = NN_BIPC_STATE_IDLE;
            nn_fsm_stopped_noevent (&bipc->fsm);
            nn_epbase_stopped (&bipc->epbase);
            return;
        }

        return;
    }

    nn_fsm_bad_state(bipc->state, src, type);
}
Example #12
static void nn_btcp_shutdown (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_btcp *btcp;
    struct nn_list_item *it;
    struct nn_atcp *atcp;

    btcp = nn_cont (self, struct nn_btcp, fsm);

    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        nn_atcp_stop (btcp->atcp);
        btcp->state = NN_BTCP_STATE_STOPPING_ATCP;
    }
    if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_ATCP)) {
        if (!nn_atcp_isidle (btcp->atcp))
            return;
        nn_atcp_term (btcp->atcp);
        nn_free (btcp->atcp);
        btcp->atcp = NULL;
        nn_usock_stop (&btcp->usock);
        btcp->state = NN_BTCP_STATE_STOPPING_USOCK;
    }
    if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_USOCK)) {
       if (!nn_usock_isidle (&btcp->usock))
            return;
        for (it = nn_list_begin (&btcp->atcps);
              it != nn_list_end (&btcp->atcps);
              it = nn_list_next (&btcp->atcps, it)) {
            atcp = nn_cont (it, struct nn_atcp, item);
            nn_atcp_stop (atcp);
        }
        btcp->state = NN_BTCP_STATE_STOPPING_ATCPS;
        goto atcps_stopping;
    }
    if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_ATCPS)) {
        nn_assert (src == NN_BTCP_SRC_ATCP && type == NN_ATCP_STOPPED);
        atcp = (struct nn_atcp *) srcptr;
        nn_list_erase (&btcp->atcps, &atcp->item);
        nn_atcp_term (atcp);
        nn_free (atcp);

        /*  If there are no more atcp state machines, we can stop the whole
            btcp object. */
atcps_stopping:
        if (nn_list_empty (&btcp->atcps)) {
            btcp->state = NN_BTCP_STATE_IDLE;
            nn_fsm_stopped_noevent (&btcp->fsm);
            nn_epbase_stopped (&btcp->epbase);
            return;
        }

        return;
    }

    nn_fsm_bad_action(btcp->state, src, type);
}
Example #13
static int nn_inproc_ctx_bind (const char *addr, void *hint,
    struct nn_epbase **epbase)
{
    int rc;
    struct nn_list_item *it;
    struct nn_inprocb *inprocb;
    struct nn_inprocc *inprocc;
    struct nn_msgpipe *pipe;

    /*  Check whether the endpoint isn't already bound. */
    /*  TODO:  This is an O(n) algorithm! */
    for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        inprocb = nn_cont (it, struct nn_inprocb, list);
        if (strncmp (addr, nn_inprocb_getaddr (inprocb), NN_SOCKADDR_MAX) == 0)
            return -EADDRINUSE;
    }

    /*  Insert the entry into the endpoint repository. */
    inprocb = nn_alloc (sizeof (struct nn_inprocb), "inprocb");
    alloc_assert (inprocb);
    rc = nn_inprocb_init (inprocb, addr, hint);
    if (nn_slow (rc != 0))
        return rc;
    nn_list_insert (&self.bound, &inprocb->list, nn_list_end (&self.bound));

    /*  During this process new pipes may be created. */
    for (it = nn_list_begin (&self.connected);
          it != nn_list_end (&self.connected);
          it = nn_list_next (&self.connected, it)) {
        inprocc = nn_cont (it, struct nn_inprocc, list);
        if (strncmp (addr, nn_inprocc_getaddr (inprocc),
              NN_SOCKADDR_MAX) == 0) {
            pipe = nn_alloc (sizeof (struct nn_msgpipe), "msgpipe");
            alloc_assert (pipe);
            nn_msgpipe_init (pipe, inprocb, inprocc);
        }
    }

    nn_assert (epbase);
    *epbase = &inprocb->epbase;
    return 0;
}
Example #14
/*  Deallocate an entire message array. */
static void nn_msg_array_term (struct nn_list *msg_array)
{
    struct nn_list_item *it;
    struct msg_chunk *ch;

    while (!nn_list_empty (msg_array)) {
        it = nn_list_begin (msg_array);
        ch = nn_cont (it, struct msg_chunk, item);
        nn_msg_chunk_term (ch, msg_array);
    }

    nn_list_term (msg_array);
}
Example #15
static int nn_ctx_create_ep (int fd, const char *addr, int bind)
{
    int rc;
    const char *proto;
    const char *delim;
    size_t protosz;
    struct nn_transport *tp;
    struct nn_list_item *it;

    /*  Check whether address is valid. */
    if (!addr)
        return -EINVAL;
    if (strlen (addr) >= NN_SOCKADDR_MAX)
        return -ENAMETOOLONG;

    /*  Separate the protocol and the actual address. */
    proto = addr;
    delim = strchr (addr, ':');
    if (!delim)
        return -EINVAL;
    if (delim [1] != '/' || delim [2] != '/')
        return -EINVAL;
    protosz = delim - addr;
    addr += protosz + 3;

    /*  Find the specified protocol. */
    tp = NULL;
    nn_glock_lock ();
    for (it = nn_list_begin (&self.transports);
          it != nn_list_end (&self.transports);
          it = nn_list_next (&self.transports, it)) {
        tp = nn_cont (it, struct nn_transport, list);
        if (strlen (tp->name) == protosz &&
              memcmp (tp->name, proto, protosz) == 0)
            break;
        tp = NULL;
    }

    /*  The protocol specified doesn't match any known protocol. */
    if (!tp) {
        nn_glock_unlock ();
        return -EPROTONOSUPPORT;
    }

    /*  Ask socket to create the endpoint. Pass it the class factory
        function. */
    rc = nn_sock_add_ep (self.socks [fd], addr,
        bind ? tp->bind : tp->connect);
    nn_glock_unlock ();
    return rc;
}
Example #16
int nn_global_create_socket (int domain, int protocol)
{
    int rc;
    int s;
    struct nn_list_item *it;
    struct nn_socktype *socktype;
    struct nn_sock *sock;
    /* The function is called with nn_glock held */

    /*  Only AF_SP and AF_SP_RAW domains are supported. */
    if (nn_slow (domain != AF_SP && domain != AF_SP_RAW)) {
        return -EAFNOSUPPORT;
    }

    /*  If socket limit was reached, report error. */
    if (nn_slow (self.nsocks >= NN_MAX_SOCKETS)) {
        return -EMFILE;
    }

    /*  Find an empty socket slot. */
    s = self.unused [NN_MAX_SOCKETS - self.nsocks - 1];

    /*  Find the appropriate socket type. */
    for (it = nn_list_begin (&self.socktypes);
          it != nn_list_end (&self.socktypes);
          it = nn_list_next (&self.socktypes, it)) {
        socktype = nn_cont (it, struct nn_socktype, item);
        if (socktype->domain == domain && socktype->protocol == protocol) {

            /*  Instantiate the socket. */
            sock = nn_alloc (sizeof (struct nn_sock), "sock");
            alloc_assert (sock);
            rc = nn_sock_init (sock, socktype, s);
            if (rc < 0)
                return rc;

            /*  Adjust the global socket table. */
            self.socks [s] = sock;
            ++self.nsocks;
            return s;
        }
    }
    /*  Specified socket type wasn't found. */
    return -EINVAL;
}
Example #17
int nn_sock_destroy (struct nn_sock *self)
{
    int rc;
    struct nn_sockbase *sockbase;
    struct nn_list_item *it;
    struct nn_epbase *ep;

    sockbase = (struct nn_sockbase*) self;

    nn_cp_lock (&sockbase->cp);

    /*  The call may have been interrupted by a signal and restarted afterwards.
        In such case don't do the following stuff again. */
    if (!(sockbase->flags & NN_SOCK_FLAG_CLOSING)) {

        /*  Mark the socket as being in process of shutting down. */
        sockbase->flags |= NN_SOCK_FLAG_CLOSING;

        /*  Close sndfd and rcvfd. This should make any current select/poll
            using SNDFD and/or RCVFD exit. */
        if (!(sockbase->vfptr->flags & NN_SOCKBASE_FLAG_NORECV)) {
            nn_efd_term (&sockbase->rcvfd);
            memset (&sockbase->rcvfd, 0xcd, sizeof (sockbase->rcvfd));
        }
        if (!(sockbase->vfptr->flags & NN_SOCKBASE_FLAG_NOSEND)) {
            nn_efd_term (&sockbase->sndfd);
            memset (&sockbase->sndfd, 0xcd, sizeof (sockbase->sndfd));
        }

        /*  Create a semaphore to wait on for all endpoints to terminate. */
        nn_sem_init (&sockbase->termsem);

        /*  Ask all the associated endpoints to terminate. Call to nn_ep_close
            can actually deallocate the endpoint, so take care to get pointer
            to the next endpoint before the call. */
        it = nn_list_begin (&sockbase->eps);
        while (it != nn_list_end (&sockbase->eps)) {
            ep = nn_cont (it, struct nn_epbase, item);
            it = nn_list_next (&sockbase->eps, it);
            rc = nn_ep_close ((void*) ep);
            errnum_assert (rc == 0 || rc == -EINPROGRESS, -rc);
        }
    }
Example #18
int nn_dist_send (struct nn_dist *self, struct nn_msg *msg,
    struct nn_pipe *exclude)
{
    int rc;
    struct nn_list_item *it;
    struct nn_dist_data *data;
    struct nn_msg copy;

    /*  TODO: We can optimise for the case when there's only one outbound
        pipe here. No message copying is needed in such case. */

    /*  In the specific case when there are no outbound pipes, there's nowhere
        to send the message to. Deallocate it. */
    if (nn_slow (self->count) == 0) {
        nn_msg_term (msg);
        return 0;
    }

    /*  Send the message to all the subscribers. */
    nn_msg_bulkcopy_start (msg, self->count);
    it = nn_list_begin (&self->pipes);
    while (it != nn_list_end (&self->pipes)) {
       data = nn_cont (it, struct nn_dist_data, item);
       nn_msg_bulkcopy_cp (&copy, msg);
       if (nn_fast (data->pipe == exclude)) {
           nn_msg_term (&copy);
       }
       else {
           rc = nn_pipe_send (data->pipe, &copy);
           errnum_assert (rc >= 0, -rc);
           if (rc & NN_PIPE_RELEASE) {
               --self->count;
               it = nn_list_erase (&self->pipes, it);
               continue;
           }
       }
       it = nn_list_next (&self->pipes, it);
    }
    nn_msg_term (msg);

    return 0;
}
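nn_dist_send shows how to remove items while iterating: nn_list_erase returns the item that follows the erased one, so the loop continues from that return value instead of calling nn_list_next. A minimal sketch of the same pattern, with my_node and my_remove_flagged as illustrative names outside nanomsg:

/*  Sketch only: erase-during-iteration over an intrusive list. */
#include "utils/list.h"
#include "utils/cont.h"

struct my_node {
    struct nn_list_item item;
    int flagged;
};

static void my_remove_flagged (struct nn_list *list)
{
    struct nn_list_item *it;
    struct my_node *node;

    it = nn_list_begin (list);
    while (it != nn_list_end (list)) {
        node = nn_cont (it, struct my_node, item);
        if (node->flagged) {
            /*  Erase returns the following item; don't advance separately. */
            it = nn_list_erase (list, it);
            nn_list_item_term (&node->item);
            continue;
        }
        it = nn_list_next (list, it);
    }
}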
Example #19
struct nn_transport *nn_global_transport (int id)
{
    struct nn_transport *tp;
    struct nn_list_item *it;

    /*  Find the specified protocol. */
    tp = NULL;
    nn_glock_lock ();
    for (it = nn_list_begin (&self.transports);
          it != nn_list_end (&self.transports);
          it = nn_list_next (&self.transports, it)) {
        tp = nn_cont (it, struct nn_transport, item);
        if (tp->id == id)
            break;
        tp = NULL;
    }
    nn_glock_unlock ();

    return tp;
}
Example #20
static int nn_inproc_connect (const char *addr, void *hint,
    struct nn_epbase **epbase)
{
    struct nn_list_item *it;
    struct nn_cinproc *cinproc;
    struct nn_binproc *binproc;

    nn_mutex_lock (&self.sync);

    /*  Insert the entry into the endpoint repository. */
    cinproc = nn_cinproc_create (hint);
    nn_list_insert (&self.connected, &cinproc->item,
        nn_list_end (&self.connected));

    /*  During this process a pipe may be created. */
    for (it = nn_list_begin (&self.bound);
          it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        binproc = nn_cont (it, struct nn_binproc, item);
        if (strncmp (addr, nn_binproc_getaddr (binproc),
              NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_epbase_ispeer (&cinproc->epbase, binproc->protocol))
                break;

            ++binproc->connects;
            nn_cinproc_connect (cinproc, binproc);
            break;
        }
    }

    nn_assert (epbase);
    *epbase = &cinproc->epbase;
    nn_mutex_unlock (&self.sync);

    return 0;
}
Example #21
static void nn_sock_shutdown (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_sock *sock;
    struct nn_list_item *it;
    struct nn_ep *ep;

    sock = nn_cont (self, struct nn_sock, fsm);

    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        nn_assert (sock->state == NN_SOCK_STATE_ACTIVE ||
            sock->state == NN_SOCK_STATE_ZOMBIE);

        /*  Close sndfd and rcvfd. This should make any current
            select/poll using SNDFD and/or RCVFD exit. */
        if (!(sock->socktype->flags & NN_SOCKTYPE_FLAG_NORECV)) {
            nn_efd_term (&sock->rcvfd);
            memset (&sock->rcvfd, 0xcd, sizeof (sock->rcvfd));
        }
        if (!(sock->socktype->flags & NN_SOCKTYPE_FLAG_NOSEND)) {
            nn_efd_term (&sock->sndfd);
            memset (&sock->sndfd, 0xcd, sizeof (sock->sndfd));
        }

        /*  Ask all the associated endpoints to stop. */
        it = nn_list_begin (&sock->eps);
        while (it != nn_list_end (&sock->eps)) {
            ep = nn_cont (it, struct nn_ep, item);
            it = nn_list_next (&sock->eps, it);
            nn_list_erase (&sock->eps, &ep->item);
            nn_list_insert (&sock->sdeps, &ep->item,
                nn_list_end (&sock->sdeps));
            nn_ep_stop (ep);

        }
        sock->state = NN_SOCK_STATE_STOPPING_EPS;
        goto finish2;
    }
Example #22
int nn_sock_rm_ep (struct nn_sock *self, int eid)
{
    struct nn_list_item *it;
    struct nn_ep *ep;

    nn_ctx_enter (&self->ctx);

    /*  Find the specified endpoint. */
    ep = NULL;
    for (it = nn_list_begin (&self->eps);
          it != nn_list_end (&self->eps);
          it = nn_list_next (&self->eps, it)) {
        ep = nn_cont (it, struct nn_ep, item);
        if (ep->eid == eid)
            break;
        ep = NULL;
    }

    /*  The endpoint doesn't exist. */
    if (!ep) {
        nn_ctx_leave (&self->ctx);
        return -EINVAL;
    }

    /*  Move the endpoint from the list of active endpoints to the list
        of shutting down endpoints. */
    nn_list_erase (&self->eps, &ep->item);
    nn_list_insert (&self->sdeps, &ep->item, nn_list_end (&self->sdeps));

    /*  Ask the endpoint to stop. Actual termination may be delayed
        by the transport. */
    nn_ep_stop (ep);

    nn_ctx_leave (&self->ctx);

    return 0;
}
Example #23
void ftw_nanomsg_shutdown_active_sockets(struct ftw_socket_callsite *callsite)
{
    struct ftw_socket *sock;
    struct nn_list_item *it;

    /*  Preconditions expected of LabVIEW. */
    ftw_assert(callsite);
    nn_mutex_lock(&callsite->sync);

    ftw_debug("Shutting down sockets from Callsite %d", callsite->id);

    it = nn_list_begin(&callsite->active_sockets);
    while (it != NULL) {
        sock = nn_cont(it, struct ftw_socket, item);
        ftw_debug("Cleaning up active socket: %04d", sock->id);
        ftw_socket_close(sock);
        it = nn_list_erase(&callsite->active_sockets, it);
    }

    nn_list_term(&callsite->active_sockets);
    nn_mutex_unlock(&callsite->sync);

    return;
}
Example #24
int nn_socket (int domain, int protocol)
{
    int rc;
    int s;
    struct nn_list_item *it;
    struct nn_socktype *socktype;
    struct nn_sock *sock;

    nn_glock_lock ();

    /*  Make sure that global state is initialised. */
    nn_global_init ();

    /*  If nn_term() was already called, return ETERM. */
    if (nn_slow (self.flags & NN_CTX_FLAG_ZOMBIE)) {
        nn_global_term ();
        nn_glock_unlock ();
        errno = ETERM;
        return -1;
    }

    /*  Only AF_SP and AF_SP_RAW domains are supported. */
    if (nn_slow (domain != AF_SP && domain != AF_SP_RAW)) {
        nn_global_term ();
        nn_glock_unlock ();
        errno = EAFNOSUPPORT;
        return -1;
    }

    /*  If socket limit was reached, report error. */
    if (nn_slow (self.nsocks >= NN_MAX_SOCKETS)) {
        nn_global_term ();
        nn_glock_unlock ();
        errno = EMFILE;
        return -1;
    }

    /*  Find an empty socket slot. */
    s = self.unused [NN_MAX_SOCKETS - self.nsocks - 1];

    /*  Find the appropriate socket type. */
    for (it = nn_list_begin (&self.socktypes);
          it != nn_list_end (&self.socktypes);
          it = nn_list_next (&self.socktypes, it)) {
        socktype = nn_cont (it, struct nn_socktype, item);
        if (socktype->domain == domain && socktype->protocol == protocol) {

            /*  Instantiate the socket. */
            sock = nn_alloc (sizeof (struct nn_sock), "sock");
            alloc_assert (sock);
            rc = nn_sock_init (sock, socktype);
            if (rc < 0)
                goto error;

            /*  Adjust the global socket table. */
            self.socks [s] = sock;
            ++self.nsocks;
            nn_glock_unlock ();
            return s;
        }
    }
    rc = -EINVAL;

    /*  Specified socket type wasn't found. */
error:
    nn_global_term ();
    nn_glock_unlock ();
    errno = -rc;
    return -1;
}
Example #25
/*  Main body of the daemon. */
static void nn_tcpmuxd_routine (void *arg)
{
    int rc;
    struct nn_tcpmuxd_ctx *ctx;
    int conn;
    int pos;
    char service [256];
    struct nn_tcpmuxd_conn *tc = 0;
    size_t sz;
    ssize_t ssz;
    int i;
    struct nn_list_item *it;
    unsigned char buf [2];
    struct timeval tv;

    ctx = (struct nn_tcpmuxd_ctx*) arg;

    while (1) {

        /*  Wait for events. */
        rc = (int32_t)poll (ctx->pfd, (int32_t)ctx->pfd_size, -1);
        errno_assert (rc >= 0);
        nn_assert (rc != 0);

        /*  There's an incoming TCP connection. */
        if (ctx->pfd [0].revents & POLLIN) {

            /*  Accept the connection. */
            conn = accept (ctx->tcp_listener, NULL, NULL);
            if (conn < 0 && errno == ECONNABORTED)
                continue;
            errno_assert (conn >= 0);

            /*  Set timeouts to prevent malevolent client blocking the service.
                Note that these options are not supported on Solaris. */
            tv.tv_sec = 0;
            tv.tv_usec = 100000;
            rc = setsockopt (conn, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof (tv));
            errno_assert (rc == 0 || (rc < 0 && errno == ENOPROTOOPT));
            rc = setsockopt (conn, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof (tv));
            errno_assert (rc == 0 || (rc < 0 && errno == ENOPROTOOPT));

            /*  Read TCPMUX header. */
            pos = 0;
            while (1) {
                nn_assert (pos < sizeof (service));
                ssz = recv (conn, &service [pos], 1, 0);
                if (ssz < 0 && errno == EAGAIN) {
                    close (conn);
                    continue;
                }
                errno_assert (ssz >= 0);
                nn_assert (ssz == 1);
                service [pos] = tolower ((uint32_t)service [pos]);
                if (pos > 0 && service [pos - 1] == 0x0d &&
                      service [pos] == 0x0a)
                    break;
                ++pos;
            }
            service [pos - 1] = 0;
            
            /*  Check whether specified service is listening. */
            for (it = nn_list_begin (&ctx->conns);
                  it != nn_list_end (&ctx->conns);
                  it = nn_list_next (&ctx->conns, it)) {
                tc = nn_cont (it, struct nn_tcpmuxd_conn, item);
                if (strcmp (service, tc->service) == 0)
                    break;
            }

            /* If no one is listening, tear down the connection. */
            if (it == nn_list_end (&ctx->conns)) {
                ssz = send (conn, "-\x0d\x0a", 3, 0);
                if (ssz < 0 && errno == EAGAIN) {
                    close (conn);
                    continue;
                }
                errno_assert (ssz >= 0);
                nn_assert (ssz == 3);
                close (conn);
                continue;
            }

            /*  Send TCPMUX reply. */
            ssz = send (conn, "+\x0d\x0a", 3, 0);
            if (ssz < 0 && errno == EAGAIN) {
                close (conn);
                continue;
            }
            errno_assert (ssz >= 0);
            nn_assert (ssz == 3);
            nn_assert (tc != 0);

            /*  Pass the file descriptor to the listening process. */
            rc = nn_tcpmuxd_send_fd (tc->fd, conn);
            errno_assert (rc == 0);
        }

        /*  There's an incoming IPC connection. */
        if (ctx->pfd [1].revents & POLLIN) {

            /*  Accept the connection. */
            conn = accept (ctx->ipc_listener, NULL, NULL);
            if (conn < 0 && errno == ECONNABORTED)
                continue;
            errno_assert (conn >= 0);

            /*  Create new connection entry. */
            tc = nn_alloc (sizeof (struct nn_tcpmuxd_conn), "tcpmuxd_conn");
            nn_assert (tc);
            tc->fd = conn;
            nn_list_item_init (&tc->item); 

            /*  Adjust the pollset. We will poll for errors only. */
            ctx->pfd_size++;
            if (ctx->pfd_size > ctx->pfd_capacity) {
                ctx->pfd_capacity *= 2;
                ctx->pfd = nn_realloc (ctx->pfd,
                    sizeof (struct pollfd) * ctx->pfd_capacity);
                alloc_assert (ctx->pfd);
            }
            ctx->pfd [ctx->pfd_size - 1].fd = conn;
            ctx->pfd [ctx->pfd_size - 1].events = 0;
            ctx->pfd [ctx->pfd_size - 1].revents = 0;

            /*  Read the connection header. */
            ssz = recv (conn, buf, 2, 0);
            errno_assert (ssz >= 0);
            nn_assert (ssz == 2);
            sz = nn_gets (buf);
            tc->service = nn_alloc (sz + 1, "tcpmuxd_conn.service");
            nn_assert (tc->service);
            ssz = recv (conn, tc->service, sz, 0);
            errno_assert (ssz >= 0);
            nn_assert (ssz == sz);
            for (i = 0; i != sz; ++i)
                tc->service [i] = tolower ((uint32_t)tc->service [i]);
            tc->service [sz] = 0;
            
            /*  Add the entry to the IPC connections list. */
            nn_list_insert (&ctx->conns, &tc->item, nn_list_end (&ctx->conns));
        }

        for (i = 2; i < ctx->pfd_size; ++i) {
            if (ctx->pfd [i].revents & POLLERR ||
                  ctx->pfd [i].revents & POLLHUP) {
                nn_tcpmuxd_disconnect (ctx, i);
                i--;
            }
        }
    }
Example #26
/*  Main body of the daemon. */
static void nn_tcpmuxd_routine (void *arg)
{
    int rc;
    struct nn_tcpmuxd_ctx *ctx;
    struct pollfd pfd [2];
    int conn;
    int pos;
    char service [256];
    struct nn_tcpmuxd_conn *tc;
    size_t sz;
    ssize_t ssz;
    int i;
    struct nn_list_item *it;
    unsigned char buf [2];
    struct timeval tv;

    ctx = (struct nn_tcpmuxd_ctx*) arg;

    pfd [0].fd = ctx->tcp_listener;
    pfd [0].events = POLLIN;
    pfd [1].fd = ctx->ipc_listener;
    pfd [1].events = POLLIN;

    while (1) {

        /*  Wait for events. */
        rc = poll (pfd, 2, -1);
        errno_assert (rc >= 0);
        nn_assert (rc != 0);

        /*  There's an incoming TCP connection. */
        if (pfd [0].revents & POLLIN) {

            /*  Accept the connection. */
            conn = accept (ctx->tcp_listener, NULL, NULL);
            if (conn < 0 && errno == ECONNABORTED)
                continue;
            errno_assert (conn >= 0);
            tv.tv_sec = 0;
            tv.tv_usec = 100000;
            rc = setsockopt (conn, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof (tv));
            errno_assert (rc == 0);
            rc = setsockopt (conn, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof (tv));
            errno_assert (rc == 0);

            /*  Read TCPMUX header. */
            pos = 0;
            while (1) {
                nn_assert (pos < sizeof (service));
                ssz = recv (conn, &service [pos], 1, 0);
                if (ssz < 0 && errno == EAGAIN) {
                    close (conn);
                    continue;
                }
                errno_assert (ssz >= 0);
                nn_assert (ssz == 1);
                service [pos] = tolower (service [pos]);
                if (pos > 0 && service [pos - 1] == 0x0d &&
                      service [pos] == 0x0a)
                    break;
                ++pos;
            }
            service [pos - 1] = 0;
            
            /*  Check whether specified service is listening. */
            for (it = nn_list_begin (&ctx->conns);
                  it != nn_list_end (&ctx->conns);
                  it = nn_list_next (&ctx->conns, it)) {
                tc = nn_cont (it, struct nn_tcpmuxd_conn, item);
                if (strcmp (service, tc->service) == 0)
                    break;
            }

            /* If no one is listening, tear down the connection. */
            if (it == nn_list_end (&ctx->conns)) {
                ssz = send (conn, "-\x0d\x0a", 3, 0);
                if (ssz < 0 && errno == EAGAIN) {
                    close (conn);
                    continue;
                }
                errno_assert (ssz >= 0);
                nn_assert (ssz == 3);
                close (conn);
                continue;
            }

            /*  Send TCPMUX reply. */
            ssz = send (conn, "+\x0d\x0a", 3, 0);
            if (ssz < 0 && errno == EAGAIN) {
                close (conn);
                continue;
            }
            errno_assert (ssz >= 0);
            nn_assert (ssz == 3);

            /*  Pass the file descriptor to the listening process. */
            rc = send_fd (tc->fd, conn);
            errno_assert (rc == 0);
        }

        /*  There's an incoming IPC connection. */
        if (pfd [1].revents & POLLIN) {

            /*  Accept the connection. */
            conn = accept (ctx->ipc_listener, NULL, NULL);
            if (conn < 0 && errno == ECONNABORTED)
                continue;
            errno_assert (conn >= 0);

            /*  Create new connection entry. */
            tc = nn_alloc (sizeof (struct nn_tcpmuxd_conn), "tcpmuxd_conn");
            nn_assert (tc);
            tc->fd = conn;
            nn_list_item_init (&tc->item);    

            /*  Read the connection header. */
            ssz = recv (conn, buf, 2, 0);
            errno_assert (ssz >= 0);
            nn_assert (ssz == 2);
            sz = nn_gets (buf);
            tc->service = nn_alloc (sz + 1, "tcpmuxd_conn.service");
            nn_assert (tc->service);
            ssz = recv (conn, tc->service, sz, 0);
            errno_assert (ssz >= 0);
            nn_assert (ssz == sz);
            for (i = 0; i != sz; ++i)
                tc->service [i] = tolower (tc->service [i]);
            tc->service [sz] = 0;
            
            /*  Add the entry to the IPC connections list. */
            nn_list_insert (&ctx->conns, &tc->item, nn_list_end (&ctx->conns));
        }
    }
Example #27
static void nn_bws_shutdown (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_bws *bws;
    struct nn_list_item *it;
    struct nn_aws *aws;

    bws = nn_cont (self, struct nn_bws, fsm);

    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        if (bws->aws) {
            nn_aws_stop (bws->aws);
            bws->state = NN_BWS_STATE_STOPPING_AWS;
        }
        else {
            bws->state = NN_BWS_STATE_STOPPING_USOCK;
        }
    }
    if (nn_slow (bws->state == NN_BWS_STATE_STOPPING_AWS)) {
        if (!nn_aws_isidle (bws->aws))
            return;
        nn_aws_term (bws->aws);
        nn_free (bws->aws);
        bws->aws = NULL;
        nn_usock_stop (&bws->usock);
        bws->state = NN_BWS_STATE_STOPPING_USOCK;
    }
    if (nn_slow (bws->state == NN_BWS_STATE_STOPPING_USOCK)) {
       if (!nn_usock_isidle (&bws->usock))
            return;
        for (it = nn_list_begin (&bws->awss);
              it != nn_list_end (&bws->awss);
              it = nn_list_next (&bws->awss, it)) {
            aws = nn_cont (it, struct nn_aws, item);
            nn_aws_stop (aws);
        }
        bws->state = NN_BWS_STATE_STOPPING_AWSS;
        goto awss_stopping;
    }
    if (nn_slow (bws->state == NN_BWS_STATE_STOPPING_AWSS)) {
        nn_assert (src == NN_BWS_SRC_AWS && type == NN_AWS_STOPPED);
        aws = (struct nn_aws *) srcptr;
        nn_list_erase (&bws->awss, &aws->item);
        nn_aws_term (aws);
        nn_free (aws);

        /*  If there are no more aws state machines, we can stop the whole
            bws object. */
awss_stopping:
        if (nn_list_empty (&bws->awss)) {
            bws->state = NN_BWS_STATE_IDLE;
            nn_fsm_stopped_noevent (&bws->fsm);
            nn_epbase_stopped (&bws->epbase);
            return;
        }

        return;
    }

    nn_fsm_bad_action (bws->state, src, type);
}
Example #28
static int nn_sws_recv (struct nn_pipebase *self, struct nn_msg *msg)
{
    struct nn_sws *sws;
    struct nn_iovec iov [1];
    struct nn_list_item *it;
    struct msg_chunk *ch;
    int pos;
    size_t len;

    sws = nn_cont (self, struct nn_sws, pipebase);

    nn_assert_state (sws, NN_SWS_STATE_ACTIVE);

    switch (sws->instate) {
    case NN_SWS_INSTATE_FAILING:

        /*  Prevent further send/recv operations on this connection. */
        nn_pipebase_stop (self);
        sws->instate = NN_SWS_INSTATE_CLOSED;

        /*  Inform user this connection has been failed. */
        nn_msg_init (msg, 1);
        *(uint8_t *) nn_chunkref_data (&msg->body) = 0x7f |
            NN_SWS_FRAME_BITMASK_FIN;

        iov [0].iov_base = sws->fail_msg;
        iov [0].iov_len = sws->fail_msg_len;

        /*  TODO: Consider queueing and unconditionally sending close
            handshake rather than skipping it. */
        /*  RFC 6455 7.1.7 - try to send helpful Closing Handshake only if
            the socket is not currently sending. If it's still busy sending,
            forcibly close this connection, since it's not readily deterministic
            how much time that action could take to complete, or if the peer is
            even healthy enough to receive. Rationale: try to be nice, but be
            mindful of self-preservation! */
        if (sws->outstate == NN_SWS_OUTSTATE_IDLE) {
            nn_usock_send (sws->usock, iov, 1);
            sws->outstate = NN_SWS_OUTSTATE_SENDING;
            sws->state = NN_SWS_STATE_CLOSING_CONNECTION;
        }
        else {
            sws->state = NN_SWS_STATE_DONE;
            nn_fsm_raise (&sws->fsm, &sws->done,
                NN_SWS_RETURN_CLOSE_HANDSHAKE);
        }
        return 0;
    
    case NN_SWS_INSTATE_RECVD_CHUNKED:

        /*  This library should not deliver fragmented messages to the
            application, so it's expected that this is the final frame. */
        nn_assert (sws->is_final_frame);

        len = sws->inmsg_total_size;

        nn_msg_init (msg, len);

        /*  Reassemble incoming message scatter array. */
        pos = 0;
        while (!nn_list_empty (&sws->inmsg_array)) {
            it = nn_list_begin (&sws->inmsg_array);
            ch = nn_cont (it, struct msg_chunk, item);
            memcpy (((uint8_t*) nn_chunkref_data (&msg->body)) + pos,
                nn_chunkref_data (&ch->chunk),
                nn_chunkref_size (&ch->chunk));
            pos += nn_chunkref_size (&ch->chunk);
            nn_msg_chunk_term (ch, &sws->inmsg_array);
        }

        nn_assert (pos == len);
        nn_assert (nn_list_empty (&sws->inmsg_array));

        /*  No longer collecting scatter array of incoming msg chunks. */
        sws->continuing = 0;

        nn_sws_recv_hdr (sws);

        return 0;

    case NN_SWS_INSTATE_RECVD_CONTROL:

        /*  This library should not deliver fragmented messages to the user, so
            it's expected that this is the final frame. */
        nn_assert (sws->is_final_frame);

        len = sws->inmsg_current_chunk_len + sizeof (sws->inmsg_hdr);

        nn_msg_init (msg, len);

        /*  Relay opcode, RSV and FIN bits to the user in order to
            interpret payload. */
        memcpy (nn_chunkref_data (&msg->body),
            &sws->inhdr, sizeof (sws->inmsg_hdr));
        pos = sizeof (sws->inmsg_hdr);

        memcpy (((uint8_t*) nn_chunkref_data (&msg->body)) + pos,
            sws->inmsg_control, sws->inmsg_current_chunk_len);

        /*  If a closing handshake was just transferred to the application,
            discontinue continual, async receives. */
        if (sws->opcode == NN_WS_OPCODE_CLOSE) {
            sws->instate = NN_SWS_INSTATE_CLOSED;
        }
        else {
            nn_sws_recv_hdr (sws);
        }

        return 0;

    default:
        /*  Unexpected state. */
        nn_assert (0);
        return 0;
    }
}
Example #29
static void nn_global_submit_errors (int i, struct nn_sock *s,
    char *name, int value)
{
    /*  TODO(tailhook) dynamically allocate buffer  */
    char buf[4096];
    char *curbuf;
    int buf_left;
    char timebuf[20];
    time_t numtime;
    struct tm strtime;
    int len;
    struct nn_list_item *it;
    struct nn_ep *ep;

    if (self.statistics_socket >= 0) {
        /*  TODO(tailhook) add HAVE_GMTIME_R ifdef  */
        time(&numtime);
#ifdef NN_HAVE_GMTIME_R
        gmtime_r (&numtime, &strtime);
#else
#error
#endif
        strftime (timebuf, 20, "%Y-%m-%dT%H:%M:%S", &strtime);
        if(*s->socket_name) {
            len = sprintf (buf, "ESTP:%s:%s:socket.%s:%s: %sZ 10 %d\n",
                self.hostname, self.appname, s->socket_name, name,
                timebuf, value);
        } else {
            len = sprintf (buf, "ESTP:%s:%s:socket.%d:%s: %sZ 10 %d\n",
                self.hostname, self.appname, i, name,
                timebuf, value);
        }
        buf_left = sizeof(buf) - len;
        curbuf = buf + len;


        for (it = nn_list_begin (&s->eps);
              it != nn_list_end (&s->eps);
              it = nn_list_next (&s->eps, it)) {
            ep = nn_cont (it, struct nn_ep, item);

            if (ep->last_errno) {
#ifdef NN_HAVE_WINDOWS
                len = _snprintf_s (curbuf, buf_left, _TRUNCATE,
                    " nanomsg: Endpoint %d [%s] error: %s\n",
                    ep->eid, nn_ep_getaddr (ep), nn_strerror (ep->last_errno));
#else
                 len = snprintf (curbuf, buf_left,
                     " nanomsg: Endpoint %d [%s] error: %s\n",
                     ep->eid, nn_ep_getaddr (ep), nn_strerror (ep->last_errno));
#endif
                if (buf_left < len)
                    break;
                curbuf += len;
                buf_left -= len;
            }

        }

        (void) nn_send (self.statistics_socket,
            buf, sizeof(buf) - buf_left, NN_DONTWAIT);
    }
Example #30
static void nn_bipc_handler (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_bipc *bipc;
    struct nn_list_item *it;
    struct nn_aipc *aipc;

    bipc = nn_cont (self, struct nn_bipc, fsm);

/******************************************************************************/
/*  STOP procedure.                                                           */
/******************************************************************************/
    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        nn_aipc_stop (bipc->aipc);
        bipc->state = NN_BIPC_STATE_STOPPING_AIPC;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPC)) {
        if (!nn_aipc_isidle (bipc->aipc))
            return;
        nn_aipc_term (bipc->aipc);
        nn_free (bipc->aipc);
        bipc->aipc = NULL;
        nn_usock_stop (&bipc->usock);
        bipc->state = NN_BIPC_STATE_STOPPING_USOCK;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_USOCK)) {
       if (!nn_usock_isidle (&bipc->usock))
            return;
        for (it = nn_list_begin (&bipc->aipcs);
              it != nn_list_end (&bipc->aipcs);
              it = nn_list_next (&bipc->aipcs, it)) {
            aipc = nn_cont (it, struct nn_aipc, item);
            nn_aipc_stop (aipc);
        }
        bipc->state = NN_BIPC_STATE_STOPPING_AIPCS;
        goto aipcs_stopping;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPCS)) {
        nn_assert (src == NN_BIPC_SRC_AIPC && type == NN_AIPC_STOPPED);
        aipc = (struct nn_aipc *) srcptr;
        nn_list_erase (&bipc->aipcs, &aipc->item);
        nn_aipc_term (aipc);
        nn_free (aipc);
        
        /*  If there are no more aipc state machines, we can stop the whole
            bipc object. */
aipcs_stopping:
        if (nn_list_empty (&bipc->aipcs)) {
            bipc->state = NN_BIPC_STATE_IDLE;
            nn_fsm_stopped_noevent (&bipc->fsm);
            nn_epbase_stopped (&bipc->epbase);
            return;
        }

        return;
    }

    switch (bipc->state) {

/******************************************************************************/
/*  IDLE state.                                                               */
/******************************************************************************/
    case NN_BIPC_STATE_IDLE:
        switch (src) {

        case NN_FSM_ACTION:
            switch (type) {
            case NN_FSM_START:
                nn_bipc_start_listening (bipc);
                nn_bipc_start_accepting (bipc);
                bipc->state = NN_BIPC_STATE_ACTIVE;
                return;
            default:
                nn_fsm_bad_action (bipc->state, src, type);
            }

        default:
            nn_fsm_bad_source (bipc->state, src, type);
        }

/******************************************************************************/
/*  ACTIVE state.                                                             */
/*  The execution is yielded to the aipc state machine in this state.         */
/******************************************************************************/
    case NN_BIPC_STATE_ACTIVE:
        if (srcptr == bipc->aipc) {
            switch (type) {
            case NN_AIPC_ACCEPTED:

                /*  Move the newly created connection to the list of existing
                    connections. */
                nn_list_insert (&bipc->aipcs, &bipc->aipc->item,
                    nn_list_end (&bipc->aipcs));
                bipc->aipc = NULL;

                /*  Start waiting for a new incoming connection. */
                nn_bipc_start_accepting (bipc);

                return;

            default:
                nn_fsm_bad_action (bipc->state, src, type);
            }
        }

        /*  For all remaining events we'll assume they are coming from one
            of remaining child aipc objects. */
        nn_assert (src == NN_BIPC_SRC_AIPC);
        aipc = (struct nn_aipc*) srcptr;
        switch (type) {
        case NN_AIPC_ERROR:
            nn_aipc_stop (aipc);
            return;
        case NN_AIPC_STOPPED:
            nn_list_erase (&bipc->aipcs, &aipc->item);
            nn_aipc_term (aipc);
            nn_free (aipc);
            return;
        default:
            nn_fsm_bad_action (bipc->state, src, type);
        }

/******************************************************************************/
/*  Invalid state.                                                            */
/******************************************************************************/
    default:
        nn_fsm_bad_state (bipc->state, src, type);
    }
}