Example #1
int nn_sock_add_ep (struct nn_sock *self, struct nn_transport *transport,
    int bind, const char *addr)
{
    int rc;
    struct nn_ep *ep;
    int eid;

    nn_ctx_enter (&self->ctx);

    /*  Instantiate the endpoint. */
    ep = nn_alloc (sizeof (struct nn_ep), "endpoint");
    rc = nn_ep_init (ep, NN_SOCK_SRC_EP, self, self->eid, transport,
        bind, addr);
    if (nn_slow (rc < 0)) {
        nn_free (ep);
        nn_ctx_leave (&self->ctx);
        return rc;
    }
    nn_ep_start (ep);

    /*  Increase the endpoint ID for the next endpoint. */
    eid = self->eid;
    ++self->eid;

    /*  Add it to the list of active endpoints. */
    nn_list_insert (&self->eps, &ep->item, nn_list_end (&self->eps));

    nn_ctx_leave (&self->ctx);

    return eid;
}
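At the public API level, the eid returned here is what nn_bind()/nn_connect() hand back to the caller and what nn_shutdown() later removes. A minimal caller-side sketch, assuming only the stock nanomsg public headers:

#include <assert.h>
#include <nanomsg/nn.h>
#include <nanomsg/pair.h>

int main (void)
{
    int s = nn_socket (AF_SP, NN_PAIR);
    assert (s >= 0);

    /*  nn_bind returns the endpoint ID produced by nn_sock_add_ep,
        not a file descriptor. */
    int eid = nn_bind (s, "inproc://demo");
    assert (eid >= 0);

    /*  Removing the endpoint by its ID goes through nn_sock_rm_ep. */
    assert (nn_shutdown (s, eid) == 0);

    return nn_close (s);
}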
Example #2
int nn_sock_getopt (struct nn_sock *self, int level, int option,
    void *optval, size_t *optvallen)
{
    int rc;

    nn_ctx_enter (&self->ctx);
    if (nn_slow (self->state == NN_SOCK_STATE_ZOMBIE)) {
        nn_ctx_leave (&self->ctx);
        return -ETERM;
    }
    rc = nn_sock_getopt_inner (self, level, option, optval, optvallen);
    nn_ctx_leave (&self->ctx);

    return rc;
}
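The function is a thin guard around nn_sock_getopt_inner: enter the context, refuse if the socket is already a zombie, delegate, leave. A sketch of the same shape with a plain pthread mutex; all names here are hypothetical stand-ins:

#include <errno.h>
#include <pthread.h>

struct obj {
    pthread_mutex_t lock;
    int terminating;    /*  plays the role of NN_SOCK_STATE_ZOMBIE  */
    int value;
};

static int obj_getopt_inner (struct obj *o, int *val)
{
    *val = o->value;
    return 0;
}

int obj_getopt (struct obj *o, int *val)
{
    int rc;

    pthread_mutex_lock (&o->lock);
    if (o->terminating) {
        pthread_mutex_unlock (&o->lock);
        return -EINVAL;    /*  the original returns nanomsg's -ETERM here  */
    }
    rc = obj_getopt_inner (o, val);
    pthread_mutex_unlock (&o->lock);
    return rc;
}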
Example #3
static void nn_global_term (void)
{
#if defined NN_HAVE_WINDOWS
    int rc;
#endif
    struct nn_list_item *it;
    struct nn_transport *tp;

    /*  If there are no sockets remaining, uninitialise the global context. */
    nn_assert (self.socks);
    if (self.nsocks > 0)
        return;

    /*  Stop the FSM. */
    nn_ctx_enter (&self.ctx);
    nn_fsm_stop (&self.fsm);
    nn_ctx_leave (&self.ctx);

    /*  Shut down the worker threads. */
    nn_pool_term (&self.pool);

    /* Terminate ctx mutex */
    nn_ctx_term (&self.ctx);

    /*  Ask all the transports to deallocate their global resources. */
    while (!nn_list_empty (&self.transports)) {
        it = nn_list_begin (&self.transports);
        tp = nn_cont (it, struct nn_transport, item);
        if (tp->term)
            tp->term ();
        nn_list_erase (&self.transports, it);
    }

    /*  For now there is nothing to deallocate for socket types; still,
        remove them from the list. */
    while (!nn_list_empty (&self.socktypes))
        nn_list_erase (&self.socktypes, nn_list_begin (&self.socktypes));

    /*  Final deallocation of the nn_global object itself. */
    nn_list_term (&self.socktypes);
    nn_list_term (&self.transports);
    nn_free (self.socks);

    /*  This marks the global state as uninitialised. */
    self.socks = NULL;

    /*  Shut down the memory allocation subsystem. */
    nn_alloc_term ();

    /*  On Windows, uninitialise the socket library. */
#if defined NN_HAVE_WINDOWS
    rc = WSACleanup ();
    nn_assert (rc == 0);
#endif
}
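The transport loop above is a generic pop-and-cleanup over an intrusive list. The same shape on a plain singly linked list, with hypothetical stand-in types:

#include <stddef.h>

/*  Hypothetical stand-in for struct nn_transport: an optional term hook
    and an intrusive next pointer. */
struct transport {
    void (*term) (void);
    struct transport *next;
};

/*  Same teardown shape as the loop above: pop each entry, run its
    cleanup hook if it has one, then unlink it from the list. */
static void transports_term (struct transport **head)
{
    while (*head != NULL) {
        struct transport *tp = *head;
        if (tp->term)
            tp->term ();
        *head = tp->next;
        /*  Entries are statically allocated in nanomsg, so nothing
            is freed here. */
    }
}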
Example #4
int nn_sock_term (struct nn_sock *self)
{
    int rc;
    int i;

    /*  NOTE: nn_sock_stop must have already been called. */

    /*  Some endpoints may still be alive.  Here we wait until they are
        all closed.  This loop is not interruptible, because making it so
        would leave a partially cleaned-up socket, and we don't have a way
        to defer resource deallocation. */
    for (;;) {
        rc = nn_sem_wait (&self->termsem);
        if (nn_slow (rc == -EINTR))
            continue;
        errnum_assert (rc == 0, -rc);
        break;
    }

    /*  Also, wait for all holds on the socket to be released.  */
    for (;;) {
        rc = nn_sem_wait (&self->relesem);
        if (nn_slow (rc == -EINTR))
            continue;
        errnum_assert (rc == 0, -rc);
        break;
    }

    /*  Threads that posted the semaphore(s) can still have the ctx locked
        for a short while. By simply entering the context and exiting it
        immediately we can be sure that any such threads have already
        exited the context. */
    nn_ctx_enter (&self->ctx);
    nn_ctx_leave (&self->ctx);

    /*  At this point, we can be reasonably certain that no other thread
        has any references to the socket. */

    nn_fsm_stopped_noevent (&self->fsm);
    nn_fsm_term (&self->fsm);
    nn_sem_term (&self->termsem);
    nn_list_term (&self->sdeps);
    nn_list_term (&self->eps);
    nn_clock_term (&self->clock);
    nn_ctx_term (&self->ctx);

    /*  Destroy any optsets associated with the socket. */
    for (i = 0; i != NN_MAX_TRANSPORT; ++i)
        if (self->optsets [i])
            self->optsets [i]->vfptr->destroy (self->optsets [i]);

    return 0;
}
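The two wait loops share one idiom: retry the semaphore wait on EINTR so that a signal cannot abort teardown half-way. Restated with POSIX semaphores (note that nn_sem_wait returns a negative errno value, while sem_wait sets errno instead):

#include <errno.h>
#include <semaphore.h>
#include <stdlib.h>

static void wait_uninterruptible (sem_t *sem)
{
    for (;;) {
        if (sem_wait (sem) == 0)
            break;              /*  semaphore acquired  */
        if (errno == EINTR)
            continue;           /*  interrupted by a signal: retry  */
        abort ();               /*  any other error is fatal, as in the original  */
    }
}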
Example #5
int nn_sock_term (struct nn_sock *self)
{
    int rc;
    int i;

    /*  Ask the state machine to start closing the socket. */
    nn_ctx_enter (&self->ctx);
    nn_fsm_stop (&self->fsm);
    nn_ctx_leave (&self->ctx);

    /*  The shutdown process has already been started, but some endpoints
        may still be alive. Here we wait until they are all closed. */
    rc = nn_sem_wait (&self->termsem);
    if (nn_slow (rc == -EINTR))
        return -EINTR;
    errnum_assert (rc == 0, -rc);

    /*  The thread that posted the semaphore can still have the ctx locked
        for a short while. By simply entering the context and exiting it
        immediately we can be sure that the thread in question has already
        exited the context. */
    nn_ctx_enter (&self->ctx);
    nn_ctx_leave (&self->ctx);

    /*  Deallocate the resources. */
    nn_fsm_stopped_noevent (&self->fsm);
    nn_fsm_term (&self->fsm);
    nn_sem_term (&self->termsem);
    nn_list_term (&self->sdeps);
    nn_list_term (&self->eps);
    nn_clock_term (&self->clock);
    nn_ctx_term (&self->ctx);

    /*  Destroy any optsets associated with the socket. */
    for (i = 0; i != NN_MAX_TRANSPORT; ++i)
        if (self->optsets [i])
            self->optsets [i]->vfptr->destroy (self->optsets [i]);

    return 0;
}
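Both versions rely on the enter-then-leave barrier described in the comment: acquiring and immediately releasing the lock proves that any thread which held it while posting the semaphore has since let go. With a bare pthread mutex the trick reduces to:

#include <pthread.h>

/*  Lock barrier: once this returns, any thread that held the lock when
    we started has released it at least once. */
static void wait_for_lock_holders (pthread_mutex_t *lock)
{
    pthread_mutex_lock (lock);
    pthread_mutex_unlock (lock);
}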
Example #6
int nn_sock_rm_ep (struct nn_sock *self, int eid)
{
    struct nn_list_item *it;
    struct nn_ep *ep;

    nn_ctx_enter (&self->ctx);

    /*  Find the specified endpoint. */
    ep = NULL;
    for (it = nn_list_begin (&self->eps);
          it != nn_list_end (&self->eps);
          it = nn_list_next (&self->eps, it)) {
        ep = nn_cont (it, struct nn_ep, item);
        if (ep->eid == eid)
            break;
        ep = NULL;
    }

    /*  The endpoint doesn't exist. */
    if (!ep) {
        nn_ctx_leave (&self->ctx);
        return -EINVAL;
    }

    /*  Move the endpoint from the list of active endpoints to the list
        of shutting down endpoints. */
    nn_list_erase (&self->eps, &ep->item);
    nn_list_insert (&self->sdeps, &ep->item, nn_list_end (&self->sdeps));

    /*  Ask the endpoint to stop. Actual termination may be delayed
        by the transport. */
    nn_ep_stop (ep);

    nn_ctx_leave (&self->ctx);

    return 0;
}
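The lookup loop uses nn_cont to recover the enclosing nn_ep from a pointer to its embedded list item. nn_cont is nanomsg's spelling of the classic container_of macro, which can be written as:

#include <stddef.h>

/*  Recover a pointer to the enclosing struct from a pointer to one of
    its members, by subtracting the member's offset within the struct. */
#define cont_of(ptr, type, member) \
    ((type*) ((char*) (ptr) - offsetof (type, member)))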
Example #7
/*  Initialize a socket.  A hold is placed on the initialized socket for
    the caller as well. */
int nn_sock_init (struct nn_sock *self, struct nn_socktype *socktype, int fd)
{
    int rc;
    int i;

    /* Make sure that at least one message direction is supported. */
    nn_assert (!(socktype->flags & NN_SOCKTYPE_FLAG_NOSEND) ||
        !(socktype->flags & NN_SOCKTYPE_FLAG_NORECV));

    /*  Create the AIO context for the SP socket. */
    nn_ctx_init (&self->ctx, nn_global_getpool (), nn_sock_onleave);

    /*  Initialise the state machine. */
    nn_fsm_init_root (&self->fsm, nn_sock_handler,
        nn_sock_shutdown, &self->ctx);
    self->state = NN_SOCK_STATE_INIT;

    /*  Open the NN_SNDFD and NN_RCVFD efds, but only if the socket type
        supports the corresponding direction. */
    if (socktype->flags & NN_SOCKTYPE_FLAG_NOSEND)
        memset (&self->sndfd, 0xcd, sizeof (self->sndfd));
    else {
        rc = nn_efd_init (&self->sndfd);
        if (nn_slow (rc < 0))
            return rc;
    }
    if (socktype->flags & NN_SOCKTYPE_FLAG_NORECV)
        memset (&self->rcvfd, 0xcd, sizeof (self->rcvfd));
    else {
        rc = nn_efd_init (&self->rcvfd);
        if (nn_slow (rc < 0)) {
            if (!(socktype->flags & NN_SOCKTYPE_FLAG_NOSEND))
                nn_efd_term (&self->sndfd);
            return rc;
        }
    }
    /*  nn_sem_init cannot fail, so there is no error path to handle here. */
    nn_sem_init (&self->termsem);
    nn_sem_init (&self->relesem);

    self->holds = 1;   /*  Caller's hold. */
    self->flags = 0;
    nn_clock_init (&self->clock);
    nn_list_init (&self->eps);
    nn_list_init (&self->sdeps);
    self->eid = 1;

    /*  Default values for NN_SOL_SOCKET options. */
    self->linger = 1000;
    self->sndbuf = 128 * 1024;
    self->rcvbuf = 128 * 1024;
    self->rcvmaxsize = 1024 * 1024;
    self->sndtimeo = -1;
    self->rcvtimeo = -1;
    self->reconnect_ivl = 100;
    self->reconnect_ivl_max = 0;
    self->ep_template.sndprio = 8;
    self->ep_template.rcvprio = 8;
    self->ep_template.ipv4only = 1;

    /*  Initialise statistics entries. */
    self->statistics.established_connections = 0;
    self->statistics.accepted_connections = 0;
    self->statistics.dropped_connections = 0;
    self->statistics.broken_connections = 0;
    self->statistics.connect_errors = 0;
    self->statistics.bind_errors = 0;
    self->statistics.accept_errors = 0;

    self->statistics.messages_sent = 0;
    self->statistics.messages_received = 0;
    self->statistics.bytes_sent = 0;
    self->statistics.bytes_received = 0;

    self->statistics.current_connections = 0;
    self->statistics.inprogress_connections = 0;
    self->statistics.current_snd_priority = 0;
    self->statistics.current_ep_errors = 0;

    /*  The name buffer is comfortably large enough to hold the decimal
        representation of the fd. */
    sprintf (self->socket_name, "%d", fd);

    /*  The transport-specific options are not initialised immediately,
        rather, they are allocated later on when needed. */
    for (i = 0; i != NN_MAX_TRANSPORT; ++i)
        self->optsets [i] = NULL;

    /*  Create the specific socket type itself. */
    rc = socktype->create ((void*) self, &self->sockbase);
    errnum_assert (rc == 0, -rc);
    self->socktype = socktype;

    /*  Launch the state machine. */
    nn_ctx_enter (&self->ctx);
    nn_fsm_start (&self->fsm);
    nn_ctx_leave (&self->ctx);

    return 0;
}
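The manual unwinding of sndfd when rcvfd initialisation fails is the pattern usually expressed as a goto cleanup chain. A compact sketch with hypothetical sub-objects:

struct part { int dummy; };
struct obj { struct part a; struct part b; };

static int part_init (struct part *p) { p->dummy = 0; return 0; }
static void part_term (struct part *p) { (void) p; }

int obj_init (struct obj *self)
{
    int rc;

    rc = part_init (&self->a);
    if (rc < 0)
        goto err_a;
    rc = part_init (&self->b);
    if (rc < 0)
        goto err_b;
    return 0;

err_b:
    part_term (&self->a);    /*  undo everything initialised before b  */
err_a:
    return rc;
}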
Example #8
int nn_sock_recv (struct nn_sock *self, struct nn_msg *msg, int flags)
{
    int rc;
    uint64_t deadline;
    uint64_t now;
    int timeout;

    /*  Some socket types cannot be used for receiving messages. */
    if (nn_slow (self->socktype->flags & NN_SOCKTYPE_FLAG_NORECV))
        return -ENOTSUP;

    nn_ctx_enter (&self->ctx);

    /*  Compute the deadline for RCVTIMEO timer. */
    if (self->rcvtimeo < 0) {
        deadline = -1;
        timeout = -1;
    }
    else {
        deadline = nn_clock_now (&self->clock) + self->rcvtimeo;
        timeout = self->rcvtimeo;
    }

    while (1) {

        switch (self->state) {
        case NN_SOCK_STATE_ACTIVE:
        case NN_SOCK_STATE_INIT:
             break;

        case NN_SOCK_STATE_ZOMBIE:
            /*  If nn_term() was already called, return ETERM. */
            nn_ctx_leave (&self->ctx);
            return -ETERM;

        case NN_SOCK_STATE_STOPPING_EPS:
        case NN_SOCK_STATE_STOPPING:
        case NN_SOCK_STATE_FINI:
            /*  Socket closed or closing.  Arguably something else should
                be returned here; recvmsg(2), for example, returns no data
                in this case, just like read(2).  Index-based file
                descriptors are a further problem: an FD can be reused, so
                technically the outstanding operation may refer to some
                other socket entirely. */
            nn_ctx_leave (&self->ctx);
            return -EBADF;
        }

        /*  Try to receive the message in a non-blocking way. */
        rc = self->sockbase->vfptr->recv (self->sockbase, msg);
        if (nn_fast (rc == 0)) {
            nn_ctx_leave (&self->ctx);
            return 0;
        }
        nn_assert (rc < 0);

        /*  Any unexpected error is forwarded to the caller. */
        if (nn_slow (rc != -EAGAIN)) {
            nn_ctx_leave (&self->ctx);
            return rc;
        }

        /*  If the message cannot be received at the moment and the recv call
            is non-blocking, return immediately. */
        if (nn_fast (flags & NN_DONTWAIT)) {
            nn_ctx_leave (&self->ctx);
            return -EAGAIN;
        }

        /*  With blocking recv, wait until new pipes become available
            for receiving. */
        nn_ctx_leave (&self->ctx);
        rc = nn_efd_wait (&self->rcvfd, timeout);
        if (nn_slow (rc == -ETIMEDOUT))
            return -ETIMEDOUT;
        if (nn_slow (rc == -EINTR))
            return -EINTR;
        if (nn_slow (rc == -EBADF))
            return -EBADF;
        errnum_assert (rc == 0, rc);
        nn_ctx_enter (&self->ctx);
        /*  Double-check whether pipes are still available for receiving. */
        if (!nn_efd_wait (&self->rcvfd, 0)) {
            self->flags |= NN_SOCK_FLAG_IN;
        }

        /*  If needed, re-compute the timeout to reflect the time that has
            already elapsed. */
        if (self->rcvtimeo >= 0) {
            now = nn_clock_now (&self->clock);
            timeout = (int) (now > deadline ? 0 : deadline - now);
        }
    }
}
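The deadline/timeout bookkeeping is worth isolating: the deadline is fixed once, and the per-round timeout is recomputed from it so time already spent waiting is not counted twice. A self-contained restatement, with now_ms() as a hypothetical stand-in for nn_clock_now():

#include <stdint.h>
#include <time.h>

/*  Monotonic clock in milliseconds, standing in for nn_clock_now. */
static uint64_t now_ms (void)
{
    struct timespec ts;
    clock_gettime (CLOCK_MONOTONIC, &ts);
    return (uint64_t) ts.tv_sec * 1000 + (uint64_t) ts.tv_nsec / 1000000;
}

/*  Per-round timeout derived from a fixed deadline, so time already
    spent waiting in earlier rounds is not counted again. */
static int remaining_timeout (int total_timeo, uint64_t deadline)
{
    uint64_t now;

    if (total_timeo < 0)
        return -1;                    /*  infinite wait  */
    now = now_ms ();
    return (int) (now > deadline ? 0 : deadline - now);
}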
Example #9
/*  Stop the socket.  This will prevent new calls from acquiring a
    hold on the socket, cause endpoints to shut down, and wake any
    threads waiting to recv or send data. */
void nn_sock_stop (struct nn_sock *self)
{
    nn_ctx_enter (&self->ctx);
    nn_fsm_stop (&self->fsm);
    nn_ctx_leave (&self->ctx);
}
Example #10
void nn_sock_zombify (struct nn_sock *self)
{
    nn_ctx_enter (&self->ctx);
    nn_fsm_action (&self->fsm, NN_SOCK_ACTION_ZOMBIFY);
    nn_ctx_leave (&self->ctx);
}
Example #11
int nn_sock_recv (struct nn_sock *self, struct nn_msg *msg, int flags)
{
    int rc;
    uint64_t deadline;
    uint64_t now;
    int timeout;

    /*  Some socket types cannot be used for receiving messages. */
    if (nn_slow (self->socktype->flags & NN_SOCKTYPE_FLAG_NORECV))
        return -ENOTSUP;

    nn_ctx_enter (&self->ctx);

    /*  Compute the deadline for RCVTIMEO timer. */
    if (self->rcvtimeo < 0) {
        deadline = -1;
        timeout = -1;
    }
    else {
        deadline = nn_clock_now (&self->clock) + self->rcvtimeo;
        timeout = self->rcvtimeo;
    }

    while (1) {

        /*  If nn_term() was already called, return ETERM. */
        if (nn_slow (self->state == NN_SOCK_STATE_ZOMBIE)) {
            nn_ctx_leave (&self->ctx);
            return -ETERM;
        }

        /*  Try to receive the message in a non-blocking way. */
        rc = self->sockbase->vfptr->recv (self->sockbase, msg);
        if (nn_fast (rc == 0)) {
            nn_ctx_leave (&self->ctx);
            return 0;
        }
        nn_assert (rc < 0);

        /*  Any unexpected error is forwarded to the caller. */
        if (nn_slow (rc != -EAGAIN)) {
            nn_ctx_leave (&self->ctx);
            return rc;
        }

        /*  If the message cannot be received at the moment and the recv call
            is non-blocking, return immediately. */
        if (nn_fast (flags & NN_DONTWAIT)) {
            nn_ctx_leave (&self->ctx);
            return -EAGAIN;
        }

        /*  With blocking recv, wait until new pipes become available
            for receiving. */
        nn_ctx_leave (&self->ctx);
        rc = nn_efd_wait (&self->rcvfd, timeout);
        if (nn_slow (rc == -ETIMEDOUT))
            return -EAGAIN;
        if (nn_slow (rc == -EINTR))
            return -EINTR;
        errnum_assert (rc == 0, rc);
        nn_ctx_enter (&self->ctx);
        self->flags |= NN_SOCK_FLAG_IN;

        /*  If needed, re-compute the timeout to reflect the time that has
            already elapsed. */
        if (self->rcvtimeo >= 0) {
            now = nn_clock_now (&self->clock);
            timeout = (int) (now > deadline ? 0 : deadline - now);
        }
    }
}
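Seen through the public API, the -EAGAIN path is what a caller observes with NN_DONTWAIT. A minimal sketch against the stock nanomsg headers:

#include <assert.h>
#include <errno.h>
#include <nanomsg/nn.h>
#include <nanomsg/pair.h>

int main (void)
{
    char buf [256];
    int s = nn_socket (AF_SP, NN_PAIR);
    assert (s >= 0);
    assert (nn_bind (s, "inproc://demo") >= 0);

    /*  Nothing has been sent, so a non-blocking recv fails immediately
        with EAGAIN rather than blocking until RCVTIMEO expires. */
    int n = nn_recv (s, buf, sizeof (buf), NN_DONTWAIT);
    assert (n < 0 && nn_errno () == EAGAIN);

    return nn_close (s);
}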
Example #12
static void nn_worker_routine (void *arg)
{
    int32_t rc,pevent;
    struct nn_worker *self;
    struct nn_poller_hndl *phndl;
    struct nn_timerset_hndl *thndl;
    struct nn_queue tasks;
    struct nn_queue_item *item;
    struct nn_worker_task *task;
    struct nn_worker_fd *fd;
    struct nn_worker_timer *timer;
    PostMessage("nn_worker_routine started\n");
    self = (struct nn_worker*) arg;
    while ( 1 ) //  Infinite loop. It will be interrupted only when the object is shut down.
    {
        // Wait for new events and/or timeouts.
        rc = nn_poller_wait(&self->poller,nn_timerset_timeout (&self->timerset));
        errnum_assert(rc == 0, -rc);
        while ( 1 ) // Process all expired timers
        {
            rc = nn_timerset_event(&self->timerset, &thndl);
            if ( rc == -EAGAIN )
                break;
            //PostMessage("nn_worker process expired user\n");
            errnum_assert(rc == 0, -rc);
            timer = nn_cont(thndl, struct nn_worker_timer, hndl);
            nn_ctx_enter(timer->owner->ctx);
            nn_fsm_feed(timer->owner,-1,NN_WORKER_TIMER_TIMEOUT,timer);
            nn_ctx_leave(timer->owner->ctx);
        }
        while ( 1 ) // Process all events from the poller
        {
            rc = nn_poller_event(&self->poller,&pevent,&phndl); //  Get next poller event, such as IN or OUT
            if ( nn_slow(rc == -EAGAIN) )
                break;
            //PostMessage("nn_worker process all events from the poller\n");
            if ( phndl == &self->efd_hndl ) // If there are any new incoming worker tasks, process them
            {
                nn_assert (pevent == NN_POLLER_IN);
                //  Make a local copy of the task queue. This way the
                //  application threads are not blocked and can post new
                //  tasks while the existing tasks are being processed.
                //  Also, new tasks can be posted from within task handlers.
                nn_mutex_lock(&self->sync);
                nn_efd_unsignal(&self->efd);
                memcpy(&tasks,&self->tasks,sizeof(tasks));
                nn_queue_init(&self->tasks);
                nn_mutex_unlock(&self->sync);
                while ( 1 )
                {
                    item = nn_queue_pop(&tasks); //  Next worker task
                    if ( nn_slow(!item) )
                        break;
                    //PostMessage("nn_worker next worker task\n");
                    if ( nn_slow(item == &self->stop) ) //  If the worker thread is asked to stop, do so
                    {
                        nn_queue_term(&tasks);
                        return;
                    }
                    // It's a user-defined task. Notify the user that it has arrived in the worker thread
                    //PostMessage("nn_worker user defined task\n");
                    task = nn_cont(item,struct nn_worker_task,item);
                    nn_ctx_enter(task->owner->ctx);
                    nn_fsm_feed(task->owner,task->src,NN_WORKER_TASK_EXECUTE,task);
                    nn_ctx_leave (task->owner->ctx);
                }
                nn_queue_term (&tasks);
                continue;
            }
            PostMessage("nn_worker true i/o, invoke handler\n");
            fd = nn_cont(phndl,struct nn_worker_fd,hndl); // It's a true I/O event. Invoke the handler
            PostMessage("nn_worker true i/o, fd.%p\n",fd);
            nn_ctx_enter(fd->owner->ctx);
            PostMessage("nn_worker true i/o, after nn_ctx_enter\n");
            nn_fsm_feed(fd->owner,fd->src,pevent,fd);
            PostMessage("nn_worker true i/o, after nn_fsm_feed leave.%p\n",fd->owner->ctx);
            nn_ctx_leave(fd->owner->ctx);
            PostMessage("nn_worker true i/o, after nn_ctx_leave\n");
        }
    }
}
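The task branch hinges on grabbing the whole queue under the lock and processing a local copy unlocked. Reduced to its shape with hypothetical types:

#include <pthread.h>
#include <stddef.h>

struct task { struct task *next; };

struct inbox {
    pthread_mutex_t sync;
    struct task *head;
};

static void process (struct task *t) { (void) t; /* handler body */ }

static void drain (struct inbox *in)
{
    struct task *local, *next;

    //  Detach the whole list while holding the lock...
    pthread_mutex_lock (&in->sync);
    local = in->head;
    in->head = NULL;
    pthread_mutex_unlock (&in->sync);

    //  ...then run the handlers unlocked, so posting threads are never
    //  blocked and handlers may safely post new tasks.
    while (local) {
        next = local->next;
        process (local);
        local = next;
    }
}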