static void nn_req_stop (struct nn_sockbase *self)
{
    struct nn_req *req;

    req = nn_cont (self, struct nn_req, xreq.sockbase);

    nn_fsm_stop (&req->fsm);
}

static void nn_btcp_stop (struct nn_epbase *self)
{
    struct nn_btcp *btcp;

    btcp = nn_cont (self, struct nn_btcp, epbase);

    nn_fsm_stop (&btcp->fsm);
}
static void nn_global_term (void)
{
#if defined NN_HAVE_WINDOWS
    int rc;
#endif
    struct nn_list_item *it;
    struct nn_transport *tp;

    /*  If there are no sockets remaining, uninitialise the global context. */
    nn_assert (self.socks);
    if (self.nsocks > 0)
        return;

    /*  Stop the FSM. */
    nn_ctx_enter (&self.ctx);
    nn_fsm_stop (&self.fsm);
    nn_ctx_leave (&self.ctx);

    /*  Shut down the worker threads. */
    nn_pool_term (&self.pool);

    /*  Terminate the ctx mutex. */
    nn_ctx_term (&self.ctx);

    /*  Ask all the transports to deallocate their global resources. */
    while (!nn_list_empty (&self.transports)) {
        it = nn_list_begin (&self.transports);
        tp = nn_cont (it, struct nn_transport, item);
        if (tp->term)
            tp->term ();
        nn_list_erase (&self.transports, it);
    }

    /*  For now there's nothing to deallocate about socket types; however,
        let's remove them from the list anyway. */
    while (!nn_list_empty (&self.socktypes))
        nn_list_erase (&self.socktypes, nn_list_begin (&self.socktypes));

    /*  Final deallocation of the nn_global object itself. */
    nn_list_term (&self.socktypes);
    nn_list_term (&self.transports);
    nn_free (self.socks);

    /*  This marks the global state as uninitialised. */
    self.socks = NULL;

    /*  Shut down the memory allocation subsystem. */
    nn_alloc_term ();

    /*  On Windows, uninitialise the socket library. */
#if defined NN_HAVE_WINDOWS
    rc = WSACleanup ();
    nn_assert (rc == 0);
#endif
}
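/*  A minimal usage sketch, not part of the library: from application code
    this teardown is reached implicitly. Per the early-return guard on
    self.nsocks above, it only proceeds once the last socket has been closed.
    The calls below (nn_socket, nn_close) are the standard public nanomsg
    API; error handling is elided. */
#include <nanomsg/nn.h>
#include <nanomsg/pair.h>

static void example_open_and_close (void)
{
    int s;

    s = nn_socket (AF_SP, NN_PAIR);    /*  initialises the global state  */
    if (s < 0)
        return;
    /*  ... bind/connect, send/recv ...  */
    nn_close (s);                      /*  once no sockets remain, the global
                                           state can be uninitialised  */
}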
int nn_sock_term (struct nn_sock *self)
{
    int rc;
    int i;

    /*  Ask the state machine to start closing the socket. */
    nn_ctx_enter (&self->ctx);
    nn_fsm_stop (&self->fsm);
    nn_ctx_leave (&self->ctx);

    /*  The shutdown process has already been started, but some endpoints may
        still be alive. Wait here until they are all closed. */
    rc = nn_sem_wait (&self->termsem);
    if (nn_slow (rc == -EINTR))
        return -EINTR;
    errnum_assert (rc == 0, -rc);

    /*  The thread that posted the semaphore may still hold the ctx locked for
        a short while. By entering the context and exiting it immediately we
        can be sure that the thread in question has already left the
        context. */
    nn_ctx_enter (&self->ctx);
    nn_ctx_leave (&self->ctx);

    /*  Deallocate the resources. */
    nn_fsm_stopped_noevent (&self->fsm);
    nn_fsm_term (&self->fsm);
    nn_sem_term (&self->termsem);
    nn_list_term (&self->sdeps);
    nn_list_term (&self->eps);
    nn_clock_term (&self->clock);
    nn_ctx_term (&self->ctx);

    /*  Destroy any optsets associated with the socket. */
    for (i = 0; i != NN_MAX_TRANSPORT; ++i)
        if (self->optsets [i])
            self->optsets [i]->vfptr->destroy (self->optsets [i]);

    return 0;
}
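/*  Caller-side sketch (an assumption, not library code): the -EINTR return
    above surfaces through nn_close (), which nanomsg documents as
    restartable when interrupted by a signal. A plausible retry loop: */
#include <nanomsg/nn.h>
#include <errno.h>

static void example_close_retrying (int s)
{
    while (nn_close (s) != 0) {
        if (nn_errno () != EINTR)
            break;   /*  some other error; give up in this sketch  */
        /*  interrupted before teardown finished; call nn_close again  */
    }
}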
void nn_stcp_stop (struct nn_stcp *self)
{
    nn_fsm_stop (&self->fsm);
}
/*  Stop the socket. This will prevent new calls from acquiring a hold on the
    socket, cause endpoints to shut down, and wake any threads waiting to
    recv or send data. */
void nn_sock_stop (struct nn_sock *self)
{
    nn_ctx_enter (&self->ctx);
    nn_fsm_stop (&self->fsm);
    nn_ctx_leave (&self->ctx);
}
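/*  Sketch of the observable effect, using only the public API (the link to
    nn_sock_stop is an assumption about how nn_term () drives shutdown):
    stopping a socket wakes a thread blocked in nn_recv (), which then fails
    with ETERM. */
#include <nanomsg/nn.h>
#include <nanomsg/pair.h>
#include <errno.h>

static void example_wake_blocked_receiver (int s)
{
    char buf [64];
    int rc;

    rc = nn_recv (s, buf, sizeof (buf), 0);   /*  blocks until data or stop  */
    if (rc < 0 && nn_errno () == ETERM) {
        /*  the socket was stopped, e.g. by nn_term () in another thread  */
    }
}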
void nn_slibfabric_stop (struct nn_slibfabric *self)
{
    nn_fsm_stop (&self->fsm);
}

void nn_usock_stop (struct nn_usock *self)
{
    nn_fsm_stop (&self->fsm);
}

static void nn_cinproc_stop (void *self)
{
    struct nn_cinproc *cinproc = self;

    nn_fsm_stop (&cinproc->fsm);
}

static void nn_cipc_stop (void *self)
{
    struct nn_cipc *cipc = self;

    nn_fsm_stop (&cipc->fsm);
}

void nn_aipc_stop (struct nn_aipc *self)
{
    nn_fsm_stop (&self->fsm);
}

void nn_sws_stop (struct nn_sws *self)
{
    nn_fsm_stop (&self->fsm);
}

void nn_timer_stop (struct nn_timer *self)
{
    nn_fsm_stop (&self->fsm);
}

void nn_streamhdr_stop (struct nn_streamhdr *self)
{
    nn_fsm_stop (&self->fsm);
}

static void nn_cws_stop (void *self)
{
    struct nn_cws *cws = self;

    nn_fsm_stop (&cws->fsm);
}