/*  Release all resources held by a pipebase.  The object must already be
    in the idle state; members are torn down in reverse initialisation
    order. */
void nn_pipebase_term (struct nn_pipebase *self)
{
    nn_assert_state (self, NN_PIPEBASE_STATE_IDLE);

    nn_fsm_event_term (&self->out);
    nn_fsm_event_term (&self->in);
    nn_fsm_term (&self->fsm);
}
int nn_binproc_create (void *hint, struct nn_epbase **epbase) { int rc; struct nn_binproc *self; self = nn_alloc (sizeof (struct nn_binproc), "binproc"); alloc_assert (self); nn_ins_item_init (&self->item, &nn_binproc_vfptr, hint); nn_fsm_init_root (&self->fsm, nn_binproc_handler, nn_binproc_shutdown, nn_epbase_getctx (&self->item.epbase)); self->state = NN_BINPROC_STATE_IDLE; nn_list_init (&self->sinprocs); /* Start the state machine. */ nn_fsm_start (&self->fsm); /* Register the inproc endpoint into a global repository. */ rc = nn_ins_bind (&self->item, nn_binproc_connect); if (nn_slow (rc < 0)) { nn_list_term (&self->sinprocs); /* TODO: Now, this is ugly! We are getting the state machine into the idle state manually. How should it be done correctly? */ self->fsm.state = 1; nn_fsm_term (&self->fsm); nn_ins_item_term (&self->item); nn_free (self); return rc; } *epbase = &self->item.epbase; return 0; }
int nn_ep_init (struct nn_ep *self, int src, struct nn_sock *sock, int eid, struct nn_transport *transport, int bind, const char *addr) { int rc; nn_fsm_init (&self->fsm, nn_ep_handler, nn_ep_shutdown, src, self, &sock->fsm); self->state = NN_EP_STATE_IDLE; self->epbase = NULL; self->sock = sock; self->eid = eid; self->last_errno = 0; nn_list_item_init (&self->item); memcpy (&self->options, &sock->ep_template, sizeof(struct nn_ep_options)); /* Store the textual form of the address. */ nn_assert (strlen (addr) <= NN_SOCKADDR_MAX); strcpy (self->addr, addr); /* Create transport-specific part of the endpoint. */ if (bind) rc = transport->bind ((void*) self, &self->epbase); else rc = transport->connect ((void*) self, &self->epbase); /* Endpoint creation failed. */ if (rc < 0) { nn_list_item_term (&self->item); nn_fsm_term (&self->fsm); return rc; } return 0; }
/*  Deallocate the surveyor's private resources, then the underlying
    xsurveyor. */
static void nn_surveyor_term (struct nn_surveyor *self)
{
    nn_msg_term (&self->tosend);
    nn_timer_term (&self->timer);
    nn_fsm_term (&self->fsm);
    nn_xsurveyor_term (&self->xsurveyor);
}
/*  Release all resources held by the stream header handshaker.  The object
    must already be in the idle state.

    Consistency fix: use the nn_assert_state helper (as the other *_term
    functions in this codebase do) instead of a bare nn_assert on the state
    field, so a failed assertion reports the actual state machine state. */
void nn_streamhdr_term (struct nn_streamhdr *self)
{
    nn_assert_state (self, NN_STREAMHDR_STATE_IDLE);

    nn_fsm_event_term (&self->done);
    nn_timer_term (&self->timer);
    nn_fsm_term (&self->fsm);
}
/*  Deallocate the REQ socket's private members, then the underlying
    xreq socket. */
static void nn_req_term (struct nn_req *self)
{
    nn_timer_term (&self->timer);
    nn_msg_term (&self->reply);
    nn_msg_term (&self->request);
    nn_fsm_term (&self->fsm);
    nn_xreq_term (&self->xreq);
}
/*  Destroy an idle endpoint.  NOTE(review): epbase is dereferenced without
    a NULL check — presumably an endpoint can only reach the idle state with
    a valid transport-specific part attached; confirm against callers. */
void nn_ep_term (struct nn_ep *self)
{
    nn_assert_state (self, NN_EP_STATE_IDLE);

    /*  Let the transport dispose of its part of the endpoint first. */
    self->epbase->vfptr->destroy (self->epbase);

    nn_list_item_term (&self->item);
    nn_fsm_term (&self->fsm);
}
/*  Deallocate the REQ socket: task-related resources first, then the
    state machine, finally the underlying xreq socket. */
void nn_req_term (struct nn_req *self)
{
    nn_timer_term (&self->task.timer);
    nn_task_term (&self->task);
    nn_msg_term (&self->task.reply);
    nn_msg_term (&self->task.request);
    nn_fsm_term (&self->fsm);
    nn_xreq_term (&self->xreq);
}
/*  Release all resources owned by an idle timer: the completion event,
    the worker-side timer, both worker tasks and the state machine. */
void nn_timer_term (struct nn_timer *self)
{
    nn_assert_state (self, NN_TIMER_STATE_IDLE);

    nn_fsm_event_term (&self->done);
    nn_worker_timer_term (&self->wtimer);
    nn_worker_task_term (&self->stop_task);
    nn_worker_task_term (&self->start_task);
    nn_fsm_term (&self->fsm);
}
/*  Destroy a connected-inproc endpoint object and free its memory. */
static void nn_cinproc_destroy (void *self)
{
    struct nn_cinproc *obj = self;

    nn_list_term (&obj->sinprocs);
    nn_fsm_term (&obj->fsm);
    nn_ins_item_term (&obj->item);

    nn_free (obj);
}
/*  Release all resources held by an idle TCP session: pending messages,
    the pipebase, the protocol-header handshaker and the state machine. */
void nn_stcp_term (struct nn_stcp *self)
{
    nn_assert_state (self, NN_STCP_STATE_IDLE);

    nn_fsm_event_term (&self->done);
    nn_msg_term (&self->outmsg);
    nn_msg_term (&self->inmsg);
    nn_pipebase_term (&self->pipebase);
    nn_streamhdr_term (&self->streamhdr);
    nn_fsm_term (&self->fsm);
}
/*  Release all resources held by an idle WebSocket session: pending
    outgoing message, the incoming fragment array, the pipebase, the
    WebSocket handshaker and the state machine. */
void nn_sws_term (struct nn_sws *self)
{
    nn_assert_state (self, NN_SWS_STATE_IDLE);

    nn_fsm_event_term (&self->done);
    nn_msg_term (&self->outmsg);
    nn_msg_array_term (&self->inmsg_array);
    nn_pipebase_term (&self->pipebase);
    nn_wshdr_term (&self->wshdr);
    nn_fsm_term (&self->fsm);
}
/*  Release all resources held by an idle IPC session.

    Consistency fix: use nn_assert_state (as the sibling session term
    functions nn_stcp_term/nn_sws_term do) rather than a bare nn_assert on
    the state field, so an assertion failure reports the offending state. */
void nn_sipc_term (struct nn_sipc *self)
{
    nn_assert_state (self, NN_SIPC_STATE_IDLE);

    nn_fsm_event_term (&self->done);
    nn_msg_term (&self->outmsg);
    nn_msg_term (&self->inmsg);
    nn_pipebase_term (&self->pipebase);
    nn_streamhdr_term (&self->streamhdr);
    nn_fsm_term (&self->fsm);
}
/*  Release all resources held by an idle accepted-IPC-connection object.

    Consistency fix: use nn_assert_state (as the parallel nn_aws_term and
    nn_atcp_term do) rather than a bare nn_assert on the state field, so an
    assertion failure reports the offending state. */
void nn_aipc_term (struct nn_aipc *self)
{
    nn_assert_state (self, NN_AIPC_STATE_IDLE);

    nn_list_item_term (&self->item);
    nn_fsm_event_term (&self->done);
    nn_fsm_event_term (&self->accepted);
    nn_sipc_term (&self->sipc);
    nn_usock_term (&self->usock);
    nn_fsm_term (&self->fsm);
}
/*  Release all resources held by an idle accepted-WebSocket-connection
    object: list membership, completion events, the session, the
    underlying socket and the state machine. */
void nn_aws_term (struct nn_aws *self)
{
    nn_assert_state (self, NN_AWS_STATE_IDLE);

    nn_list_item_term (&self->item);
    nn_fsm_event_term (&self->done);
    nn_fsm_event_term (&self->accepted);
    nn_sws_term (&self->sws);
    nn_usock_term (&self->usock);
    nn_fsm_term (&self->fsm);
}
/*  Release all resources held by an idle accepted-TCP-connection object:
    list membership, completion events, the session, the underlying socket
    and the state machine. */
void nn_atcp_term (struct nn_atcp *self)
{
    nn_assert_state (self, NN_ATCP_STATE_IDLE);

    nn_list_item_term (&self->item);
    nn_fsm_event_term (&self->done);
    nn_fsm_event_term (&self->accepted);
    nn_stcp_term (&self->stcp);
    nn_usock_term (&self->usock);
    nn_fsm_term (&self->fsm);
}
/*  Release all resources held by an idle libfabric session: pending
    messages, the pipebase, the protocol-header handshaker and the state
    machine. */
void nn_slibfabric_term (struct nn_slibfabric *self)
{
    nn_assert_state (self, NN_SLIBFABRIC_STATE_IDLE);

    nn_fsm_event_term (&self->done);
    nn_msg_term (&self->outmsg);
    nn_msg_term (&self->inmsg);
    nn_pipebase_term (&self->pipebase);
    nn_streamhdr_term (&self->streamhdr);
    nn_fsm_term (&self->fsm);
}
/*  Destroy a connecting-IPC endpoint object and free its memory. */
static void nn_cipc_destroy (void *self)
{
    struct nn_cipc *obj = self;

    nn_sipc_term (&obj->sipc);
    nn_backoff_term (&obj->retry);
    nn_usock_term (&obj->usock);
    nn_fsm_term (&obj->fsm);

    nn_free (obj);
}
int nn_sock_term (struct nn_sock *self) { int rc; int i; /* NOTE: nn_sock_stop must have already been called. */ /* Some endpoints may still be alive. Here we are going to wait till they are all closed. This loop is not interruptible, because making it so would leave a partially cleaned up socket, and we don't have a way to defer resource deallocation. */ for (;;) { rc = nn_sem_wait (&self->termsem); if (nn_slow (rc == -EINTR)) continue; errnum_assert (rc == 0, -rc); break; } /* Also, wait for all holds on the socket to be released. */ for (;;) { rc = nn_sem_wait (&self->relesem); if (nn_slow (rc == -EINTR)) continue; errnum_assert (rc == 0, -rc); break; } /* Threads that posted the semaphore(s) can still have the ctx locked for a short while. By simply entering the context and exiting it immediately we can be sure that any such threads have already exited the context. */ nn_ctx_enter (&self->ctx); nn_ctx_leave (&self->ctx); /* At this point, we can be reasonably certain that no other thread has any references to the socket. */ nn_fsm_stopped_noevent (&self->fsm); nn_fsm_term (&self->fsm); nn_sem_term (&self->termsem); nn_list_term (&self->sdeps); nn_list_term (&self->eps); nn_clock_term (&self->clock); nn_ctx_term (&self->ctx); /* Destroy any optsets associated with the socket. */ for (i = 0; i != NN_MAX_TRANSPORT; ++i) if (self->optsets [i]) self->optsets [i]->vfptr->destroy (self->optsets [i]); return 0; }
/*  Destroy a connected-inproc endpoint given its epbase and free its
    memory. */
static void nn_cinproc_destroy (struct nn_epbase *self)
{
    struct nn_cinproc *obj;

    obj = nn_cont (self, struct nn_cinproc, item.epbase);

    nn_sinproc_term (&obj->sinproc);
    nn_fsm_term (&obj->fsm);
    nn_ins_item_term (&obj->item);

    nn_free (obj);
}
/*  Destroy a bound-inproc endpoint given its epbase and free its memory. */
static void nn_binproc_destroy (struct nn_epbase *self)
{
    struct nn_binproc *obj;

    obj = nn_cont (self, struct nn_binproc, item.epbase);

    nn_list_term (&obj->sinprocs);
    nn_fsm_term (&obj->fsm);
    nn_ins_item_term (&obj->item);

    nn_free (obj);
}
/*  Destroy a connecting-IPC endpoint given its epbase and free its
    memory. */
static void nn_cipc_destroy (struct nn_epbase *self)
{
    struct nn_cipc *obj;

    obj = nn_cont (self, struct nn_cipc, epbase);

    nn_sipc_term (&obj->sipc);
    nn_backoff_term (&obj->retry);
    nn_usock_term (&obj->usock);
    nn_fsm_term (&obj->fsm);
    nn_epbase_term (&obj->epbase);

    nn_free (obj);
}
/*  Destroy an idle bound-TCP endpoint given its epbase and free its
    memory.  The list of accepted connections must be empty and no
    connection may be pending. */
static void nn_btcp_destroy (struct nn_epbase *self)
{
    struct nn_btcp *obj;

    obj = nn_cont (self, struct nn_btcp, epbase);

    nn_assert_state (obj, NN_BTCP_STATE_IDLE);
    nn_list_term (&obj->atcps);
    nn_assert (obj->atcp == NULL);
    nn_usock_term (&obj->usock);
    nn_epbase_term (&obj->epbase);
    nn_fsm_term (&obj->fsm);

    nn_free (obj);
}
/*  Destroy an idle bound-WebSocket endpoint given its epbase and free its
    memory.  The list of accepted connections must be empty and no
    connection may be pending. */
static void nn_bws_destroy (struct nn_epbase *self)
{
    struct nn_bws *obj;

    obj = nn_cont (self, struct nn_bws, epbase);

    nn_assert_state (obj, NN_BWS_STATE_IDLE);
    nn_list_term (&obj->awss);
    nn_assert (obj->aws == NULL);
    nn_usock_term (&obj->usock);
    nn_epbase_term (&obj->epbase);
    nn_fsm_term (&obj->fsm);

    nn_free (obj);
}
/*  Destroy a connecting-TCP endpoint given its epbase and free its
    memory. */
static void nn_ctcp_destroy (struct nn_epbase *self)
{
    struct nn_ctcp *obj;

    obj = nn_cont (self, struct nn_ctcp, epbase);

    nn_dns_term (&obj->dns);
    nn_stcp_term (&obj->stcp);
    nn_backoff_term (&obj->retry);
    nn_usock_term (&obj->usock);
    nn_fsm_term (&obj->fsm);
    nn_epbase_term (&obj->epbase);

    nn_free (obj);
}
/*  Destroy a connecting-WebSocket endpoint object and free its memory. */
static void nn_cws_destroy (void *self)
{
    struct nn_cws *obj = self;

    nn_chunkref_term (&obj->resource);
    nn_chunkref_term (&obj->remote_host);
    nn_chunkref_term (&obj->nic);
    nn_dns_term (&obj->dns);
    nn_sws_term (&obj->sws);
    nn_backoff_term (&obj->retry);
    nn_usock_term (&obj->usock);
    nn_fsm_term (&obj->fsm);

    nn_free (obj);
}
/*  Destroy an idle bound-IPC endpoint given its epbase and free its
    memory.  The list of accepted connections must be empty and no
    connection may be pending.

    Consistency fix: use nn_assert_state (as the parallel nn_btcp_destroy
    and nn_bws_destroy do) rather than a bare nn_assert on the state field,
    so an assertion failure reports the offending state. */
static void nn_bipc_destroy (struct nn_epbase *self)
{
    struct nn_bipc *bipc;

    bipc = nn_cont (self, struct nn_bipc, epbase);

    nn_assert_state (bipc, NN_BIPC_STATE_IDLE);
    nn_list_term (&bipc->aipcs);
    nn_assert (bipc->aipc == NULL);
    nn_usock_term (&bipc->usock);
    nn_epbase_term (&bipc->epbase);
    nn_fsm_term (&bipc->fsm);

    nn_free (bipc);
}
/*  Destroy a connecting-WebSocket endpoint given its epbase and free its
    memory. */
static void nn_cws_destroy (struct nn_epbase *self)
{
    struct nn_cws *obj;

    obj = nn_cont (self, struct nn_cws, epbase);

    nn_chunkref_term (&obj->resource);
    nn_chunkref_term (&obj->remote_host);
    nn_chunkref_term (&obj->nic);
    nn_dns_term (&obj->dns);
    nn_sws_term (&obj->sws);
    nn_backoff_term (&obj->retry);
    nn_usock_term (&obj->usock);
    nn_fsm_term (&obj->fsm);
    nn_epbase_term (&obj->epbase);

    nn_free (obj);
}
int nn_sock_term (struct nn_sock *self) { int rc; int i; /* Ask the state machine to start closing the socket. */ nn_ctx_enter (&self->ctx); nn_fsm_stop (&self->fsm); nn_ctx_leave (&self->ctx); /* Shutdown process was already started but some endpoints may still alive. Here we are going to wait till they are all closed. */ rc = nn_sem_wait (&self->termsem); if (nn_slow (rc == -EINTR)) return -EINTR; errnum_assert (rc == 0, -rc); /* The thread that posted the semaphore can still have the ctx locked for a short while. By simply entering the context and exiting it immediately we can be sure that the thread in question have already exited the context. */ nn_ctx_enter (&self->ctx); nn_ctx_leave (&self->ctx); /* Deallocate the resources. */ nn_fsm_stopped_noevent (&self->fsm); nn_fsm_term (&self->fsm); nn_sem_term (&self->termsem); nn_list_term (&self->sdeps); nn_list_term (&self->eps); nn_clock_term (&self->clock); nn_ctx_term (&self->ctx); /* Destroy any optsets associated with the socket. */ for (i = 0; i != NN_MAX_TRANSPORT; ++i) if (self->optsets [i]) self->optsets [i]->vfptr->destroy (self->optsets [i]); return 0; }
/*  Release all resources held by an idle usock: the receive batch buffer,
    the four completion events, the worker tasks (cancelling any pending
    receive first), the worker fd and the state machine. */
void nn_usock_term (struct nn_usock *self)
{
    nn_assert_state (self, NN_USOCK_STATE_IDLE);

    /*  The batch buffer is allocated lazily, so it may be absent.
        NOTE(review): nn_free is project-defined; keeping the NULL guard in
        case it is not NULL-safe. */
    if (self->in.batch)
        nn_free (self->in.batch);

    nn_fsm_event_term (&self->event_error);
    nn_fsm_event_term (&self->event_received);
    nn_fsm_event_term (&self->event_sent);
    nn_fsm_event_term (&self->event_established);

    /*  A receive may still be queued on the worker; cancel it before
        tearing the task down. */
    nn_worker_cancel (self->worker, &self->task_recv);

    nn_worker_task_term (&self->task_stop);
    nn_worker_task_term (&self->task_recv);
    nn_worker_task_term (&self->task_send);
    nn_worker_task_term (&self->task_accept);
    nn_worker_task_term (&self->task_connected);
    nn_worker_task_term (&self->task_connecting);
    nn_worker_fd_term (&self->wfd);
    nn_fsm_term (&self->fsm);
}