/*  Deallocate all global resources of the library. Called on socket close;
    it is a no-op while any sockets remain open -- the last socket to close
    triggers the actual teardown. */
static void nn_global_term (void)
{
#if defined NN_HAVE_WINDOWS
    int rc;
#endif
    struct nn_list_item *it;
    struct nn_transport *tp;

    /*  If there are no sockets remaining, uninitialise the global context. */
    nn_assert (self.socks);
    if (self.nsocks > 0)
        return;

    /*  Stop the FSM. The ctx lock must be held while manipulating it. */
    nn_ctx_enter (&self.ctx);
    nn_fsm_stop (&self.fsm);
    nn_ctx_leave (&self.ctx);

    /*  Shut down the worker threads. */
    nn_pool_term (&self.pool);

    /*  Terminate ctx mutex. */
    nn_ctx_term (&self.ctx);

    /*  Ask all the transports to deallocate their global resources.
        A transport may have no term function; skip the call in that case. */
    while (!nn_list_empty (&self.transports)) {
        it = nn_list_begin (&self.transports);
        tp = nn_cont (it, struct nn_transport, item);
        if (tp->term)
            tp->term ();
        nn_list_erase (&self.transports, it);
    }

    /*  For now there's nothing to deallocate about socket types, however,
        let's remove them from the list anyway. */
    while (!nn_list_empty (&self.socktypes))
        nn_list_erase (&self.socktypes, nn_list_begin (&self.socktypes));

    /*  Final deallocation of the nn_global object itself. */
    nn_list_term (&self.socktypes);
    nn_list_term (&self.transports);
    nn_free (self.socks);

    /*  This marks the global state as uninitialised. */
    self.socks = NULL;

    /*  Shut down the memory allocation subsystem. */
    nn_alloc_term ();

    /*  On Windows, uninitialise the socket library. */
#if defined NN_HAVE_WINDOWS
    rc = WSACleanup ();
    nn_assert (rc == 0);
#endif
}
/*  Advance the priority list past the current pipe. If 'release' is non-zero
    the current pipe is removed from its slot altogether; otherwise it is only
    skipped (round-robin rotation within the slot). */
void nn_priolist_advance (struct nn_priolist *self, int release)
{
    struct nn_priolist_slot *slot;
    struct nn_list_item *it;

    /*  'current' is a 1-based priority index; -1 means the priolist is
        empty, in which case advancing makes no sense. */
    nn_assert (self->current > 0);
    slot = &self->slots [self->current - 1];

    /*  Move slot's current pointer to the next pipe. nn_list_erase/next
        return NULL past the end of the list, so wrap to the beginning. */
    if (release)
        it = nn_list_erase (&slot->pipes, &slot->current->item);
    else
        it = nn_list_next (&slot->pipes, &slot->current->item);
    if (!it)
        it = nn_list_begin (&slot->pipes);
    slot->current = nn_cont (it, struct nn_priolist_data, item);

    /*  If there are no more pipes in this slot, find a non-empty slot with
        lower priority (higher numeric value). If none exists, mark the whole
        priolist as empty with the -1 sentinel.
        NOTE(review): the 'current' pointer of the newly found slot is not
        updated here -- presumably it still holds the value set when that
        slot was last activated; confirm against nn_priolist_activate. */
    while (nn_list_empty (&slot->pipes)) {
        ++self->current;
        if (self->current > NN_PRIOLIST_SLOTS) {
            self->current = -1;
            return;
        }
        slot = &self->slots [self->current - 1];
    }
}
/*  Add a pipe to the priority list, making it eligible for selection.
    'data' carries the pipe's priolist bookkeeping, including its priority
    (1 = highest). */
void nn_priolist_activate (struct nn_priolist *self, struct nn_pipe *pipe,
    struct nn_priolist_data *data)
{
    struct nn_priolist_slot *slot;
    int slot_was_empty;

    slot = &self->slots [data->priority - 1];
    slot_was_empty = nn_list_empty (&slot->pipes);

    /*  Append the pipe to its priority slot. */
    nn_list_insert (&slot->pipes, &data->item, nn_list_end (&slot->pipes));

    /*  If the slot already held pipes, neither the slot's current pipe nor
        the priolist's current slot changes. */
    if (!slot_was_empty)
        return;

    /*  First pipe in the slot becomes the slot's current pipe. */
    slot->current = data;

    /*  This slot becomes the active one if the priolist was empty (-1
        sentinel) or if the new pipe outranks the currently active slot
        (numerically smaller priority wins). */
    if (self->current == -1 || self->current > data->priority)
        self->current = data->priority;
}
static void nn_ctx_term (void) { #if defined NN_HAVE_WINDOWS int rc; #endif struct nn_list_item *it; /* If there are no sockets remaining, uninitialise the global context. */ nn_assert (self.socks); if (self.nsocks > 0) return; #if defined NN_LATENCY_MONITOR nn_latmon_term (); #endif /* Ask all the transport to deallocate their global resources. */ while (!nn_list_empty (&self.transports)) { it = nn_list_begin (&self.transports); nn_cont (it, struct nn_transport, list)->term (); nn_list_erase (&self.transports, it); } /* For now there's nothing to deallocate about socket types, however, let's remove them from the list anyway. */ while (!nn_list_empty (&self.socktypes)) nn_list_erase (&self.socktypes, nn_list_begin (&self.socktypes)); /* Final deallocation of the nn_ctx object itself. */ nn_list_term (&self.socktypes); nn_list_term (&self.transports); nn_free (self.socks); /* This marks the global state as uninitialised. */ self.socks = NULL; /* Shut down the memory allocation subsystem. */ nn_alloc_term (); /* On Windows, uninitialise the socket library. */ #if defined NN_HAVE_WINDOWS rc = WSACleanup (); nn_assert (rc == 0); #endif }
static void nn_btcp_shutdown (struct nn_fsm *self, int src, int type, void *srcptr) { struct nn_btcp *btcp; struct nn_list_item *it; struct nn_atcp *atcp; btcp = nn_cont (self, struct nn_btcp, fsm); if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) { nn_atcp_stop (btcp->atcp); btcp->state = NN_BTCP_STATE_STOPPING_ATCP; } if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_ATCP)) { if (!nn_atcp_isidle (btcp->atcp)) return; nn_atcp_term (btcp->atcp); nn_free (btcp->atcp); btcp->atcp = NULL; nn_usock_stop (&btcp->usock); btcp->state = NN_BTCP_STATE_STOPPING_USOCK; } if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_USOCK)) { if (!nn_usock_isidle (&btcp->usock)) return; for (it = nn_list_begin (&btcp->atcps); it != nn_list_end (&btcp->atcps); it = nn_list_next (&btcp->atcps, it)) { atcp = nn_cont (it, struct nn_atcp, item); nn_atcp_stop (atcp); } btcp->state = NN_BTCP_STATE_STOPPING_ATCPS; goto atcps_stopping; } if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_ATCPS)) { nn_assert (src == NN_BTCP_SRC_ATCP && type == NN_ATCP_STOPPED); atcp = (struct nn_atcp *) srcptr; nn_list_erase (&btcp->atcps, &atcp->item); nn_atcp_term (atcp); nn_free (atcp); /* If there are no more atcp state machines, we can stop the whole btcp object. */ atcps_stopping: if (nn_list_empty (&btcp->atcps)) { btcp->state = NN_BTCP_STATE_IDLE; nn_fsm_stopped_noevent (&btcp->fsm); nn_epbase_stopped (&btcp->epbase); return; } return; } nn_fsm_bad_action(btcp->state, src, type); }
/*  Shutdown sequence for the bound IPC endpoint: stop the pending accept
    (aipc), then the listening usock, then every established aipc
    connection; once all children are idle, report the whole object as
    stopped. */
static void nn_bipc_shutdown (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_bipc *bipc;
    struct nn_list_item *it;
    struct nn_aipc *aipc;

    bipc = nn_cont (self, struct nn_bipc, fsm);

    /*  STOP request: begin by stopping the connection currently being
        accepted. */
    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        nn_aipc_stop (bipc->aipc);
        bipc->state = NN_BIPC_STATE_STOPPING_AIPC;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPC)) {
        /*  Wait for subsequent events until the accept FSM goes idle. */
        if (!nn_aipc_isidle (bipc->aipc))
            return;
        nn_aipc_term (bipc->aipc);
        nn_free (bipc->aipc);
        bipc->aipc = NULL;
        /*  Next, bring down the listening socket. */
        nn_usock_stop (&bipc->usock);
        bipc->state = NN_BIPC_STATE_STOPPING_USOCK;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_USOCK)) {
        if (!nn_usock_isidle (&bipc->usock))
            return;
        /*  Ask all established connections to shut down. */
        for (it = nn_list_begin (&bipc->aipcs);
              it != nn_list_end (&bipc->aipcs);
              it = nn_list_next (&bipc->aipcs, it)) {
            aipc = nn_cont (it, struct nn_aipc, item);
            nn_aipc_stop (aipc);
        }
        bipc->state = NN_BIPC_STATE_STOPPING_AIPCS;
        /*  The list may already be empty; jump straight to the completion
            check below. */
        goto aipcs_stopping;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPCS)) {
        nn_assert (src == NN_BIPC_SRC_AIPC && type == NN_AIPC_STOPPED);
        aipc = (struct nn_aipc *) srcptr;
        nn_list_erase (&bipc->aipcs, &aipc->item);
        nn_aipc_term (aipc);
        nn_free (aipc);

        /*  If there are no more aipc state machines, we can stop the whole
            bipc object. */
aipcs_stopping:
        if (nn_list_empty (&bipc->aipcs)) {
            bipc->state = NN_BIPC_STATE_IDLE;
            nn_fsm_stopped_noevent (&bipc->fsm);
            nn_epbase_stopped (&bipc->epbase);
            return;
        }
        return;
    }

    /*  Any other state during shutdown is invalid. */
    nn_fsm_bad_state(bipc->state, src, type);
}
/*  Deallocate an entire message array: destroy each chunk, then the list
    object itself. */
static void nn_msg_array_term (struct nn_list *msg_array)
{
    struct msg_chunk *chunk;

    /*  nn_msg_chunk_term unlinks the chunk from the array, so repeatedly
        destroying the head element drains the whole list. */
    while (!nn_list_empty (msg_array)) {
        chunk = nn_cont (nn_list_begin (msg_array), struct msg_chunk, item);
        nn_msg_chunk_term (chunk, msg_array);
    }

    nn_list_term (msg_array);
}
/* Start receiving new frame. */ static int nn_sws_recv_hdr (struct nn_sws *self) { if (!self->continuing) { nn_assert (nn_list_empty (&self->inmsg_array)); self->inmsg_current_chunk_buf = NULL; self->inmsg_chunks = 0; self->inmsg_current_chunk_len = 0; self->inmsg_total_size = 0; } memset (self->inmsg_control, 0, sizeof (self->inmsg_control)); memset (self->inhdr, 0, NN_SWS_FRAME_MAX_HDR_LEN); self->instate = NN_SWS_INSTATE_RECV_HDR; nn_usock_recv (self->usock, self->inhdr, NN_SWS_FRAME_SIZE_INITIAL, NULL); return 0; }
/*  Main event handler for the bound IPC endpoint. Handles the STOP
    procedure inline first, then dispatches on the FSM state. */
static void nn_bipc_handler (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_bipc *bipc;
    struct nn_list_item *it;
    struct nn_aipc *aipc;

    bipc = nn_cont (self, struct nn_bipc, fsm);

/******************************************************************************/
/*  STOP procedure.                                                           */
/******************************************************************************/
    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        /*  Begin by stopping the connection currently being accepted. */
        nn_aipc_stop (bipc->aipc);
        bipc->state = NN_BIPC_STATE_STOPPING_AIPC;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPC)) {
        /*  Wait for further events until the accept FSM goes idle. */
        if (!nn_aipc_isidle (bipc->aipc))
            return;
        nn_aipc_term (bipc->aipc);
        nn_free (bipc->aipc);
        bipc->aipc = NULL;
        /*  Next, bring down the listening socket. */
        nn_usock_stop (&bipc->usock);
        bipc->state = NN_BIPC_STATE_STOPPING_USOCK;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_USOCK)) {
        if (!nn_usock_isidle (&bipc->usock))
            return;
        /*  Ask all established connections to shut down. */
        for (it = nn_list_begin (&bipc->aipcs);
              it != nn_list_end (&bipc->aipcs);
              it = nn_list_next (&bipc->aipcs, it)) {
            aipc = nn_cont (it, struct nn_aipc, item);
            nn_aipc_stop (aipc);
        }
        bipc->state = NN_BIPC_STATE_STOPPING_AIPCS;
        /*  The list may already be empty; jump to the completion check. */
        goto aipcs_stopping;
    }
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPCS)) {
        nn_assert (src == NN_BIPC_SRC_AIPC && type == NN_AIPC_STOPPED);
        aipc = (struct nn_aipc *) srcptr;
        nn_list_erase (&bipc->aipcs, &aipc->item);
        nn_aipc_term (aipc);
        nn_free (aipc);

        /*  If there are no more aipc state machines, we can stop the whole
            bipc object. */
aipcs_stopping:
        if (nn_list_empty (&bipc->aipcs)) {
            bipc->state = NN_BIPC_STATE_IDLE;
            nn_fsm_stopped_noevent (&bipc->fsm);
            nn_epbase_stopped (&bipc->epbase);
            return;
        }
        return;
    }

    switch (bipc->state) {

/******************************************************************************/
/*  IDLE state.                                                               */
/******************************************************************************/
    case NN_BIPC_STATE_IDLE:
        switch (src) {
        case NN_FSM_ACTION:
            switch (type) {
            case NN_FSM_START:
                /*  Open the listening socket and start accepting the first
                    incoming connection. */
                nn_bipc_start_listening (bipc);
                nn_bipc_start_accepting (bipc);
                bipc->state = NN_BIPC_STATE_ACTIVE;
                return;
            default:
                nn_fsm_bad_action (bipc->state, src, type);
            }
        default:
            nn_fsm_bad_source (bipc->state, src, type);
        }

/******************************************************************************/
/*  ACTIVE state.                                                             */
/*  The execution is yielded to the aipc state machine in this state.         */
/******************************************************************************/
    case NN_BIPC_STATE_ACTIVE:
        /*  Events from the aipc currently being accepted are recognised by
            pointer identity against bipc->aipc. */
        if (srcptr == bipc->aipc) {
            switch (type) {
            case NN_AIPC_ACCEPTED:
                /*  Move the newly created connection to the list of existing
                    connections. */
                nn_list_insert (&bipc->aipcs, &bipc->aipc->item,
                    nn_list_end (&bipc->aipcs));
                bipc->aipc = NULL;

                /*  Start waiting for a new incoming connection. */
                nn_bipc_start_accepting (bipc);
                return;
            default:
                nn_fsm_bad_action (bipc->state, src, type);
            }
        }

        /*  For all remaining events we'll assume they are coming from one
            of remaining child aipc objects. */
        nn_assert (src == NN_BIPC_SRC_AIPC);
        aipc = (struct nn_aipc*) srcptr;
        switch (type) {
        case NN_AIPC_ERROR:
            /*  A failed connection is asked to stop; its NN_AIPC_STOPPED
                event will clean it up below. */
            nn_aipc_stop (aipc);
            return;
        case NN_AIPC_STOPPED:
            nn_list_erase (&bipc->aipcs, &aipc->item);
            nn_aipc_term (aipc);
            nn_free (aipc);
            return;
        default:
            nn_fsm_bad_action (bipc->state, src, type);
        }

/******************************************************************************/
/*  Invalid state.                                                            */
/******************************************************************************/
    default:
        nn_fsm_bad_state (bipc->state, src, type);
    }
}
static int nn_sws_recv (struct nn_pipebase *self, struct nn_msg *msg) { struct nn_sws *sws; struct nn_iovec iov [1]; struct nn_list_item *it; struct msg_chunk *ch; int pos; size_t len; sws = nn_cont (self, struct nn_sws, pipebase); nn_assert_state (sws, NN_SWS_STATE_ACTIVE); switch (sws->instate) { case NN_SWS_INSTATE_FAILING: /* Prevent further send/recv operations on this connection. */ nn_pipebase_stop (self); sws->instate = NN_SWS_INSTATE_CLOSED; /* Inform user this connection has been failed. */ nn_msg_init (msg, 1); *(uint8_t *) nn_chunkref_data (&msg->body) = 0x7f | NN_SWS_FRAME_BITMASK_FIN; iov [0].iov_base = sws->fail_msg; iov [0].iov_len = sws->fail_msg_len; /* TODO: Consider queueing and unconditionally sending close handshake rather than skipping it. */ /* RFC 6455 7.1.7 - try to send helpful Closing Handshake only if the socket is not currently sending. If it's still busy sending, forcibly close this connection, since it's not readily deterministic how much time that action could take to complete, or if the peer is even healthy enough to receive. Rationale: try to be nice, but be mindful of self-preservation! */ if (sws->outstate == NN_SWS_OUTSTATE_IDLE) { nn_usock_send (sws->usock, iov, 1); sws->outstate = NN_SWS_OUTSTATE_SENDING; sws->state = NN_SWS_STATE_CLOSING_CONNECTION; } else { sws->state = NN_SWS_STATE_DONE; nn_fsm_raise (&sws->fsm, &sws->done, NN_SWS_RETURN_CLOSE_HANDSHAKE); } return 0; case NN_SWS_INSTATE_RECVD_CHUNKED: /* This library should not deliver fragmented messages to the application, so it's expected that this is the final frame. */ nn_assert (sws->is_final_frame); len = sws->inmsg_total_size; nn_msg_init (msg, len); /* Reassemble incoming message scatter array. 
*/ while (!nn_list_empty (&sws->inmsg_array)) { it = nn_list_begin (&sws->inmsg_array); ch = nn_cont (it, struct msg_chunk, item); memcpy (((uint8_t*) nn_chunkref_data (&msg->body)) + pos, nn_chunkref_data (&ch->chunk), nn_chunkref_size (&ch->chunk)); pos += nn_chunkref_size (&ch->chunk); nn_msg_chunk_term (ch, &sws->inmsg_array); } nn_assert (pos == len); nn_assert (nn_list_empty (&sws->inmsg_array)); /* No longer collecting scatter array of incoming msg chunks. */ sws->continuing = 0; nn_sws_recv_hdr (sws); return 0; case NN_SWS_INSTATE_RECVD_CONTROL: /* This library should not deliver fragmented messages to the user, so it's expected that this is the final frame. */ nn_assert (sws->is_final_frame); len = sws->inmsg_current_chunk_len + sizeof (sws->inmsg_hdr); nn_msg_init (msg, len); /* Relay opcode, RSV and FIN bits to the user in order to interpret payload. */ memcpy (nn_chunkref_data (&msg->body), &sws->inhdr, sizeof (sws->inmsg_hdr)); pos = sizeof (sws->inmsg_hdr); memcpy (((uint8_t*) nn_chunkref_data (&msg->body)) + pos, sws->inmsg_control, sws->inmsg_current_chunk_len); /* If a closing handshake was just transferred to the application, discontinue continual, async receives. */ if (sws->opcode == NN_WS_OPCODE_CLOSE) { sws->instate = NN_SWS_INSTATE_CLOSED; } else { nn_sws_recv_hdr (sws); } return 0; default: /* Unexpected state. */ nn_assert (0); return 0; } }
static void nn_bws_shutdown (struct nn_fsm *self, int src, int type, void *srcptr) { struct nn_bws *bws; struct nn_list_item *it; struct nn_aws *aws; bws = nn_cont (self, struct nn_bws, fsm); if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) { if (bws->aws) { nn_aws_stop (bws->aws); bws->state = NN_BWS_STATE_STOPPING_AWS; } else { bws->state = NN_BWS_STATE_STOPPING_USOCK; } } if (nn_slow (bws->state == NN_BWS_STATE_STOPPING_AWS)) { if (!nn_aws_isidle (bws->aws)) return; nn_aws_term (bws->aws); nn_free (bws->aws); bws->aws = NULL; nn_usock_stop (&bws->usock); bws->state = NN_BWS_STATE_STOPPING_USOCK; } if (nn_slow (bws->state == NN_BWS_STATE_STOPPING_USOCK)) { if (!nn_usock_isidle (&bws->usock)) return; for (it = nn_list_begin (&bws->awss); it != nn_list_end (&bws->awss); it = nn_list_next (&bws->awss, it)) { aws = nn_cont (it, struct nn_aws, item); nn_aws_stop (aws); } bws->state = NN_BWS_STATE_STOPPING_AWSS; goto awss_stopping; } if (nn_slow (bws->state == NN_BWS_STATE_STOPPING_AWSS)) { nn_assert (src == NN_BWS_SRC_AWS && type == NN_AWS_STOPPED); aws = (struct nn_aws *) srcptr; nn_list_erase (&bws->awss, &aws->item); nn_aws_term (aws); nn_free (aws); /* If there are no more aws state machines, we can stop the whole bws object. */ awss_stopping: if (nn_list_empty (&bws->awss)) { bws->state = NN_BWS_STATE_IDLE; nn_fsm_stopped_noevent (&bws->fsm); nn_epbase_stopped (&bws->epbase); return; } return; } nn_fsm_bad_action (bws->state, src, type); }