static int nn_inproc_ctx_connect (const char *addr, void *hint, struct nn_epbase **epbase) { int rc; struct nn_list_item *it; struct nn_inprocc *inprocc; struct nn_inprocb *inprocb; struct nn_msgpipe *pipe; /* Insert the entry into the endpoint repository. */ inprocc = nn_alloc (sizeof (struct nn_inprocc), "inprocc"); alloc_assert (inprocc); rc = nn_inprocc_init (inprocc, addr, hint); if (nn_slow (rc != 0)) return rc; nn_list_insert (&self.connected, &inprocc->list, nn_list_end (&self.connected)); /* During this process a pipe may be created. */ for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound); it = nn_list_next (&self.bound, it)) { inprocb = nn_cont (it, struct nn_inprocb, list); if (strcmp (addr, nn_inprocb_getaddr (inprocb)) == 0) { pipe = nn_alloc (sizeof (struct nn_msgpipe), "msgpipe"); alloc_assert (pipe); nn_msgpipe_init (pipe, inprocb, inprocc); break; } } nn_assert (epbase); *epbase = &inprocc->epbase; return 0; }
/*  Make the pipe active within the priority list.  */
void nn_priolist_activate (struct nn_priolist *self, struct nn_pipe *pipe,
    struct nn_priolist_data *data)
{
    struct nn_priolist_slot *slot;
    int was_empty;

    slot = &self->slots [data->priority - 1];
    was_empty = nn_list_empty (&slot->pipes);

    /*  Append the pipe to its priority slot in all cases. */
    nn_list_insert (&slot->pipes, &data->item, nn_list_end (&slot->pipes));

    /*  If the slot already contained pipes, neither the slot's current
        pipe nor the list's current priority can change. */
    if (!was_empty)
        return;

    /*  The first pipe in the slot becomes its current pipe. */
    slot->current = data;

    /*  If there was no current priority yet, or the new pipe's priority is
        higher (numerically lower), this slot becomes current. */
    if (self->current == -1 || self->current > data->priority)
        self->current = data->priority;
}
/*  Register a connecting endpoint and, if a compatible bound endpoint with
    the same address exists, ask the transport to create the actual
    connection via the supplied callback.  */
void nn_ins_connect (struct nn_ins_item *item, nn_ins_fn fn)
{
    struct nn_list_item *it;
    struct nn_ins_item *bound;

    nn_mutex_lock (&self.sync);

    /*  Add the endpoint to the repository of connecting endpoints. */
    nn_list_insert (&self.connected, &item->item,
        nn_list_end (&self.connected));

    /*  Look for a bound endpoint with a matching address. */
    for (it = nn_list_begin (&self.bound);
          it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        bound = nn_cont (it, struct nn_ins_item, item);
        if (strncmp (nn_epbase_getaddr (&item->epbase),
              nn_epbase_getaddr (&bound->epbase), NN_SOCKADDR_MAX) != 0)
            continue;

        /*  The connection is made only when the sockets are compatible. */
        if (nn_epbase_ispeer (&item->epbase, bound->protocol))
            fn (item, bound);
        break;
    }

    nn_mutex_unlock (&self.sync);
}
/*  Create a bound inproc endpoint.  Returns -EADDRINUSE if the address is
    already bound.  Any already-connecting endpoints with a matching
    address and a compatible socket type are connected immediately.  */
static int nn_inproc_bind (const char *addr, void *hint,
    struct nn_epbase **epbase)
{
    struct nn_list_item *it;
    struct nn_binproc *binproc;
    struct nn_cinproc *cinproc;

    nn_mutex_lock (&self.sync);

    /*  Check whether the endpoint isn't already bound. */
    /*  TODO: This is an O(n) algorithm! */
    for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        binproc = nn_cont (it, struct nn_binproc, item);
        if (strncmp (addr, nn_binproc_getaddr (binproc),
              NN_SOCKADDR_MAX) == 0) {
            nn_mutex_unlock (&self.sync);
            return -EADDRINUSE;
        }
    }

    /*  Insert the entry into the endpoint repository. */
    binproc = nn_binproc_create (hint);
    nn_list_insert (&self.bound, &binproc->item, nn_list_end (&self.bound));

    /*  During this process new pipes may be created. */
    for (it = nn_list_begin (&self.connected);
          it != nn_list_end (&self.connected);
          it = nn_list_next (&self.connected, it)) {
        cinproc = nn_cont (it, struct nn_cinproc, item);
        if (strncmp (addr, nn_cinproc_getaddr (cinproc),
              NN_SOCKADDR_MAX) == 0) {

            /*  Check whether the two sockets are compatible. */
            if (!nn_epbase_ispeer (&binproc->epbase, cinproc->protocol))
                continue;

            /*  Each connecting endpoint is expected to have connected at
                most once. */
            nn_assert (cinproc->connects == 0);
            cinproc->connects = 1;

            nn_binproc_connect (binproc, cinproc);
        }
    }

    nn_assert (epbase);
    *epbase = &binproc->epbase;

    nn_mutex_unlock (&self.sync);

    return 0;
}
/*  Register a transport with the global context, invoking its optional
    one-time initialisation hook first.  */
static void nn_global_add_transport (struct nn_transport *transport)
{
    /*  The init callback is optional; skip it when not provided. */
    if (transport->init != NULL)
        transport->init ();

    nn_list_insert (&self.transports, &transport->item,
        nn_list_end (&self.transports));
}
/*  Register a transport with the context, invoking its initialisation
    hook before it becomes visible in the transport list.  */
static void nn_ctx_add_transport (struct nn_transport *transport)
{
    /*  The init hook is optional; calling through a NULL pointer is
        undefined behaviour, so guard it the same way as
        nn_global_add_transport does. */
    if (transport->init)
        transport->init ();

    nn_list_insert (&self.transports, &transport->list,
        nn_list_end (&self.transports));
}
/*  Shutdown handler for the connecting inproc endpoint state machine.
    NOTE(review): only the NN_FSM_STOP entry path is visible in this
    fragment; the remainder of the handler (including the 'finish' label)
    lies outside it.  */
static void nn_cinproc_shutdown (struct nn_fsm *self, int src, int type,
    NN_UNUSED void *srcptr)
{
    struct nn_cinproc *cinproc;
    struct nn_sinproc *sinproc;
    struct nn_list_item *it;

    cinproc = nn_cont (self, struct nn_cinproc, fsm);

    if (src == NN_FSM_ACTION && type == NN_FSM_STOP) {

        /*  First, unregister the endpoint from the global repository of
            inproc endpoints. This way, new connections cannot be created
            anymore. */
        nn_ins_disconnect (&cinproc->item);

        /*  Stop the existing connection. */
        for (it = nn_list_begin (&cinproc->sinprocs);
              it != nn_list_end (&cinproc->sinprocs);
              it = nn_list_next (&cinproc->sinprocs, it)) {
            sinproc = nn_cont (it, struct nn_sinproc, item);
            nn_sinproc_stop (sinproc);
        }
        cinproc->state = NN_CINPROC_STATE_STOPPING;

        goto finish;
    }
int nn_sock_add_ep (struct nn_sock *self, struct nn_transport *transport, int bind, const char *addr) { int rc; struct nn_ep *ep; int eid; nn_ctx_enter (&self->ctx); /* Instantiate the endpoint. */ ep = nn_alloc (sizeof (struct nn_ep), "endpoint"); rc = nn_ep_init (ep, NN_SOCK_SRC_EP, self, self->eid, transport, bind, addr); if (nn_slow (rc < 0)) { nn_free (ep); nn_ctx_leave (&self->ctx); return rc; } nn_ep_start (ep); /* Increase the endpoint ID for the next endpoint. */ eid = self->eid; ++self->eid; /* Add it to the list of active endpoints. */ nn_list_insert (&self->eps, &ep->item, nn_list_end (&self->eps)); nn_ctx_leave (&self->ctx); return eid; }
/*  Event handler for the bound inproc endpoint state machine.
    NOTE(review): only the STOP entry path is visible in this fragment;
    the remainder of the handler (including the 'finish' label) lies
    outside it.  */
static void nn_binproc_handler (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_binproc *binproc;
    struct nn_list_item *it;
    struct nn_sinproc *sinproc;
    struct nn_sinproc *peer;

    binproc = nn_cont (self, struct nn_binproc, fsm);

/******************************************************************************/
/*  STOP procedure.                                                           */
/******************************************************************************/
    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {

        /*  First, unregister the endpoint from the global repository of
            inproc endpoints. This way, new connections cannot be created
            anymore. */
        nn_inproc_unbind (binproc);

        /*  Stop the existing connections. */
        for (it = nn_list_begin (&binproc->sinprocs);
              it != nn_list_end (&binproc->sinprocs);
              it = nn_list_next (&binproc->sinprocs, it)) {
            sinproc = nn_cont (it, struct nn_sinproc, item);
            nn_sinproc_stop (sinproc);
        }
        binproc->state = NN_BINPROC_STATE_STOPPING;

        goto finish;
    }
static void nn_cinproc_connect (struct nn_ins_item *self, struct nn_ins_item *peer) { struct nn_cinproc *cinproc; struct nn_binproc *binproc; struct nn_sinproc *sinproc; cinproc = nn_cont (self, struct nn_cinproc, item); binproc = nn_cont (peer, struct nn_binproc, item); nn_assert_state (cinproc, NN_CINPROC_STATE_ACTIVE); sinproc = nn_alloc (sizeof (struct nn_sinproc), "sinproc"); alloc_assert (sinproc); nn_sinproc_init (sinproc, NN_CINPROC_SRC_SINPROC, cinproc->item.ep, &cinproc->fsm); nn_list_insert (&cinproc->sinprocs, &sinproc->item, nn_list_end (&cinproc->sinprocs)); nn_sinproc_connect (sinproc, &binproc->fsm); nn_ep_stat_increment (cinproc->item.ep, NN_STAT_INPROGRESS_CONNECTIONS, -1); nn_ep_stat_increment (cinproc->item.ep, NN_STAT_ESTABLISHED_CONNECTIONS, 1); }
/*  Register a bound endpoint.  Fails with -EADDRINUSE when the address is
    already bound.  Compatible endpoints waiting to connect to the same
    address are connected via the supplied callback.  */
int nn_ins_bind (struct nn_ins_item *item, nn_ins_fn fn)
{
    struct nn_list_item *it;
    struct nn_ins_item *other;

    nn_mutex_lock (&self.sync);

    /*  Refuse to bind the same address twice. */
    /*  TODO: This is an O(n) algorithm! */
    for (it = nn_list_begin (&self.bound);
          it != nn_list_end (&self.bound);
          it = nn_list_next (&self.bound, it)) {
        other = nn_cont (it, struct nn_ins_item, item);
        if (strncmp (nn_ep_getaddr (other->ep), nn_ep_getaddr (item->ep),
              NN_SOCKADDR_MAX) == 0) {
            nn_mutex_unlock (&self.sync);
            return -EADDRINUSE;
        }
    }

    /*  Insert the entry into the endpoint repository. */
    nn_list_insert (&self.bound, &item->item, nn_list_end (&self.bound));

    /*  Connect any compatible endpoints waiting on this address. */
    for (it = nn_list_begin (&self.connected);
          it != nn_list_end (&self.connected);
          it = nn_list_next (&self.connected, it)) {
        other = nn_cont (it, struct nn_ins_item, item);
        if (strncmp (nn_ep_getaddr (item->ep), nn_ep_getaddr (other->ep),
              NN_SOCKADDR_MAX) != 0)
            continue;
        if (!nn_ep_ispeer_ep (item->ep, other->ep))
            continue;
        fn (item, other);
    }

    nn_mutex_unlock (&self.sync);

    return 0;
}
int ftw_publisher_construct(struct ftw_socket_callsite **callsite, const char *addr, int linger, struct ftw_socket **sock) { struct ftw_socket *inst; int rcs; int rco; int rcb; /* Preconditions expected of LabVIEW. */ ftw_assert(*callsite && addr); nn_mutex_lock(&(*callsite)->sync); rcs = nn_socket(AF_SP, NN_PUB); /* Socket creation failure? */ if (rcs < 0) { *sock = NULL; nn_mutex_unlock(&(*callsite)->sync); return rcs; } rco = nn_setsockopt(rcs, NN_SOL_SOCKET, NN_LINGER, &linger, sizeof(linger)); if (rco < 0) { *sock = NULL; nn_mutex_unlock(&(*callsite)->sync); return rco; } rcb = nn_bind(rcs, addr); /* Endpoint creation failure? */ if (rcb < 0) { nn_close(rcs); *sock = NULL; nn_mutex_unlock(&(*callsite)->sync); return rcb; } inst = ftw_malloc(sizeof(struct ftw_socket)); ftw_assert(inst); memset(inst, 0, sizeof(*inst)); inst->id = rcs; inst->callsite = *callsite; nn_list_item_init(&inst->item); nn_list_insert(&(*callsite)->active_sockets, &inst->item, nn_list_end(&(*callsite)->active_sockets)); *sock = inst; (*callsite)->lifetime_sockets++; nn_mutex_unlock(&(*callsite)->sync); return 0; }
static void nn_btcp_shutdown (struct nn_fsm *self, int src, int type, void *srcptr) { struct nn_btcp *btcp; struct nn_list_item *it; struct nn_atcp *atcp; btcp = nn_cont (self, struct nn_btcp, fsm); if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) { nn_atcp_stop (btcp->atcp); btcp->state = NN_BTCP_STATE_STOPPING_ATCP; } if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_ATCP)) { if (!nn_atcp_isidle (btcp->atcp)) return; nn_atcp_term (btcp->atcp); nn_free (btcp->atcp); btcp->atcp = NULL; nn_usock_stop (&btcp->usock); btcp->state = NN_BTCP_STATE_STOPPING_USOCK; } if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_USOCK)) { if (!nn_usock_isidle (&btcp->usock)) return; for (it = nn_list_begin (&btcp->atcps); it != nn_list_end (&btcp->atcps); it = nn_list_next (&btcp->atcps, it)) { atcp = nn_cont (it, struct nn_atcp, item); nn_atcp_stop (atcp); } btcp->state = NN_BTCP_STATE_STOPPING_ATCPS; goto atcps_stopping; } if (nn_slow (btcp->state == NN_BTCP_STATE_STOPPING_ATCPS)) { nn_assert (src == NN_BTCP_SRC_ATCP && type == NN_ATCP_STOPPED); atcp = (struct nn_atcp *) srcptr; nn_list_erase (&btcp->atcps, &atcp->item); nn_atcp_term (atcp); nn_free (atcp); /* If there are no more atcp state machines, we can stop the whole btcp object. */ atcps_stopping: if (nn_list_empty (&btcp->atcps)) { btcp->state = NN_BTCP_STATE_IDLE; nn_fsm_stopped_noevent (&btcp->fsm); nn_epbase_stopped (&btcp->epbase); return; } return; } nn_fsm_bad_action(btcp->state, src, type); }
/*  Shutdown handler for the IPC bound endpoint state machine.  The
    sequence is: stop the accepting aipc, then the listening usock, then
    all established aipc connections, and finally report the whole object
    as stopped.  */
static void nn_bipc_shutdown (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_bipc *bipc;
    struct nn_list_item *it;
    struct nn_aipc *aipc;

    bipc = nn_cont (self, struct nn_bipc, fsm);

    /*  Stop request: begin by stopping the aipc that is currently
        accepting a new connection. */
    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        nn_aipc_stop (bipc->aipc);
        bipc->state = NN_BIPC_STATE_STOPPING_AIPC;
    }

    /*  Once the accepting aipc is idle, free it and stop the listening
        socket. */
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPC)) {
        if (!nn_aipc_isidle (bipc->aipc))
            return;
        nn_aipc_term (bipc->aipc);
        nn_free (bipc->aipc);
        bipc->aipc = NULL;
        nn_usock_stop (&bipc->usock);
        bipc->state = NN_BIPC_STATE_STOPPING_USOCK;
    }

    /*  Once the listening socket is idle, ask all established connections
        to stop. */
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_USOCK)) {
        if (!nn_usock_isidle (&bipc->usock))
            return;
        for (it = nn_list_begin (&bipc->aipcs);
              it != nn_list_end (&bipc->aipcs);
              it = nn_list_next (&bipc->aipcs, it)) {
            aipc = nn_cont (it, struct nn_aipc, item);
            nn_aipc_stop (aipc);
        }
        bipc->state = NN_BIPC_STATE_STOPPING_AIPCS;

        /*  Jump straight to the emptiness check in case there were no
            established connections at all. */
        goto aipcs_stopping;
    }

    /*  Collect the individual aipc objects as they finish stopping. */
    if (nn_slow (bipc->state == NN_BIPC_STATE_STOPPING_AIPCS)) {
        nn_assert (src == NN_BIPC_SRC_AIPC && type == NN_AIPC_STOPPED);
        aipc = (struct nn_aipc *) srcptr;
        nn_list_erase (&bipc->aipcs, &aipc->item);
        nn_aipc_term (aipc);
        nn_free (aipc);

        /*  If there are no more aipc state machines, we can stop the whole
            bipc object. */
aipcs_stopping:
        if (nn_list_empty (&bipc->aipcs)) {
            bipc->state = NN_BIPC_STATE_IDLE;
            nn_fsm_stopped_noevent (&bipc->fsm);
            nn_epbase_stopped (&bipc->epbase);
            return;
        }

        return;
    }

    nn_fsm_bad_state(bipc->state, src, type);
}
static int nn_inproc_ctx_bind (const char *addr, void *hint, struct nn_epbase **epbase) { int rc; struct nn_list_item *it; struct nn_inprocb *inprocb; struct nn_inprocc *inprocc; struct nn_msgpipe *pipe; /* Check whether the endpoint isn't already bound. */ /* TODO: This is an O(n) algorithm! */ for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound); it = nn_list_next (&self.bound, it)) { inprocb = nn_cont (it, struct nn_inprocb, list); if (strncmp (addr, nn_inprocb_getaddr (inprocb), NN_SOCKADDR_MAX) == 0) return -EADDRINUSE; } /* Insert the entry into the endpoint repository. */ inprocb = nn_alloc (sizeof (struct nn_inprocb), "inprocb"); alloc_assert (inprocb); rc = nn_inprocb_init (inprocb, addr, hint); if (nn_slow (rc != 0)) return rc; nn_list_insert (&self.bound, &inprocb->list, nn_list_end (&self.bound)); /* During this process new pipes may be created. */ for (it = nn_list_begin (&self.connected); it != nn_list_end (&self.connected); it = nn_list_next (&self.connected, it)) { inprocc = nn_cont (it, struct nn_inprocc, list); if (strncmp (addr, nn_inprocc_getaddr (inprocc), NN_SOCKADDR_MAX) == 0) { pipe = nn_alloc (sizeof (struct nn_msgpipe), "msgpipe"); alloc_assert (pipe); nn_msgpipe_init (pipe, inprocb, inprocc); } } nn_assert (epbase); *epbase = &inprocb->epbase; return 0; }
static int nn_ctx_create_ep (int fd, const char *addr, int bind) { int rc; const char *proto; const char *delim; size_t protosz; struct nn_transport *tp; struct nn_list_item *it; /* Check whether address is valid. */ if (!addr) return -EINVAL; if (strlen (addr) >= NN_SOCKADDR_MAX) return -ENAMETOOLONG; /* Separate the protocol and the actual address. */ proto = addr; delim = strchr (addr, ':'); if (!delim) return -EINVAL; if (delim [1] != '/' || delim [2] != '/') return -EINVAL; protosz = delim - addr; addr += protosz + 3; /* Find the specified protocol. */ tp = NULL; nn_glock_lock (); for (it = nn_list_begin (&self.transports); it != nn_list_end (&self.transports); it = nn_list_next (&self.transports, it)) { tp = nn_cont (it, struct nn_transport, list); if (strlen (tp->name) == protosz && memcmp (tp->name, proto, protosz) == 0) break; tp = NULL; } /* The protocol specified doesn't match any known protocol. */ if (!tp) { nn_glock_unlock (); return -EPROTONOSUPPORT; } /* Ask socket to create the endpoint. Pass it the class factory function. */ rc = nn_sock_add_ep (self.socks [fd], addr, bind ? tp->bind : tp->connect); nn_glock_unlock (); return rc; }
/*  Shutdown handler for the socket state machine.  NOTE(review): only the
    initial NN_FSM_STOP handling is visible in this fragment; the rest of
    the handler (including the 'finish2' label) lies outside it.  */
static void nn_sock_shutdown (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_sock *sock;
    struct nn_list_item *it;
    struct nn_ep *ep;

    sock = nn_cont (self, struct nn_sock, fsm);

    if (nn_slow (src == NN_FSM_ACTION && type == NN_FSM_STOP)) {
        nn_assert (sock->state == NN_SOCK_STATE_ACTIVE ||
            sock->state == NN_SOCK_STATE_ZOMBIE);

        /*  Close sndfd and rcvfd. This should make any current
            select/poll using SNDFD and/or RCVFD exit. */
        if (!(sock->socktype->flags & NN_SOCKTYPE_FLAG_NORECV)) {
            nn_efd_term (&sock->rcvfd);

            /*  Poison the terminated efd so that accidental later use is
                detectable. */
            memset (&sock->rcvfd, 0xcd, sizeof (sock->rcvfd));
        }
        if (!(sock->socktype->flags & NN_SOCKTYPE_FLAG_NOSEND)) {
            nn_efd_term (&sock->sndfd);
            memset (&sock->sndfd, 0xcd, sizeof (sock->sndfd));
        }

        /*  Ask all the associated endpoints to stop.  The iterator is
            advanced before the current endpoint is moved between lists. */
        it = nn_list_begin (&sock->eps);
        while (it != nn_list_end (&sock->eps)) {
            ep = nn_cont (it, struct nn_ep, item);
            it = nn_list_next (&sock->eps, it);
            nn_list_erase (&sock->eps, &ep->item);
            nn_list_insert (&sock->sdeps, &ep->item,
                nn_list_end (&sock->sdeps));
            nn_ep_stop (ep);
        }
        sock->state = NN_SOCK_STATE_STOPPING_EPS;
        goto finish2;
    }
static int nn_inproc_connect (const char *addr, void *hint, struct nn_epbase **epbase) { struct nn_list_item *it; struct nn_cinproc *cinproc; struct nn_binproc *binproc; nn_mutex_lock (&self.sync); /* Insert the entry into the endpoint repository. */ cinproc = nn_cinproc_create (hint); nn_list_insert (&self.connected, &cinproc->item, nn_list_end (&self.connected)); /* During this process a pipe may be created. */ for (it = nn_list_begin (&self.bound); it != nn_list_end (&self.bound); it = nn_list_next (&self.bound, it)) { binproc = nn_cont (it, struct nn_binproc, item); if (strncmp (addr, nn_binproc_getaddr (binproc), NN_SOCKADDR_MAX) == 0) { /* Check whether the two sockets are compatible. */ if (!nn_epbase_ispeer (&cinproc->epbase, binproc->protocol)) break; ++binproc->connects; nn_cinproc_connect (cinproc, binproc); break; } } nn_assert (epbase); *epbase = &cinproc->epbase; nn_mutex_unlock (&self.sync); return 0; }
void nn_binproc_connect (struct nn_binproc *self, struct nn_cinproc *peer) { struct nn_sinproc *sinproc; nn_assert (self->state == NN_BINPROC_STATE_ACTIVE); sinproc = nn_alloc (sizeof (struct nn_sinproc), "sinproc"); alloc_assert (sinproc); nn_sinproc_init (sinproc, NN_BINPROC_SRC_SINPROC, &self->epbase, &self->fsm); nn_list_insert (&self->sinprocs, &sinproc->item, nn_list_end (&self->sinprocs)); nn_sinproc_connect (sinproc, &peer->fsm); }
/*  Allocate a new message chunk, append it to the message array and
    return a pointer to its data buffer.  */
static void *nn_msg_chunk_new (size_t size, struct nn_list *msg_array)
{
    struct msg_chunk *chunk;

    chunk = nn_alloc (sizeof (struct msg_chunk), "msg_chunk");
    alloc_assert (chunk);

    nn_chunkref_init (&chunk->chunk, size);
    nn_list_item_init (&chunk->item);
    nn_list_insert (msg_array, &chunk->item, nn_list_end (msg_array));

    return nn_chunkref_data (&chunk->chunk);
}
int nn_sock_rm_ep (struct nn_sock *self, int eid) { struct nn_list_item *it; struct nn_ep *ep; nn_ctx_enter (&self->ctx); /* Find the specified enpoint. */ ep = NULL; for (it = nn_list_begin (&self->eps); it != nn_list_end (&self->eps); it = nn_list_next (&self->eps, it)) { ep = nn_cont (it, struct nn_ep, item); if (ep->eid == eid) break; ep = NULL; } /* The endpoint doesn't exist. */ if (!ep) { nn_ctx_leave (&self->ctx); return -EINVAL; } /* Move the endpoint from the list of active endpoints to the list of shutting down endpoints. */ nn_list_erase (&self->eps, &ep->item); nn_list_insert (&self->sdeps, &ep->item, nn_list_end (&self->sdeps)); /* Ask the endpoint to stop. Actual terminatation may be delayed by the transport. */ nn_ep_stop (ep); nn_ctx_leave (&self->ctx); return 0; }
int nn_global_create_socket (int domain, int protocol) { int rc; int s; struct nn_list_item *it; struct nn_socktype *socktype; struct nn_sock *sock; /* The function is called with nn_glock held */ /* Only AF_SP and AF_SP_RAW domains are supported. */ if (nn_slow (domain != AF_SP && domain != AF_SP_RAW)) { return -EAFNOSUPPORT; } /* If socket limit was reached, report error. */ if (nn_slow (self.nsocks >= NN_MAX_SOCKETS)) { return -EMFILE; } /* Find an empty socket slot. */ s = self.unused [NN_MAX_SOCKETS - self.nsocks - 1]; /* Find the appropriate socket type. */ for (it = nn_list_begin (&self.socktypes); it != nn_list_end (&self.socktypes); it = nn_list_next (&self.socktypes, it)) { socktype = nn_cont (it, struct nn_socktype, item); if (socktype->domain == domain && socktype->protocol == protocol) { /* Instantiate the socket. */ sock = nn_alloc (sizeof (struct nn_sock), "sock"); alloc_assert (sock); rc = nn_sock_init (sock, socktype, s); if (rc < 0) return rc; /* Adjust the global socket table. */ self.socks [s] = sock; ++self.nsocks; return s; } } /* Specified socket type wasn't found. */ return -EINVAL; }
/*  Destroy the socket.  NOTE(review): this fragment covers only the first
    part of the teardown (marking the socket as closing and asking the
    endpoints to terminate); the remainder of the function lies outside
    it.  */
int nn_sock_destroy (struct nn_sock *self)
{
    int rc;
    struct nn_sockbase *sockbase;
    struct nn_list_item *it;
    struct nn_epbase *ep;

    sockbase = (struct nn_sockbase*) self;

    nn_cp_lock (&sockbase->cp);

    /*  The call may have been interrupted by a signal and restarted
        afterwards. In such case don't do the following stuff again. */
    if (!(sockbase->flags & NN_SOCK_FLAG_CLOSING)) {

        /*  Mark the socket as being in process of shutting down. */
        sockbase->flags |= NN_SOCK_FLAG_CLOSING;

        /*  Close sndfd and rcvfd. This should make any current
            select/poll using SNDFD and/or RCVFD exit. */
        if (!(sockbase->vfptr->flags & NN_SOCKBASE_FLAG_NORECV)) {
            nn_efd_term (&sockbase->rcvfd);

            /*  Poison the terminated efd so that accidental later use is
                detectable. */
            memset (&sockbase->rcvfd, 0xcd, sizeof (sockbase->rcvfd));
        }
        if (!(sockbase->vfptr->flags & NN_SOCKBASE_FLAG_NOSEND)) {
            nn_efd_term (&sockbase->sndfd);
            memset (&sockbase->sndfd, 0xcd, sizeof (sockbase->sndfd));
        }

        /*  Create a semaphore to wait on for all endpoint to terminate. */
        nn_sem_init (&sockbase->termsem);

        /*  Ask all the associated endpoints to terminate. Call to
            nn_ep_close can actually deallocate the endpoint, so take care
            to get pointer to the next endpoint before the call. */
        it = nn_list_begin (&sockbase->eps);
        while (it != nn_list_end (&sockbase->eps)) {
            ep = nn_cont (it, struct nn_epbase, item);
            it = nn_list_next (&sockbase->eps, it);
            rc = nn_ep_close ((void*) ep);
            errnum_assert (rc == 0 || rc == -EINPROGRESS, -rc);
        }
    }
int nn_dist_send (struct nn_dist *self, struct nn_msg *msg, struct nn_pipe *exclude) { int rc; struct nn_list_item *it; struct nn_dist_data *data; struct nn_msg copy; /* TODO: We can optimise for the case when there's only one outbound pipe here. No message copying is needed in such case. */ /* In the specific case when there are no outbound pipes. There's nowhere to send the message to. Deallocate it. */ if (nn_slow (self->count) == 0) { nn_msg_term (msg); return 0; } /* Send the message to all the subscribers. */ nn_msg_bulkcopy_start (msg, self->count); it = nn_list_begin (&self->pipes); while (it != nn_list_end (&self->pipes)) { data = nn_cont (it, struct nn_dist_data, item); nn_msg_bulkcopy_cp (©, msg); if (nn_fast (data->pipe == exclude)) { nn_msg_term (©); } else { rc = nn_pipe_send (data->pipe, ©); errnum_assert (rc >= 0, -rc); if (rc & NN_PIPE_RELEASE) { --self->count; it = nn_list_erase (&self->pipes, it); continue; } } it = nn_list_next (&self->pipes, it); } nn_msg_term (msg); return 0; }
static void nn_binproc_connect (struct nn_ins_item *self, struct nn_ins_item *peer) { struct nn_binproc *binproc; struct nn_cinproc *cinproc; struct nn_sinproc *sinproc; binproc = nn_cont (self, struct nn_binproc, item); cinproc = nn_cont (peer, struct nn_cinproc, item); nn_assert (binproc->state == NN_BINPROC_STATE_ACTIVE); sinproc = nn_alloc (sizeof (struct nn_sinproc), "sinproc"); alloc_assert (sinproc); nn_sinproc_init (sinproc, NN_BINPROC_SRC_SINPROC, &binproc->item.epbase, &binproc->fsm); nn_list_insert (&binproc->sinprocs, &sinproc->item, nn_list_end (&binproc->sinprocs)); nn_sinproc_connect (sinproc, &cinproc->fsm); }
/*  Look up a transport by its numeric ID.  Returns NULL when no such
    transport is registered.  */
struct nn_transport *nn_global_transport (int id)
{
    struct nn_transport *result;
    struct nn_list_item *it;

    result = NULL;
    nn_glock_lock ();
    for (it = nn_list_begin (&self.transports);
          it != nn_list_end (&self.transports);
          it = nn_list_next (&self.transports, it)) {
        struct nn_transport *tp;

        tp = nn_cont (it, struct nn_transport, item);
        if (tp->id == id) {
            result = tp;
            break;
        }
    }
    nn_glock_unlock ();

    return result;
}
/*  Initialise an accepted stream: hook it up to its parent bstream, fetch
    the socket buffer options and start the underlying socket and stream
    state machines.  The registration order at the end is significant.  */
void nn_astream_init (struct nn_astream *self, struct nn_epbase *epbase,
    int s, struct nn_usock *usock, struct nn_bstream *bstream)
{
    int sndbuf;
    int rcvbuf;
    size_t sz;

    /*  Switch the state. */
    self->sink = &nn_astream_state_connected;
    self->bstream = bstream;

    /*  This stream does not belong yet to the bstream. */
    nn_list_item_init (&self->item);

    /*  Get the current values of NN_SNDBUF and NN_RCVBUF options. */
    sz = sizeof (sndbuf);
    nn_epbase_getopt (&self->bstream->epbase, NN_SOL_SOCKET, NN_SNDBUF,
        &sndbuf, &sz);
    nn_assert (sz == sizeof (sndbuf));
    sz = sizeof (rcvbuf);
    nn_epbase_getopt (&self->bstream->epbase, NN_SOL_SOCKET, NN_RCVBUF,
        &rcvbuf, &sz);
    nn_assert (sz == sizeof (rcvbuf));

    /*  Start the stream state machine. */
    nn_usock_init_child (&self->usock, usock, s, &self->sink, sndbuf,
        rcvbuf, usock->cp);

    /*  Note: must add myself to the astreams list *before* initializing
        my stream, which may fail and terminate me. */
    nn_list_insert (&bstream->astreams, &self->item,
        nn_list_end (&bstream->astreams));

    /*  Note: may fail and terminate me - do not reference self after this
        point! */
    nn_stream_init (&self->stream, epbase, &self->usock);
}
/*  Event handler for the TCP bound endpoint state machine while it is
    running (shutdown is handled by a separate function).  */
static void nn_btcp_handler (struct nn_fsm *self, int src, int type,
    void *srcptr)
{
    struct nn_btcp *btcp;
    struct nn_atcp *atcp;

    btcp = nn_cont (self, struct nn_btcp, fsm);

    switch (btcp->state) {

/******************************************************************************/
/*  IDLE state.                                                               */
/******************************************************************************/
    case NN_BTCP_STATE_IDLE:
        switch (src) {
        case NN_FSM_ACTION:
            switch (type) {
            case NN_FSM_START:

                /*  Begin listening and accept the first incoming
                    connection. */
                nn_btcp_start_listening (btcp);
                nn_btcp_start_accepting (btcp);
                btcp->state = NN_BTCP_STATE_ACTIVE;
                return;
            default:
                nn_fsm_bad_action (btcp->state, src, type);
            }
        default:
            nn_fsm_bad_source (btcp->state, src, type);
        }

/******************************************************************************/
/*  ACTIVE state.                                                             */
/*  The execution is yielded to the atcp state machine in this state.         */
/******************************************************************************/
    case NN_BTCP_STATE_ACTIVE:
        if (srcptr == btcp->atcp) {
            switch (type) {
            case NN_ATCP_ACCEPTED:

                /*  Move the newly created connection to the list of
                    existing connections. */
                nn_list_insert (&btcp->atcps, &btcp->atcp->item,
                    nn_list_end (&btcp->atcps));
                btcp->atcp = NULL;

                /*  Start waiting for a new incoming connection. */
                nn_btcp_start_accepting (btcp);
                return;
            default:
                nn_fsm_bad_action (btcp->state, src, type);
            }
        }

        /*  For all remaining events we'll assume they are coming from one
            of remaining child atcp objects. */
        nn_assert (src == NN_BTCP_SRC_ATCP);
        atcp = (struct nn_atcp*) srcptr;
        switch (type) {
        case NN_ATCP_ERROR:
            nn_atcp_stop (atcp);
            return;
        case NN_ATCP_STOPPED:

            /*  The child is fully stopped; remove and deallocate it. */
            nn_list_erase (&btcp->atcps, &atcp->item);
            nn_atcp_term (atcp);
            nn_free (atcp);
            return;
        default:
            nn_fsm_bad_action (btcp->state, src, type);
        }

/******************************************************************************/
/*  Invalid state.                                                            */
/******************************************************************************/
    default:
        nn_fsm_bad_state (btcp->state, src, type);
    }
}
/*  Register a socket type with the global context by appending it to the
    list of known socket types.  */
static void nn_global_add_socktype (struct nn_socktype *socktype)
{
    nn_list_insert (&self.socktypes, &socktype->item,
        nn_list_end (&self.socktypes));
}
int nn_socket (int domain, int protocol) { int rc; int s; struct nn_list_item *it; struct nn_socktype *socktype; struct nn_sock *sock; nn_glock_lock (); /* Make sure that global state is initialised. */ nn_global_init (); /* If nn_term() was already called, return ETERM. */ if (nn_slow (self.flags & NN_CTX_FLAG_ZOMBIE)) { nn_global_term (); nn_glock_unlock (); errno = ETERM; return -1; } /* Only AF_SP and AF_SP_RAW domains are supported. */ if (nn_slow (domain != AF_SP && domain != AF_SP_RAW)) { nn_global_term (); nn_glock_unlock (); errno = EAFNOSUPPORT; return -1; } /* If socket limit was reached, report error. */ if (nn_slow (self.nsocks >= NN_MAX_SOCKETS)) { nn_global_term (); nn_glock_unlock (); errno = EMFILE; return -1; } /* Find an empty socket slot. */ s = self.unused [NN_MAX_SOCKETS - self.nsocks - 1]; /* Find the appropriate socket type. */ for (it = nn_list_begin (&self.socktypes); it != nn_list_end (&self.socktypes); it = nn_list_next (&self.socktypes, it)) { socktype = nn_cont (it, struct nn_socktype, item); if (socktype->domain == domain && socktype->protocol == protocol) { /* Instantiate the socket. */ sock = nn_alloc (sizeof (struct nn_sock), "sock"); alloc_assert (sock); rc = nn_sock_init (sock, socktype); if (rc < 0) goto error; /* Adjust the global socket table. */ self.socks [s] = sock; ++self.nsocks; nn_glock_unlock (); return s; } } rc = -EINVAL; /* Specified socket type wasn't found. */ error: nn_global_term (); nn_glock_unlock (); errno = -rc; return -1; }