/*  Close socket s: destroy the socket object, return its slot to the
    free-slot stack and tear down the global context if it was the last
    socket.  Returns 0 on success, or -1 with errno set to EINTR if the
    destruction was interrupted (the caller may retry). */
int nn_close (int s) {
    int rc;
    /*  NOTE(review): presumably validates global init state and the socket
        index, expanding to an early error return — confirm against the
        macro definition. */
    NN_BASIC_CHECKS;
    /*  Deallocate the socket object.
        NOTE(review): self.socks [s] is read and destroyed before
        nn_glock_lock() below — assumes close of a given s is not raced by
        other threads; confirm the locking contract. */
    rc = nn_sock_destroy (self.socks [s]);
    if (nn_slow (rc == -EINTR)) {
        /*  Destruction was interrupted; socket is still alive. */
        errno = EINTR;
        return -1;
    }
    nn_glock_lock ();
    /*  Remove the socket from the socket table, add it to unused socket
        table.  With nsocks sockets open, free slots occupy
        unused [0 .. NN_MAX_SOCKETS-nsocks-1]; pushing at index
        NN_MAX_SOCKETS-nsocks and then decrementing nsocks keeps that
        stack invariant (mirrors the pop in nn_socket). */
    self.socks [s] = NULL;
    self.unused [NN_MAX_SOCKETS - self.nsocks] = s;
    --self.nsocks;
    /*  Destroy the global context if there's no socket remaining.
        NOTE(review): called with no argument here, unlike the
        nn_ctx_term (&ctx) overload used elsewhere — presumably a
        reference-counted global teardown; verify. */
    nn_ctx_term ();
    nn_glock_unlock ();
    return 0;
}
static void nn_global_term (void) { #if defined NN_HAVE_WINDOWS int rc; #endif struct nn_list_item *it; struct nn_transport *tp; /* If there are no sockets remaining, uninitialise the global context. */ nn_assert (self.socks); if (self.nsocks > 0) return; /* Stop the FSM */ nn_ctx_enter (&self.ctx); nn_fsm_stop (&self.fsm); nn_ctx_leave (&self.ctx); /* Shut down the worker threads. */ nn_pool_term (&self.pool); /* Terminate ctx mutex */ nn_ctx_term (&self.ctx); /* Ask all the transport to deallocate their global resources. */ while (!nn_list_empty (&self.transports)) { it = nn_list_begin (&self.transports); tp = nn_cont (it, struct nn_transport, item); if (tp->term) tp->term (); nn_list_erase (&self.transports, it); } /* For now there's nothing to deallocate about socket types, however, let's remove them from the list anyway. */ while (!nn_list_empty (&self.socktypes)) nn_list_erase (&self.socktypes, nn_list_begin (&self.socktypes)); /* Final deallocation of the nn_global object itself. */ nn_list_term (&self.socktypes); nn_list_term (&self.transports); nn_free (self.socks); /* This marks the global state as uninitialised. */ self.socks = NULL; /* Shut down the memory allocation subsystem. */ nn_alloc_term (); /* On Windows, uninitialise the socket library. */ #if defined NN_HAVE_WINDOWS rc = WSACleanup (); nn_assert (rc == 0); #endif }
int nn_sock_term (struct nn_sock *self) { int rc; int i; /* NOTE: nn_sock_stop must have already been called. */ /* Some endpoints may still be alive. Here we are going to wait till they are all closed. This loop is not interruptible, because making it so would leave a partially cleaned up socket, and we don't have a way to defer resource deallocation. */ for (;;) { rc = nn_sem_wait (&self->termsem); if (nn_slow (rc == -EINTR)) continue; errnum_assert (rc == 0, -rc); break; } /* Also, wait for all holds on the socket to be released. */ for (;;) { rc = nn_sem_wait (&self->relesem); if (nn_slow (rc == -EINTR)) continue; errnum_assert (rc == 0, -rc); break; } /* Threads that posted the semaphore(s) can still have the ctx locked for a short while. By simply entering the context and exiting it immediately we can be sure that any such threads have already exited the context. */ nn_ctx_enter (&self->ctx); nn_ctx_leave (&self->ctx); /* At this point, we can be reasonably certain that no other thread has any references to the socket. */ nn_fsm_stopped_noevent (&self->fsm); nn_fsm_term (&self->fsm); nn_sem_term (&self->termsem); nn_list_term (&self->sdeps); nn_list_term (&self->eps); nn_clock_term (&self->clock); nn_ctx_term (&self->ctx); /* Destroy any optsets associated with the socket. */ for (i = 0; i != NN_MAX_TRANSPORT; ++i) if (self->optsets [i]) self->optsets [i]->vfptr->destroy (self->optsets [i]); return 0; }
int nn_sock_term (struct nn_sock *self) { int rc; int i; /* Ask the state machine to start closing the socket. */ nn_ctx_enter (&self->ctx); nn_fsm_stop (&self->fsm); nn_ctx_leave (&self->ctx); /* Shutdown process was already started but some endpoints may still alive. Here we are going to wait till they are all closed. */ rc = nn_sem_wait (&self->termsem); if (nn_slow (rc == -EINTR)) return -EINTR; errnum_assert (rc == 0, -rc); /* The thread that posted the semaphore can still have the ctx locked for a short while. By simply entering the context and exiting it immediately we can be sure that the thread in question have already exited the context. */ nn_ctx_enter (&self->ctx); nn_ctx_leave (&self->ctx); /* Deallocate the resources. */ nn_fsm_stopped_noevent (&self->fsm); nn_fsm_term (&self->fsm); nn_sem_term (&self->termsem); nn_list_term (&self->sdeps); nn_list_term (&self->eps); nn_clock_term (&self->clock); nn_ctx_term (&self->ctx); /* Destroy any optsets associated with the socket. */ for (i = 0; i != NN_MAX_TRANSPORT; ++i) if (self->optsets [i]) self->optsets [i]->vfptr->destroy (self->optsets [i]); return 0; }
int nn_socket (int domain, int protocol) { int rc; int s; struct nn_list_item *it; struct nn_socktype *socktype; nn_glock_lock (); /* Make sure that global state is initialised. */ nn_ctx_init (); /* If nn_term() was already called, return ETERM. */ if (nn_slow (self.flags & NN_CTX_FLAG_ZOMBIE)) { nn_ctx_term (); nn_glock_unlock (); errno = ETERM; return -1; } /* Only AF_SP and AF_SP_RAW domains are supported. */ if (nn_slow (domain != AF_SP && domain != AF_SP_RAW)) { nn_ctx_term (); nn_glock_unlock (); errno = EAFNOSUPPORT; return -1; } /* If socket limit was reached, report error. */ if (nn_slow (self.nsocks >= NN_MAX_SOCKETS)) { nn_ctx_term (); nn_glock_unlock (); errno = EMFILE; return -1; } /* Find an empty socket slot. */ s = self.unused [NN_MAX_SOCKETS - self.nsocks - 1]; /* Find the appropriate socket type and instantiate it. */ for (it = nn_list_begin (&self.socktypes); it != nn_list_end (&self.socktypes); it = nn_list_next (&self.socktypes, it)) { socktype = nn_cont (it, struct nn_socktype, list); if (socktype->domain == domain && socktype->protocol == protocol) { rc = socktype->create ((struct nn_sockbase**) &self.socks [s]); if (rc < 0) goto error; nn_sock_postinit (self.socks [s], domain, protocol); ++self.nsocks; nn_glock_unlock (); return s; } } rc = -EINVAL; /* Specified socket type wasn't found. */ error: nn_ctx_term (); nn_glock_unlock (); errno = -rc; return -1; }