/*
 * Predicate requests can be aborted.  This function is only called once
 * and will interlock against processing/reply races (since such races
 * occur on the same thread that controls the port where the abort is
 * requeued).
 *
 * This part of the abort request occurs on the target cpu.  The message
 * flags must be tested again in case the test that we did on the
 * originating cpu raced.  Since messages are handled in sequence, the
 * original message will have already been handled by the loop and either
 * replied to or queued.
 *
 * We really only need to interlock with MSGF_REPLY (a bit that is set on
 * our cpu when we reply).  Note that MSGF_DONE is not set until the
 * reply reaches the originating cpu.  Test both bits anyway.
 */
void
netmsg_so_notify_abort(netmsg_t msg)
{
	struct netmsg_so_notify_abort *abrtmsg = &msg->notify_abort;
	struct netmsg_so_notify *nmsg = abrtmsg->nm_notifymsg;
	struct signalsockbuf *ssb;

	/*
	 * The original notify message is not destroyed until after the
	 * abort request is returned, so we can check its state.
	 */
	lwkt_getpooltoken(nmsg->base.nm_so);
	if ((nmsg->base.lmsg.ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0) {
		/*
		 * Notify is still pending: pull it off whichever sockbuf
		 * wait list it was queued on (receive vs send, selected by
		 * NM_REVENT) and reply to it with EINTR.  The token must
		 * be dropped before replying.
		 */
		ssb = (nmsg->nm_etype & NM_REVENT) ?
		      &nmsg->base.nm_so->so_rcv :
		      &nmsg->base.nm_so->so_snd;
		TAILQ_REMOVE(&ssb->ssb_kq.ki_mlist, nmsg, nm_list);
		lwkt_relpooltoken(nmsg->base.nm_so);
		lwkt_replymsg(&nmsg->base.lmsg, EINTR);
	} else {
		/* Already replied (or reply in flight); nothing to dequeue */
		lwkt_relpooltoken(nmsg->base.nm_so);
	}

	/*
	 * Reply to the abort message
	 */
	lwkt_replymsg(&abrtmsg->base.lmsg, 0);
}
/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 *
	 * Loop because so_head may change while we block acquiring the
	 * pool token; re-check after the token is held.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.  atomic_fetchadd_int returns the
	 * value prior to the decrement, so != 1 means other references
	 * remain and we must not tear the socket down.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	/*
	 * We're done, remove ourselves from the accept queue we are
	 * on, if we are on one.
	 */
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	}
	/* Final teardown: release buffers, flush receive side, free memory */
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}
/*
 * Handle a predicate event request.  This function is only called once
 * when the predicate message queueing request is received.
 *
 * If the predicate is already satisfied the message is replied to
 * immediately, otherwise it is queued on the appropriate sockbuf's
 * notify list until a later event re-tests it or an abort (see
 * netmsg_so_notify_abort()) removes it.
 */
void
netmsg_so_notify(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct signalsockbuf *ssb;

	/* NM_REVENT selects the receive buffer, otherwise the send buffer */
	ssb = (msg->notify.nm_etype & NM_REVENT) ? &so->so_rcv : &so->so_snd;

	/*
	 * Reply immediately if the event has occured, otherwise queue the
	 * request.
	 *
	 * NOTE: Socket can change if this is an accept predicate, so the
	 *	 local 'so' caches the socket whose pool token we hold.
	 *
	 * NOTE(review): the notify list here must be the one that
	 * netmsg_so_notify_abort() dequeues from; the original used
	 * ssb_mlist while the abort path used ssb_kq.ki_mlist, which
	 * would leave aborted messages on a stale list.  Both paths now
	 * use ssb_kq.ki_mlist.
	 */
	lwkt_getpooltoken(so);
	atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
	if (msg->notify.nm_predicate(&msg->notify)) {
		/* Predicate satisfied; clear SSB_MEVENT if no others wait */
		if (TAILQ_EMPTY(&ssb->ssb_kq.ki_mlist))
			atomic_clear_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
		lwkt_replymsg(&msg->base.lmsg,
			      msg->base.lmsg.ms_error);
	} else {
		TAILQ_INSERT_TAIL(&ssb->ssb_kq.ki_mlist,
				  &msg->notify, nm_list);
		/*
		 * NOTE:
		 * If the predicate ever blocks, the pool token will be
		 * released, so SSB_MEVENT set beforehand could have
		 * been cleared when we reach here.  In case that
		 * happens, we set SSB_MEVENT again, after the notify
		 * has been queued.
		 */
		atomic_set_int(&ssb->ssb_flags, SSB_MEVENT);
		lwkt_relpooltoken(so);
	}
}
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error = 0;

	funsetown(&so->so_sigio);
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			/*
			 * Non-blocking close while a disconnect is already
			 * in progress: do not wait out the linger period.
			 */
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			/*
			 * Linger: sleep (interruptibly, bounded by
			 * so_linger seconds) until the disconnect
			 * completes or we are signalled.
			 */
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
					       "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		/* Detach the protocol; preserve the first error we saw */
		error2 = so_pru_detach(so);
		if (error == 0)
			error = error2;
	}
discard:
	lwkt_getpooltoken(so);
	/*
	 * A listening socket may still have queued connections pending;
	 * abort every socket on both the incomplete and completed
	 * accept queues before discarding the listener itself.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			soclrstate(sp, SS_INCOMP);
			sp->so_head = NULL;
			so->so_incqlen--;
			soaborta(sp);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			soclrstate(sp, SS_COMP);
			sp->so_head = NULL;
			so->so_qlen--;
			soaborta(sp);
		}
	}
	lwkt_relpooltoken(so);
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
	sofree(so);			/* dispose of ref */
	return (error);
}
/*
 * Handle the first completed incoming connection, assumed to be already
 * on the socket's so_comp queue.
 *
 * The accepted socket is wrapped in a freshly cloned ksocket node which
 * is linked onto the parent's embryo list, and an NGM_KSOCKET_ACCEPT
 * response carrying the new node id (and peer address, if any) is sent
 * back to the original requester.
 */
static void
ng_ksocket_finish_accept(priv_p priv)
{
	struct socket *const head = priv->so;
	struct socket *so;
	struct sockaddr *sa = NULL;
	struct ng_mesg *resp;
	struct ng_ksocket_accept *resp_data;
	node_p node;
	priv_p priv2;
	int len;
	int error;

	/*
	 * Dequeue the first completed connection under the listening
	 * socket's pool token and take our own reference on it.
	 */
	lwkt_getpooltoken(head);
	so = TAILQ_FIRST(&head->so_comp);
	if (so == NULL) {	/* Should never happen */
		lwkt_relpooltoken(head);
		return;
	}
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	soclrstate(so, SS_COMP);
	so->so_head = NULL;
	soreference(so);
	lwkt_relpooltoken(head);

	/* XXX KNOTE(&head->so_rcv.ssb_sel.si_note, 0); */

	soaccept(so, &sa);

	/*
	 * Size the response to include the peer address, when soaccept()
	 * returned one.  M_NULLOK means the allocation may still fail.
	 */
	len = OFFSETOF(struct ng_ksocket_accept, addr);
	if (sa != NULL)
		len += sa->sa_len;
	NG_MKMESSAGE(resp, NGM_KSOCKET_COOKIE, NGM_KSOCKET_ACCEPT, len,
	    M_WAITOK | M_NULLOK);
	if (resp == NULL) {
		soclose(so, FNONBLOCK);
		goto out;
	}
	resp->header.flags |= NGF_RESP;
	resp->header.token = priv->response_token;

	/* Clone a ksocket node to wrap the new socket */
	error = ng_make_node_common(&ng_ksocket_typestruct, &node);
	if (error) {
		kfree(resp, M_NETGRAPH);
		soclose(so, FNONBLOCK);
		goto out;
	}

	if (ng_ksocket_constructor(node) != 0) {
		NG_NODE_UNREF(node);
		kfree(resp, M_NETGRAPH);
		soclose(so, FNONBLOCK);
		goto out;
	}

	priv2 = NG_NODE_PRIVATE(node);
	priv2->so = so;
	priv2->flags |= KSF_CLONED | KSF_EMBRYONIC;

	/*
	 * Insert the cloned node into a list of embryonic children
	 * on the parent node.  When a hook is created on the cloned
	 * node it will be removed from this list.  When the parent
	 * is destroyed it will destroy any embryonic children it has.
	 */
	LIST_INSERT_HEAD(&priv->embryos, priv2, siblings);

	/*
	 * Arm upcalls on the NEW socket.  The original code set
	 * SSB_UPCALL on priv->so (the listening socket) even though
	 * so_upcall was just installed on 'so'; the flag must be set on
	 * the same socket as the upcall or the clone never receives
	 * upcall notifications.
	 */
	so->so_upcallarg = (caddr_t)node;
	so->so_upcall = ng_ksocket_incoming;
	atomic_set_int(&so->so_rcv.ssb_flags, SSB_UPCALL);
	atomic_set_int(&so->so_snd.ssb_flags, SSB_UPCALL);

	/* Fill in the response data and send it or return it to the caller */
	resp_data = (struct ng_ksocket_accept *)resp->data;
	resp_data->nodeid = NG_NODE_ID(node);
	if (sa != NULL)
		bcopy(sa, &resp_data->addr, sa->sa_len);

	NG_SEND_MSG_ID(error, node, resp, priv->response_addr, 0);

out:
	if (sa != NULL)
		kfree(sa, M_SONAME);
}