static void
soaio_process_sb(struct socket *so, struct sockbuf *sb)
{
        struct kaiocb *job;

        SOCKBUF_LOCK(sb);
        while (!TAILQ_EMPTY(&sb->sb_aiojobq) && soaio_ready(so, sb)) {
                job = TAILQ_FIRST(&sb->sb_aiojobq);
                TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
                if (!aio_clear_cancel_function(job))
                        continue;
                soaio_process_job(so, sb, job);
        }

        /*
         * If there are still pending requests, the socket must not be
         * ready so set SB_AIO to request a wakeup when the socket
         * becomes ready.
         */
        if (!TAILQ_EMPTY(&sb->sb_aiojobq))
                sb->sb_flags |= SB_AIO;
        sb->sb_flags &= ~SB_AIO_RUNNING;
        SOCKBUF_UNLOCK(sb);

        ACCEPT_LOCK();
        SOCK_LOCK(so);
        sorele(so);
}
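/*
 * The tail of soaio_process_sb() shows a reusable drain-and-rearm shape:
 * drain the queue while the resource is ready, and if work remains when
 * the loop stops, set a flag (SB_AIO) so the next readiness event
 * reschedules a drain.  A minimal userspace sketch of the same idea,
 * using pthreads and illustrative names (not kernel APIs):
 */
#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>

struct job {
        TAILQ_ENTRY(job) link;
        void (*run)(struct job *);
};

struct workq {
        pthread_mutex_t lock;
        TAILQ_HEAD(, job) jobs;
        bool want_wakeup;       /* analogue of SB_AIO */
        bool running;           /* analogue of SB_AIO_RUNNING */
};

static void
workq_drain(struct workq *wq, bool (*ready)(struct workq *))
{
        struct job *j;

        pthread_mutex_lock(&wq->lock);
        while (!TAILQ_EMPTY(&wq->jobs) && ready(wq)) {
                j = TAILQ_FIRST(&wq->jobs);
                TAILQ_REMOVE(&wq->jobs, j, link);
                /* Run the job without holding the queue lock. */
                pthread_mutex_unlock(&wq->lock);
                j->run(j);
                pthread_mutex_lock(&wq->lock);
        }
        /* Not ready but work remains: request a wakeup later. */
        if (!TAILQ_EMPTY(&wq->jobs))
                wq->want_wakeup = true;
        wq->running = false;
        pthread_mutex_unlock(&wq->lock);
}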
/*
 * This does all of the accept except the final call to soaccept. The
 * caller will call soaccept after dropping its locks (soaccept may
 * call malloc).
 */
int
svc_vc_accept(struct socket *head, struct socket **sop)
{
        int error = 0;
        struct socket *so;

        if ((head->so_options & SO_ACCEPTCONN) == 0) {
                error = EINVAL;
                goto done;
        }
#ifdef MAC
        error = mac_socket_check_accept(curthread->td_ucred, head);
        if (error != 0)
                goto done;
#endif
        ACCEPT_LOCK();
        if (TAILQ_EMPTY(&head->so_comp)) {
                ACCEPT_UNLOCK();
                error = EWOULDBLOCK;
                goto done;
        }
        so = TAILQ_FIRST(&head->so_comp);
        KASSERT(!(so->so_qstate & SQ_INCOMP), ("svc_vc_accept: so SQ_INCOMP"));
        KASSERT(so->so_qstate & SQ_COMP, ("svc_vc_accept: so not SQ_COMP"));

        /*
         * Before changing the flags on the socket, we have to bump the
         * reference count.  Otherwise, if the protocol calls sofree(),
         * the socket will be released due to a zero refcount.
         * XXX might not need soref() since this is simpler than kern_accept.
         */
        SOCK_LOCK(so);                  /* soref() and so_state update */
        soref(so);                      /* file descriptor reference */

        TAILQ_REMOVE(&head->so_comp, so, so_list);
        head->so_qlen--;
        so->so_state |= (head->so_state & SS_NBIO);
        so->so_qstate &= ~SQ_COMP;
        so->so_head = NULL;

        SOCK_UNLOCK(so);
        ACCEPT_UNLOCK();

        *sop = so;

        /* connection has been removed from the listen queue */
        KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
done:
        return (error);
}
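/*
 * A condensed sketch of the calling sequence svc_vc_accept() is designed
 * for (the real consumer is svc_vc_rendezvous_recv() below); soaccept()
 * runs only after every lock is dropped, because it may sleep in
 * malloc().  The function name is illustrative and error handling is
 * elided.
 */
static void
svc_vc_accept_usage_sketch(struct socket *head)
{
        struct socket *so;
        struct sockaddr *sa = NULL;

        if (svc_vc_accept(head, &so) != 0)      /* dequeues under ACCEPT_LOCK */
                return;
        /* No locks held here, so a sleeping allocation is safe. */
        if (soaccept(so, &sa) == 0 && sa != NULL)
                free(sa, M_SONAME);
}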
/*ARGSUSED*/
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
        struct socket *so = NULL;
        struct sockaddr *sa = NULL;
        int error;
        SVCXPRT *new_xprt;

        /*
         * The socket upcall calls xprt_active() which will eventually
         * cause the server to call us here. We attempt to accept a
         * connection from the socket and turn it into a new
         * transport. If the accept fails, we have drained all pending
         * connections so we call xprt_inactive().
         */
        sx_xlock(&xprt->xp_lock);
        error = svc_vc_accept(xprt->xp_socket, &so);

        if (error == EWOULDBLOCK) {
                /*
                 * We must re-test for new connections after taking
                 * the lock to protect us in the case where a new
                 * connection arrives after our call to accept fails
                 * with EWOULDBLOCK.  The pool lock protects us from
                 * racing the upcall after our TAILQ_EMPTY() call
                 * returns false.
                 */
                ACCEPT_LOCK();
                mtx_lock(&xprt->xp_pool->sp_lock);
                if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
                        xprt_inactive_locked(xprt);
                mtx_unlock(&xprt->xp_pool->sp_lock);
                ACCEPT_UNLOCK();
                sx_xunlock(&xprt->xp_lock);
                return (FALSE);
        }

        if (error) {
                SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
                if (xprt->xp_upcallset) {
                        xprt->xp_upcallset = 0;
                        soupcall_clear(xprt->xp_socket, SO_RCV);
                }
                SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
                xprt_inactive(xprt);
                sx_xunlock(&xprt->xp_lock);
                return (FALSE);
        }

        sx_xunlock(&xprt->xp_lock);

        sa = NULL;
        error = soaccept(so, &sa);

        if (error) {
                /*
                 * XXX not sure if I need to call sofree or soclose here.
                 */
                if (sa)
                        free(sa, M_SONAME);
                return (FALSE);
        }

        /*
         * svc_vc_create_conn will call xprt_register - we don't need
         * to do anything with the new connection except dereference it.
         */
        new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
        if (!new_xprt) {
                soclose(so);
        } else {
                SVC_RELEASE(new_xprt);
        }

        free(sa, M_SONAME);

        return (FALSE); /* there is never an rpc msg to be processed */
}
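/*
 * The EWOULDBLOCK branch above is the classic lost-wakeup defense:
 * before declaring the transport idle, retest the queue under the same
 * locks a producer must hold to enqueue, so an arrival racing the failed
 * accept cannot be missed.  A minimal userspace sketch of the pattern,
 * with illustrative names:
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static bool q_empty = true;     /* stands in for TAILQ_EMPTY(&so_comp) */
static bool xprt_is_active = true;

static void
consumer_saw_empty(void)
{
        /*
         * Wrong: go inactive based on the stale emptiness observation.
         * Right: retake the lock and retest; a producer must hold
         * q_lock to enqueue, so the decision below cannot race it.
         */
        pthread_mutex_lock(&q_lock);
        if (q_empty)
                xprt_is_active = false;
        pthread_mutex_unlock(&q_lock);
}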
/*
 * XXX: Doing accept in a separate thread for each socket might not be
 * the best way to do stuff, but it's pretty clean and debuggable - and
 * you probably won't have hundreds of listening sockets anyway.
 */
static void
icl_accept_thread(void *arg)
{
        struct icl_listen_sock *ils;
        struct socket *head, *so;
        struct sockaddr *sa;
        int error;

        ils = arg;
        head = ils->ils_socket;

        ils->ils_running = true;

        for (;;) {
                ACCEPT_LOCK();
                while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0 &&
                    ils->ils_disconnecting == false) {
                        if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
                                head->so_error = ECONNABORTED;
                                break;
                        }
                        error = msleep(&head->so_timeo, &accept_mtx,
                            PSOCK | PCATCH, "accept", 0);
                        if (error) {
                                /*
                                 * msleep() reacquires accept_mtx before
                                 * returning (no PDROP), so keep the lock
                                 * and simply recheck the queue; dropping
                                 * it here would let the loop touch
                                 * so_comp, or sleep again, unlocked.
                                 */
                                ICL_WARN("msleep failed with error %d",
                                    error);
                                continue;
                        }
                        if (ils->ils_disconnecting) {
                                ACCEPT_UNLOCK();
                                ICL_DEBUG("terminating");
                                ils->ils_running = false;
                                kthread_exit();
                                return;
                        }
                }
                if (head->so_error) {
                        error = head->so_error;
                        head->so_error = 0;
                        ACCEPT_UNLOCK();
                        ICL_WARN("socket error %d", error);
                        continue;
                }
                so = TAILQ_FIRST(&head->so_comp);
                KASSERT(so != NULL, ("NULL so"));
                KASSERT(!(so->so_qstate & SQ_INCOMP),
                    ("accept1: so SQ_INCOMP"));
                KASSERT(so->so_qstate & SQ_COMP,
                    ("accept1: so not SQ_COMP"));

                /*
                 * Before changing the flags on the socket, we have to bump
                 * the reference count.  Otherwise, if the protocol calls
                 * sofree(), the socket will be released due to a zero
                 * refcount.
                 */
                SOCK_LOCK(so);          /* soref() and so_state update */
                soref(so);              /* file descriptor reference */

                TAILQ_REMOVE(&head->so_comp, so, so_list);
                head->so_qlen--;
                so->so_state |= (head->so_state & SS_NBIO);
                so->so_qstate &= ~SQ_COMP;
                so->so_head = NULL;
                SOCK_UNLOCK(so);
                ACCEPT_UNLOCK();

                sa = NULL;
                error = soaccept(so, &sa);
                if (error != 0) {
                        ICL_WARN("soaccept error %d", error);
                        if (sa != NULL)
                                free(sa, M_SONAME);
                        soclose(so);
                        continue;
                }

                (ils->ils_listen->il_accept)(so, sa, ils->ils_id);
        }
}
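/*
 * For contrast, the same one-accept-thread-per-listening-socket shape in
 * plain userspace terms: block in accept(2) and hand each connection to
 * a callback.  This is a sketch with illustrative names, not the kernel
 * code above.
 */
#include <pthread.h>
#include <stddef.h>
#include <sys/socket.h>

struct listener {
        int     fd;     /* listening socket */
        void    (*accept_cb)(int fd, struct sockaddr *sa, socklen_t salen);
};

static void *
accept_thread_sketch(void *arg)
{
        struct listener *l = arg;
        struct sockaddr_storage ss;
        socklen_t salen;
        int fd;

        for (;;) {
                salen = sizeof(ss);
                fd = accept(l->fd, (struct sockaddr *)&ss, &salen);
                if (fd < 0)
                        continue;       /* treat errors as transient, retry */
                l->accept_cb(fd, (struct sockaddr *)&ss, salen);
        }
        return (NULL);
}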
/*
 * Handle the first completed incoming connection, assumed to be already
 * on the socket's so_comp queue.
 */
static void
ng_ksocket_finish_accept(priv_p priv)
{
        struct socket *const head = priv->so;
        struct socket *so;
        struct sockaddr *sa = NULL;
        struct ng_mesg *resp;
        struct ng_ksocket_accept *resp_data;
        node_p node;
        priv_p priv2;
        int len;
        int error;

        ACCEPT_LOCK();
        so = TAILQ_FIRST(&head->so_comp);
        if (so == NULL) {       /* Should never happen */
                ACCEPT_UNLOCK();
                return;
        }
        TAILQ_REMOVE(&head->so_comp, so, so_list);
        head->so_qlen--;
        so->so_qstate &= ~SQ_COMP;
        so->so_head = NULL;
        SOCK_LOCK(so);
        soref(so);
        sosetstate(so, SS_NBIO);
        SOCK_UNLOCK(so);
        ACCEPT_UNLOCK();

        /* XXX KNOTE(&head->so_rcv.sb_sel.si_note, 0); */

        soaccept(so, &sa);

        len = OFFSETOF(struct ng_ksocket_accept, addr);
        if (sa != NULL)
                len += sa->sa_len;

        NG_MKMESSAGE(resp, NGM_KSOCKET_COOKIE, NGM_KSOCKET_ACCEPT, len,
            M_WAITOK | M_NULLOK);
        if (resp == NULL) {
                soclose(so);
                goto out;
        }
        resp->header.flags |= NGF_RESP;
        resp->header.token = priv->response_token;

        /* Clone a ksocket node to wrap the new socket */
        error = ng_make_node_common(&ng_ksocket_typestruct, &node);
        if (error) {
                kfree(resp, M_NETGRAPH);
                soclose(so);
                goto out;
        }

        if (ng_ksocket_constructor(node) != 0) {
                NG_NODE_UNREF(node);
                kfree(resp, M_NETGRAPH);
                soclose(so);
                goto out;
        }

        priv2 = NG_NODE_PRIVATE(node);
        priv2->so = so;
        priv2->flags |= KSF_CLONED | KSF_EMBRYONIC;

        /*
         * Insert the cloned node into a list of embryonic children
         * on the parent node.  When a hook is created on the cloned
         * node it will be removed from this list.  When the parent
         * is destroyed it will destroy any embryonic children it has.
         */
        LIST_INSERT_HEAD(&priv->embryos, priv2, siblings);

        so->so_upcallarg = (caddr_t)node;
        so->so_upcall = ng_ksocket_incoming;
        SOCKBUF_LOCK(&so->so_rcv);
        so->so_rcv.sb_flags |= SB_UPCALL;
        SOCKBUF_UNLOCK(&so->so_rcv);
        SOCKBUF_LOCK(&so->so_snd);
        so->so_snd.sb_flags |= SB_UPCALL;
        SOCKBUF_UNLOCK(&so->so_snd);

        /* Fill in the response data and send it or return it to the caller */
        resp_data = (struct ng_ksocket_accept *)resp->data;
        resp_data->nodeid = NG_NODE_ID(node);
        if (sa != NULL)
                bcopy(sa, &resp_data->addr, sa->sa_len);

        NG_SEND_MSG_ID(error, node, resp, priv->response_addr, 0);

out:
        if (sa != NULL)
                kfree(sa, M_SONAME);
}
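/*
 * The SB_UPCALL wiring above makes the protocol call ng_ksocket_incoming()
 * whenever the new socket has activity.  A hedged sketch of the shape an
 * old-style (pre-soupcall) handler takes; the body is illustrative and
 * the real handler lives elsewhere in ng_ksocket.c:
 */
static void
example_upcall(struct socket *so, void *arg, int waitflag)
{
        node_p node = arg;

        /*
         * Upcalls run from protocol code with socket locks held, so they
         * must not sleep; a real handler records the event and defers the
         * work to netgraph context (ng_ksocket does this via ng_send_fn()).
         */
        (void)so;
        (void)node;
        (void)waitflag;
}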
int
ofp_accept(int sockfd, struct ofp_sockaddr *addr, ofp_socklen_t *addrlen)
{
        struct ofp_sockaddr *sa = NULL;
        struct socket *so, *head = ofp_get_sock_by_fd(sockfd);

        if (!head) {
                ofp_errno = OFP_EBADF;
                return -1;
        }

        if ((head->so_options & OFP_SO_ACCEPTCONN) == 0) {
                ofp_errno = OFP_EINVAL;
                return -1;
        }

        ACCEPT_LOCK();
        if ((head->so_state & SS_NBIO) && OFP_TAILQ_EMPTY(&head->so_comp)) {
                ACCEPT_UNLOCK();
                ofp_errno = OFP_EWOULDBLOCK;
                return -1;
        }

        while (OFP_TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
                if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
                        head->so_error = OFP_ECONNABORTED;
                        break;
                }
                if (ofp_msleep(&head->so_timeo, ofp_accept_mtx(), 0,
                    "accept", 0)) {
                        ACCEPT_UNLOCK();
                        return -1;
                }
        }

        if (head->so_error) {
                ofp_errno = head->so_error;
                head->so_error = 0;
                ACCEPT_UNLOCK();
                return -1;
        }

        so = OFP_TAILQ_FIRST(&head->so_comp);
        KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
        KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));

        /*
         * Before changing the flags on the socket, we have to bump the
         * reference count.  Otherwise, if the protocol calls ofp_sofree(),
         * the socket will be released due to a zero refcount.
         */
        OFP_SOCK_LOCK(so);              /* soref() and so_state update */
        soref(so);                      /* file descriptor reference */

        OFP_TAILQ_REMOVE(&head->so_comp, so, so_list);
        head->so_qlen--;
        so->so_state |= (head->so_state & SS_NBIO);
        so->so_qstate &= ~SQ_COMP;
        so->so_head = NULL;

        OFP_SOCK_UNLOCK(so);
        ACCEPT_UNLOCK();

        /* connection has been removed from the listen queue */
        /* KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0); */

        sa = NULL;
        ofp_errno = ofp_soaccept(so, &sa);
        if (ofp_errno) {
                /*
                 * Return a namelen of zero for older code which might
                 * ignore the return value from accept.
                 */
                if (addr)
                        *addrlen = 0;
                return -1;
        }

        if (sa == NULL) {
                if (addr)
                        *addrlen = 0;
                return so->so_number;
        }

        if (addr) {
                /* check sa_len before it is destroyed */
                if (*addrlen > sa->sa_len)
                        *addrlen = sa->sa_len;
                memcpy(addr, sa, *addrlen);
        }
        free(sa);

        return so->so_number;
}
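/*
 * The copy-out at the end of ofp_accept() follows the accept(2) address
 * contract: *addrlen is read as the caller's buffer size and written back
 * as the number of bytes actually copied, truncating the address if the
 * buffer is too small.  The same logic isolated as a helper (a sketch;
 * ofp_accept() open-codes it, using its own sockaddr types):
 */
#include <string.h>
#include <sys/socket.h>

static void
copy_out_sockaddr(const struct sockaddr *sa, socklen_t sa_len,
    struct sockaddr *addr, socklen_t *addrlen)
{
        if (addr == NULL || addrlen == NULL)
                return;
        if (*addrlen > sa_len)
                *addrlen = sa_len;      /* copy no more than the source */
        memcpy(addr, sa, *addrlen);
}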