struct socket* acceptfrom(struct socket* server) { // if ((so->so_options & SO_ACCEPTCONN) == 0) // return (EINVAL); // if ((so->so_state & SS_NBIO) && so->so_qlen == 0) // return (EWOULDBLOCK); // if (so->so_error): // error = so->so_error; // so->so_error = 0; // return (error); int s = splnet(); struct socket *so = server->so_q; if (!so) goto done; if (soqremque(so, 1) == 0) panic("accept"); struct mbuf *nam = m_get(M_WAIT, MT_SONAME); (void) soaccept(so, nam); m_freem(nam); done: splx(s); return so; }
/*ARGSUSED*/
/*
 * Rendezvous (listening) transport "recv" method for the kernel RPC
 * server.  Rather than receiving an RPC message, it accepts one pending
 * TCP connection from the listening socket and wraps it in a brand new
 * connection transport via svc_vc_create_conn().
 *
 * Always returns FALSE: a rendezvous transport never yields an RPC
 * message (msg/addrp/mp are unused, hence ARGSUSED).
 */
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;
	SVCXPRT *new_xprt;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport. If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
	sx_xlock(&xprt->xp_lock);
	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK. The pool lock protects us from
		 * racing the upcall after our TAILQ_EMPTY() call
		 * returns false.
		 */
		ACCEPT_LOCK();
		mtx_lock(&xprt->xp_pool->sp_lock);
		if (TAILQ_EMPTY(&xprt->xp_socket->so_comp))
			xprt_inactive_locked(xprt);
		mtx_unlock(&xprt->xp_pool->sp_lock);
		ACCEPT_UNLOCK();
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		/* Hard error: tear down our read upcall and deactivate. */
		SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
		if (xprt->xp_upcallset) {
			xprt->xp_upcallset = 0;
			soupcall_clear(xprt->xp_socket, SO_RCV);
		}
		SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
		xprt_inactive(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	sa = 0;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if I need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection except derefence it.
	 */
	new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
	if (!new_xprt) {
		soclose(so);
	} else {
		SVC_RELEASE(new_xprt);
	}

	/* sa was allocated by soaccept() above; we own it either way. */
	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}
int libcfs_sock_accept (struct socket **newsockp, struct socket *sock) { struct socket *so; struct sockaddr *sa; int error, s; CFS_DECL_FUNNEL_DATA; CFS_NET_IN; s = splnet(); if ((sock->so_options & SO_ACCEPTCONN) == 0) { splx(s); CFS_NET_EX; return (-EINVAL); } if ((sock->so_state & SS_NBIO) && sock->so_comp.tqh_first == NULL) { splx(s); CFS_NET_EX; return (-EWOULDBLOCK); } error = 0; while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) { if (sock->so_state & SS_CANTRCVMORE) { sock->so_error = ECONNABORTED; break; } error = tsleep((caddr_t)&sock->so_timeo, PSOCK | PCATCH, "accept", 0); if (error) { splx(s); CFS_NET_EX; return (-error); } } if (sock->so_error) { error = sock->so_error; sock->so_error = 0; splx(s); CFS_NET_EX; return (-error); } /* * At this point we know that there is at least one connection * ready to be accepted. Remove it from the queue prior to * allocating the file descriptor for it since falloc() may * block allowing another process to accept the connection * instead. */ so = TAILQ_FIRST(&sock->so_comp); TAILQ_REMOVE(&sock->so_comp, so, so_list); sock->so_qlen--; so->so_state &= ~SS_COMP; so->so_head = NULL; sa = 0; (void) soaccept(so, &sa); *newsockp = so; FREE(sa, M_SONAME); splx(s); CFS_NET_EX; return (-error); }
/*
 * XXX: Doing accept in a separate thread in each socket might not be the best way
 * to do stuff, but it's pretty clean and debuggable - and you probably won't
 * have hundreds of listening sockets anyway.
 */
/*
 * Per-listen-socket kernel thread: loop forever accepting completed
 * connections from ils->ils_socket and handing each one (plus its peer
 * address) to the listener's il_accept callback.  Exits only when
 * ils_disconnecting is observed while sleeping.
 */
static void
icl_accept_thread(void *arg)
{
	struct icl_listen_sock *ils;
	struct socket *head, *so;
	struct sockaddr *sa;
	int error;

	ils = arg;
	head = ils->ils_socket;

	ils->ils_running = true;

	for (;;) {
		ACCEPT_LOCK();
		/* Sleep until a connection completes, an error is posted,
		 * or we are asked to shut down. */
		while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0 &&
		    ils->ils_disconnecting == false) {
			if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
				head->so_error = ECONNABORTED;
				break;
			}
			error = msleep(&head->so_timeo, &accept_mtx,
			    PSOCK | PCATCH, "accept", 0);
			if (error) {
				ACCEPT_UNLOCK();
				ICL_WARN("msleep failed with error %d",
				    error);
				/*
				 * NOTE(review): this continue re-tests the
				 * inner while condition with accept_mtx
				 * dropped — confirm this is intentional.
				 */
				continue;
			}
			if (ils->ils_disconnecting) {
				ACCEPT_UNLOCK();
				ICL_DEBUG("terminating");
				ils->ils_running = false;
				kthread_exit();
				return;
			}
		}
		if (head->so_error) {
			error = head->so_error;
			head->so_error = 0;
			ACCEPT_UNLOCK();
			ICL_WARN("socket error %d", error);
			continue;
		}
		so = TAILQ_FIRST(&head->so_comp);
		KASSERT(so != NULL, ("NULL so"));
		KASSERT(!(so->so_qstate & SQ_INCOMP),
		    ("accept1: so SQ_INCOMP"));
		KASSERT(so->so_qstate & SQ_COMP,
		    ("accept1: so not SQ_COMP"));

		/*
		 * Before changing the flags on the socket, we have to bump the
		 * reference count.  Otherwise, if the protocol calls sofree(),
		 * the socket will be released due to a zero refcount.
		 */
		SOCK_LOCK(so);			/* soref() and so_state update */
		soref(so);			/* file descriptor reference */

		TAILQ_REMOVE(&head->so_comp, so, so_list);
		head->so_qlen--;
		/* Inherit non-blocking mode from the listening socket. */
		so->so_state |= (head->so_state & SS_NBIO);
		so->so_qstate &= ~SQ_COMP;
		so->so_head = NULL;

		SOCK_UNLOCK(so);
		ACCEPT_UNLOCK();

		sa = NULL;
		error = soaccept(so, &sa);
		if (error != 0) {
			ICL_WARN("soaccept error %d", error);
			if (sa != NULL)
				free(sa, M_SONAME);
			soclose(so);
			continue;
		}

		/* Ownership of so and sa passes to the accept callback. */
		(ils->ils_listen->il_accept)(so, sa, ils->ils_id);
	}
}
/*
 * Handle the first completed incoming connection, assumed to be already
 * on the socket's so_comp queue.
 *
 * Dequeues the connection, completes the accept, clones a new ksocket
 * netgraph node to wrap it, installs receive/send upcalls, and sends an
 * NGM_KSOCKET_ACCEPT response (node id + peer address) back to whoever
 * issued the original accept request.  On any allocation/clone failure
 * the new socket is closed and the function returns silently.
 */
static void
ng_ksocket_finish_accept(priv_p priv)
{
	struct socket *const head = priv->so;
	struct socket *so;
	struct sockaddr *sa = NULL;
	struct ng_mesg *resp;
	struct ng_ksocket_accept *resp_data;
	node_p node;
	priv_p priv2;
	int len;
	int error;

	ACCEPT_LOCK();
	so = TAILQ_FIRST(&head->so_comp);
	if (so == NULL) {	/* Should never happen */
		ACCEPT_UNLOCK();
		return;
	}
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	SOCK_LOCK(so);
	/* Hold a reference before the protocol can sofree() a zero-ref socket. */
	soref(so);
	sosetstate(so, SS_NBIO);
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();

	/* XXX KNOTE(&head->so_rcv.sb_sel.si_note, 0); */

	soaccept(so, &sa);

	/* Response is variable length: header plus the peer address, if any. */
	len = OFFSETOF(struct ng_ksocket_accept, addr);
	if (sa != NULL)
		len += sa->sa_len;

	/* M_NULLOK: allocation may fail, leaving resp NULL — checked below. */
	NG_MKMESSAGE(resp, NGM_KSOCKET_COOKIE, NGM_KSOCKET_ACCEPT, len,
	    M_WAITOK | M_NULLOK);
	if (resp == NULL) {
		soclose(so);
		goto out;
	}
	resp->header.flags |= NGF_RESP;
	resp->header.token = priv->response_token;

	/* Clone a ksocket node to wrap the new socket */
	error = ng_make_node_common(&ng_ksocket_typestruct, &node);
	if (error) {
		kfree(resp, M_NETGRAPH);
		soclose(so);
		goto out;
	}

	if (ng_ksocket_constructor(node) != 0) {
		NG_NODE_UNREF(node);
		kfree(resp, M_NETGRAPH);
		soclose(so);
		goto out;
	}

	priv2 = NG_NODE_PRIVATE(node);
	priv2->so = so;
	priv2->flags |= KSF_CLONED | KSF_EMBRYONIC;

	/*
	 * Insert the cloned node into a list of embryonic children
	 * on the parent node.  When a hook is created on the cloned
	 * node it will be removed from this list.  When the parent
	 * is destroyed it will destroy any embryonic children it has.
	 */
	LIST_INSERT_HEAD(&priv->embryos, priv2, siblings);

	/* Route socket activity into the netgraph node via upcalls. */
	so->so_upcallarg = (caddr_t)node;
	so->so_upcall = ng_ksocket_incoming;
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_flags |= SB_UPCALL;
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_UPCALL;
	SOCKBUF_UNLOCK(&so->so_snd);

	/* Fill in the response data and send it or return it to the caller */
	resp_data = (struct ng_ksocket_accept *)resp->data;
	resp_data->nodeid = NG_NODE_ID(node);
	if (sa != NULL)
		bcopy(sa, &resp_data->addr, sa->sa_len);

	NG_SEND_MSG_ID(error, node, resp, priv->response_addr, 0);

out:
	if (sa != NULL)
		kfree(sa, M_SONAME);
}
/*
 * eCos wrapper for BSD accept(): take the listening socket behind fp,
 * block (unless SS_NBIO) for a completed connection, bind it to the
 * pre-allocated file object new_fp, and optionally copy the peer
 * address out to name/anamelen.
 *
 * Returns 0 on success or a BSD errno (EINVAL, EWOULDBLOCK,
 * ECONNABORTED, tsleep/copyout errors).  The #if 0 regions are the
 * original FreeBSD per-process fd bookkeeping, not used under eCos.
 *
 * NOTE(review): the sockaddr returned by soaccept() (sa) does not
 * appear to be freed on any path here — looks like a leak inherited
 * from the port; confirm against the eCos allocator conventions.
 */
static int
bsd_accept(cyg_file *fp, cyg_file *new_fp, struct sockaddr *name,
    socklen_t *anamelen)
{
	socklen_t namelen = 0;
	int error = 0, s;
	struct socket *head, *so;
	struct sockaddr *sa;

	if( anamelen != NULL)
		namelen = *anamelen;

	s = splsoftnet();
	head = (struct socket *)fp->f_data;
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		splx(s);
		return (EINVAL);
	}
	/* Non-blocking and nothing completed yet. */
	if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
		splx(s);
		return (EWOULDBLOCK);
	}
	/* Wait for a completed connection, an error, or EOF on the listener. */
	while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
		if (head->so_state & SS_CANTRCVMORE) {
			head->so_error = ECONNABORTED;
			break;
		}
		error = tsleep((caddr_t)&head->so_timeo, PSOCK | PCATCH,
		    "netcon", 0);
		if (error) {
			splx(s);
			return (error);
		}
	}
	if (head->so_error) {
		error = head->so_error;
		head->so_error = 0;
		splx(s);
		return (error);
	}

	/*
	 * At this point we know that there is at least one connection
	 * ready to be accepted. Remove it from the queue prior to
	 * allocating the file descriptor for it since falloc() may
	 * block allowing another process to accept the connection
	 * instead.
	 */
	so = TAILQ_FIRST(&head->so_comp);
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;

#if 0 // FIXME
	fflag = lfp->f_flag;
	error = falloc(p, &nfp, &fd);
	if (error) {
		/*
		 * Probably ran out of file descriptors. Put the
		 * unaccepted connection back onto the queue and
		 * do another wakeup so some other process might
		 * have a chance at it.
		 */
		TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
		head->so_qlen++;
		wakeup_one(&head->so_timeo);
		splx(s);
		goto done;
	}
	fhold(nfp);
	p->p_retval[0] = fd;

	/* connection has been removed from the listen queue */
	KNOTE(&head->so_rcv.sb_sel.si_note, 0);
#endif
	so->so_state &= ~SS_COMP;
	so->so_head = NULL;

	/* Initialize eCos select records for the new socket's buffers. */
	cyg_selinit(&so->so_rcv.sb_sel);
	cyg_selinit(&so->so_snd.sb_sel);

	/* Wire the accepted socket into the caller-provided file object. */
	new_fp->f_type = DTYPE_SOCKET;
	new_fp->f_flag |= FREAD|FWRITE;
	new_fp->f_offset = 0;
	new_fp->f_ops = &bsd_sock_fileops;
	new_fp->f_data = (CYG_ADDRWORD)so;
	new_fp->f_xops = (CYG_ADDRWORD)&bsd_sockops;

	sa = 0;
	error = soaccept(so, &sa);
	if (error) {
		/*
		 * return a namelen of zero for older code which might
		 * ignore the return value from accept.
		 */
		if (name != NULL) {
			*anamelen = 0;
		}
		goto noconnection;
	}
	if (sa == NULL) {
		/* Protocol produced no peer address. */
		namelen = 0;
		if (name)
			goto gotnoname;
		splx(s);
		error = 0;
		goto done;
	}
	if (name) {
		/* Truncate to the caller's buffer size. */
		if (namelen > sa->sa_len)
			namelen = sa->sa_len;
#ifdef COMPAT_OLDSOCK
		if (compat)
			((struct osockaddr *)sa)->sa_family = sa->sa_family;
#endif
		error = copyout(sa, (caddr_t)name, namelen);
		if (!error)
gotnoname:
			*anamelen = namelen;
	}
noconnection:
#if 0 // FIXME
	/*
	 * close the new descriptor, assuming someone hasn't ripped it
	 * out from under us.
	 */
	if (error) {
		if (fdp->fd_ofiles[fd] == nfp) {
			fdp->fd_ofiles[fd] = NULL;
			fdrop(nfp, p);
		}
	}
	splx(s);

	/*
	 * Release explicitly held references before returning.
	 */
done:
	if (nfp != NULL)
		fdrop(nfp, p);
	fdrop(lfp, p);
	return (error);
	m_freem(nam);
#else
done:
#endif
	splx(s);
	return (error);
}
long t_accept(long s, struct sockaddr * addr, int * addrlen) { #ifdef SOCKDEBUG char logbuf[10]; #endif struct socket * so; struct mbuf * nam; so = LONG2SO(s); SOC_CHECK(so); DOMAIN_CHECK(so, *addrlen); so->so_error = 0; INET_TRACE (INETM_SOCKET, ("INET:accept:so %x so_qlen %d so_state %x\n", so, so->so_qlen, so->so_state)); if ((so->so_options & SO_ACCEPTCONN) == 0) { so->so_error = EINVAL; #ifdef SOCKDEBUG sprintf(logbuf, "t_accept[%d]: %d", __LINE__, so->so_error); glog_with_type(LOG_TYPE_DEBUG, logbuf, 1); #endif return SOCKET_ERROR; } if ((so->so_state & SS_NBIO) && so->so_qlen == 0) { so->so_error = EWOULDBLOCK; #ifdef SOCKDEBUG sprintf(logbuf, "t_accept[%d]: %d", __LINE__, so->so_error); glog_with_type(LOG_TYPE_DEBUG, logbuf, 1); #endif return SOCKET_ERROR; } LOCK_NET_RESOURCE(NET_RESID); while (so->so_qlen == 0 && so->so_error == 0) { if (so->so_state & SS_CANTRCVMORE) { so->so_error = ECONNABORTED; UNLOCK_NET_RESOURCE(NET_RESID); return SOCKET_ERROR; } tcp_sleep ((char *)&so->so_timeo); } if (so->so_error) { #ifdef SOCKDEBUG sprintf(logbuf, "t_accept[%d]: %d", __LINE__, so->so_error); glog_with_type(LOG_TYPE_DEBUG, logbuf, 1); #endif UNLOCK_NET_RESOURCE(NET_RESID); return SOCKET_ERROR; } nam = m_getwithdata (MT_SONAME, sizeof (struct sockaddr)); if (nam == NULL) { UNLOCK_NET_RESOURCE(NET_RESID); so->so_error = ENOMEM; #ifdef SOCKDEBUG sprintf(logbuf, "t_accept[%d]: %d", __LINE__, so->so_error); glog_with_type(LOG_TYPE_DEBUG, logbuf, 1); #endif return SOCKET_ERROR; } { struct socket *aso = so->so_q; if (soqremque (aso, 1) == 0) panic("accept"); so = aso; } (void)soaccept (so, nam); #ifdef TRACE_DEBUG { struct sockaddr_in *sin; sin = mtod(nam, struct sockaddr_in *); INET_TRACE (INETM_SOCKET, ("INET:accept:done so %lx port %d addr %lx\n", so, sin->sin_port, sin->sin_addr.s_addr)); } #endif /* TRACE_INET */ /* return the addressing info in the passed structure */ if (addr != NULL) MEMCPY(addr, nam->m_data, *addrlen); m_freem (nam); 
UNLOCK_NET_RESOURCE(NET_RESID); SOC_RANGE(so); return SO2LONG(so); }
/*
 * NetBSD common accept implementation (backs accept(2), paccept(2), ...).
 *
 * l        - calling lwp.
 * sock     - descriptor of the listening socket.
 * name     - out: mbuf holding the peer address (caller frees).
 * new_sock - out: the new descriptor number.
 * mask     - optional signal mask installed for the duration (paccept).
 * flags    - SOCK_NONBLOCK / SOCK_NOSIGPIPE / SOCK_CLOEXEC for the new fd.
 * clrflags - file flags to clear when inheriting from the listener.
 *
 * Returns 0 on success or an errno (EBADF, ENOTSOCK, EOPNOTSUPP,
 * EINVAL, EWOULDBLOCK, ERESTART, ...).  The new file is allocated up
 * front; on any failure it is released via fd_abort()/closef().
 */
int
do_sys_accept(struct lwp *l, int sock, struct mbuf **name,
    register_t *new_sock, const sigset_t *mask, int flags, int clrflags)
{
	file_t *fp, *fp2;
	struct mbuf *nam;
	int error, fd;
	struct socket *so, *so2;
	short wakeup_state = 0;

	if ((fp = fd_getfile(sock)) == NULL)
		return EBADF;
	if (fp->f_type != DTYPE_SOCKET) {
		fd_putfile(sock);
		return ENOTSOCK;
	}
	/* Reserve the new descriptor before sleeping. */
	if ((error = fd_allocfile(&fp2, &fd)) != 0) {
		fd_putfile(sock);
		return error;
	}
	nam = m_get(M_WAIT, MT_SONAME);
	*new_sock = fd;
	so = fp->f_socket;
	solock(so);

	/* paccept(2): temporarily install the caller-supplied signal mask. */
	if (__predict_false(mask))
		sigsuspendsetup(l, mask);

	if (!(so->so_proto->pr_flags & PR_LISTEN)) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto bad;
	}
	if ((so->so_state & SS_NBIO) && so->so_qlen == 0) {
		error = EWOULDBLOCK;
		goto bad;
	}
	/* Wait for a completed connection, an error, or listener EOF. */
	while (so->so_qlen == 0 && so->so_error == 0) {
		if (so->so_state & SS_CANTRCVMORE) {
			so->so_error = ECONNABORTED;
			break;
		}
		/* Previous wakeup requested a syscall restart. */
		if (wakeup_state & SS_RESTARTSYS) {
			error = ERESTART;
			goto bad;
		}
		error = sowait(so, true, 0);
		if (error) {
			goto bad;
		}
		wakeup_state = so->so_state;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto bad;
	}
	/* connection has been removed from the listen queue */
	KNOTE(&so->so_rcv.sb_sel.sel_klist, NOTE_SUBMIT);
	so2 = TAILQ_FIRST(&so->so_q);
	if (soqremque(so2, 1) == 0)
		panic("accept");
	/* Build the new file: inherit flags, apply SOCK_* modifiers. */
	fp2->f_type = DTYPE_SOCKET;
	fp2->f_flag = (fp->f_flag & ~clrflags) |
	    ((flags & SOCK_NONBLOCK) ? FNONBLOCK : 0)|
	    ((flags & SOCK_NOSIGPIPE) ? FNOSIGPIPE : 0);
	fp2->f_ops = &socketops;
	fp2->f_socket = so2;
	if (fp2->f_flag & FNONBLOCK)
		so2->so_state |= SS_NBIO;
	else
		so2->so_state &= ~SS_NBIO;
	error = soaccept(so2, nam);
	so2->so_cred = kauth_cred_dup(so->so_cred);
	sounlock(so);
	if (error) {
		/* an error occurred, free the file descriptor and mbuf */
		m_freem(nam);
		mutex_enter(&fp2->f_lock);
		fp2->f_count++;
		mutex_exit(&fp2->f_lock);
		closef(fp2);
		fd_abort(curproc, NULL, fd);
	} else {
		fd_set_exclose(l, fd, (flags & SOCK_CLOEXEC) != 0);
		fd_affix(curproc, fp2, fd);
		*name = nam;	/* caller owns the address mbuf */
	}
	fd_putfile(sock);
	if (__predict_false(mask))
		sigsuspendteardown(l);
	return error;

 bad:
	sounlock(so);
	m_freem(nam);
	fd_putfile(sock);
	fd_abort(curproc, fp2, fd);
	if (__predict_false(mask))
		sigsuspendteardown(l);
	return error;
}
/*
 * eCos wrapper for BSD accept() (mbuf-based variant of the stack):
 * block (unless SS_NBIO) for a completed connection on the socket
 * behind fp, bind it to the pre-allocated file object new_fp, and
 * optionally copy the peer address out to name/anamelen.
 *
 * Returns 0 on success or a BSD errno (EINVAL, EWOULDBLOCK,
 * ECONNABORTED, tsleep/copyout errors).
 */
static int
bsd_accept ( cyg_file *fp, cyg_file *new_fp,
             struct sockaddr *name, socklen_t *anamelen )
{
    struct mbuf *nam;
    socklen_t namelen = 0;
    int error = 0, s;
    register struct socket *so;

    if( anamelen != NULL )
        namelen = *anamelen;

    s = splsoftnet();
    so = (struct socket *)fp->f_data;
    if ((so->so_options & SO_ACCEPTCONN) == 0) {
        splx(s);
        return (EINVAL);
    }
    /* Non-blocking socket and nothing queued yet. */
    if ((so->so_state & SS_NBIO) && so->so_qlen == 0) {
        splx(s);
        return (EWOULDBLOCK);
    }
    /* Wait for a completed connection, an error, or listener EOF.
     * "netcon" here is an identifier (wmesg string defined elsewhere),
     * not a literal. */
    while (so->so_qlen == 0 && so->so_error == 0) {
        if (so->so_state & SS_CANTRCVMORE) {
            so->so_error = ECONNABORTED;
            break;
        }
        error = tsleep((caddr_t)&so->so_timeo, PSOCK | PCATCH,
                       netcon, 0);
        if (error) {
            splx(s);
            return (error);
        }
    }
    if (so->so_error) {
        error = so->so_error;
        so->so_error = 0;
        splx(s);
        return (error);
    }

    {
        /* Detach the first completed connection from the listen queue. */
        struct socket *aso = so->so_q;
        if (soqremque(aso, 1) == 0)
            panic("accept");
        so = aso;
    }

    /* Initialize eCos select records for the new socket's buffers. */
    cyg_selinit(&so->so_rcv.sb_sel);
    cyg_selinit(&so->so_snd.sb_sel);

    /* Wire the accepted socket into the caller-provided file object. */
    new_fp->f_type = DTYPE_SOCKET;
    new_fp->f_flag |= FREAD|FWRITE;
    new_fp->f_offset = 0;
    new_fp->f_ops = &bsd_sock_fileops;
    new_fp->f_data = (CYG_ADDRWORD)so;
    new_fp->f_xops = (CYG_ADDRWORD)&bsd_sockops;

    nam = m_get(M_WAIT, MT_SONAME);
    (void) soaccept(so, nam);
    if (name) {
        /* Truncate to the caller's buffer size. */
        if (namelen > nam->m_len)
            namelen = nam->m_len;
        /* SHOULD COPY OUT A CHAIN HERE */
        if ((error = copyout(mtod(nam, caddr_t),
                             (caddr_t)name, namelen)) == 0)
            *anamelen = namelen;
    }
    m_freem(nam);
    splx(s);
    return (error);
}
/*
 * OpenBSD accept(2) system call.
 *
 * Waits (unless SS_NBIO) for a completed connection on the listening
 * socket, allocates a descriptor for it, completes the accept, and
 * optionally copies the peer address out to userland.
 *
 * Because falloc()/m_get() can sleep, the listen queue is re-checked
 * afterwards and the whole procedure restarted (the redo: label) if the
 * connection disappeared in the meantime.
 */
int
sys_accept(struct proc *p, void *v, register_t *retval)
{
	struct sys_accept_args /* {
		syscallarg(int) s;
		syscallarg(struct sockaddr *) name;
		syscallarg(socklen_t *) anamelen;
	} */ *uap = v;
	struct file *fp, *headfp;
	struct mbuf *nam;
	socklen_t namelen;
	int error, s, tmpfd;
	struct socket *head, *so;
	int nflag;

	if (SCARG(uap, name) && (error = copyin(SCARG(uap, anamelen),
	    &namelen, sizeof (namelen))))
		return (error);

	if ((error = getsock(p->p_fd, SCARG(uap, s), &fp)) != 0)
		return (error);
	headfp = fp;
	s = splsoftnet();
	head = fp->f_data;
redo:
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto bad;
	}
	if ((head->so_state & SS_NBIO) && head->so_qlen == 0) {
		/* Distinguish an aborted listener from a merely empty queue. */
		if (head->so_state & SS_CANTRCVMORE)
			error = ECONNABORTED;
		else
			error = EWOULDBLOCK;
		goto bad;
	}
	/* Wait for a completed connection, an error, or listener EOF. */
	while (head->so_qlen == 0 && head->so_error == 0) {
		if (head->so_state & SS_CANTRCVMORE) {
			head->so_error = ECONNABORTED;
			break;
		}
		error = tsleep(&head->so_timeo, PSOCK | PCATCH,
		    "netcon", 0);
		if (error) {
			goto bad;
		}
	}
	if (head->so_error) {
		error = head->so_error;
		head->so_error = 0;
		goto bad;
	}

	/* Take note if socket was non-blocking. */
	nflag = (headfp->f_flag & FNONBLOCK);

	fdplock(p->p_fd);
	error = falloc(p, &fp, &tmpfd);
	fdpunlock(p->p_fd);
	if (error != 0) {
		/*
		 * Probably ran out of file descriptors. Wakeup
		 * so some other process might have a chance at it.
		 */
		wakeup_one(&head->so_timeo);
		goto bad;
	}

	nam = m_get(M_WAIT, MT_SONAME);

	/*
	 * Check whether the queue emptied while we slept: falloc() or
	 * m_get() may have blocked, allowing the connection to be reset
	 * or another thread or process to accept it. If so, start over.
	 */
	if (head->so_qlen == 0) {
		m_freem(nam);
		fdplock(p->p_fd);
		fdremove(p->p_fd, tmpfd);
		closef(fp, p);
		fdpunlock(p->p_fd);
		goto redo;
	}

	/*
	 * Do not sleep after we have taken the socket out of the queue.
	 */
	so = TAILQ_FIRST(&head->so_q);
	if (soqremque(so, 1) == 0)
		panic("accept");

	/* connection has been removed from the listen queue */
	KNOTE(&head->so_rcv.sb_sel.si_note, 0);

	/* The new descriptor inherits the listener's non-blocking flag. */
	fp->f_type = DTYPE_SOCKET;
	fp->f_flag = FREAD | FWRITE | nflag;
	fp->f_ops = &socketops;
	fp->f_data = so;
	error = soaccept(so, nam);
	if (!error && SCARG(uap, name)) {
		error = copyaddrout(p, nam, SCARG(uap, name), namelen,
		    SCARG(uap, anamelen));
	}

	if (error) {
		/* if an error occurred, free the file descriptor */
		fdplock(p->p_fd);
		fdremove(p->p_fd, tmpfd);
		closef(fp, p);
		fdpunlock(p->p_fd);
	} else {
		FILE_SET_MATURE(fp, p);
		*retval = tmpfd;
	}
	m_freem(nam);
bad:
	splx(s);
	FRELE(headfp, p);
	return (error);
}
/*
 * RTEMS accept(): BSD-style accept under the single network-stack
 * semaphore (obtained on entry, released on every exit path).
 *
 * s       - descriptor of the listening socket.
 * name    - optional: receives the peer address.
 * namelen - in/out: buffer size in, actual (clamped) length out.
 *
 * Returns the new socket descriptor, or -1 with errno set (EINVAL,
 * EWOULDBLOCK, ECONNABORTED, or whatever soconnsleep() reported).
 */
int
accept (int s, struct sockaddr *name, int *namelen)
{
	int fd;
	struct socket *head, *so;
	struct mbuf *nam;

	rtems_bsdnet_semaphore_obtain ();
	if ((head = rtems_bsdnet_fdToSocket (s)) == NULL) {
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	if ((head->so_options & SO_ACCEPTCONN) == 0) {
		errno = EINVAL;
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	/* Non-blocking socket with an empty completed-connection queue. */
	if ((head->so_state & SS_NBIO) && head->so_comp.tqh_first == NULL) {
		errno = EWOULDBLOCK;
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	/* Wait for a completed connection, an error, or listener EOF. */
	while (head->so_comp.tqh_first == NULL && head->so_error == 0) {
		if (head->so_state & SS_CANTRCVMORE) {
			head->so_error = ECONNABORTED;
			break;
		}
		head->so_error = soconnsleep (head);
	}
	if (head->so_error) {
		errno = head->so_error;
		head->so_error = 0;
		rtems_bsdnet_semaphore_release ();
		return -1;
	}

	so = head->so_comp.tqh_first;
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;

	fd = rtems_bsdnet_makeFdForSocket (so);
	if (fd < 0) {
		/*
		 * No descriptor available: put the connection back on the
		 * queue and wake another waiter so it gets a chance.
		 */
		TAILQ_INSERT_HEAD(&head->so_comp, so, so_list);
		head->so_qlen++;
		soconnwakeup (head);
		rtems_bsdnet_semaphore_release ();
		return -1;
	}
	so->so_state &= ~SS_COMP;
	so->so_head = NULL;

	nam = m_get(M_WAIT, MT_SONAME);
	(void) soaccept(so, nam);
	if (name) {
		/* check length before it is destroyed */
		if (*namelen > nam->m_len)
			*namelen = nam->m_len;
		memcpy (name, mtod(nam, caddr_t), *namelen);
	}
	m_freem(nam);
	rtems_bsdnet_semaphore_release ();
	return (fd);
}