void
so_pru_send_async(struct socket *so, int flags, struct mbuf *m,
		  struct sockaddr *addr0, struct mbuf *control,
		  struct thread *td)
{
	struct netmsg_pru_send *msg;
	struct sockaddr *addr = NULL;

	KASSERT(so->so_proto->pr_flags & PR_ASYNC_SEND,
	    ("async pru_send is not supported"));

	if (addr0 != NULL) {
		addr = kmalloc(addr0->sa_len, M_SONAME, M_WAITOK | M_NULLOK);
		if (addr == NULL) {
			/*
			 * Failed to allocate an address copy; fall back
			 * to the synchronous pru_send.
			 */
			so_pru_send(so, flags, m, addr0, control, td);
			return;
		}
		memcpy(addr, addr0, addr0->sa_len);
		flags |= PRUS_FREEADDR;
	}
	flags |= PRUS_NOREPLY;

	if (td != NULL && (so->so_proto->pr_flags & PR_ASEND_HOLDTD)) {
		lwkt_hold(td);
		flags |= PRUS_HELDTD;
	}

	msg = &m->m_hdr.mh_sndmsg;
	netmsg_init(&msg->base, so, &netisr_apanic_rport, 0,
	    so->so_proto->pr_usrreqs->pru_send);
	msg->nm_flags = flags;
	msg->nm_m = m;
	msg->nm_addr = addr;
	msg->nm_control = control;
	msg->nm_td = td;
	lwkt_sendmsg(so->so_port, &msg->base.lmsg);
}
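/*
 * Example (illustrative sketch, not part of this file): a caller that
 * prefers the asynchronous path when the protocol advertises
 * PR_ASYNC_SEND and otherwise falls back to the synchronous
 * so_pru_send().  so_send_pref_async() is a hypothetical helper.
 */
#if 0
static int
so_send_pref_async(struct socket *so, struct mbuf *m, struct sockaddr *addr,
		   struct thread *td)
{
	if (so->so_proto->pr_flags & PR_ASYNC_SEND) {
		/*
		 * Queued to the protocol thread; PRUS_NOREPLY is set by
		 * so_pru_send_async(), so no completion status comes back
		 * to the caller.
		 */
		so_pru_send_async(so, 0, m, addr, NULL, td);
		return (0);
	}
	/* Blocks until the protocol thread replies with a status. */
	return (so_pru_send(so, 0, m, addr, NULL, td));
}
#endif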
/*
 * gotoerr() is shared by sosendudp() and sosend() below: record the error
 * and bail out through the common unlock path.
 */
#define	gotoerr(errcode)	{ error = errcode; goto release; }

/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	boolean_t dontroute;		/* temporary SO_DONTROUTE setting */
	size_t resid;
	int error;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio),
	    ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING | MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		top = m_uiomove(uio);
		if (top == NULL)
			goto release;
	}

	dontroute = (flags & MSG_DONTROUTE) &&
		    !(so->so_options & SO_DONTROUTE);
	if (dontroute)
		so->so_options |= SO_DONTROUTE;

	error = so_pru_send(so, 0, top, addr, NULL, td);
	top = NULL;		/* sent or freed in lower layer */

	if (dontroute)
		so->so_options &= ~SO_DONTROUTE;

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}
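/*
 * Example (illustrative sketch): how a datagram protocol might select
 * sosendudp() as its pru_sosend hook so that userland send(2)/sendto(2)
 * traffic flows through the specialized path above.  The initializer is
 * hypothetical and abbreviated; it is not the actual udp_usrreqs
 * definition, and udp_send_example is a placeholder handler name.
 */
#if 0
struct pr_usrreqs example_udp_usrreqs = {
	.pru_sosend = sosendudp,	/* specialized sosend() for UDP */
	.pru_send = udp_send_example,	/* placeholder pru_send handler */
	/* ... remaining handlers elided ... */
};
#endif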
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in an mbuf chain must be
 * small enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
       struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL) {
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
			}
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING | MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, MB_WAIT, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL &&
					    len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t),
						(size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);

			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag, and there is nothing
				 * left to send, then use PRU_SEND_EOF instead
				 * of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control,
					    td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
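/*
 * Example (illustrative sketch): the lock/check/sleep/restart pattern that
 * both senders above use, shown in isolation.  buf_lock(), buf_space(),
 * buf_unlock() and buf_wait() are hypothetical stand-ins for ssb_lock(),
 * ssb_space(), ssb_unlock() and ssb_wait().  The key points are that the
 * buffer lock is dropped before sleeping and that control returns to
 * "restart" after wakeup so every state check is redone, since the socket
 * may have changed state (reset, error, shutdown) while the thread slept.
 */
#if 0
static int
send_wait_for_space(struct example_buf *b, size_t resid, int nonblocking)
{
	int error;

restart:
	error = buf_lock(b);		/* serialize against other senders */
	if (error)
		return (error);
	if (buf_space(b) < 0 || (size_t)buf_space(b) < resid) {
		buf_unlock(b);		/* never sleep holding the lock */
		if (nonblocking)
			return (EWOULDBLOCK);
		error = buf_wait(b);	/* sleep until space is freed */
		if (error)
			return (error);
		goto restart;		/* redo all checks from scratch */
	}
	/* ... space available: build the mbuf chain and send ... */
	buf_unlock(b);
	return (0);
}
#endif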