Example #1
/*
 * Drop data from (the front of) a sockbuf.  If the current record is
 * exhausted, this routine will move on to the next one and continue
 * dropping data.
 */
void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *m;
	struct mbuf *free_chain = NULL;

	sbcheck(sb);
	crit_enter();

	m = sb->sb_mb;
	while (m && len > 0) {
		if (m->m_len > len) {
			m->m_len -= len;
			m->m_data += len;
			sb->sb_cc -= len;
			atomic_subtract_long(&sb->sb_cc_prealloc, len);
			break;
		}
		len -= m->m_len;
		m = sbunlinkmbuf(sb, m, &free_chain);
		if (m == NULL && len)
			m = sb->sb_mb;
	}

	/*
	 * Remove any trailing 0-length mbufs in the current record.  If
	 * the last record for which data was removed is now empty, m will be
	 * NULL.
	 */
	while (m && m->m_len == 0) {
		m = sbunlinkmbuf(sb, m, &free_chain);
	}
	crit_exit();
	if (free_chain)
		m_freem(free_chain);
	sbcheck(sb);
}
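To see the front-drop walk in isolation, here is a minimal userland sketch of the same idea: a singly linked buffer chain where dropping len bytes either trims the head buffer in place (the m_data/m_len adjustment above) or unlinks exhausted buffers and moves on. The buf/buf_drop names are hypothetical illustrations, not kernel API; note also that the real sbdrop() defers freeing via free_chain and calls m_freem() only after crit_exit(), so no allocator work happens inside the critical section.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for an mbuf: data points into private storage. */
struct buf {
	struct buf *next;
	char *data;		/* current start of valid bytes (cf. m_data) */
	int len;		/* number of valid bytes (cf. m_len) */
	char storage[32];
};

static struct buf *
buf_new(const char *s)
{
	struct buf *b = calloc(1, sizeof(*b));
	b->len = (int)strlen(s);
	memcpy(b->storage, s, (size_t)b->len);
	b->data = b->storage;
	return b;
}

/*
 * Drop len bytes from the front of the chain: trim the head in place
 * when it holds more than len bytes, otherwise unlink and free it and
 * continue with the next buffer.  Returns the new head.
 */
static struct buf *
buf_drop(struct buf *head, int len)
{
	while (head && len > 0) {
		if (head->len > len) {
			head->data += len;
			head->len -= len;
			break;
		}
		len -= head->len;
		struct buf *next = head->next;
		free(head);	/* sbdrop() defers this via free_chain */
		head = next;
	}
	return head;
}

int
main(void)
{
	struct buf *a = buf_new("hello");
	a->next = buf_new("world");

	a = buf_drop(a, 7);			/* drops "hello" and "wo" */
	printf("%.*s\n", a->len, a->data);	/* prints "rld" */
	buf_drop(a, a->len);			/* frees the remainder */
	return 0;
}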
Example #2
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * a struct sockbuf (sio) for use in returning the chain.  The uio is
 * then used only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(MB_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), and
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
		if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == ssb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out of the
	 * critical section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepended to the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}
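As a companion to the layout rules in soreceive()'s header comment — address mbuf first if the protocol supplies one, then optional ancillary mbufs, then the data — here is a minimal userland sketch of walking one record in that order. The node type and tag values are hypothetical simplifications standing in for m_type values such as MT_SONAME and MT_CONTROL; real records would be chained through m_nextpkt.

#include <stdio.h>

/* Hypothetical simplified mbuf: a tag instead of m_type constants. */
enum tag { ADDR, CONTROL, DATA };

struct node {
	struct node *next;	/* cf. m_next: mbufs within one record */
	enum tag tag;
	const char *payload;
};

/*
 * Consume one record in the order soreceive() depends on: the address
 * (if any) must come first, then any ancillary data, then the data.
 */
static void
walk_record(const struct node *m)
{
	if (m && m->tag == ADDR) {		/* PR_ADDR case, cf. MT_SONAME */
		printf("address: %s\n", m->payload);
		m = m->next;
	}
	while (m && m->tag == CONTROL) {	/* optional ancillary mbufs */
		printf("control: %s\n", m->payload);
		m = m->next;
	}
	while (m) {				/* zero or more data mbufs */
		printf("data:    %s\n", m->payload);
		m = m->next;
	}
}

int
main(void)
{
	struct node d2 = { NULL, DATA, "world" };
	struct node d1 = { &d2, DATA, "hello " };
	struct node c  = { &d1, CONTROL, "(ancillary data)" };
	struct node a  = { &c,  ADDR, "192.0.2.1:7" };

	walk_record(&a);
	return 0;
}

In the real path, the PR_ADDR block above peels the MT_SONAME mbuf off the record (or merely steps past it under MSG_PEEK) before the copy loop ever sees data.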