Example #1
0
/*
 * Append mbuf chain m to the last record in the
 * socket buffer sb.  The additional space associated with
 * the mbuf chain is recorded in sb.  Empty mbufs are
 * discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{
	struct mbuf	*n;

	KASSERT(solocked(sb->sb_so));

	if (m == 0)
		return;

#ifdef MBUFTRACE
	m_claimm(m, sb->sb_mowner);
#endif

	SBLASTRECORDCHK(sb, "sbappend 1");

	if ((n = sb->sb_lastrecord) != NULL) {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * If this is the first record in the socket buffer, it's
		 * also the last record.
		 */
		sb->sb_lastrecord = m;
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb, "sbappend 2");
}
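The SBLASTRECORDCHK/SBLASTMBUFCHK hooks used above are debug-only consistency checks; a minimal sketch of the invariant they verify (not the kernel's actual macro bodies) might look like this:

/*
 * Sketch only: the invariants behind SBLASTRECORDCHK/SBLASTMBUFCHK.
 * sb_lastrecord must point at the final record (m_nextpkt chain) and
 * sb_mbtail at the final mbuf of that record (m_next chain).
 */
static void
sb_check_tails(const struct sockbuf *sb)
{
	const struct mbuf *m = sb->sb_mb;

	if (m == NULL)
		return;
	while (m->m_nextpkt != NULL)	/* walk the record list */
		m = m->m_nextpkt;
	KASSERT(m == sb->sb_lastrecord);
	while (m->m_next != NULL)	/* walk the mbufs of the last record */
		m = m->m_next;
	KASSERT(m == sb->sb_mbtail);
}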
Example #2
0
/*
 * This version of sbappend() should only be used when the caller
 * absolutely knows that there will never be more than one record
 * in the socket buffer, that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{

	KDASSERT(m->m_nextpkt == NULL);
	KASSERT(sb->sb_mb == sb->sb_lastrecord);

	SBLASTMBUFCHK(sb, __func__);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, __func__);
}
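A typical caller of sbappendstream() is a stream protocol's input path running with the socket lock held; a hedged sketch of that pattern (tcp_deliver_data is a hypothetical name, sorwakeup() is the standard NetBSD wakeup):

/*
 * Hypothetical caller sketch: deliver in-order stream data to the
 * receive buffer.  Assumes the socket lock is already held, as the
 * NetBSD sbappendstream() variants above require.
 */
static void
tcp_deliver_data(struct socket *so, struct mbuf *m)	/* hypothetical */
{
	KASSERT(solocked(so));

	sbappendstream(&so->so_rcv, m);
	sorwakeup(so);		/* wake readers and selectors */
}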
Example #3
0
/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
{
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL, ("sbappendstream 0"));
	KASSERT(sb->sb_mb == sb->sb_lastrecord, ("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}
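Here the _locked suffix means the caller already owns the receive sockbuf mutex; a hedged usage sketch under that assumption (deliver_stream_data is invented, SOCKBUF_LOCK and sorwakeup_locked are the usual FreeBSD primitives):

/* Sketch of the FreeBSD-style lock discipline around the call above. */
static void
deliver_stream_data(struct socket *so, struct mbuf *m)	/* hypothetical */
{
	SOCKBUF_LOCK(&so->so_rcv);
	sbappendstream_locked(&so->so_rcv, m);
	sorwakeup_locked(so);	/* wakes readers and drops the sockbuf lock */
}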
Example #4
0
/*
 * As above, except the mbuf chain
 * begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	if (m0 == NULL)
		return;

	/*
	 * Put the first mbuf on the queue.
	 * Note this permits zero length records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 1");
	SBLINKRECORD(sb, m0);
	m = m0->m_next;
	m0->m_next = NULL;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	sbcompress(sb, m, m0);
	SBLASTRECORDCHK(sb, "sbappendrecord 2");
}
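SBLINKRECORD() is what actually queues m0 as a new record; its effect is roughly the following sketch (a paraphrase of the macro's logic, not a verbatim copy):

/*
 * Sketch of what SBLINKRECORD(sb, m0) does: append m0 as a new record,
 * i.e. link it on the m_nextpkt chain and update sb_lastrecord.
 * (sb_mbtail is updated separately, e.g. by sbcompress().)
 */
static void
sb_link_record(struct sockbuf *sb, struct mbuf *m0)
{
	if (sb->sb_lastrecord != NULL)
		sb->sb_lastrecord->m_nextpkt = m0;
	else
		sb->sb_mb = m0;		/* buffer was empty */
	sb->sb_lastrecord = m0;
}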
Example #5
0
/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
	struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
	if (asa->sa_len > MLEN)
		return (0);
#endif
	m = m_get(M_NOWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (m0) {
		m_clrprotoflags(m0);
		m_tag_delete_chain(m0, NULL);
		/*
		 * Clear some persistent info from pkthdr.
		 * We don't use m_demote(), because some netgraph consumers
		 * expect M_PKTHDR presence.
		 */
		m0->m_pkthdr.rcvif = NULL;
		m0->m_pkthdr.flowid = 0;
		m0->m_pkthdr.csum_flags = 0;
		m0->m_pkthdr.fibnum = 0;
		m0->m_pkthdr.rsstype = 0;
	}
	if (ctrl_last)
		ctrl_last->m_next = m0;	/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *m, *n, *nlast;
	int space = asa->sa_len;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr_locked");
	if (m0)
		space += m0->m_pkthdr.len;
	space += m_length(control, &n);

	if (space > sbspace(sb))
		return (0);
#if MSIZE <= 256
	if (asa->sa_len > MLEN)
		return (0);
#endif
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (n) {
		CHECK_ADD_LINKCNT(n, m0, NULL, "sbappendaddr_locked");
		n->m_next = m0;		/* concatenate data to control */
	} else
		control = m0;
	CHECK_ADD_LINKCNT(m, control, NULL, "sbappendaddr_locked");
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}
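Callers take the sockbuf lock themselves and must free the mbufs when 0 is returned; a hedged sketch of that calling pattern (deliver_datagram is a hypothetical name):

/* Hypothetical caller sketch: deliver one datagram plus source address. */
static void
deliver_datagram(struct socket *so, const struct sockaddr *from,
    struct mbuf *data, struct mbuf *control)
{
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv, from, data, control) == 0) {
		/* No room, or no mbuf for the address: drop the datagram. */
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(data);
		m_freem(control);
	} else
		sorwakeup_locked(so);	/* releases the sockbuf lock */
}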
Example #7
0
/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL, ("sbappendstream 0"));
	KASSERT(sb->sb_mb == sb->sb_lastrecord, ("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}
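For context, the unlocked FreeBSD front end is essentially just a lock/unlock pair around this routine; a sketch of that shape, written from the general pattern rather than quoted from the source:

/* Sketch: the unlocked front end brackets the _locked version. */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{
	SOCKBUF_LOCK(sb);
	sbappendstream_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}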
Example #8
0
/*
 * This version of sbappend() should only be used when the caller
 * absolutely knows that there will never be more than one record
 * in the socket buffer, that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{

	KASSERT(solocked(sb->sb_so));
	KDASSERT(m->m_nextpkt == NULL);
	KASSERT(sb->sb_mb == sb->sb_lastrecord);

	SBLASTMBUFCHK(sb, __func__);

#ifdef MBUFTRACE
	m_claimm(m, sb->sb_mowner);
#endif
	service_reflect_tx_socket_buf(sb->sb_so, m->m_len);		
	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, __func__);
}
Example #9
0
/*
 * Append address and data, and optionally, control (ancillary) data
 * to the receive queue of a socket.  If present,
 * m0 must include a packet header with total length.
 * Returns 0 if no space in sockbuf or insufficient mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, struct mbuf *m0,
	struct mbuf *control)
{
	struct mbuf	*m, *n, *nlast;
	int		space, len;

	KASSERT(solocked(sb->sb_so));

	space = asa->sa_len;

	if (m0 != NULL) {
		if ((m0->m_flags & M_PKTHDR) == 0)
			panic("sbappendaddr");
		space += m0->m_pkthdr.len;
#ifdef MBUFTRACE
		m_claimm(m0, sb->sb_mowner);
#endif
	}
	for (n = control; n; n = n->m_next) {
		space += n->m_len;
		MCLAIM(n, sb->sb_mowner);
		if (n->m_next == 0)	/* keep pointer to last control buf */
			break;
	}
	if (space > sbspace(sb))
		return (0);
	MGET(m, M_DONTWAIT, MT_SONAME);
	if (m == 0)
		return (0);
	MCLAIM(m, sb->sb_mowner);
	/*
	 * XXX avoid 'comparison always true' warning which isn't easily
	 * avoided.
	 */
	len = asa->sa_len;
	if (len > MLEN) {
		MEXTMALLOC(m, asa->sa_len, M_NOWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (0);
		}
	}
	m->m_len = asa->sa_len;
	memcpy(mtod(m, void *), asa, asa->sa_len);
	if (n)
		n->m_next = m0;		/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;

	SBLASTRECORDCHK(sb, "sbappendaddr 1");

	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb, "sbappendaddr");
	SBLASTRECORDCHK(sb, "sbappendaddr 2");

	return (1);
}
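The sbspace() check at the top and the sballoc() calls near the bottom are two halves of the same accounting; the following sketch approximates the classic BSD definitions (field names as used above, exact types may differ):

/*
 * Sketch of the sockbuf accounting used above.  sbspace() is the room
 * left under both the byte limit (sb_hiwat) and the mbuf-storage limit
 * (sb_mbmax); sballoc() charges one mbuf against both counters.
 */
static long
sb_space_sketch(const struct sockbuf *sb)
{
	return lmin((long)(sb->sb_hiwat - sb->sb_cc),
	    (long)(sb->sb_mbmax - sb->sb_mbcnt));
}

static void
sb_alloc_sketch(struct sockbuf *sb, const struct mbuf *m)
{
	sb->sb_cc += m->m_len;
	sb->sb_mbcnt += MSIZE;
	if (m->m_flags & M_EXT)
		sb->sb_mbcnt += m->m_ext.ext_size;
}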
Example #10
0
int
sbappendaddrchain(struct sockbuf *sb, const struct sockaddr *asa,
		  struct mbuf *m0, int sbprio)
{
	int space;
	struct mbuf *m, *n, *n0, *nlast;
	int error;

	KASSERT(solocked(sb->sb_so));

	/*
	 * XXX sbprio reserved for encoding priority of this request:
	 *  SB_PRIO_NONE --> honour normal sb limits
	 *  SB_PRIO_ONESHOT_OVERFLOW --> if socket has any space,
	 *      take whole chain.  Intended for large requests
	 *      that should be delivered atomically (all, or none).
	 *  SB_PRIO_OVERDRAFT --> allow a small (2*MLEN) overflow
	 *      over normal socket limits, for messages indicating
	 *      buffer overflow in earlier normal/lower-priority messages.
	 *  SB_PRIO_BESTEFFORT --> ignore limits entirely.
	 *      Intended for kernel-generated messages only.
	 *      Up to the generator to avoid total mbuf resource exhaustion.
	 */
	(void)sbprio;

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddrchain");

	space = sbspace(sb);

#ifdef notyet
	/*
	 * Enforce SB_PRIO_* limits as described above.
	 */
#endif

	n0 = NULL;
	nlast = NULL;
	for (m = m0; m; m = m->m_nextpkt) {
		struct mbuf *np;

#ifdef MBUFTRACE
		m_claimm(m, sb->sb_mowner);
#endif

		/* Prepend sockaddr to this record (m) of input chain m0 */
	  	n = m_prepend_sockaddr(sb, m, asa);
		if (n == NULL) {
			error = ENOBUFS;
			goto bad;
		}

		/* Append record (asa+m) to end of new chain n0 */
		if (n0 == NULL) {
			n0 = n;
		} else {
			nlast->m_nextpkt = n;
		}
		/* Keep track of last record on new chain */
		nlast = n;

		for (np = n; np; np = np->m_next)
			sballoc(sb, np);
	}

	SBLASTRECORDCHK(sb, "sbappendaddrchain 1");

	/* Drop the entire chain of (asa+m) records onto the socket */
	SBLINKRECORDCHAIN(sb, n0, nlast);

	SBLASTRECORDCHK(sb, "sbappendaddrchain 2");

	for (m = nlast; m->m_next; m = m->m_next)
		;
	sb->sb_mbtail = m;
	SBLASTMBUFCHK(sb, "sbappendaddrchain");

	return (1);

bad:
	/*
	 * On error, free the prepended addresses.  For consistency
	 * with sbappendaddr(), leave it to our caller to free
	 * the input record chain passed to us as m0.
	 */
	while ((n = n0) != NULL) {
	  	struct mbuf *np;

		/* Undo the sballoc() of this record */
		for (np = n; np; np = np->m_next)
			sbfree(sb, np);

		n0 = n->m_nextpkt;	/* iterate at next prepended address */
		MFREE(n, np);		/* free prepended address (not data) */
	}
	return 0;
}
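A hedged sketch of how a caller might batch several datagrams into one sbappendaddrchain() call; deliver_batch is a hypothetical name and SB_PRIO_NONE is taken from the comment above (on failure the input chain is still owned by the caller):

/*
 * Hypothetical caller sketch: queue a batch of datagrams, each tagged
 * with the same source address, as individual records in one call.
 * Packets are linked through m_nextpkt.
 */
static int
deliver_batch(struct socket *so, const struct sockaddr *from,
    struct mbuf *pkts)	/* pkts: packet headers linked via m_nextpkt */
{
	int ok;

	solock(so);
	ok = sbappendaddrchain(&so->so_rcv, from, pkts, SB_PRIO_NONE);
	if (ok)
		sorwakeup(so);
	sounlock(so);
	/* On failure, per sbappendaddrchain(), the caller still owns pkts. */
	return ok ? 0 : ENOBUFS;
}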
Example #11
0
static int
kttcp_soreceive(struct socket *so, unsigned long long slen,
    unsigned long long *done, struct lwp *l, int *flagsp)
{
	struct mbuf *m, **mp;
	int flags, len, error, offset, moff, type;
	long long orig_resid, resid;
	const struct protosw *pr;
	struct mbuf *nextrecord;

	pr = so->so_proto;
	mp = NULL;
	type = 0;
	resid = orig_resid = slen;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
 		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock(so);
		error = (*pr->pr_usrreqs->pr_recvoob)(so, m, flags & MSG_PEEK);
		sounlock(so);
		if (error)
			goto bad;
		do {
			resid -= min(resid, m->m_len);
			m = m_free(m);
		} while (resid && error == 0 && m);
 bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;
	solock(so);
 restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return (error);
	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
			panic("receive 1");
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA  || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) ||
		    (flags & (MSG_DONTWAIT|MSG_NBIO))) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		if (error) {
			sounlock(so);
			return (error);
		}
		goto restart;
	}
 dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * While we process the initial mbufs containing address and control
	 * info, we save a copy of m->m_nextpkt into nextrecord.
	 */
#ifdef notyet /* XXXX */
	if (uio->uio_lwp)
		uio->uio_lwp->l_ru.ru_msgrcv++;
#endif
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a");
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			MFREE(m, so->so_rcv.sb_mb);
			m = so->so_rcv.sb_mb;
		}
	}

	/*
	 * If m is non-NULL, we have some data to read.  From now on,
	 * make sure to keep sb_lastrecord consistent when working on
	 * the last packet on the chain (nextrecord == NULL) and we
	 * change m->m_nextpkt.
	 */
	if (m) {
		if ((flags & MSG_PEEK) == 0) {
			m->m_nextpkt = nextrecord;
			/*
			 * If nextrecord == NULL (this is a single chain),
			 * then sb_lastrecord may not be valid here if m
			 * was changed earlier.
			 */
			if (nextrecord == NULL) {
				KASSERT(so->so_rcv.sb_mb == m);
				so->so_rcv.sb_lastrecord = m;
			}
		}
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	} else {
		if ((flags & MSG_PEEK) == 0) {
			KASSERT(so->so_rcv.sb_mb == m);
			so->so_rcv.sb_mb = nextrecord;
			SB_EMPTY_FIXUP(&so->so_rcv);
		}
	}
	SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 2");

	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3");
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					MFREE(m, so->so_rcv.sb_mb);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv,
				    "kttcp_soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp) {
					sounlock(so);
					*mp = m_copym(m, 0, len, M_WAIT);
					solock(so);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until "resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * If we are peeking and the socket receive buffer is
			 * full, stop since we can't get more data to peek at.
			 */
			if ((flags & MSG_PEEK) && sbspace(&so->so_rcv) <= 0)
				break;
			/*
			 * If we've drained the socket buffer, tell the
			 * protocol in case it needs to do something to
			 * get it filled again.
			 */
			if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) {
				(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
			}
			SBLASTRECORDCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv,
			    "kttcp_soreceive sbwait 2");
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				sounlock(so);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an SB_EMPTY_FIXUP().  Second part
			 * makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "kttcp_soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "kttcp_soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) {
			(*pr->pr_usrreqs->pr_rcvd)(so, flags, l);
		}
	}
	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
 release:
	sbunlock(&so->so_rcv);
	sounlock(so);
	*done = slen - resid;
#if 0
	printf("soreceive: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}
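SB_EMPTY_FIXUP(), used twice above, simply clears the cached tail pointers once the record list is empty; roughly (a sketch, not the verbatim macro):

/* Sketch of SB_EMPTY_FIXUP(): reset stale tail pointers on an empty buffer. */
static void
sb_empty_fixup(struct sockbuf *sb)
{
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	}
}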
Example #12
0
/*
 * Slightly changed version of sosend()
 */
static int
kttcp_sosend(struct socket *so, unsigned long long slen,
	     unsigned long long *done, struct lwp *l, int flags)
{
	struct mbuf **mp, *m, *top;
	long space, len, mlen;
	int error, dontroute, atomic;
	long long resid;

	atomic = sosendallatonce(so);
	resid = slen;
	top = NULL;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0) {
		error = EINVAL;
		goto out;
	}
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	l->l_ru.ru_msgsnd++;
#define	snderr(errno)	{ error = errno; goto release; }
	solock(so);
 restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	do {
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				snderr(ENOTCONN);
			} else {
				snderr(EDESTADDRREQ);
			}
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat))
			snderr(EMSGSIZE);
		if (space < resid && (atomic || space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			SBLASTRECORDCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			SBLASTMBUFCHK(&so->so_snd,
			    "kttcp_sosend sbwait 1");
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			sounlock(so);
			do {
				if (top == 0) {
					m = m_gethdr(M_WAIT, MT_DATA);
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				} else {
					m = m_get(M_WAIT, MT_DATA);
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE && space >= MCLBYTES) {
					m_clget(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
#ifdef	MAPPED_MBUFS
					len = lmin(MCLBYTES, resid);
#else
					if (atomic && top == 0) {
						len = lmin(MCLBYTES - max_hdr,
						    resid);
						m->m_data += max_hdr;
					} else
						len = lmin(MCLBYTES, resid);
#endif
					space -= len;
				} else {
nopages:
					len = lmin(lmin(mlen, resid), space);
					space -= len;
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				resid -= len;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			solock(so);

			if (so->so_state & SS_CANTSENDMORE)
				snderr(EPIPE);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (resid > 0)
				so->so_state |= SS_MORETOCOME;
			if (flags & MSG_OOB)
				error = (*so->so_proto->pr_usrreqs->pr_sendoob)(so,
				    top, NULL);
			else
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    top, NULL, NULL, l);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			if (resid > 0)
				so->so_state &= ~SS_MORETOCOME;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

 release:
	sbunlock(&so->so_snd);
 out:
 	sounlock(so);
	if (top)
		m_freem(top);
	*done = slen - resid;
#if 0
	printf("sosend: error %d slen %llu resid %lld\n", error, slen, resid);
#endif
	return (error);
}
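As a closing illustration, a hedged sketch of how a benchmark path might drive kttcp_sosend(); everything except kttcp_sosend() and the standard time primitives is a made-up name:

/*
 * Hypothetical benchmark sketch: time how long it takes to push nbytes
 * of generated data through a connected socket with kttcp_sosend().
 */
static int
kttcp_time_send(struct socket *so, unsigned long long nbytes,
    struct lwp *l, struct timeval *elapsed)
{
	struct timeval start, end;
	unsigned long long done = 0;
	int error;

	microtime(&start);
	error = kttcp_sosend(so, nbytes, &done, l, 0);
	microtime(&end);
	timersub(&end, &start, elapsed);

	/* done reports how many bytes actually went out, even on error. */
	return error;
}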