int
smb_rq_dmem(struct mbdata *mbp, const char *src, size_t size)
{
    struct mbuf *m;
    char *dst;
    int cplen, error;

    if (size == 0)
        return 0;
    m = mbp->mb_cur;
    if ((error = m_getm(m, size, &m)) != 0)
        return error;
    while (size > 0) {
        cplen = M_TRAILINGSPACE(m);
        if (cplen == 0) {
            m = m->m_next;
            continue;
        }
        if (cplen > (int)size)
            cplen = size;
        dst = mtod(m, char *) + m->m_len;
        nls_mem_toext(dst, src, cplen);
        size -= cplen;
        src += cplen;
        m->m_len += cplen;
        mbp->mb_count += cplen;
    }
    mbp->mb_pos = mtod(m, char *) + m->m_len;
    mbp->mb_cur = m;
    return 0;
}
/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
    struct cmsghdr *cp;
    struct mbuf *m;

    if (CMSG_SPACE((u_int)size) > MCLBYTES)
        return ((struct mbuf *) NULL);
    if (CMSG_SPACE((u_int)size) > MLEN)
        m = m_getcl(M_NOWAIT, MT_CONTROL, 0);
    else
        m = m_get(M_NOWAIT, MT_CONTROL);
    if (m == NULL)
        return ((struct mbuf *) NULL);
    cp = mtod(m, struct cmsghdr *);
    m->m_len = 0;
    KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
        ("sbcreatecontrol: short mbuf"));
    /*
     * Don't leave the padding between the msg header and the
     * cmsg data and the padding after the cmsg data uninitialized.
     */
    bzero(cp, CMSG_SPACE((u_int)size));
    if (p != NULL)
        (void)memcpy(CMSG_DATA(cp), p, size);
    m->m_len = CMSG_SPACE(size);
    cp->cmsg_len = CMSG_LEN(size);
    cp->cmsg_level = level;
    cp->cmsg_type = type;
    return (m);
}
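/*
 * Illustrative, user-space sketch (not from the original source): the
 * CMSG_SPACE/CMSG_LEN/CMSG_DATA arithmetic that sbcreatecontrol() relies on
 * can be exercised outside the kernel.  This standalone program lays out a
 * single control message the same way, assuming a POSIX <sys/socket.h>.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

int
main(void)
{
    /* the union gives the buffer cmsghdr alignment, as cmsg(3) recommends */
    union {
        struct cmsghdr hdr;
        unsigned char buf[CMSG_SPACE(sizeof(int))];
    } u;
    struct cmsghdr *cp = &u.hdr;
    int ttl = 64;                           /* sample payload */

    memset(&u, 0, sizeof(u));               /* zero padding, like the bzero above */
    memcpy(CMSG_DATA(cp), &ttl, sizeof(ttl));
    cp->cmsg_len = CMSG_LEN(sizeof(ttl));   /* header + data, no trailing pad */
    cp->cmsg_level = IPPROTO_IP;
    cp->cmsg_type = IP_TTL;

    printf("CMSG_LEN=%zu CMSG_SPACE=%zu\n",
        (size_t)CMSG_LEN(sizeof(int)), (size_t)CMSG_SPACE(sizeof(int)));
    return 0;
}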
int
mb_put_mem(struct mbdata *mbp, const char *source, size_t size)
{
    struct mbuf *m;
    char *dst;
    size_t cplen;
    int error;

    if (size == 0)
        return 0;
    m = mbp->mb_cur;
    if ((error = m_getm(m, size, &m)) != 0)
        return error;
    while (size > 0) {
        cplen = M_TRAILINGSPACE(m);
        if (cplen == 0) {
            m = m->m_next;
            continue;
        }
        if (cplen > size)
            cplen = size;
        dst = mtod(m, char *) + m->m_len;
        if (source) {
            bcopy(source, dst, cplen);
            source += cplen;
        } else
            bzero(dst, cplen);
        size -= cplen;
        m->m_len += cplen;
        mbp->mb_count += cplen;
    }
    mbp->mb_pos = mtod(m, char *) + m->m_len;
    mbp->mb_cur = m;
    return 0;
}
void
mb_initm(struct mbchain *mbp, struct mbuf *m)
{
    bzero(mbp, sizeof(*mbp));
    mbp->mb_top = mbp->mb_cur = m;
    mbp->mb_mleft = M_TRAILINGSPACE(m);
}
/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, and no merging of data types
 *     will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
    int eor = 0;
    struct mbuf *o;

    SOCKBUF_LOCK_ASSERT(sb);

    while (m) {
        eor |= m->m_flags & M_EOR;
        if (m->m_len == 0 &&
            (eor == 0 ||
             (((o = m->m_next) || (o = n)) &&
              o->m_type == m->m_type))) {
            if (sb->sb_lastrecord == m)
                sb->sb_lastrecord = m->m_next;
            m = m_free(m);
            continue;
        }
        if (n && (n->m_flags & M_EOR) == 0 &&
            M_WRITABLE(n) &&
            ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
            m->m_len <= M_TRAILINGSPACE(n) &&
            n->m_type == m->m_type) {
            if (n->m_flags & M_HOLE) {
                n->m_len += m->m_len;
                sb->sb_cc += m->m_len;
                m = m_free(m);
                continue;
            } else if (m->m_len <= MCLBYTES / 4) {
                /* XXX: Don't copy too much */
                bcopy(mtod(m, caddr_t),
                    mtod(n, caddr_t) + n->m_len,
                    (unsigned)m->m_len);
                n->m_len += m->m_len;
                sb->sb_cc += m->m_len;
                if (m->m_type != MT_DATA &&
                    m->m_type != MT_OOBDATA)
                    /* XXX: Probably don't need. */
                    sb->sb_ctl += m->m_len;
                m = m_free(m);
                continue;
            }
        }
        if (n)
            n->m_next = m;
        else
            sb->sb_mb = m;
        sb->sb_mbtail = m;
        sballoc(sb, m);
        n = m;
        m->m_flags &= ~M_EOR;
        m = m->m_next;
        n->m_next = 0;
    }
    if (eor) {
        KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
        n->m_flags |= eor;
    }
    SBLASTMBUFCHK(sb);
}
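/*
 * Illustrative, user-space sketch (not from the original source): the core
 * coalescing decision above -- drop a buffer that contributes nothing, copy
 * a small buffer into the tail's unused trailing space, otherwise link it --
 * restated over a toy fixed-size buffer chain.  The struct and names here
 * are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUFCAP 64

struct tbuf {
    struct tbuf *next;
    size_t len;
    char data[BUFCAP];
};

/* Append src to the chain ending at tail, coalescing when it fits. */
static struct tbuf *
tbuf_compress(struct tbuf *tail, struct tbuf *src)
{
    if (src->len == 0) {                    /* case (1): contributes nothing */
        free(src);
        return tail;
    }
    if (tail != NULL && src->len <= BUFCAP - tail->len) {
        /* case (2): fits in trailing space, copy and free */
        memcpy(tail->data + tail->len, src->data, src->len);
        tail->len += src->len;
        free(src);
        return tail;
    }
    if (tail != NULL)                       /* case (3): link at the end */
        tail->next = src;
    src->next = NULL;
    return src;
}

int
main(void)
{
    struct tbuf *a = calloc(1, sizeof(*a));
    struct tbuf *b = calloc(1, sizeof(*b));

    memcpy(a->data, "hello ", 6); a->len = 6;
    memcpy(b->data, "world", 5);  b->len = 5;
    tbuf_compress(a, b);                    /* "world" coalesces into a */
    printf("%.*s (len=%zu)\n", (int)a->len, a->data, a->len);
    free(a);
    return 0;
}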
/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
    struct cmsghdr *cp;
    struct mbuf *m;

    if (CMSG_SPACE((u_int)size) > MCLBYTES)
        return ((struct mbuf *) NULL);
    if (CMSG_SPACE((u_int)size) > MLEN)
        m = m_getcl(M_DONTWAIT, MT_CONTROL, 0);
    else
        m = m_get(M_DONTWAIT, MT_CONTROL);
    if (m == NULL)
        return ((struct mbuf *) NULL);
    cp = mtod(m, struct cmsghdr *);
    m->m_len = 0;
    KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
        ("sbcreatecontrol: short mbuf"));
    if (p != NULL)
        (void)memcpy(CMSG_DATA(cp), p, size);
    m->m_len = CMSG_SPACE(size);
    cp->cmsg_len = CMSG_LEN(size);
    cp->cmsg_level = level;
    cp->cmsg_type = type;
    return (m);
}
/*
 * Defragment an mbuf chain, returning at most maxfrags separate
 * mbufs+clusters.  If this is not possible NULL is returned and
 * the original mbuf chain is left in its present (potentially
 * modified) state.  We use two techniques: collapsing consecutive
 * mbufs and replacing consecutive mbufs by a cluster.
 *
 * NB: this should really be named m_defrag but that name is taken
 */
struct mbuf *
m_collapse(struct mbuf *m0, int how, int maxfrags)
{
    struct mbuf *m, *n, *n2, **prev;
    u_int curfrags;

    /*
     * Calculate the current number of frags.
     */
    curfrags = 0;
    for (m = m0; m != NULL; m = m->m_next)
        curfrags++;
    /*
     * First, try to collapse mbufs.  Note that we always collapse
     * towards the front so we don't need to deal with moving the
     * pkthdr.  This may be suboptimal if the first mbuf has much
     * less data than the following.
     */
    m = m0;
again:
    for (;;) {
        n = m->m_next;
        if (n == NULL)
            break;
        if (M_WRITABLE(m) &&
            n->m_len < M_TRAILINGSPACE(m)) {
            bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
                n->m_len);
            m->m_len += n->m_len;
            m->m_next = n->m_next;
            m_free(n);
            if (--curfrags <= maxfrags)
                return m0;
        } else
m_megapullup(PNATState pData, struct mbuf *m, int len)
#endif
{
    struct mbuf *mcl;

    if (len > m->m_pkthdr.len)
        goto bad;

    /*
     * Do not reallocate the packet if it is sequential,
     * writable and has some extra space for expansion.
     * XXX: the constant of 100 bytes is completely empirical.
     */
#define RESERVE 100
    if (m->m_next == NULL && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= RESERVE)
        return (m);

    if (len <= MCLBYTES - RESERVE) {
#ifndef VBOX
        mcl = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
#else
        mcl = m_getcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR);
#endif
    } else if (len < MJUM16BYTES) {
        int size;
        if (len <= MJUMPAGESIZE - RESERVE) {
            size = MJUMPAGESIZE;
        } else if (len <= MJUM9BYTES - RESERVE) {
            size = MJUM9BYTES;
        } else {
            size = MJUM16BYTES;
        }
#ifndef VBOX
        mcl = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
#else
        mcl = m_getjcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR, size);
#endif
    } else {
        goto bad;
    }

    if (mcl == NULL)
        goto bad;

    m_move_pkthdr(mcl, m);
    m_copydata(m, 0, len, mtod(mcl, caddr_t));
    mcl->m_len = mcl->m_pkthdr.len = len;
#ifndef VBOX
    m_freem(m);
#else
    m_freem(pData, m);
#endif
    return (mcl);

bad:
#ifndef VBOX
    m_freem(m);
#else
    m_freem(pData, m);
#endif
    return (NULL);
}
int
mb_put_mem(struct mbchain *mbp, c_caddr_t source, int size, int type)
{
    struct mbuf *m;
    caddr_t dst;
    c_caddr_t src;
    int cplen, error, mleft, count;
    size_t srclen, dstlen;

    m = mbp->mb_cur;
    mleft = mbp->mb_mleft;

    while (size > 0) {
        if (mleft == 0) {
            if (m->m_next == NULL)
                m = m_getm(m, size, M_WAIT, MT_DATA);
            else
                m = m->m_next;
            mleft = M_TRAILINGSPACE(m);
            continue;
        }
        cplen = mleft > size ? size : mleft;
        srclen = dstlen = cplen;
        dst = mtod(m, caddr_t) + m->m_len;
        switch (type) {
        case MB_MCUSTOM:
            srclen = size;
            dstlen = mleft;
            error = mbp->mb_copy(mbp, source, dst, &srclen, &dstlen);
            if (error)
                return error;
            break;
        case MB_MINLINE:
            for (src = source, count = cplen; count; count--)
                *dst++ = *src++;
            break;
        case MB_MSYSTEM:
            bcopy(source, dst, cplen);
            break;
        case MB_MUSER:
            error = copyin(source, dst, cplen);
            if (error)
                return error;
            break;
        case MB_MZERO:
            bzero(dst, cplen);
            break;
        }
        size -= srclen;
        source += srclen;
        m->m_len += dstlen;
        mleft -= dstlen;
        mbp->mb_count += dstlen;
    }
    mbp->mb_cur = m;
    mbp->mb_mleft = mleft;
    return 0;
}
int
mb_put_mem(struct mbchain *mbp, const char *source, size_t size, int type)
{
    struct mbuf *m;
    char *dst;
    const char *src;
    int error;
    size_t cplen, mleft, count;

    m = mbp->mb_cur;
    mleft = mbp->mb_mleft;

    while (size > 0) {
        if (mleft == 0) {
            if (m->m_next == NULL) {
                m = m_getm(m, size, M_WAIT, MT_DATA);
                if (m == NULL)
                    return ENOBUFS;
            }
            m = m->m_next;
            mleft = M_TRAILINGSPACE(m);
            continue;
        }
        cplen = mleft > size ? size : mleft;
        dst = mtod(m, char *) + m->m_len;
        switch (type) {
        case MB_MCUSTOM:
            error = mbp->mb_copy(mbp, source, dst, cplen);
            if (error)
                return error;
            break;
        case MB_MINLINE:
            for (src = source, count = cplen; count; count--)
                *dst++ = *src++;
            break;
        case MB_MSYSTEM:
            memcpy(dst, source, cplen);
            break;
        case MB_MUSER:
            error = copyin(source, dst, cplen);
            if (error)
                return error;
            break;
        case MB_MZERO:
            memset(dst, 0, cplen);
            break;
        }
        size -= cplen;
        source += cplen;
        m->m_len += cplen;
        mleft -= cplen;
        mbp->mb_count += cplen;
    }
    mbp->mb_cur = m;
    mbp->mb_mleft = mleft;
    return 0;
}
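/*
 * Illustrative, user-space sketch (not from the original source): the
 * copy-type dispatch above, reduced to the variants that exist outside the
 * kernel (copyin() has no user-space analogue).  Names are hypothetical.
 */
#include <stdio.h>
#include <string.h>

enum puttype { PUT_SYSTEM, PUT_INLINE, PUT_ZERO };

static void
put_mem(char *dst, const char *src, size_t len, enum puttype type)
{
    size_t i;

    switch (type) {
    case PUT_SYSTEM:            /* bulk copy, like MB_MSYSTEM */
        memcpy(dst, src, len);
        break;
    case PUT_INLINE:            /* byte-at-a-time loop, like MB_MINLINE */
        for (i = 0; i < len; i++)
            dst[i] = src[i];
        break;
    case PUT_ZERO:              /* fill with zeroes, like MB_MZERO */
        memset(dst, 0, len);
        break;
    }
}

int
main(void)
{
    char buf[8] = "XXXXXXX";

    put_mem(buf, "ab", 2, PUT_SYSTEM);
    put_mem(buf + 2, "cd", 2, PUT_INLINE);
    put_mem(buf + 4, NULL, 3, PUT_ZERO);
    printf("%s\n", buf);        /* prints "abcd" */
    return 0;
}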
int
mb_put_mbuf(struct mbchain *mbp, struct mbuf *m)
{
    mbp->mb_cur->m_next = m;
    while (m) {
        mbp->mb_count += m->m_len;
        if (m->m_next == NULL)
            break;
        m = m->m_next;
    }
    mbp->mb_mleft = M_TRAILINGSPACE(m);
    mbp->mb_cur = m;
    return 0;
}
/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
    int eor = 0;
    struct mbuf *o;

    while (m) {
        eor |= m->m_flags & M_EOR;
        if (m->m_len == 0 &&
            (eor == 0 ||
             (((o = m->m_next) || (o = n)) &&
              o->m_type == m->m_type))) {
            if (sb->sb_lastrecord == m)
                sb->sb_lastrecord = m->m_next;
            m = m_free(m);
            continue;
        }
        if (n && (n->m_flags & M_EOR) == 0 &&
            /* M_TRAILINGSPACE() checks buffer writeability */
            m->m_len <= MCLBYTES / 4 && /* XXX Don't copy too much */
            m->m_len <= M_TRAILINGSPACE(n) &&
            n->m_type == m->m_type) {
            memcpy(mtod(n, caddr_t) + n->m_len, mtod(m, caddr_t),
                m->m_len);
            n->m_len += m->m_len;
            sb->sb_cc += m->m_len;
            if (m->m_type != MT_CONTROL && m->m_type != MT_SONAME)
                sb->sb_datacc += m->m_len;
            m = m_free(m);
            continue;
        }
        if (n)
            n->m_next = m;
        else
            sb->sb_mb = m;
        sb->sb_mbtail = m;
        sballoc(sb, m);
        n = m;
        m->m_flags &= ~M_EOR;
        m = m->m_next;
        n->m_next = NULL;
    }
    if (eor) {
        if (n)
            n->m_flags |= eor;
        else
            printf("semi-panic: sbcompress");
    }
    SBLASTMBUFCHK(sb, __func__);
}
/*
 * Line specific (tty) write routine.
 */
int
pppwrite(struct rtems_termios_tty *tty, rtems_libio_rw_args_t *rw_args)
{
    struct sockaddr dst;
    int n;
    int len;
    int maximum = rw_args->count;
    char *out_buffer = rw_args->buffer;
    register struct ppp_softc *sc = (struct ppp_softc *)tty->t_sc;
    struct mbuf *m;
    struct mbuf *m0;
    struct mbuf **mp;

    rtems_bsdnet_semaphore_obtain();
    for (mp = &m0; maximum; mp = &m->m_next) {
        MGET(m, M_WAIT, MT_DATA);
        if ((*mp = m) == NULL) {
            m_freem(m0);
            return (ENOBUFS);
        }
        m->m_len = 0;
        if (maximum >= MCLBYTES / 2) {
            MCLGET(m, M_DONTWAIT);
        }
        len = M_TRAILINGSPACE(m);
        if (len > maximum) {
            memcpy(mtod(m, u_char *), out_buffer, maximum);
            m->m_len = maximum;
            maximum = 0;
        } else {
            memcpy(mtod(m, u_char *), out_buffer, len);
            m->m_len = len;
            maximum -= len;
            out_buffer += len;
        }
    }

    dst.sa_family = AF_UNSPEC;
    bcopy(mtod(m0, u_char *), dst.sa_data, PPP_HDRLEN);
    m0->m_data += PPP_HDRLEN;
    m0->m_len -= PPP_HDRLEN;

    n = pppoutput(&sc->sc_if, m0, &dst, (struct rtentry *)0);

    rtems_bsdnet_semaphore_release();
    return (n);
}
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, c_caddr_t cp)
{
    int mlen;
    struct mbuf *m = m0, *n;
    int totlen = 0;

    if (m0 == NULL)
        return;
    while (off > (mlen = m->m_len)) {
        off -= mlen;
        totlen += mlen;
        if (m->m_next == NULL) {
            n = m_get(M_NOWAIT, m->m_type);
            if (n == NULL)
                goto out;
            bzero(mtod(n, caddr_t), MLEN);
            n->m_len = min(MLEN, len + off);
            m->m_next = n;
        }
        m = m->m_next;
    }
    while (len > 0) {
        if (m->m_next == NULL && (len > m->m_len - off)) {
            m->m_len += min(len - (m->m_len - off),
                M_TRAILINGSPACE(m));
        }
        mlen = min(m->m_len - off, len);
        bcopy(cp, off + mtod(m, caddr_t), (u_int)mlen);
        cp += mlen;
        len -= mlen;
        mlen += off;
        off = 0;
        totlen += mlen;
        if (len == 0)
            break;
        if (m->m_next == NULL) {
            n = m_get(M_NOWAIT, m->m_type);
            if (n == NULL)
                break;
            n->m_len = min(MLEN, len);
            m->m_next = n;
        }
        m = m->m_next;
    }
out:
    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
        m->m_pkthdr.len = totlen;
}
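/*
 * Illustrative, user-space sketch (not from the original source): the same
 * "write at an offset, growing the buffer when the data does not fit" idea
 * as m_copyback(), over a flat heap buffer instead of an mbuf chain.  All
 * names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct flatbuf {
    char *data;
    size_t len;                 /* bytes currently valid */
};

/* Copy len bytes from cp into fb at off, extending fb if necessary. */
static int
flat_copyback(struct flatbuf *fb, size_t off, size_t len, const char *cp)
{
    if (off + len > fb->len) {
        char *p = realloc(fb->data, off + len);
        if (p == NULL)
            return -1;
        /* zero any gap before off, as the kernel zeroes new gap mbufs */
        if (off > fb->len)
            memset(p + fb->len, 0, off - fb->len);
        fb->data = p;
        fb->len = off + len;
    }
    memcpy(fb->data + off, cp, len);
    return 0;
}

int
main(void)
{
    struct flatbuf fb = { NULL, 0 };

    flat_copyback(&fb, 0, 5, "hello");
    flat_copyback(&fb, 6, 6, "world");      /* extends; includes the NUL */
    printf("%s %s (len=%zu)\n", fb.data, fb.data + 6, fb.len);
    free(fb.data);
    return 0;
}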
/*
 * Concatenate mbuf chain n to m.
 * n might be copied into m (when n->m_len is small), therefore data portion
 * of n could be copied into an mbuf of different mbuf type.
 * Therefore both chains should be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
    while (m->m_next)
        m = m->m_next;
    while (n) {
        if (M_READONLY(m) || n->m_len > M_TRAILINGSPACE(m)) {
            /* just join the two chains */
            m->m_next = n;
            return;
        }
        /* splat the data from one into the other */
        memcpy(mtod(m, caddr_t) + m->m_len, mtod(n, caddr_t),
            n->m_len);
        m->m_len += n->m_len;
        n = m_free(n);
    }
}
struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
    struct vte_txdesc *txd;
    struct mbuf *m, *n;
    int copy, error, padlen;

    txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
    m = *m_head;
    /*
     * Controller doesn't auto-pad, so we have to pad short
     * frames out to the minimum frame length.
     */
    if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
        padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
    else
        padlen = 0;

    /*
     * Controller does not support multi-fragmented TX buffers.
     * Controller spends most of its TX processing time in
     * de-fragmenting TX buffers.  Either a faster CPU or a more
     * advanced controller DMA engine is required to speed up
     * TX path processing.
     * To mitigate the de-fragmenting issue, perform deep copy
     * from fragmented mbuf chains to a pre-allocated mbuf
     * cluster with extra cost of kernel memory.  For frames
     * that are composed of a single TX buffer, the deep copy is
     * bypassed.
     */
    copy = 0;
    if (m->m_next != NULL)
        copy++;
    if (padlen > 0 && (padlen > M_TRAILINGSPACE(m)))
        copy++;
    if (copy != 0) {
        /* Avoid expensive m_defrag(9) and do deep copy. */
        n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
        m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
        n->m_pkthdr.len = m->m_pkthdr.len;
        n->m_len = m->m_pkthdr.len;
        m = n;
        txd->tx_flags |= VTE_TXMBUF;
    }
/*
 * Compress mbuf chain m into the socket
 * buffer sb following mbuf n.  If n
 * is null, the buffer is presumed empty.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
    int eor;
    struct mbuf *o;

    KASSERT(solocked(sb->sb_so));

    eor = 0;
    while (m) {
        eor |= m->m_flags & M_EOR;
        if (m->m_len == 0 &&
            (eor == 0 ||
             (((o = m->m_next) || (o = n)) &&
              o->m_type == m->m_type))) {
            if (sb->sb_lastrecord == m)
                sb->sb_lastrecord = m->m_next;
            m = m_free(m);
            continue;
        }
        if (n && (n->m_flags & M_EOR) == 0 &&
            /* M_TRAILINGSPACE() checks buffer writeability */
            m->m_len <= MCLBYTES / 4 && /* XXX Don't copy too much */
            m->m_len <= M_TRAILINGSPACE(n) &&
            n->m_type == m->m_type) {
            memcpy(mtod(n, char *) + n->m_len, mtod(m, void *),
                (unsigned)m->m_len);
            n->m_len += m->m_len;
            sb->sb_cc += m->m_len;
            m = m_free(m);
            continue;
        }
        if (n)
            n->m_next = m;
        else
            sb->sb_mb = m;
        sb->sb_mbtail = m;
        sballoc(sb, m);
        n = m;
        m->m_flags &= ~M_EOR;
        m = m->m_next;
        n->m_next = 0;
    }
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
    while (m->m_next)
        m = m->m_next;
    while (n) {
        if (!M_WRITABLE(m) ||
            M_TRAILINGSPACE(m) < n->m_len) {
            /* just join the two chains */
            m->m_next = n;
            return;
        }
        /* splat the data from one into the other */
        bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
            (u_int)n->m_len);
        m->m_len += n->m_len;
        n = m_free(n);
    }
}
/*
int
mb_fixhdr(struct mbdata *mbp)
{
    struct mbuf *m = mbp->mb_top;
    int len = 0;

    while (m) {
        len += m->m_len;
        m = m->m_next;
    }
    mbp->mb_top->m_pkthdr.len = len;
    return len;
}
*/

int
m_getm(struct mbuf *top, size_t len, struct mbuf **mpp)
{
    struct mbuf *m, *mp;
    int error;

    for (mp = top; ; mp = mp->m_next) {
        /* stop before subtracting past zero: len is unsigned */
        if (len <= M_TRAILINGSPACE(mp)) {
            len = 0;
            break;
        }
        len -= M_TRAILINGSPACE(mp);
        if (mp->m_next == NULL)
            break;
    }
    if (len > 0) {
        if ((error = m_get(len, &m)) != 0)
            return error;
        mp->m_next = m;
    }
    *mpp = top;
    return 0;
}
void *
nfsm_build_xx(int s, struct mbuf **mb, caddr_t *bpos)
{
    struct mbuf *mb2;
    void *ret;

    if (s > M_TRAILINGSPACE(*mb)) {
        mb2 = m_get(M_WAITOK, MT_DATA);
        if (s > MLEN)
            panic("build > MLEN");
        (*mb)->m_next = mb2;
        *mb = mb2;
        (*mb)->m_len = 0;
        *bpos = mtod(*mb, caddr_t);
    }
    ret = *bpos;
    (*mb)->m_len += s;
    *bpos += s;
    return (ret);
}
/*
 * Check if an object of size 'size' fits at the current position and
 * allocate a new mbuf if not.  Advance pointers and increase the length of
 * the mbuf(s).  On success, store a pointer to the object placeholder in
 * *pp and return 0; otherwise return an error.
 */
int
mb_fit(struct mbdata *mbp, size_t size, void **pp)
{
    struct mbuf *m, *mn;
    int error;

    m = mbp->mb_cur;
    if (M_TRAILINGSPACE(m) < (int)size) {
        if ((error = m_get(size, &mn)) != 0)
            return error;
        mbp->mb_pos = mtod(mn, char *);
        mbp->mb_cur = m->m_next = mn;
        m = mn;
    }
    m->m_len += size;
    *pp = mbp->mb_pos;
    mbp->mb_pos += size;
    mbp->mb_count += size;
    return 0;
}
/*
 * Check if an object of size 'size' fits at the current position and
 * allocate a new mbuf if not.  Advance pointers and increase the length of
 * the mbuf(s).  Return a pointer to the object placeholder.
 * Note: size should be <= MLEN
 */
caddr_t
mb_reserve(struct mbchain *mbp, int size)
{
    struct mbuf *m, *mn;
    caddr_t bpos;

    if (size > MLEN)
        panic("mb_reserve: size = %d\n", size);
    m = mbp->mb_cur;
    if (mbp->mb_mleft < size) {
        mn = m_get(M_WAIT, MT_DATA);
        mbp->mb_cur = m->m_next = mn;
        m = mn;
        m->m_len = 0;
        mbp->mb_mleft = M_TRAILINGSPACE(m);
    }
    mbp->mb_mleft -= size;
    mbp->mb_count += size;
    bpos = mtod(m, caddr_t) + m->m_len;
    m->m_len += size;
    return bpos;
}
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
int
m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
    struct mbuf *m, *n;
    int remainder, space;

    for (m = m0; m->m_next != NULL; m = m->m_next)
        ;
    remainder = len;
    space = M_TRAILINGSPACE(m);
    if (space > 0) {
        /*
         * Copy into available space.
         */
        if (space > remainder)
            space = remainder;
        bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
        m->m_len += space;
        cp += space, remainder -= space;
    }
    while (remainder > 0) {
        /*
         * Allocate a new mbuf; could check space
         * and allocate a cluster instead.
         */
        n = m_get(M_NOWAIT, m->m_type);
        if (n == NULL)
            break;
        n->m_len = min(MLEN, remainder);
        bcopy(cp, mtod(n, caddr_t), n->m_len);
        cp += n->m_len, remainder -= n->m_len;
        m->m_next = n;
        m = n;
    }
    if (m0->m_flags & M_PKTHDR)
        m0->m_pkthdr.len += len - remainder;
    return (remainder == 0);
}
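/*
 * Illustrative, user-space sketch (not from the original source): m_append's
 * strategy -- fill the tail's trailing space first, then allocate fixed-size
 * nodes for the remainder -- restated over a hypothetical toy chain.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NODECAP 8

struct node {
    struct node *next;
    size_t len;
    char data[NODECAP];
};

/* Returns 1 if the whole buffer was appended, 0 on allocation failure. */
static int
node_append(struct node *head, const char *cp, size_t len)
{
    struct node *m, *n;
    size_t space;

    for (m = head; m->next != NULL; m = m->next)
        ;                                   /* find the tail */
    space = NODECAP - m->len;
    if (space > len)
        space = len;
    memcpy(m->data + m->len, cp, space);    /* fill trailing space first */
    m->len += space;
    cp += space;
    len -= space;
    while (len > 0) {                       /* allocate nodes for the rest */
        n = calloc(1, sizeof(*n));
        if (n == NULL)
            return 0;
        n->len = len < NODECAP ? len : NODECAP;
        memcpy(n->data, cp, n->len);
        cp += n->len;
        len -= n->len;
        m->next = n;
        m = n;
    }
    return 1;
}

int
main(void)
{
    struct node head = { NULL, 0, {0} };
    struct node *m;

    node_append(&head, "spans several nodes", 19);
    for (m = &head; m != NULL; m = m->next)
        printf("[%.*s]", (int)m->len, m->data);
    putchar('\n');                          /* [spans se][veral no][des] */
    return 0;
}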
/*
 * Create a writable copy of the mbuf chain.  While doing this
 * we compact the chain with a goal of producing a chain with
 * at most two mbufs.  The second mbuf in this chain is likely
 * to be a cluster.  The primary purpose of this work is to create
 * a writable packet for encryption, compression, etc.  The
 * secondary goal is to linearize the data so the data can be
 * passed to crypto hardware in the most efficient manner possible.
 */
struct mbuf *
m_clone(struct mbuf *m0)
{
    struct mbuf *m, *mprev;
    struct mbuf *n, *mfirst, *mlast;
    int len, off;

    IPSEC_ASSERT(m0 != NULL, ("m_clone: null mbuf"));

    mprev = NULL;
    for (m = m0; m != NULL; m = mprev->m_next) {
        /*
         * Regular mbufs are ignored unless there's a cluster
         * in front of it that we can use to coalesce.  We do
         * the latter mainly so later clusters can be coalesced
         * also w/o having to handle them specially (i.e. convert
         * mbuf+cluster -> cluster).  This optimization is heavily
         * influenced by the assumption that we're running over
         * Ethernet where MCLBYTES is large enough that the max
         * packet size will permit lots of coalescing into a
         * single cluster.  This in turn permits efficient
         * crypto operations, especially when using hardware.
         */
        if ((m->m_flags & M_EXT) == 0) {
            if (mprev && (mprev->m_flags & M_EXT) &&
                m->m_len <= M_TRAILINGSPACE(mprev)) {
                /* XXX: this ignores mbuf types */
                memcpy(mtod(mprev, char *) + mprev->m_len,
                    mtod(m, char *), m->m_len);
                mprev->m_len += m->m_len;
                /* unlink from chain */
                mprev->m_next = m->m_next;
                /* reclaim mbuf */
                m_free(m);
                IPSEC_STATINC(IPSEC_STAT_MBCOALESCED);
            } else {
                mprev = m;
            }
            continue;
        }
DECLINLINE(int)
tftpAddOptionToOACK(PNATState pData, struct mbuf *pMBuf, const char *pszOptName, uint64_t u64OptValue)
{
    char aszOptionBuffer[256];
    size_t iOptLength = 0;
    int rc = VINF_SUCCESS;
    int cbMBufCurrent = pMBuf->m_len;

    LogFlowFunc(("pMBuf:%p, pszOptName:%s, u64OptValue:%RU64\n", pMBuf, pszOptName, u64OptValue));
    AssertPtrReturn(pMBuf, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszOptName, VERR_INVALID_PARAMETER);

    RT_ZERO(aszOptionBuffer);
    iOptLength += RTStrPrintf(aszOptionBuffer, 256, "%s", pszOptName) + 1;
    iOptLength += RTStrPrintf(aszOptionBuffer + iOptLength, 256 - iOptLength, "%llu", u64OptValue) + 1;
    if (iOptLength > M_TRAILINGSPACE(pMBuf))
        rc = VERR_BUFFER_OVERFLOW; /* buffer too small */
    else
    {
        pMBuf->m_len += iOptLength;
        m_copyback(pData, pMBuf, cbMBufCurrent, iOptLength, aszOptionBuffer);
    }
    LogFlowFuncLeaveRC(rc);
    return rc;
}
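/*
 * Illustrative, user-space sketch (not from the original source): a TFTP
 * OACK option is encoded as two consecutive NUL-terminated strings,
 * "name\0value\0" (RFC 2347), which is the layout the function above builds
 * with RTStrPrintf.  This standalone helper is hypothetical and uses plain
 * snprintf instead.
 */
#include <stdio.h>
#include <string.h>

/* Encode "name\0value\0" into buf; returns bytes written, or 0 if too small. */
static size_t
tftp_encode_option(char *buf, size_t cap, const char *name,
    unsigned long long value)
{
    int n = snprintf(buf, cap, "%s", name);
    size_t off;

    if (n < 0 || (size_t)n + 1 >= cap)
        return 0;
    off = (size_t)n + 1;                    /* keep the terminating NUL */
    n = snprintf(buf + off, cap - off, "%llu", value);
    if (n < 0 || off + (size_t)n + 1 > cap)
        return 0;
    return off + (size_t)n + 1;
}

int
main(void)
{
    char buf[64];
    size_t len = tftp_encode_option(buf, sizeof(buf), "blksize", 1428);
    size_t i;

    for (i = 0; i < len; i++)               /* show embedded NULs as '.' */
        putchar(buf[i] ? buf[i] : '.');
    printf(" (%zu bytes)\n", len);          /* blksize.1428. (13 bytes) */
    return 0;
}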
/*
 * Check if an object of size 'size' fits at the current position and
 * allocate a new mbuf if not.  Advance pointers and increase the length of
 * the mbuf(s).  Return a pointer to the object placeholder, or NULL if any
 * error occurred.
 * Note: size should be <= MLEN
 */
void *
mb_reserve(struct mbchain *mbp, size_t size)
{
    struct mbuf *m, *mn;
    void *bpos;

    if (size > MLEN)
        panic("mb_reserve: size = %zu", size);
    m = mbp->mb_cur;
    if (mbp->mb_mleft < size) {
        mn = m_get(M_WAIT, MT_DATA);
        if (mn == NULL)
            return NULL;
        mbp->mb_cur = m->m_next = mn;
        m = mn;
        m->m_len = 0;
        mbp->mb_mleft = M_TRAILINGSPACE(m);
    }
    mbp->mb_mleft -= size;
    mbp->mb_count += size;
    bpos = mtod(m, char *) + m->m_len;
    m->m_len += size;
    return bpos;
}
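/*
 * Illustrative, user-space sketch (not from the original source): the
 * "reserve a placeholder now, fill it in later" pattern that mb_reserve()
 * provides, restated over a flat buffer with a hypothetical arena struct.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct arena {
    char buf[128];
    size_t len;                 /* bytes already claimed */
};

/* Claim size bytes and return a pointer to them, or NULL if full. */
static void *
arena_reserve(struct arena *a, size_t size)
{
    void *p;

    if (size > sizeof(a->buf) - a->len)
        return NULL;
    p = a->buf + a->len;
    a->len += size;
    return p;
}

int
main(void)
{
    struct arena a = { {0}, 0 };
    uint16_t *lenfield = arena_reserve(&a, sizeof(*lenfield));
    char *body = arena_reserve(&a, 5);

    memcpy(body, "hello", 5);
    *lenfield = 5;              /* back-patch the reserved placeholder */
    printf("claimed %zu bytes, lenfield=%u\n", a.len, *lenfield);
    return 0;
}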
/*
 * admsw_start:	[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
admsw_start(struct ifnet *ifp)
{
    struct admsw_softc *sc = ifp->if_softc;
    struct mbuf *m0, *m;
    struct admsw_descsoft *ds;
    struct admsw_desc *desc;
    bus_dmamap_t dmamap;
    struct ether_header *eh;
    int error, nexttx, len, i;
    static int vlan = 0;

    /*
     * Loop through the send queues, setting up transmit descriptors
     * until we drain the queues, or use up all available transmit
     * descriptors.
     */
    for (;;) {
        vlan++;
        if (vlan == SW_DEVS)
            vlan = 0;
        i = vlan;
        for (;;) {
            ifp = sc->sc_ifnet[i];
            if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE))
                == IFF_DRV_RUNNING) {
                /* Grab a packet off the queue. */
                IF_DEQUEUE(&ifp->if_snd, m0);
                if (m0 != NULL)
                    break;
            }
            i++;
            if (i == SW_DEVS)
                i = 0;
            if (i == vlan)
                return;
        }
        vlan = i;
        m = NULL;

        /* Get a spare descriptor. */
        if (sc->sc_txfree == 0) {
            /* No more slots left; notify upper layer. */
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            break;
        }
        nexttx = sc->sc_txnext;
        desc = &sc->sc_txldescs[nexttx];
        ds = &sc->sc_txlsoft[nexttx];
        dmamap = ds->ds_dmamap;

        /*
         * Load the DMA map.  If this fails, the packet either
         * didn't fit in the allotted number of segments, or we
         * were short on resources.  In this case, we'll copy
         * and try again.
         */
        if (m0->m_pkthdr.len < ETHER_MIN_LEN ||
            bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0,
            admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) {
            MGETHDR(m, M_NOWAIT, MT_DATA);
            if (m == NULL) {
                device_printf(sc->sc_dev,
                    "unable to allocate Tx mbuf\n");
                break;
            }
            if (m0->m_pkthdr.len > MHLEN) {
                MCLGET(m, M_NOWAIT);
                if ((m->m_flags & M_EXT) == 0) {
                    device_printf(sc->sc_dev,
                        "unable to allocate Tx cluster\n");
                    m_freem(m);
                    break;
                }
            }
            m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;
            m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
            m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
            if (m->m_pkthdr.len < ETHER_MIN_LEN) {
                if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len)
                    panic("admsw_start: M_TRAILINGSPACE\n");
                memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0,
                    ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len);
                m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN;
            }
            error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat,
                dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT);
            if (error) {
                device_printf(sc->sc_dev,
                    "unable to load Tx buffer, error = %d\n",
                    error);
                break;
            }
        }
        if (m != NULL) {
            m_freem(m0);
            m0 = m;
        }

        /*
         * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
         */

        /* Sync the DMA map. */
        bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE);

        if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2)
            panic("admsw_start: nsegs == %d\n", ds->ds_nsegs);

        desc->data = ds->ds_addr[0];
        desc->len = len = ds->ds_len[0];
        if (ds->ds_nsegs > 1) {
            len += ds->ds_len[1];
            desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE;
        } else
            desc->cntl = 0;
        desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan);
        eh = mtod(m0, struct ether_header *);
        if (ntohs(eh->ether_type) == ETHERTYPE_IP &&
            m0->m_pkthdr.csum_flags & CSUM_IP)
            desc->status |= ADM5120_DMA_CSUM;
        if (nexttx == ADMSW_NTXLDESC - 1)
            desc->data |= ADM5120_DMA_RINGEND;
        desc->data |= ADM5120_DMA_OWN;

        /* Sync the descriptor. */
        ADMSW_CDTXLSYNC(sc, nexttx,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        REG_WRITE(SEND_TRIG_REG, 1);
        /* printf("send slot %d\n", nexttx); */

        /*
         * Store a pointer to the packet so we can free it later.
         */
        ds->ds_mbuf = m0;

        /* Advance the Tx pointer. */
        sc->sc_txfree--;
        sc->sc_txnext = ADMSW_NEXTTXL(nexttx);

        /* Pass the packet to any BPF listeners. */
        BPF_MTAP(ifp, m0);

        /* Set a watchdog timer in case the chip flakes out. */
        sc->sc_timer = 5;
    }
/*
 * receive incoming data from device, store in mbuf chain and
 * pass on complete packets to bt device
 */
static void
bt3c_receive(struct bt3c_softc *sc)
{
    struct mbuf *m = sc->sc_rxp;
    int space = 0;
    uint16_t count;
    uint8_t b;

    /*
     * If we already started a packet, find the
     * trailing end of it.
     */
    if (m) {
        while (m->m_next)
            m = m->m_next;

        space = M_TRAILINGSPACE(m);
    }

    count = bt3c_read(sc, BT3C_RX_COUNT);
    bt3c_set_address(sc, BT3C_RX_FIFO);

    while (count > 0) {
        if (space == 0) {
            if (m == NULL) {
                /* new packet */
                MGETHDR(m, M_DONTWAIT, MT_DATA);
                if (m == NULL) {
                    aprint_error_dev(sc->sc_dev,
                        "out of memory\n");
                    sc->sc_stats.err_rx++;
                    goto out;   /* (lost sync) */
                }

                sc->sc_rxp = m;
                m->m_pkthdr.len = m->m_len = 0;
                space = MHLEN;

                sc->sc_state = BT3C_RECV_PKT_TYPE;
                sc->sc_want = 1;
            } else {
                /* extend mbuf */
                MGET(m->m_next, M_DONTWAIT, MT_DATA);
                if (m->m_next == NULL) {
                    aprint_error_dev(sc->sc_dev,
                        "out of memory\n");
                    sc->sc_stats.err_rx++;
                    goto out;   /* (lost sync) */
                }

                m = m->m_next;
                m->m_len = 0;
                space = MLEN;

                if (sc->sc_want > MINCLSIZE) {
                    MCLGET(m, M_DONTWAIT);
                    if (m->m_flags & M_EXT)
                        space = MCLBYTES;
                }
            }
        }

        b = bt3c_get(sc);
        mtod(m, uint8_t *)[m->m_len++] = b;
        count--;
        space--;
        sc->sc_rxp->m_pkthdr.len++;
        sc->sc_stats.byte_rx++;

        sc->sc_want--;
        if (sc->sc_want > 0)
            continue;           /* want more */

        switch (sc->sc_state) {
        case BT3C_RECV_PKT_TYPE:        /* Got packet type */
            switch (b) {
            case HCI_ACL_DATA_PKT:
                sc->sc_state = BT3C_RECV_ACL_HDR;
                sc->sc_want = sizeof(hci_acldata_hdr_t) - 1;
                break;

            case HCI_SCO_DATA_PKT:
                sc->sc_state = BT3C_RECV_SCO_HDR;
                sc->sc_want = sizeof(hci_scodata_hdr_t) - 1;
                break;

            case HCI_EVENT_PKT:
                sc->sc_state = BT3C_RECV_EVENT_HDR;
                sc->sc_want = sizeof(hci_event_hdr_t) - 1;
                break;

            default:
                aprint_error_dev(sc->sc_dev,
                    "Unknown packet type=%#x!\n", b);
                sc->sc_stats.err_rx++;
                m_freem(sc->sc_rxp);
                sc->sc_rxp = NULL;
                goto out;       /* (lost sync) */
            }
            break;

        /*
         * we assume (correctly of course :) that the packet headers
         * all fit into a single pkthdr mbuf
         */
        case BT3C_RECV_ACL_HDR:         /* Got ACL Header */
            sc->sc_state = BT3C_RECV_ACL_DATA;
            sc->sc_want = mtod(m, hci_acldata_hdr_t *)->length;
            sc->sc_want = le16toh(sc->sc_want);
            break;

        case BT3C_RECV_SCO_HDR:         /* Got SCO Header */
            sc->sc_state = BT3C_RECV_SCO_DATA;
            sc->sc_want = mtod(m, hci_scodata_hdr_t *)->length;
            break;

        case BT3C_RECV_EVENT_HDR:       /* Got Event Header */
            sc->sc_state = BT3C_RECV_EVENT_DATA;
            sc->sc_want = mtod(m, hci_event_hdr_t *)->length;
            break;

        case BT3C_RECV_ACL_DATA:        /* ACL Packet Complete */
            if (!hci_input_acl(sc->sc_unit, sc->sc_rxp))
                sc->sc_stats.err_rx++;

            sc->sc_stats.acl_rx++;
            sc->sc_rxp = m = NULL;
            space = 0;
            break;

        case BT3C_RECV_SCO_DATA:        /* SCO Packet Complete */
            if (!hci_input_sco(sc->sc_unit, sc->sc_rxp))
                sc->sc_stats.err_rx++;

            sc->sc_stats.sco_rx++;
            sc->sc_rxp = m = NULL;
            space = 0;
            break;

        case BT3C_RECV_EVENT_DATA:      /* Event Packet Complete */
            if (!hci_input_event(sc->sc_unit, sc->sc_rxp))
                sc->sc_stats.err_rx++;

            sc->sc_stats.evt_rx++;
            sc->sc_rxp = m = NULL;
            space = 0;
            break;

        default:
            panic("%s: invalid state %d!\n",
                device_xname(sc->sc_dev), sc->sc_state);
        }
    }

out:
    bt3c_write(sc, BT3C_RX_COUNT, 0x0000);
}
struct mbuf *
m_copym0(struct mbuf *m0, int off, int len, int wait, int deep)
{
    struct mbuf *m, *n, **np;
    struct mbuf *top;
    int copyhdr = 0;

    if (off < 0 || len < 0)
        panic("m_copym0: off %d, len %d", off, len);
    if (off == 0 && m0->m_flags & M_PKTHDR)
        copyhdr = 1;
    if ((m = m_getptr(m0, off, &off)) == NULL)
        panic("m_copym0: short mbuf chain");
    np = &top;
    top = NULL;
    while (len > 0) {
        if (m == NULL) {
            if (len != M_COPYALL)
                panic("m_copym0: m == NULL and not COPYALL");
            break;
        }
        MGET(n, wait, m->m_type);
        *np = n;
        if (n == NULL)
            goto nospace;
        if (copyhdr) {
            if (m_dup_pkthdr(n, m0, wait))
                goto nospace;
            if (len != M_COPYALL)
                n->m_pkthdr.len = len;
            copyhdr = 0;
        }
        n->m_len = min(len, m->m_len - off);
        if (m->m_flags & M_EXT) {
            if (!deep) {
                n->m_data = m->m_data + off;
                n->m_ext = m->m_ext;
                MCLADDREFERENCE(m, n);
            } else {
                /*
                 * we are unsure about the way m was allocated.
                 * copy into multiple MCLBYTES cluster mbufs.
                 */
                MCLGET(n, wait);
                n->m_len = 0;
                n->m_len = M_TRAILINGSPACE(n);
                n->m_len = min(n->m_len, len);
                n->m_len = min(n->m_len, m->m_len - off);
                memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
                    n->m_len);
            }
        } else
            memcpy(mtod(n, caddr_t), mtod(m, caddr_t) + off,
                n->m_len);
        if (len != M_COPYALL)
            len -= n->m_len;
        off += n->m_len;
#ifdef DIAGNOSTIC
        if (off > m->m_len)
            panic("m_copym0 overrun");
#endif
        if (off == m->m_len) {
            m = m->m_next;
            off = 0;
        }
        np = &n->m_next;
    }
    return (top);
nospace:
    m_freem(top);
    return (NULL);
}
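/*
 * Illustrative, user-space sketch (not from the original source): the deep
 * branch above copies a region in pieces no larger than one cluster.  The
 * same chunking loop over flat memory, with a hypothetical CHUNK size
 * standing in for MCLBYTES:
 */
#include <stdio.h>
#include <string.h>

#define CHUNK 4                 /* stand-in for MCLBYTES */

/* Copy len bytes from src to dst in CHUNK-sized pieces. */
static void
chunked_copy(char *dst, const char *src, size_t len)
{
    while (len > 0) {
        size_t n = len < CHUNK ? len : CHUNK;
        memcpy(dst, src, n);
        printf("copied %zu byte(s)\n", n);
        dst += n;
        src += n;
        len -= n;
    }
}

int
main(void)
{
    char out[16] = {0};

    chunked_copy(out, "ten bytes!", 10);    /* 4 + 4 + 2 */
    printf("%s\n", out);
    return 0;
}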
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.  The mbuf needs to be properly initialized
 * including the setting of m_len.
 */
int
m_copyback(struct mbuf *m0, int off, int len, const void *_cp, int wait)
{
    int mlen, totlen = 0;
    struct mbuf *m = m0, *n;
    caddr_t cp = (caddr_t)_cp;
    int error = 0;

    if (m0 == NULL)
        return (0);
    while (off > (mlen = m->m_len)) {
        off -= mlen;
        totlen += mlen;
        if (m->m_next == NULL) {
            if ((n = m_get(wait, m->m_type)) == NULL) {
                error = ENOBUFS;
                goto out;
            }

            if (off + len > MLEN) {
                MCLGETI(n, wait, NULL, off + len);
                if (!(n->m_flags & M_EXT)) {
                    m_free(n);
                    error = ENOBUFS;
                    goto out;
                }
            }
            memset(mtod(n, caddr_t), 0, off);
            n->m_len = len + off;
            m->m_next = n;
        }
        m = m->m_next;
    }
    while (len > 0) {
        /* extend last packet to be filled fully */
        if (m->m_next == NULL && (len > m->m_len - off))
            m->m_len += min(len - (m->m_len - off),
                M_TRAILINGSPACE(m));
        mlen = min(m->m_len - off, len);
        memmove(mtod(m, caddr_t) + off, cp, mlen);
        cp += mlen;
        len -= mlen;
        totlen += mlen + off;
        if (len == 0)
            break;
        off = 0;

        if (m->m_next == NULL) {
            if ((n = m_get(wait, m->m_type)) == NULL) {
                error = ENOBUFS;
                goto out;
            }

            if (len > MLEN) {
                MCLGETI(n, wait, NULL, len);
                if (!(n->m_flags & M_EXT)) {
                    m_free(n);
                    error = ENOBUFS;
                    goto out;
                }
            }

            n->m_len = len;
            m->m_next = n;
        }
        m = m->m_next;
    }
out:
    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
        m->m_pkthdr.len = totlen;

    return (error);
}