/*
 * Routine to copy from device local memory into mbufs.
 * Note that `off' argument is offset into first mbuf of target chain from
 * which to begin copying the data to.
 */
struct mbuf *
m_devget(char *buf, int totlen, int off, struct ifnet *ifp,
    void (*copy)(char *from, caddr_t to, u_int len))
{
	struct mbuf *m;
	struct mbuf *top = NULL, **mp = &top;
	int len;

	if (off < 0 || off > MHLEN)
		return (NULL);
	while (totlen > 0) {
		if (top == NULL) {	/* First one, must be PKTHDR */
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
				len = MCLBYTES;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
				len = MHLEN;

				/* Place initial small packet/header at end of mbuf */
				if (m && totlen + off + max_linkhdr <= MLEN) {
					m->m_data += max_linkhdr;
					len -= max_linkhdr;
				}
			}
			if (m == NULL)
				return NULL;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = totlen;
		} else {
			if (totlen + off >= MINCLSIZE) {
				m = m_getcl(M_NOWAIT, MT_DATA, 0);
				len = MCLBYTES;
			} else {
				m = m_get(M_NOWAIT, MT_DATA);
				len = MLEN;
			}
			if (m == NULL) {
				m_freem(top);
				return NULL;
			}
		}
		if (off) {
			m->m_data += off;
			len -= off;
			off = 0;
		}
		m->m_len = len = min(totlen, len);
		if (copy)
			copy(buf, mtod(m, caddr_t), (u_int)len);
		else
			bcopy(buf, mtod(m, caddr_t), (u_int)len);
		buf += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
	return (top);
}
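/*
 * Usage sketch (not from the sources above; "example_softc" and its
 * members are hypothetical names): a polled-RX driver copying one frame
 * out of device-local memory with m_devget() and feeding it to the
 * stack.  Passing copy == NULL makes m_devget() fall back to bcopy().
 */
static void
example_rx_frame(struct example_softc *sc, char *devmem, int frame_len)
{
	struct mbuf *m;

	/* off = 0: no leading gap in the first mbuf */
	m = m_devget(devmem, frame_len, 0, sc->sc_ifp, NULL);
	if (m == NULL)
		return;		/* allocation failed; frame is dropped */
	(*sc->sc_ifp->if_input)(sc->sc_ifp, m);
}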
#ifndef VBOX
struct mbuf *
m_megapullup(struct mbuf *m, int len)
#else
struct mbuf *
m_megapullup(PNATState pData, struct mbuf *m, int len)
#endif
{
	struct mbuf *mcl;

	if (len > m->m_pkthdr.len)
		goto bad;

	/*
	 * Do not reallocate packet if it is sequential,
	 * writable and has some extra space for expansion.
	 * XXX: Constant of 100 bytes is completely empirical.
	 */
#define	RESERVE 100
	if (m->m_next == NULL && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= RESERVE)
		return (m);

	if (len <= MCLBYTES - RESERVE) {
#ifndef VBOX
		mcl = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
#else
		mcl = m_getcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR);
#endif
	} else if (len < MJUM16BYTES) {
		int size;
		if (len <= MJUMPAGESIZE - RESERVE) {
			size = MJUMPAGESIZE;
		} else if (len <= MJUM9BYTES - RESERVE) {
			size = MJUM9BYTES;
		} else {
			size = MJUM16BYTES;
		}
#ifndef VBOX
		mcl = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
#else
		mcl = m_getjcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR, size);
#endif
	} else {
		goto bad;
	}

	if (mcl == NULL)
		goto bad;

	m_move_pkthdr(mcl, m);
	m_copydata(m, 0, len, mtod(mcl, caddr_t));
	mcl->m_len = mcl->m_pkthdr.len = len;
#ifndef VBOX
	m_freem(m);
#else
	m_freem(pData, m);
#endif
	return (mcl);
bad:
#ifndef VBOX
	m_freem(m);
#else
	m_freem(pData, m);
#endif
	return (NULL);
}
/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE((u_int)size) > MLEN)
		m = m_getcl(M_DONTWAIT, MT_CONTROL, 0);
	else
		m = m_get(M_DONTWAIT, MT_CONTROL);
	if (m == NULL)
		return ((struct mbuf *) NULL);
	cp = mtod(m, struct cmsghdr *);
	m->m_len = 0;
	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
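/*
 * Usage sketch (hypothetical caller, mirroring the common in-kernel
 * pattern; not taken from the sources above): attach a receive timestamp
 * as SCM_TIMESTAMP control data before appending a datagram to the
 * socket's receive buffer.  Locking is omitted for brevity.
 */
static void
example_append_with_timestamp(struct socket *so, struct mbuf *m,
    struct sockaddr *from)
{
	struct timeval tv;
	struct mbuf *control;

	microtime(&tv);
	control = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
	    SCM_TIMESTAMP, SOL_SOCKET);
	/* control may be NULL on allocation failure; sbappendaddr() accepts that */
	if (sbappendaddr(&so->so_rcv, from, m, control) == 0) {
		m_freem(m);
		if (control != NULL)
			m_freem(control);
	}
}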
/*
 * m_getjcl() returns an mbuf with a cluster of the specified size attached.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, int size)
{
	struct mb_args args;
	struct mbuf *m, *n;
	uma_zone_t zone;

	if (size == MCLBYTES)
		return m_getcl(how, type, flags);

	args.flags = flags;
	args.type = type;

	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m == NULL)
		return (NULL);

	zone = m_getzone(size);
	n = uma_zalloc_arg(zone, m, how);
	if (n == NULL) {
		uma_zfree(zone_mbuf, m);
		return (NULL);
	}
	return (m);
}
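/*
 * Usage sketch (hypothetical; not from the sources above): request a 9k
 * jumbo cluster for a large receive buffer.  m_getjcl() collapses to
 * m_getcl() when MCLBYTES is requested, so callers can pass any of the
 * four supported sizes unconditionally.
 */
static struct mbuf *
example_alloc_jumbo(void)
{
	struct mbuf *m;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	return (m);
}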
/*
 * Convert a vbox scatter-gather data structure into an mbuf chain.
 */
static struct mbuf *
vboxNetFltFreeBSDSGMBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG)
{
	struct mbuf *m;
	int error;
	unsigned int i;

	if (pSG->cbTotal == 0)
		return (NULL);

	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);

	m->m_pkthdr.len = m->m_len = 0;
	m->m_pkthdr.rcvif = NULL;

	for (i = 0; i < pSG->cSegsUsed; i++) {
		error = m_append(m, pSG->aSegs[i].cb, pSG->aSegs[i].pv);
		/* m_append() returns 1 on success and 0 on failure. */
		if (error == 0) {
			m_freem(m);
			return (NULL);
		}
	}
	return (m);
}
/*
 * Allocate and setup a management frame of the specified
 * size.  We return the mbuf and a pointer to the start
 * of the contiguous data area that's been reserved based
 * on the packet length.  The data area is forced to 32-bit
 * alignment and the buffer length to a multiple of 4 bytes.
 * This is done mainly so beacon frames (that require this)
 * can use this interface too.
 */
struct mbuf *
ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
{
	struct mbuf *m;
	u_int len;

	/*
	 * NB: we know the mbuf routines will align the data area
	 * so we don't need to do anything special.
	 */
	len = roundup2(headroom + pktlen, 4);
	KASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
	if (len < MINCLSIZE) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		/*
		 * Align the data in case additional headers are added.
		 * This should only happen when a WEP header is added
		 * which only happens for shared key authentication mgt
		 * frames which all fit in MHLEN.
		 */
		if (m != NULL)
			MH_ALIGN(m, len);
	} else {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL)
			MC_ALIGN(m, len);
	}
	if (m != NULL) {
		m->m_data += headroom;
		*frm = m->m_data;
	}
	return m;
}
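/*
 * Usage sketch (hypothetical; payload layout abbreviated): build a
 * minimal management frame body.  headroom leaves space for the 802.11
 * header that is prepended later; *frm points at the reserved payload
 * area, and the caller sets the final lengths once the body is written.
 */
static struct mbuf *
example_build_mgt(int headroom)
{
	struct mbuf *m;
	uint8_t *frm;

	m = ieee80211_getmgtframe(&frm, headroom, 2 + 2 + 2);
	if (m == NULL)
		return (NULL);
	/* capability(2) + status(2) + association ID(2), all zero here */
	memset(frm, 0, 6);
	frm += 6;
	m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
	return (m);
}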
/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE((u_int)size) > MLEN)
		m = m_getcl(M_NOWAIT, MT_CONTROL, 0);
	else
		m = m_get(M_NOWAIT, MT_CONTROL);
	if (m == NULL)
		return ((struct mbuf *) NULL);
	cp = mtod(m, struct cmsghdr *);
	m->m_len = 0;
	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	/*
	 * Don't leave the padding between the msg header and the
	 * cmsg data and the padding after the cmsg data un-initialized.
	 */
	bzero(cp, CMSG_SPACE((u_int)size));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
static status_t
compat_write(void *cookie, off_t position, const void *buffer,
	size_t *numBytes)
{
	struct ifnet *ifp = cookie;
	struct mbuf *mb;

	//if_printf(ifp, "compat_write(%lld, %p, [%lu])\n", position,
	//	buffer, *numBytes);

	if (*numBytes > MHLEN)
		mb = m_getcl(0, MT_DATA, M_PKTHDR);
	else
		mb = m_gethdr(0, MT_DATA);
	if (mb == NULL)
		return ENOBUFS;

	// if we waited, check after if the ifp is still valid

	mb->m_pkthdr.len = mb->m_len = min_c(*numBytes, (size_t)MCLBYTES);
	memcpy(mtod(mb, void *), buffer, mb->m_len);

	return ifp->if_output(ifp, mb, NULL, NULL);
}
/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;	/* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}
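/*
 * Usage sketch (hypothetical): allocate a chain large enough for "len"
 * bytes in one call, then fill it with m_copyback(), which walks the
 * chain and updates m_pkthdr.len.  m_getm2() picks jumbo pages, clusters
 * or plain mbufs as appropriate for each link.
 */
static struct mbuf *
example_chain_from_buf(const char *buf, int len)
{
	struct mbuf *m;

	m = m_getm2(NULL, len, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);
	m_copyback(m, 0, len, buf);
	return (m);
}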
static struct mbuf *
ffec_alloc_mbufcl(struct ffec_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}
static struct mbuf *
awg_alloc_mbufcl(struct awg_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}
static void
gx_rx_intr(void *arg)
{
	struct gx_softc *sc = arg;

	GXEMUL_ETHER_LOCK(sc);
	for (;;) {
		uint64_t status, length;
		struct mbuf *m;

		/*
		 * XXX
		 * Limit number of packets received at once?
		 */
		status = GXEMUL_ETHER_DEV_READ(GXEMUL_ETHER_DEV_STATUS);
		if (status == GXEMUL_ETHER_DEV_STATUS_RX_MORE) {
			GXEMUL_ETHER_DEV_WRITE(GXEMUL_ETHER_DEV_COMMAND,
			    GXEMUL_ETHER_DEV_COMMAND_RX);
			continue;
		}
		if (status != GXEMUL_ETHER_DEV_STATUS_RX_OK)
			break;
		length = GXEMUL_ETHER_DEV_READ(GXEMUL_ETHER_DEV_LENGTH);
		if (length > MCLBYTES - ETHER_ALIGN) {
			if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
			continue;
		}
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->sc_dev,
			    "no memory for receive mbuf.\n");
			if_inc_counter(sc->sc_ifp, IFCOUNTER_IQDROPS, 1);
			GXEMUL_ETHER_UNLOCK(sc);
			return;
		}

		/* Align incoming frame so IP headers are aligned. */
		m->m_data += ETHER_ALIGN;
		memcpy(m->m_data,
		    (const void *)(uintptr_t)GXEMUL_ETHER_DEV_FUNCTION(GXEMUL_ETHER_DEV_BUFFER),
		    length);
		m->m_pkthdr.rcvif = sc->sc_ifp;
		m->m_pkthdr.len = m->m_len = length;
		if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);

		GXEMUL_ETHER_UNLOCK(sc);
		(*sc->sc_ifp->if_input)(sc->sc_ifp, m);
		GXEMUL_ETHER_LOCK(sc);
	}
	GXEMUL_ETHER_UNLOCK(sc);
}
struct mbuf *
uether_newbuf(void)
{
	struct mbuf *m_new;

	m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return (NULL);
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	m_adj(m_new, ETHER_ALIGN);
	return (m_new);
}
static inline struct mbuf *
netmap_get_mbuf(int len)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m) {
		m->m_flags |= M_NOFREE;	/* XXXNP: Almost certainly incorrect. */
		m->m_ext.ext_arg1 = m->m_ext.ext_buf;	// XXX save
		m->m_ext.ext_free = (void *)netmap_default_mbuf_destructor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, GET_MBUF_REFCNT(m));
	}
	return m;
}
static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR | M_NOFREE);
	if (m) {
		m->m_ext.ext_arg1 = m->m_ext.ext_buf;	// XXX save
		m->m_ext.ext_free = (void *)netmap_default_mbuf_destructor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
	}
	return m;
}
static void
octm_rx_intr(void *arg)
{
	struct octm_softc *sc = arg;
	cvmx_mixx_isr_t mixx_isr;
	int len;

	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(sc->sc_port));
	if (!mixx_isr.s.irthresh) {
		device_printf(sc->sc_dev, "stray interrupt.\n");
		return;
	}

	for (;;) {
		struct mbuf *m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->sc_dev,
			    "no memory for receive mbuf.\n");
			return;
		}

		len = cvmx_mgmt_port_receive(sc->sc_port, MCLBYTES, m->m_data);
		if (len > 0) {
			m->m_pkthdr.rcvif = sc->sc_ifp;
			m->m_pkthdr.len = m->m_len = len;

			sc->sc_ifp->if_ipackets++;

			(*sc->sc_ifp->if_input)(sc->sc_ifp, m);
			continue;
		}
		m_freem(m);

		if (len == 0)
			break;

		sc->sc_ifp->if_ierrors++;
	}

	/* Acknowledge interrupts. */
	cvmx_write_csr(CVMX_MIXX_ISR(sc->sc_port), mixx_isr.u64);
	cvmx_read_csr(CVMX_MIXX_ISR(sc->sc_port));
}
/*
 * Allocate a new receive mbuf.
 */
static int
lgue_newbuf(struct lgue_softc *sc, int len, struct mbuf **m_buf)
{
	struct ifnet *ifp;

	ifp = &sc->lgue_arpcom.ac_if;
	*m_buf = NULL;

	/* Allocate mbuf (M_PKTHDR, since this heads a packet) */
	*m_buf = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (*m_buf == NULL) {
		if_printf(ifp, "no memory for rx buffer --- packet dropped!\n");
		return (ENOBUFS);
	}
	(*m_buf)->m_len = (*m_buf)->m_pkthdr.len = MCLBYTES;
	m_adj(*m_buf, ETHER_ALIGN);
	return (0);
}
static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	struct mbuf *m;

	(void)ifp;
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m) {
		/* m_getcl() (mb_ctor_mbuf) has an assert that checks that
		 * M_NOFREE flag is not specified as third argument,
		 * so we have to set M_NOFREE after m_getcl(). */
		m->m_flags |= M_NOFREE;
		m->m_ext.ext_arg1 = m->m_ext.ext_buf;	// XXX save
		m->m_ext.ext_free = (void *)netmap_default_mbuf_destructor;
		m->m_ext.ext_type = EXT_EXTREF;
		ND(5, "create m %p refcnt %d", m, MBUF_REFCNT(m));
	}
	return m;
}
/**
 * Fill the supplied hardware pool with mbufs
 *
 * @param pool     Pool to allocate an mbuf for
 * @param size     Size of the buffer needed for the pool
 * @param elements Number of buffers to allocate
 */
int
cvm_oct_mem_fill_fpa(int pool, int size, int elements)
{
	int freed = elements;

	while (freed) {
		KASSERT(size <= MCLBYTES - 128,
		    ("mbuf clusters are too small"));

		struct mbuf *m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(m == NULL)) {
			printf("Failed to allocate mbuf for hardware pool %d\n",
			    pool);
			break;
		}

		m->m_data += 128 - (((uintptr_t)m->m_data) & 0x7f);
		*(struct mbuf **)(m->m_data - sizeof(void *)) = m;
		cvmx_fpa_free(m->m_data, pool, DONT_WRITEBACK(size / 128));
		freed--;
	}
	return (elements - freed);
}
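/*
 * Usage sketch (hypothetical count; assumes the SDK's usual
 * CVMX_FPA_PACKET_POOL constant): prime an FPA packet pool at attach
 * time and report a partial fill.  The 128-byte slack enforced by the
 * KASSERT above leaves room for the per-buffer alignment performed in
 * the loop.
 */
static void
example_fill_pool(void)
{
	if (cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, MCLBYTES - 128,
	    1024) < 1024)
		printf("cvm_oct: packet pool only partially filled\n");
}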
/*
 * newnfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 */
int
newnfs_realign(struct mbuf **pm, int how)
{
	struct mbuf *m, *n;
	int off, space;

	++nfs_realign_test;
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			/*
			 * NB: we can't depend on m_pkthdr.len to help us
			 * decide what to do here.  May not be worth doing
			 * the m_length calculation as m_copyback will
			 * expand the mbuf chain below as needed.
			 */
			space = m_length(m, NULL);
			if (space >= MINCLSIZE) {
				/* NB: m_copyback handles space > MCLBYTES */
				n = m_getcl(how, MT_DATA, 0);
			} else
				n = m_get(how, MT_DATA);
			if (n == NULL)
				return (ENOMEM);
			/*
			 * Align the remainder of the mbuf chain.
			 */
			n->m_len = 0;
			off = 0;
			while (m != NULL) {
				m_copyback(n, off, m->m_len, mtod(m, caddr_t));
				off += m->m_len;
				m = m->m_next;
			}
			m_freem(*pm);
			*pm = n;
			++nfs_realign_count;
			break;
		}
		pm = &m->m_next;
	}
	return (0);
}
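/*
 * Usage sketch (hypothetical caller; not from the sources above): force
 * 32-bit alignment of a received RPC reply before reading XDR words in
 * place.  On ENOMEM the reply must be treated as lost.
 */
static int
example_parse_reply(struct mbuf **mrep)
{
	int error;

	error = newnfs_realign(mrep, M_WAITOK);
	if (error != 0)
		return (error);	/* out of mbufs; caller drops the reply */
	/* ... XDR parsing of *mrep would follow here ... */
	return (0);
}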
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
kr_newbuf(struct kr_softc *sc, int idx)
{
	struct kr_desc *desc;
	struct kr_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_rx_tag,
	    sc->kr_cdata.kr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->kr_cdata.kr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->kr_cdata.kr_rx_sparemap;
	sc->kr_cdata.kr_rx_sparemap = map;
	bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->kr_ca = segs[0].ds_addr;
	desc->kr_ctl |= KR_DMASIZE(segs[0].ds_len);
	rxd->saved_ca = desc->kr_ca;
	rxd->saved_ctl = desc->kr_ctl;

	return (0);
}
static void
uhso_ifnet_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct mbuf *m;
	struct usb_page_cache *pc;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	UHSO_DPRINTF(3, "status=%d, actlen=%d\n", USB_GET_STATE(xfer), actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		if (actlen > 0 && (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			pc = usbd_xfer_get_frame(xfer, 0);
			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
			if (m != NULL) {
				usbd_copy_out(pc, 0, mtod(m, uint8_t *), actlen);
				m->m_pkthdr.len = m->m_len = actlen;

				/* Enqueue frame for further processing */
				_IF_ENQUEUE(&sc->sc_rxq, m);
				if (!callout_pending(&sc->sc_c) ||
				    !callout_active(&sc->sc_c)) {
					callout_schedule(&sc->sc_c, 1);
				}
			}
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
struct mbuf*
ieee80211_getmgtframe(uint8_t** frm, int headroom, int pktlen)
{
	struct mbuf* m;
	u_int len;

	len = roundup2(headroom + pktlen, 4);
	KASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));

	if (len < MINCLSIZE) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m != NULL)
			MH_ALIGN(m, len);
	} else {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL)
			MC_ALIGN(m, len);
	}

	if (m != NULL) {
		m->m_data += headroom;
		*frm = (uint8_t*)m->m_data;
	}

	return m;
}
/* Async. stream output */
static void
fwe_as_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	struct ifnet *ifp;
	struct fwe_softc *fwe;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	u_char *c;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	struct ether_header *eh;
#endif

	fwe = (struct fwe_softc *)xferq->sc;
	ifp = fwe->eth_softc.ifp;

	/* We do not need a lock here because the bottom half is serialized */
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		if (fwe->fd.fc->irx_post != NULL)
			fwe->fd.fc->irx_post(fwe->fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			printf("%s: m_getcl failed\n", __FUNCTION__);

		if (sxfer->resp != 0 || fp->mode.stream.len <
		    ETHER_ALIGN + sizeof(struct ether_header)) {
			m_freem(m);
			ifp->if_ierrors++;
			continue;
		}

		m->m_data += HDR_LEN + ETHER_ALIGN;
		c = mtod(m, u_char *);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
		eh = (struct ether_header *)c;
		m->m_data += sizeof(struct ether_header);
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len -
		    ETHER_ALIGN - sizeof(struct ether_header);
#else
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN;
#endif
		m->m_pkthdr.rcvif = ifp;
#if 0
		FWEDEBUG(ifp, "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n"
		    "%02x %02x %02x %02x\n",
		    c[0], c[1], c[2], c[3], c[4], c[5],
		    c[6], c[7], c[8], c[9], c[10], c[11],
		    c[12], c[13], c[14], c[15],
		    c[16], c[17], c[18], c[19],
		    c[20], c[21], c[22], c[23],
		    c[20], c[21], c[22], c[23]
		);
#endif
#if defined(__DragonFly__) || __FreeBSD_version < 500000
		ether_input(ifp, eh, m);
#else
		(*ifp->if_input)(ifp, m);
#endif
		ifp->if_ipackets++;
	}
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch);
}
static void
fwe_init(void *arg)
{
	struct fwe_softc *fwe = ((struct fwe_eth_softc *)arg)->fwe;
	struct firewire_comm *fc;
	struct ifnet *ifp = fwe->eth_softc.ifp;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWEDEBUG(ifp, "initializing\n");

	/* XXX keep promiscuous mode */
	ifp->if_flags |= IFF_PROMISC;

	fc = fwe->fd.fc;
	if (fwe->dma_ch < 0) {
		fwe->dma_ch = fw_open_isodma(fc, /* tx */0);
		if (fwe->dma_ch < 0)
			return;
		xferq = fc->ir[fwe->dma_ch];
		xferq->flag |= FWXFERQ_EXTBUF |
		    FWXFERQ_HANDLER | FWXFERQ_STREAM;
		fwe->stream_ch = stream_ch;
		fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;
		xferq->flag &= ~0xff;
		xferq->flag |= fwe->stream_ch & 0xff;
		/* register fwe_input handler */
		xferq->sc = (caddr_t) fwe;
		xferq->hand = fwe_as_input;
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
		    sizeof(struct fw_bulkxfer) * xferq->bnchunk,
		    M_FWE, M_WAITOK);
		if (xferq->bulkxfer == NULL) {
			printf("if_fwe: malloc failed\n");
			return;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree,
			    &xferq->bulkxfer[i], link);
		}
		STAILQ_INIT(&fwe->xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FWE);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = fwe->fd.fc;
			xfer->sc = (caddr_t)fwe;
			xfer->hand = fwe_output_callback;
			STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[fwe->dma_ch];

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, fwe->dma_ch);

#if defined(__FreeBSD__)
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
#else
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
#endif
#if 0
	/* attempt to start output */
	fwe_start(ifp);
#endif
}
static void
usie_if_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usie_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m0;
	struct mbuf *m = NULL;
	struct usie_desc *rxd;
	uint32_t actlen;
	uint16_t err;
	uint16_t pkt;
	uint16_t ipl;
	uint16_t len;
	uint16_t diff;
	uint8_t pad;
	uint8_t ipv;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(15, "rx done, actlen=%u\n", actlen);

		if (actlen < sizeof(struct usie_hip)) {
			DPRINTF("data too short %u\n", actlen);
			goto tr_setup;
		}
		m = sc->sc_rxm;
		sc->sc_rxm = NULL;

		/* fall through */
	case USB_ST_SETUP:
tr_setup:

		if (sc->sc_rxm == NULL) {
			sc->sc_rxm = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
			    MJUMPAGESIZE /* could be bigger than MCLBYTES */);
		}
		if (sc->sc_rxm == NULL) {
			DPRINTF("could not allocate Rx mbuf\n");
			ifp->if_ierrors++;
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
		} else {
			/*
			 * Directly loading a mbuf cluster into DMA to
			 * save some data copying. This works because
			 * there is only one cluster.
			 */
			usbd_xfer_set_frame_data(xfer, 0,
			    mtod(sc->sc_rxm, caddr_t),
			    MIN(MJUMPAGESIZE, USIE_RXSZ_MAX));
			usbd_xfer_set_frames(xfer, 1);
		}
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			ifp->if_ierrors++;
			goto tr_setup;
		}
		if (sc->sc_rxm != NULL) {
			m_freem(sc->sc_rxm);
			sc->sc_rxm = NULL;
		}
		break;
	}

	if (m == NULL)
		return;

	mtx_unlock(&sc->sc_mtx);

	m->m_pkthdr.len = m->m_len = actlen;

	err = pkt = 0;

	/* HW can aggregate multiple frames in a single USB xfer */
	for (;;) {
		rxd = mtod(m, struct usie_desc *);

		len = be16toh(rxd->hip.len) & USIE_HIP_IP_LEN_MASK;
		pad = (rxd->hip.id & USIE_HIP_PAD) ? 1 : 0;
		ipl = (len - pad - ETHER_HDR_LEN);
		if (ipl >= len) {
			DPRINTF("Corrupt frame\n");
			m_freem(m);
			break;
		}
		diff = sizeof(struct usie_desc) + ipl + pad;

		if (((rxd->hip.id & USIE_HIP_MASK) != USIE_HIP_IP) ||
		    (be16toh(rxd->desc_type) & USIE_TYPE_MASK) != USIE_IP_RX) {
			DPRINTF("received wrong type of packet\n");
			m->m_data += diff;
			m->m_pkthdr.len = (m->m_len -= diff);
			err++;
			if (m->m_pkthdr.len > 0)
				continue;
			m_freem(m);
			break;
		}
		switch (be16toh(rxd->ethhdr.ether_type)) {
		case ETHERTYPE_IP:
			ipv = NETISR_IP;
			break;
#ifdef INET6
		case ETHERTYPE_IPV6:
			ipv = NETISR_IPV6;
			break;
#endif
		default:
			DPRINTF("unsupported ether type\n");
			err++;
			break;
		}

		/* the last packet */
		if (m->m_pkthdr.len <= diff) {
			m->m_data += (sizeof(struct usie_desc) + pad);
			m->m_pkthdr.len = m->m_len = ipl;
			m->m_pkthdr.rcvif = ifp;
			BPF_MTAP(sc->sc_ifp, m);
			netisr_dispatch(ipv, m);
			break;
		}

		/* copy aggregated frames to another mbuf */
		m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(m0 == NULL)) {
			DPRINTF("could not allocate mbuf\n");
			err++;
			m_freem(m);
			break;
		}
		m_copydata(m, sizeof(struct usie_desc) + pad, ipl,
		    mtod(m0, caddr_t));
		m0->m_pkthdr.rcvif = ifp;
		m0->m_pkthdr.len = m0->m_len = ipl;

		BPF_MTAP(sc->sc_ifp, m0);
		netisr_dispatch(ipv, m0);

		m->m_data += diff;
		m->m_pkthdr.len = (m->m_len -= diff);
	}

	mtx_lock(&sc->sc_mtx);

	ifp->if_ierrors += err;
	ifp->if_ipackets += pkt;
}
static void
usie_cns_req(struct usie_softc *sc, uint32_t id, uint16_t obj)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	struct usb_xfer *xfer;
	struct usie_hip *hip;
	struct usie_cns *cns;
	uint8_t *param;
	uint8_t *tmp;
	uint8_t cns_len;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (__predict_false(m == NULL)) {
		DPRINTF("could not allocate mbuf\n");
		ifp->if_ierrors++;
		return;
	}
	/* to align usie_hip{} on 32 bit */
	m->m_data += 3;
	param = mtod(m, uint8_t *);
	*param++ = USIE_HIP_FRM_CHR;
	hip = (struct usie_hip *)param;
	cns = (struct usie_cns *)(hip + 1);

	tmp = param + USIE_HIPCNS_MIN - 2;

	switch (obj) {
	case USIE_CNS_OB_LINK_UPDATE:
		cns_len = 2;
		cns->op = USIE_CNS_OP_SET;
		*tmp++ = 1;	/* profile ID, always use 1 for now */
		*tmp++ = id == USIE_CNS_ID_INIT ? 1 : 0;
		break;

	case USIE_CNS_OB_PROF_WRITE:
		cns_len = 245;
		cns->op = USIE_CNS_OP_SET;
		*tmp++ = 1;	/* profile ID, always use 1 for now */
		*tmp++ = 2;
		memcpy(tmp, &sc->sc_net, 34);
		memset(tmp + 35, 0, 245 - 36);
		tmp += 243;
		break;

	case USIE_CNS_OB_RSSI:
		cns_len = 0;
		cns->op = USIE_CNS_OP_REQ;
		break;

	default:
		DPRINTF("unsupported CnS object type\n");
		m_freem(m);	/* don't leak the mbuf on the error path */
		return;
	}
	*tmp = USIE_HIP_FRM_CHR;

	hip->len = htobe16(sizeof(struct usie_cns) + cns_len);
	hip->id = USIE_HIP_CNS2M;
	hip->param = 0;		/* none for CnS */

	cns->obj = htobe16(obj);
	cns->id = htobe32(id);
	cns->len = cns_len;
	cns->rsv0 = cns->rsv1 = 0;	/* always '0' */

	param = (uint8_t *)(cns + 1);
	DPRINTF("param: %16D\n", param, ":");

	m->m_pkthdr.len = m->m_len = USIE_HIPCNS_MIN + cns_len + 2;

	xfer = sc->sc_uc_xfer[USIE_HIP_IF][USIE_UC_TX];

	if (usbd_xfer_get_priv(xfer) == NULL) {
		usbd_xfer_set_priv(xfer, m);
		usbd_transfer_start(xfer);
	} else {
		DPRINTF("Dropped CNS event\n");
		m_freem(m);
	}
}
/*
 * Output a Neighbor Solicitation Message. Caller specifies:
 *	- ICMP6 header source IP6 address
 *	- ND6 header target IP6 address
 *	- ND6 header source datalink address
 *
 * Based on RFC 2461
 * Based on RFC 2462 (duplicate address detection)
 *
 *    ln - for source address determination
 * nonce - If non-NULL, NS is used for duplicate address detection and
 *         the value (length is ND_OPT_NONCE_LEN) is used as a random nonce.
 */
static void
nd6_ns_output_fib(struct ifnet *ifp, const struct in6_addr *daddr6,
    const struct in6_addr *taddr6, struct llentry *ln, uint8_t *nonce,
    u_int fibnum)
{
	struct mbuf *m;
	struct m_tag *mtag;
	struct ip6_hdr *ip6;
	struct nd_neighbor_solicit *nd_ns;
	struct ip6_moptions im6o;
	int icmp6len;
	int maxlen;
	caddr_t mac;
	struct route_in6 ro;

	if (IN6_IS_ADDR_MULTICAST(taddr6))
		return;

	/* estimate the size of message */
	maxlen = sizeof(*ip6) + sizeof(*nd_ns);
	maxlen += (sizeof(struct nd_opt_hdr) + ifp->if_addrlen + 7) & ~7;
	KASSERT(max_linkhdr + maxlen <= MCLBYTES, (
	    "%s: max_linkhdr + maxlen > MCLBYTES (%d + %d > %d)",
	    __func__, max_linkhdr, maxlen, MCLBYTES));

	if (max_linkhdr + maxlen > MHLEN)
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	else
		m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return;
	M_SETFIB(m, fibnum);

	bzero(&ro, sizeof(ro));

	if (daddr6 == NULL || IN6_IS_ADDR_MULTICAST(daddr6)) {
		m->m_flags |= M_MCAST;
		im6o.im6o_multicast_ifp = ifp;
		im6o.im6o_multicast_hlim = 255;
		im6o.im6o_multicast_loop = 0;
	}

	icmp6len = sizeof(*nd_ns);
	m->m_pkthdr.len = m->m_len = sizeof(*ip6) + icmp6len;
	m->m_data += max_linkhdr;	/* or MH_ALIGN() equivalent? */

	/* fill neighbor solicitation packet */
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	/* ip6->ip6_plen will be set later */
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	ip6->ip6_hlim = 255;
	if (daddr6)
		ip6->ip6_dst = *daddr6;
	else {
		ip6->ip6_dst.s6_addr16[0] = IPV6_ADDR_INT16_MLL;
		ip6->ip6_dst.s6_addr16[1] = 0;
		ip6->ip6_dst.s6_addr32[1] = 0;
		ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_ONE;
		ip6->ip6_dst.s6_addr32[3] = taddr6->s6_addr32[3];
		ip6->ip6_dst.s6_addr8[12] = 0xff;
		if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0)
			goto bad;
	}
	if (nonce == NULL) {
		struct ifaddr *ifa;

		/*
		 * RFC2461 7.2.2:
		 * "If the source address of the packet prompting the
		 * solicitation is the same as one of the addresses assigned
		 * to the outgoing interface, that address SHOULD be placed
		 * in the IP Source Address of the outgoing solicitation.
		 * Otherwise, any one of the addresses assigned to the
		 * interface should be used."
		 *
		 * We use the source address for the prompting packet
		 * (saddr6), if:
		 * - saddr6 is given from the caller (by giving "ln"), and
		 * - saddr6 belongs to the outgoing interface.
		 * Otherwise, we perform the source address selection as usual.
		 */
		struct in6_addr *hsrc;

		hsrc = NULL;
		if (ln != NULL) {
			LLE_RLOCK(ln);
			if (ln->la_hold != NULL) {
				struct ip6_hdr *hip6;	/* hold ip6 */

				/*
				 * assuming every packet in la_hold has the
				 * same IP header
				 */
				hip6 = mtod(ln->la_hold, struct ip6_hdr *);
				/* XXX pullup? */
				if (sizeof(*hip6) < ln->la_hold->m_len) {
					ip6->ip6_src = hip6->ip6_src;
					hsrc = &hip6->ip6_src;
				}
			}
			LLE_RUNLOCK(ln);
		}
		if (hsrc && (ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp,
		    hsrc)) != NULL) {
			/* ip6_src set already. */
			ifa_free(ifa);
		} else {
			int error;
			struct sockaddr_in6 dst_sa;
			struct in6_addr src_in;
			struct ifnet *oifp;

			bzero(&dst_sa, sizeof(dst_sa));
			dst_sa.sin6_family = AF_INET6;
			dst_sa.sin6_len = sizeof(dst_sa);
			dst_sa.sin6_addr = ip6->ip6_dst;

			oifp = ifp;
			error = in6_selectsrc(&dst_sa, NULL, NULL, &ro, NULL,
			    &oifp, &src_in);
			if (error) {
				char ip6buf[INET6_ADDRSTRLEN];
				nd6log((LOG_DEBUG, "%s: source can't be "
				    "determined: dst=%s, error=%d\n", __func__,
				    ip6_sprintf(ip6buf, &dst_sa.sin6_addr),
				    error));
				goto bad;
			}
			ip6->ip6_src = src_in;
		}
	} else {
static void
cdce_ncm_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, 0);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct mbuf *m;
	int sumdata;
	int sumlen;
	int actlen;
	int aframes;
	int temp;
	int nframes;
	int x;
	int offset;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		usbd_xfer_status(xfer, &actlen, &sumlen, &aframes, NULL);

		DPRINTFN(1, "received %u bytes in %u frames\n",
		    actlen, aframes);

		if (actlen < (sizeof(sc->sc_ncm.hdr) +
		    sizeof(sc->sc_ncm.dpt))) {
			DPRINTFN(1, "frame too short\n");
			goto tr_setup;
		}
		usbd_copy_out(pc, 0, &(sc->sc_ncm.hdr),
		    sizeof(sc->sc_ncm.hdr));

		if ((sc->sc_ncm.hdr.dwSignature[0] != 'N') ||
		    (sc->sc_ncm.hdr.dwSignature[1] != 'C') ||
		    (sc->sc_ncm.hdr.dwSignature[2] != 'M') ||
		    (sc->sc_ncm.hdr.dwSignature[3] != 'H')) {
			DPRINTFN(1, "invalid HDR signature: "
			    "0x%02x:0x%02x:0x%02x:0x%02x\n",
			    sc->sc_ncm.hdr.dwSignature[0],
			    sc->sc_ncm.hdr.dwSignature[1],
			    sc->sc_ncm.hdr.dwSignature[2],
			    sc->sc_ncm.hdr.dwSignature[3]);
			goto tr_stall;
		}
		temp = UGETW(sc->sc_ncm.hdr.wBlockLength);
		if (temp > sumlen) {
			DPRINTFN(1, "unsupported block length %u/%u\n",
			    temp, sumlen);
			goto tr_stall;
		}
		temp = UGETW(sc->sc_ncm.hdr.wDptIndex);
		if ((temp + sizeof(sc->sc_ncm.dpt)) > actlen) {
			DPRINTFN(1, "invalid DPT index: 0x%04x\n", temp);
			goto tr_stall;
		}
		usbd_copy_out(pc, temp, &(sc->sc_ncm.dpt),
		    sizeof(sc->sc_ncm.dpt));

		if ((sc->sc_ncm.dpt.dwSignature[0] != 'N') ||
		    (sc->sc_ncm.dpt.dwSignature[1] != 'C') ||
		    (sc->sc_ncm.dpt.dwSignature[2] != 'M') ||
		    (sc->sc_ncm.dpt.dwSignature[3] != '0')) {
			DPRINTFN(1, "invalid DPT signature"
			    "0x%02x:0x%02x:0x%02x:0x%02x\n",
			    sc->sc_ncm.dpt.dwSignature[0],
			    sc->sc_ncm.dpt.dwSignature[1],
			    sc->sc_ncm.dpt.dwSignature[2],
			    sc->sc_ncm.dpt.dwSignature[3]);
			goto tr_stall;
		}
		nframes = UGETW(sc->sc_ncm.dpt.wLength) / 4;

		/* Subtract size of header and last zero padded entry */
		if (nframes >= (2 + 1))
			nframes -= (2 + 1);
		else
			nframes = 0;

		DPRINTFN(1, "nframes = %u\n", nframes);

		temp += sizeof(sc->sc_ncm.dpt);

		if ((temp + (4 * nframes)) > actlen)
			goto tr_stall;

		if (nframes > CDCE_NCM_SUBFRAMES_MAX) {
			DPRINTFN(1, "Truncating number of "
			    "frames from %u to %u\n",
			    nframes, CDCE_NCM_SUBFRAMES_MAX);
			nframes = CDCE_NCM_SUBFRAMES_MAX;
		}
		usbd_copy_out(pc, temp, &(sc->sc_ncm.dp), (4 * nframes));

		sumdata = 0;

		for (x = 0; x != nframes; x++) {
			offset = UGETW(sc->sc_ncm.dp[x].wFrameIndex);
			temp = UGETW(sc->sc_ncm.dp[x].wFrameLength);

			if ((offset == 0) ||
			    (temp < sizeof(struct ether_header)) ||
			    (temp > (MCLBYTES - ETHER_ALIGN))) {
				DPRINTFN(1, "NULL frame detected at %d\n", x);
				m = NULL;
				/* silently ignore this frame */
				continue;
			} else if ((offset + temp) > actlen) {
				DPRINTFN(1, "invalid frame "
				    "detected at %d\n", x);
				m = NULL;
				/* silently ignore this frame */
				continue;
			} else if (temp > (MHLEN - ETHER_ALIGN)) {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
			} else {
				m = m_gethdr(M_DONTWAIT, MT_DATA);
			}

			DPRINTFN(16, "frame %u, offset = %u, length = %u\n",
			    x, offset, temp);

			/* check if we have a buffer */
			if (m) {
				m_adj(m, ETHER_ALIGN);

				usbd_copy_out(pc, offset, m->m_data, temp);

				/* enqueue */
				uether_rxmbuf(&sc->sc_ue, m, temp);

				sumdata += temp;
			} else {
				ifp->if_ierrors++;
			}
		}

		DPRINTFN(1, "Efficiency: %u/%u bytes\n", sumdata, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, sc->sc_ncm.rx_max);
		usbd_xfer_set_frames(xfer, 1);
		usbd_transfer_submit(xfer);
		uether_rxflush(&sc->sc_ue);	/* must be last */
		break;

	default:			/* Error */
		DPRINTFN(1, "error = %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
tr_stall:
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
			usbd_transfer_submit(xfer);
		}
		break;
	}
}
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(const struct mbuf *m, int how)
{
	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	MBUF_CHECKSLEEP(how);
	/* Sanity check */
	if (m == NULL)
		return (NULL);
	M_ASSERTPKTHDR(m);

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	moff = 0;
	p = &top;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		struct mbuf *n;

		/* Get the next new mbuf */
		if (remain >= MINCLSIZE) {
			n = m_getcl(how, m->m_type, 0);
			nsize = MCLBYTES;
		} else {
			n = m_get(how, m->m_type);
			nsize = MLEN;
		}
		if (n == NULL)
			goto nospace;

		if (top == NULL) {	/* First one, must be PKTHDR */
			if (!m_dup_pkthdr(n, m, how)) {
				m_free(n);
				goto nospace;
			}
			if ((n->m_flags & M_EXT) == 0)
				nsize = MHLEN;
			n->m_flags &= ~M_RDONLY;
		}
		n->m_len = 0;

		/* Link it into the new chain */
		*p = n;
		p = &n->m_next;

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) ||
		    (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __func__));
	}
	return (top);

nospace:
	m_freem(top);
	return (NULL);
}
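/*
 * Usage sketch (hypothetical): m_dup() is the right call when a packet
 * must be modified in place but the original chain may reference shared,
 * read-only clusters (e.g. after m_copypacket()).  The duplicate is
 * fully writable; here the original is released once the copy succeeds.
 */
static struct mbuf *
example_writable_copy(struct mbuf *m)
{
	struct mbuf *n;

	n = m_dup(m, M_NOWAIT);
	if (n == NULL)
		return (NULL);	/* caller still owns m */
	m_freem(m);
	return (n);
}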