void * ff_mbuf_gethdr(void *pkt, uint16_t total, void *data, uint16_t len, uint8_t rx_csum) { struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { return NULL; } if (m_pkthdr_init(m, M_NOWAIT) != 0) { return NULL; } m_extadd(m, data, len, ff_mbuf_ext_free, pkt, NULL, 0, EXT_DISPOSABLE); m->m_pkthdr.len = total; m->m_len = len; m->m_next = NULL; m->m_nextpkt = NULL; if (rx_csum) { m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } return (void *)m; }
/*
 * Frame-queue RX callback for the dtsec interface: wraps one received
 * DPAA frame in an mbuf (zero-copy, external storage) and hands it to
 * the network stack.  Always returns e_RX_STORE_RESPONSE_CONTINUE so
 * the queue keeps delivering; on any error the hardware buffer is
 * returned to the bman pool instead of reaching the stack.
 */
static e_RxStoreResponse dtsec_rm_fqr_rx_callback(t_Handle app, t_Handle fqr,
    t_Handle portal, uint32_t fqid_off, t_DpaaFD *frame)
{
	struct dtsec_softc *sc;
	struct mbuf *m;

	m = NULL;
	sc = app;	/* 'app' is the softc registered with the queue */

	/* Only single-buffer short-format frames at offset 0 are handled. */
	KASSERT(DPAA_FD_GET_FORMAT(frame) == e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF,
	    ("%s(): Got unsupported frame format 0x%02X!", __func__,
	    DPAA_FD_GET_FORMAT(frame)));

	KASSERT(DPAA_FD_GET_OFFSET(frame) == 0,
	    ("%s(): Only offset 0 is supported!", __func__));

	/* Hardware-reported receive error: drop and recycle the buffer. */
	if (DPAA_FD_GET_STATUS(frame) != 0) {
		device_printf(sc->sc_dev, "RX error: 0x%08X\n",
		    DPAA_FD_GET_STATUS(frame));
		goto err;
	}

	/* NOTE(review): MT_HEADER is a legacy alias for MT_DATA. */
	m = m_gethdr(M_NOWAIT, MT_HEADER);
	if (m == NULL)
		goto err;

	/*
	 * Attach the DMA buffer as external storage; dtsec_rm_fqr_mext_free
	 * returns it to the pool when the stack frees the mbuf.
	 */
	m_extadd(m, DPAA_FD_GET_ADDR(frame), FM_PORT_BUFFER_SIZE,
	    dtsec_rm_fqr_mext_free, DPAA_FD_GET_ADDR(frame), sc, 0,
	    EXT_NET_DRV);

	m->m_pkthdr.rcvif = sc->sc_ifnet;
	m->m_len = DPAA_FD_GET_LENGTH(frame);
	m_fixhdr(m);	/* recompute pkthdr.len from the chain */

	(*sc->sc_ifnet->if_input)(sc->sc_ifnet, m);

	return (e_RX_STORE_RESPONSE_CONTINUE);

err:
	/* Give the frame's buffer back to the bman pool. */
	bman_put_buffer(sc->sc_rx_pool, DPAA_FD_GET_ADDR(frame));
	/*
	 * NOTE(review): in the visible code 'm' is always NULL on the error
	 * paths (both gotos occur before m_extadd succeeds), so this branch
	 * looks defensive/dead — confirm before relying on it, since freeing
	 * an ext-attached mbuf here would double-release the buffer.
	 */
	if (m != NULL)
		m_freem(m);

	return (e_RX_STORE_RESPONSE_CONTINUE);
}
/*
 * Allocate a packet-header mbuf carrying empty external storage with a
 * no-op destructor (void_mbuf_dtor).  The 'ifp' and 'len' arguments are
 * accepted for interface compatibility but intentionally unused.
 * Returns NULL when no mbuf is available.
 */
static inline struct mbuf *
nm_os_get_mbuf(struct ifnet *ifp, int len)
{
	(void)ifp;
	(void)len;

	struct mbuf *mhdr = m_gethdr(M_NOWAIT, MT_DATA);
	if (mhdr == NULL)
		return NULL;

	/* Zero-length external buffer; dtor is a stub, nothing to reclaim. */
	m_extadd(mhdr, NULL /* buf */, 0 /* size */, void_mbuf_dtor,
	    NULL, NULL, 0, EXT_NET_DRV);

	return mhdr;
}
/*
 * Allocate a plain (non-pkthdr) mbuf referencing 'data'/'len' as external
 * storage with no free routine, and optionally append it to the chain
 * tail 'm' (may be NULL for the first segment).
 *
 * Returns the new mbuf as void *, or NULL if allocation fails.
 */
void *
ff_mbuf_get(void *m, void *data, uint16_t len)
{
	struct mbuf *tail = (struct mbuf *)m;
	struct mbuf *seg;

	seg = m_get(M_NOWAIT, MT_DATA);
	if (seg == NULL)
		return NULL;

	/* No free routine/flags: the buffer's lifetime is managed by the caller. */
	m_extadd(seg, data, len, NULL, NULL, NULL, 0, 0);

	seg->m_next = NULL;
	seg->m_nextpkt = NULL;
	seg->m_len = len;

	if (tail != NULL)
		tail->m_next = seg;

	return (void *)seg;
}
/*
 * Receive-thread main loop for the netmap-backed interface: pulls packets
 * off the netmap RX ring, wraps them in mbufs (zero-copy when a bufinfo
 * slot is available, copy otherwise), and feeds them to if_input until the
 * thread is asked to stop.
 */
static void
if_netmap_receive(void *arg)
{
	struct if_netmap_softc *sc;
	struct ifnet *ifp;
	struct uhi_pollfd pfd;
	struct mbuf *m;
	struct if_netmap_bufinfo *bi;
	void *slotbuf;
	uint32_t slotindex;
	uint32_t pktlen;
	uint32_t cur;
	uint32_t avail;
	uint32_t reserved;
	uint32_t returned;
	uint32_t new_reserved;
	unsigned int n;
	int rv;
	int done;

	/* Zero-copy receive
	 *
	 * A packet header mbuf is allocated for each received netmap
	 * buffer, and the netmap buffer is attached to this mbuf as
	 * external storage, along with a free routine and piece of context
	 * that enables the free routine to move the netmap buffer on its
	 * way back to the receive ring.  The per-buffer context objects
	 * (struct if_netmap_bufinfo) are managed by this driver.
	 *
	 * When the mbuf layer calls the free routine for an mbuf-attached
	 * netmap buffer, its associated context object is added to a list
	 * that is part of the pool of those objects.  On each pass through
	 * the receive loop below, all of the context objects that have been
	 * returned to the list since the last pass are processed, and their
	 * associated netmap buffers are returned to the receive ring.
	 *
	 * With this approach, a given netmap buffer may be available for
	 * netmap's use on the ring, may be newly available for our
	 * consumption on the ring, may have been passed to the stack for
	 * processing and not yet returned, or may have been returned to us
	 * from the stack but not yet returned to the netmap ring.
	 */
	sc = (struct if_netmap_softc *)arg;
	ifp = sc->ifp;

	/* Pin the RX thread to the configured CPU, if any. */
	if (sc->cfg->cpu >= 0)
		sched_bind(sc->rx_thread.thr, sc->cfg->cpu);

	rv = if_netmap_rxsync(sc->nm_host_ctx, NULL, NULL, NULL);
	if (rv == -1)
		printf("could not sync rx descriptors before receive loop\n");

	/* Seed the reservation accounting from the ring's current state. */
	reserved = if_netmap_rxreserved(sc->nm_host_ctx);
	sc->hw_rx_rsvd_begin = if_netmap_rxcur(sc->nm_host_ctx);

	sc->rx_thread.last_stop_check = ticks;
	done = 0;
	for (;;) {
		/* Poll for work, checking for a stop request on each timeout. */
		while (!done && (0 == (avail = if_netmap_rxavail(sc->nm_host_ctx)))) {
			memset(&pfd, 0, sizeof pfd);
			pfd.fd = sc->fd;
			pfd.events = UHI_POLLIN;

			rv = uhi_poll(&pfd, 1, IF_NETMAP_THREAD_STOP_CHECK_MS);
			if (rv == 0) {
				done = if_netmap_stoppable_thread_check(&sc->rx_thread);
			} else if (rv == -1)
				printf("error from poll for receive\n");
		}

		/* Periodic stop check even while traffic keeps us busy. */
		if (ticks - sc->rx_thread.last_stop_check >= sc->stop_check_ticks) {
			done = if_netmap_stoppable_thread_check(&sc->rx_thread);
		}

		if (done)
			break;

		cur = if_netmap_rxcur(sc->nm_host_ctx);
		new_reserved = 0;
		for (n = 0; n < avail; n++) {
			slotbuf = if_netmap_rxslot(sc->nm_host_ctx, &cur, &pktlen, &slotindex);
			ifp->if_ipackets++;
			ifp->if_ibytes += pktlen;

			bi = if_netmap_bufinfo_alloc(&sc->rx_bufinfo, slotindex);
			if (NULL == bi) {
				/* copy receive */
				ifp->if_icopies++;

				/* could streamline this a little since we
				 * know the data is going to fit in a
				 * cluster
				 */
				m = m_devget(slotbuf, pktlen, ETHER_ALIGN, sc->ifp, NULL);

				/* Recover this buffer at the far end of the
				 * reserved trail from prior zero-copy
				 * activity.
				 */
				if_netmap_rxsetslot(sc->nm_host_ctx, &sc->hw_rx_rsvd_begin, slotindex);
			} else {
				/* zero-copy receive */
				ifp->if_izcopies++;

				m = m_gethdr(M_DONTWAIT, MT_DATA);
				if (NULL == m) {
					/* No mbuf: roll back the bufinfo and
					 * return the slot to the ring.
					 */
					if_netmap_bufinfo_unalloc(&sc->rx_bufinfo);
					if_netmap_rxsetslot(sc->nm_host_ctx, &sc->hw_rx_rsvd_begin, slotindex);
				} else {
					/* XXX presumably in this path the
					 * IP header isn't aligned on a
					 * 32-bit boundary because the
					 * ethernet header is and there is
					 * no ETHER_ALIGN adjustment?  this
					 * would be an issue for ip_src and
					 * ip_dst on platforms that don't
					 * support 16-bit aligned access to
					 * 32-bit values.
					 */
					m->m_pkthdr.len = m->m_len = pktlen;
					m->m_pkthdr.rcvif = sc->ifp;
					/* External refcount lives in the bufinfo. */
					m->m_ext.ref_cnt = &bi->refcnt;
					m_extadd(m, slotbuf, if_netmap_rxbufsize(sc->nm_host_ctx),
						 if_netmap_free, sc, bi, 0, EXT_EXTREF);
					new_reserved++;
				}
			}

			/* m is NULL only on the zero-copy mbuf-alloc failure
			 * path above (m_devget failure also yields NULL).
			 */
			if (m) {
				sc->ifp->if_input(sc->ifp, m);
			} else {
				ifp->if_iqdrops++;
			}
		}
		avail -= n;
		reserved += new_reserved;

		/* Return any netmap buffers freed by the stack to the ring */
		returned = if_netmap_sweep_trail(sc);
		reserved -= returned;

		rv = if_netmap_rxsync(sc->nm_host_ctx, &avail, &cur, &reserved);
		if (rv == -1)
			printf("could not sync rx descriptors after receive\n");
	}

	if_netmap_stoppable_thread_done(&sc->rx_thread);
}