static void
cdce_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct mbuf *m;
	struct mbuf *mt;
	uint32_t crc;
	uint8_t x;
	int actlen, aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTFN(1, "\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
		    actlen, aframes);

		ifp->if_opackets++;

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		for (x = 0; x != CDCE_FRAMES_MAX; x++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

			if (m == NULL)
				break;

			if (sc->sc_flags & CDCE_FLAG_ZAURUS) {
				/*
				 * Zaurus wants a 32-bit CRC appended
				 * to every frame
				 */
				crc = cdce_m_crc32(m, 0, m->m_pkthdr.len);
				crc = htole32(crc);

				if (!m_append(m, 4, (void *)&crc)) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
			}
			if (m->m_len != m->m_pkthdr.len) {
				mt = m_defrag(m, M_DONTWAIT);
				if (mt == NULL) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
				m = mt;
			}
			if (m->m_pkthdr.len > MCLBYTES)
				m->m_pkthdr.len = MCLBYTES;

			sc->sc_tx_buf[x] = m;
			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him:
			 */
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n",
		    usbd_errstr(error));

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* count output errors */
		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
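/*
 * Hedged sketch of the CRC the Zaurus path above appends.  This is NOT
 * the driver's cdce_m_crc32() (which may use m_apply() and a lookup
 * table instead); it assumes the standard reflected IEEE 802.3 CRC-32
 * (polynomial 0xEDB88320, init and final XOR 0xFFFFFFFF) computed over
 * "len" bytes of the chain starting at byte "offset".
 */
static uint32_t
sketch_m_crc32(struct mbuf *m, uint32_t offset, uint32_t len)
{
	uint32_t crc = 0xFFFFFFFFU;
	uint32_t pos = 0;
	int bit, i;

	for (; m != NULL && len != 0; m = m->m_next) {
		const uint8_t *p = mtod(m, const uint8_t *);

		for (i = 0; i < m->m_len && len != 0; i++, pos++) {
			if (pos < offset)
				continue;	/* skip until "offset" */
			crc ^= p[i];
			for (bit = 0; bit < 8; bit++)
				crc = (crc >> 1) ^
				    ((crc & 1) ? 0xEDB88320U : 0);
			len--;
		}
	}
	return (crc ^ 0xFFFFFFFFU);
}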
int
busdma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **m, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *n = *m;
	int seg_count, defragged = 0, err = 0;
	bus_dma_segment_t *psegs;

	KASSERT(n->m_pkthdr.len, ("packet has zero header len"));
	if (n->m_pkthdr.len <= PIO_LEN)
		return (0);
retry:
	psegs = segs;
	seg_count = 0;
	if (n->m_next == NULL) {
		busdma_map_mbuf_fast(tag, map, n, segs);
		*nsegs = 1;
		return (0);
	}
#if defined(__i386__) || defined(__amd64__)
	while (n && seg_count < TX_MAX_SEGS) {
		/*
		 * firmware doesn't like empty segments
		 */
		if (__predict_true(n->m_len != 0)) {
			seg_count++;
			busdma_map_mbuf_fast(tag, map, n, psegs);
			psegs++;
		}
		n = n->m_next;
	}
#else
	err = bus_dmamap_load_mbuf_sg(tag, map, *m, segs, &seg_count, 0);
#endif
	if (seg_count == 0) {
		if (cxgb_debug)
			printf("empty segment chain\n");
		err = EFBIG;
		goto err_out;
	} else if (err == EFBIG || seg_count >= TX_MAX_SEGS) {
		if (cxgb_debug)
			printf("mbuf chain too long: %d max allowed %d\n",
			    seg_count, TX_MAX_SEGS);
		if (!defragged) {
			n = m_defrag(*m, M_NOWAIT);
			if (n == NULL) {
				err = ENOBUFS;
				goto err_out;
			}
			*m = n;
			defragged = 1;
			goto retry;
		}
		err = EFBIG;
		goto err_out;
	}

	*nsegs = seg_count;
err_out:
	return (err);
}
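/*
 * Hedged usage sketch for busdma_map_sg_collapse() above; the caller
 * shape here (names, error policy) is illustrative, not cxgb's actual
 * transmit path.  On success with *nsegs still 0, the packet was short
 * enough (<= PIO_LEN) to be pushed by programmed I/O instead of DMA.
 */
static int
sketch_tx_map(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int nsegs = 0, err;

	err = busdma_map_sg_collapse(tag, map, m, segs, &nsegs);
	if (err != 0) {
		/* EFBIG/ENOBUFS: even a defragged chain will not fit */
		m_freem(*m);
		*m = NULL;
		return (err);
	}
	if (nsegs == 0) {
		/* PIO fast path: hand *m to the hardware directly */
	}
	return (0);
}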
static int
ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
{
	struct ixl_vsi		*vsi = que->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct tx_ring		*txr = &que->txr;
	struct ixl_tx_buf	*buf;
	struct i40e_tx_desc	*txd = NULL;
	struct mbuf		*m_head, *m;
	int			i, j, error, nsegs, maxsegs;
	int			first, last = 0;
	u16			vtag = 0;
	u32			cmd, off;
	bus_dmamap_t		map;
	bus_dma_tag_t		tag;
	bus_dma_segment_t	segs[IXL_MAX_TSO_SEGS];

	cmd = off = 0;
	m_head = *m_headp;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail;
	buf = &txr->buffers[first];
	map = buf->map;
	tag = txr->tx_tag;
	maxsegs = IXL_MAX_TX_SEGS;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* Use larger mapping for TSO */
		tag = txr->tso_tag;
		maxsegs = IXL_MAX_TSO_SEGS;
		if (ixl_tso_detect_sparse(m_head)) {
			m = m_defrag(m_head, M_NOWAIT);
			if (m == NULL) {
				/* defrag failed: drop the packet rather
				 * than dereference a NULL chain below */
				m_freem(*m_headp);
				*m_headp = NULL;
				return (ENOBUFS);
			}
			*m_headp = m;
		}
	}

	/*
	 * Map the packet for DMA.
	 */
	error = bus_dmamap_load_mbuf_sg(tag, map,
	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		struct mbuf *m;

		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
		if (m == NULL) {
			que->mbuf_defrag_failed++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (ENOBUFS);
		}
		*m_headp = m;

		/* Try it again */
		error = bus_dmamap_load_mbuf_sg(tag, map,
		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);

		if (error == ENOMEM) {
			que->tx_dma_setup++;
			return (error);
		} else if (error != 0) {
			que->tx_dma_setup++;
			m_freem(*m_headp);
			*m_headp = NULL;
			return (error);
		}
	} else if (error == ENOMEM) {
		que->tx_dma_setup++;
		return (error);
	} else if (error != 0) {
		que->tx_dma_setup++;
		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->avail - 2) {
		txr->no_desc++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/* Set up the TSO/CSUM offload */
	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
		error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
		if (error)
			goto xmit_fail;
	}

	cmd |= I40E_TX_DESC_CMD_ICRC;
	/* Grab the VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		vtag = htole16(m_head->m_pkthdr.ether_vtag);
	}

	i = txr->next_avail;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		buf = &txr->buffers[i];
		buf->tag = tag;	/* Keep track of the type tag */
		txd = &txr->base[i];
		seglen = segs[j].ds_len;

		txd->buffer_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(I40E_TX_DESC_DTYPE_DATA
		    | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT)
		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
		    | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
		    | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT));

		last = i;	/* descriptor that will get completion IRQ */

		if (++i == que->num_desc)
			i = 0;

		buf->m_head = NULL;
		buf->eop_index = -1;
	}
	/* Set the last descriptor for report */
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
	txr->avail -= nsegs;
	txr->next_avail = i;

	buf->m_head = m_head;
	/* Swap the dma map between the first and last descriptor */
	txr->buffers[first].map = buf->map;
	buf->map = map;
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	buf = &txr->buffers[first];
	buf->eop_index = last;

	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	wr32(hw, txr->tail, i);

	ixl_flush(hw);
	/* Mark outstanding work */
	if (que->busy == 0)
		que->busy = 1;
	return (0);

xmit_fail:
	bus_dmamap_unload(tag, buf->map);
	return (error);
}
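/*
 * Hedged sketch of a "sparse TSO" test like the ixl_tso_detect_sparse()
 * call in ixl_xmit() above.  The real driver's walk and thresholds are
 * not reproduced here; the idea is simply to flag chains made of so
 * many small mbufs that the hardware's per-packet segment limit could
 * be exceeded, which is what triggers the m_defrag() fallback.
 */
static int
sketch_tso_sparse(struct mbuf *m, int max_segs)
{
	int count = 0;

	for (; m != NULL; m = m->m_next)
		count++;
	return (count > max_segs);	/* nonzero: defrag before mapping */
}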
void
cpsw_start(struct ifnet *ifp)
{
	struct cpsw_softc * const sc = ifp->if_softc;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	struct mbuf *m;
	bus_dmamap_t dm;
	u_int eopi = ~0;
	u_int seg;
	u_int txfree;
	int txstart = -1;
	int error;
	bool pad;
	u_int mlen;

	if (!ISSET(ifp->if_flags, IFF_RUNNING) ||
	    ISSET(ifp->if_flags, IFF_OACTIVE) ||
	    IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	if (sc->sc_txnext >= sc->sc_txhead)
		txfree = CPSW_NTXDESCS - 1 + sc->sc_txhead - sc->sc_txnext;
	else
		txfree = sc->sc_txhead - sc->sc_txnext - 1;

	for (;;) {
		if (txfree <= CPSW_TXFRAGS) {
			SET(ifp->if_flags, IFF_OACTIVE);
			break;
		}

		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);

		dm = rdp->tx_dm[sc->sc_txnext];
		error = bus_dmamap_load_mbuf(sc->sc_bdt, dm, m, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			break;
		case EFBIG:	/* mbuf chain is too fragmented */
			if (m_defrag(m, M_DONTWAIT) == 0 &&
			    bus_dmamap_load_mbuf(sc->sc_bdt, dm, m,
			    BUS_DMA_NOWAIT) == 0)
				break;
			/* FALLTHROUGH */
		default:
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		mlen = dm->dm_mapsize;
		pad = mlen < CPSW_PAD_LEN;

		KASSERT(rdp->tx_mb[sc->sc_txnext] == NULL);
		rdp->tx_mb[sc->sc_txnext] = m;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		if (txstart == -1)
			txstart = sc->sc_txnext;
		eopi = sc->sc_txnext;
		for (seg = 0; seg < dm->dm_nsegs; seg++) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = dm->dm_segs[seg].ds_addr;
			bd.bufoff = 0;
			bd.buflen = dm->dm_segs[seg].ds_len;
			bd.pktlen = 0;
			bd.flags = 0;

			if (seg == 0) {
				bd.flags = CPDMA_BD_OWNER | CPDMA_BD_SOP;
				bd.pktlen = MAX(mlen, CPSW_PAD_LEN);
			}

			if (seg == dm->dm_nsegs - 1 && !pad)
				bd.flags |= CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
		if (pad) {
			bd.next = cpsw_txdesc_paddr(sc,
			    TXDESC_NEXT(sc->sc_txnext));
			bd.bufptr = sc->sc_txpad_pa;
			bd.bufoff = 0;
			bd.buflen = CPSW_PAD_LEN - mlen;
			bd.pktlen = 0;
			bd.flags = CPDMA_BD_EOP;

			cpsw_set_txdesc(sc, sc->sc_txnext, &bd);
			txfree--;
			eopi = sc->sc_txnext;
			sc->sc_txnext = TXDESC_NEXT(sc->sc_txnext);
		}
	}

	if (txstart >= 0) {
		ifp->if_timer = 5;
		/* terminate the new chain */
		KASSERT(eopi == TXDESC_PREV(sc->sc_txnext));
		cpsw_set_txdesc_next(sc, TXDESC_PREV(sc->sc_txnext), 0);

		/* link the new chain on */
		cpsw_set_txdesc_next(sc, TXDESC_PREV(txstart),
		    cpsw_txdesc_paddr(sc, txstart));

		if (sc->sc_txeoq) {
			/* kick the dma engine */
			sc->sc_txeoq = false;
			bus_space_write_4(sc->sc_bst, sc->sc_bsh,
			    CPSW_CPDMA_TX_HDP(0),
			    cpsw_txdesc_paddr(sc, txstart));
		}
	}
}
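/*
 * Worked example of the free-slot arithmetic at the top of cpsw_start()
 * (CPSW_NTXDESCS == 256 assumed here purely for illustration).  One
 * slot is always kept unused so that a full ring can be told apart
 * from an empty one.
 */
static u_int
sketch_tx_free(u_int head, u_int next, u_int ndescs)
{
	if (next >= head)
		/* e.g. head 10, next 250: 256 - 1 + 10 - 250 = 15 free */
		return (ndescs - 1 + head - next);
	/* e.g. head 250, next 10: 250 - 10 - 1 = 239 free */
	return (head - next - 1);
}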