int
sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs)
{
	bus_dma_segment_t *psegs;
	struct mbuf *m;
	int seg_count;
	int defragged;
	int err;

	m = *mp;
	defragged = err = seg_count = 0;

	KASSERT(m->m_pkthdr.len, ("packet has zero header length"));

retry:
	psegs = segs;
	seg_count = 0;
	if (m->m_next == NULL) {
		/* A single-mbuf packet maps to exactly one segment. */
		sfxge_map_mbuf_fast(tag, map, m, segs);
		*nsegs = 1;
		return (0);
	}
#if defined(__i386__) || defined(__amd64__)
	while (m != NULL && seg_count < maxsegs) {
		/*
		 * firmware doesn't like empty segments
		 */
		if (m->m_len != 0) {
			seg_count++;
			sfxge_map_mbuf_fast(tag, map, m, psegs);
			psegs++;
		}
		m = m->m_next;
	}
#else
	err = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, &seg_count, 0);
#endif
	if (seg_count == 0) {
		err = EFBIG;
		goto err_out;
	} else if (err == EFBIG || seg_count >= maxsegs) {
		if (!defragged) {
			/*
			 * Collapse the mbuf chain and retry the mapping
			 * once with the defragmented chain.
			 */
			m = m_defrag(*mp, M_NOWAIT);
			if (m == NULL) {
				err = ENOBUFS;
				goto err_out;
			}
			*mp = m;
			defragged = 1;
			goto retry;
		}
		err = EFBIG;
		goto err_out;
	}

	*nsegs = seg_count;

err_out:
	return (err);
}
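
/*
 * Hedged example (not part of the driver source): a sketch of how a
 * transmit path might use sfxge_dma_map_sg_collapse() to turn an mbuf
 * chain into a bounded scatter/gather list before posting descriptors.
 * EXAMPLE_MAX_SEGS, example_queue_packet() and the "post descriptors"
 * step are assumptions for illustration only; the real caller and its
 * segment limit live elsewhere in the driver.
 */
#define	EXAMPLE_MAX_SEGS	16	/* hypothetical per-packet segment limit */

static int
example_queue_packet(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mp)
{
	bus_dma_segment_t segs[EXAMPLE_MAX_SEGS];
	int nsegs;
	int err;

	/*
	 * Map the chain, defragmenting it if it has too many segments.
	 * Note that *mp may be replaced if the chain was defragmented.
	 */
	err = sfxge_dma_map_sg_collapse(tag, map, mp, segs, &nsegs,
	    EXAMPLE_MAX_SEGS);
	if (err != 0)
		return (err);	/* EFBIG or ENOBUFS: drop or retry later */

	/* ... post 'nsegs' descriptors from segs[] and sync the map ... */
	return (0);
}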
static void
sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int batch;
	unsigned int rxfill;
	unsigned int mblksize;
	int ntodo;
	efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];

	prefetch_read_many(sc->enp);
	prefetch_read_many(rxq->common);

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

	rxfill = rxq->added - rxq->completed;
	KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
	    ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
	ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
	KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
	    ("ntodo > EFX_RXQ_LIMIT(rxq->entries)"));

	if (ntodo == 0)
		return;

	batch = 0;
	mblksize = sc->rx_buffer_size - sc->rx_buffer_align;
	while (ntodo-- > 0) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;
		bus_dma_segment_t seg;
		struct mbuf *m;

		id = (rxq->added + batch) & rxq->ptr_mask;
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));

		rx_desc->flags = EFX_DISCARD;
		m = rx_desc->mbuf = sfxge_rx_alloc_mbuf(sc);
		if (m == NULL)
			break;

		/* m_len specifies length of area to be mapped for DMA */
		m->m_len = mblksize;
		m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data,
		    CACHE_LINE_SIZE);
		m->m_data += sc->rx_buffer_align;

		sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map,
		    m, &seg);
		addr[batch++] = seg.ds_addr;
		if (batch == SFXGE_REFILL_BATCH) {
			efx_rx_qpost(rxq->common, addr, mblksize, batch,
			    rxq->completed, rxq->added);
			rxq->added += batch;
			batch = 0;
		}
	}

	if (ntodo != 0)
		sfxge_rx_schedule_refill(rxq, retrying);

	if (batch != 0) {
		efx_rx_qpost(rxq->common, addr, mblksize, batch,
		    rxq->completed, rxq->added);
		rxq->added += batch;
	}

	/* Make the descriptors visible to the hardware */
	bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
	    BUS_DMASYNC_PREWRITE);

	efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed);

	/*
	 * The queue could still be empty if no descriptors were actually
	 * pushed, in which case there will be no event to cause the next
	 * refill, so we must schedule a refill ourselves.
	 */
	if (rxq->pushed == rxq->completed) {
		sfxge_rx_schedule_refill(rxq, retrying);
	}
}
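
/*
 * Hedged sketch (illustrative only): the ring index arithmetic used in
 * sfxge_rx_qfill() above.  Because the descriptor ring size is a power of
 * two, masking the free-running 'added' counter with ptr_mask
 * (entries - 1) yields the wrapped ring index without a modulo.  The
 * function name and values here are assumptions, not driver code.
 */
static unsigned int
example_ring_index(unsigned int added, unsigned int entries)
{
	unsigned int ptr_mask = entries - 1;	/* entries assumed power of two */

	/* e.g. added = 1030, entries = 1024 -> index 6 */
	return (added & ptr_mask);
}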
static void
sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int batch;
	unsigned int rxfill;
	unsigned int mblksize;
	int ntodo;
	efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];

	prefetch_read_many(sc->enp);
	prefetch_read_many(rxq->common);

	mtx_assert(&evq->lock, MA_OWNED);

	if (rxq->init_state != SFXGE_RXQ_STARTED)
		return;

	rxfill = rxq->added - rxq->completed;
	KASSERT(rxfill <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
	    ("rxfill > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));
	ntodo = min(EFX_RXQ_LIMIT(SFXGE_NDESCS) - rxfill, target);
	KASSERT(ntodo <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
	    ("ntodo > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));

	if (ntodo == 0)
		return;

	batch = 0;
	mblksize = sc->rx_buffer_size;
	while (ntodo-- > 0) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;
		bus_dma_segment_t seg;
		struct mbuf *m;

		id = (rxq->added + batch) & (SFXGE_NDESCS - 1);
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));

		rx_desc->flags = EFX_DISCARD;
		m = rx_desc->mbuf = sfxge_rx_alloc_mbuf(sc);
		if (m == NULL)
			break;

		sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map,
		    m, &seg);
		addr[batch++] = seg.ds_addr;
		if (batch == SFXGE_REFILL_BATCH) {
			efx_rx_qpost(rxq->common, addr, mblksize, batch,
			    rxq->completed, rxq->added);
			rxq->added += batch;
			batch = 0;
		}
	}

	if (ntodo != 0)
		sfxge_rx_schedule_refill(rxq, retrying);

	if (batch != 0) {
		efx_rx_qpost(rxq->common, addr, mblksize, batch,
		    rxq->completed, rxq->added);
		rxq->added += batch;
	}

	/* Make the descriptors visible to the hardware */
	bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
	    BUS_DMASYNC_PREWRITE);

	efx_rx_qpush(rxq->common, rxq->added);
}
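
/*
 * Hedged sketch (not part of the driver): the refill batching pattern used
 * by both sfxge_rx_qfill() variants above.  DMA addresses are gathered into
 * a small on-stack array and flushed whenever SFXGE_REFILL_BATCH entries
 * have accumulated, with one final flush for any remainder.  example_post()
 * is a hypothetical stand-in for efx_rx_qpost(); the types are simplified
 * for illustration only.
 */
static void example_post(const uint64_t *addr, unsigned int n);	/* hypothetical */

static void
example_refill(const uint64_t *buffers, unsigned int nbuffers)
{
	uint64_t addr[SFXGE_REFILL_BATCH];
	unsigned int batch = 0;
	unsigned int i;

	for (i = 0; i < nbuffers; i++) {
		addr[batch++] = buffers[i];
		if (batch == SFXGE_REFILL_BATCH) {
			example_post(addr, batch);	/* flush a full batch */
			batch = 0;
		}
	}
	if (batch != 0)
		example_post(addr, batch);		/* flush the remainder */
}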