Example #1
static boolean_t
sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
    uint16_t flags)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	struct sfxge_rxq *rxq;
	unsigned int expected;
	struct sfxge_rx_sw_desc *rx_desc;

	evq = arg;
	sc = evq->sc;

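	/* Drop further events once an exception has been flagged; a reset is pending. */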
	if (evq->exception)
		goto done;

	rxq = sc->rxq[label];
	KASSERT(rxq != NULL, ("rxq == NULL"));
	KASSERT(evq->index == rxq->index,
	    ("evq->index != rxq->index"));

	if (rxq->init_state != SFXGE_RXQ_STARTED)
		goto done;

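	/*
	 * Completions must arrive in strict ring order; any other ID
	 * indicates inconsistent hardware state, so schedule a reset.
	 */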
	expected = rxq->pending++ & (SFXGE_NDESCS - 1);
	if (id != expected) {
		evq->exception = B_TRUE;

		device_printf(sc->dev, "RX completion out of order"
			      " (id=%#x expected=%#x flags=%#x); resetting\n",
			      id, expected, flags);
		sfxge_schedule_reset(sc);

		goto done;
	}

	rx_desc = &rxq->queue[id];

	KASSERT(rx_desc->flags == EFX_DISCARD,
	    ("rx_desc->flags != EFX_DISCARD"));
	rx_desc->flags = flags;

	KASSERT(size < (1 << 16), ("size >= (1 << 16)"));
	rx_desc->size = (uint16_t)size;
	prefetch_read_many(rx_desc->mbuf);

	evq->rx_done++;

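	/* Deliver completed packets in batches to amortize per-call overhead. */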
	if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH)
		sfxge_ev_qcomplete(evq, B_FALSE);

done:
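	/* Returning B_TRUE asks the event loop to stop after this batch. */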
	return (evq->rx_done >= SFXGE_EV_BATCH);
}
Example #2
static void
sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int batch;
	unsigned int rxfill;
	unsigned int mblksize;
	int ntodo;
	efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];

	prefetch_read_many(sc->enp);
	prefetch_read_many(rxq->common);

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

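	/* Fill no further than the ring limit and no more than requested. */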
	rxfill = rxq->added - rxq->completed;
	KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
	    ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
	ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
	KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
	    ("ntodo > EFX_RXQ_LIMIT(rxq->entries)"));

	if (ntodo == 0)
		return;

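	/* The usable block size excludes the space reserved for alignment. */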
	batch = 0;
	mblksize = sc->rx_buffer_size - sc->rx_buffer_align;
	while (ntodo-- > 0) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;
		bus_dma_segment_t seg;
		struct mbuf *m;

		id = (rxq->added + batch) & rxq->ptr_mask;
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));

		rx_desc->flags = EFX_DISCARD;
		m = rx_desc->mbuf = sfxge_rx_alloc_mbuf(sc);
		if (m == NULL)
			break;

		/* m_len specifies length of area to be mapped for DMA */
		m->m_len  = mblksize;
		m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data, CACHE_LINE_SIZE);
		m->m_data += sc->rx_buffer_align;

		sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg);
		addr[batch++] = seg.ds_addr;

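		/* Post a full batch of DMA addresses to the receive ring. */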
		if (batch == SFXGE_REFILL_BATCH) {
			efx_rx_qpost(rxq->common, addr, mblksize, batch,
			    rxq->completed, rxq->added);
			rxq->added += batch;
			batch = 0;
		}
	}

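	/* A non-zero ntodo means mbuf allocation failed; retry the refill later. */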
	if (ntodo != 0)
		sfxge_rx_schedule_refill(rxq, retrying);

	if (batch != 0) {
		efx_rx_qpost(rxq->common, addr, mblksize, batch,
		    rxq->completed, rxq->added);
		rxq->added += batch;
	}

	/* Make the descriptors visible to the hardware */
	bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
			BUS_DMASYNC_PREWRITE);

	efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed);

	/*
	 * The queue could still be empty if no descriptors were actually
	 * pushed, in which case there will be no event to cause the next
	 * refill, so we must schedule a refill ourselves.
	 */
	if (rxq->pushed == rxq->completed)
		sfxge_rx_schedule_refill(rxq, retrying);
}
Example #3
static boolean_t
sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
	    uint16_t flags)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	struct sfxge_rxq *rxq;
	unsigned int stop;
	unsigned int delta;
	struct sfxge_rx_sw_desc *rx_desc;

	evq = arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	sc = evq->sc;

	if (evq->exception)
		goto done;

	rxq = sc->rxq[label];
	KASSERT(rxq != NULL, ("rxq == NULL"));
	KASSERT(evq->index == rxq->index,
	    ("evq->index != rxq->index"));

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		goto done;

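	/*
	 * Compute the number of descriptors completed by this event,
	 * allowing for wrap-around of the ring indices.
	 */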
	stop = (id + 1) & rxq->ptr_mask;
	id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= id) ? (stop - id) : (rxq->entries - id + stop);
	rxq->pending += delta;

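	/*
	 * A batched completion (delta != 1) is only valid if the NIC
	 * supports RX batching and delta is within its batch limit.
	 */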
	if (delta != 1) {
		if ((!efx_nic_cfg_get(sc->enp)->enc_rx_batching_enabled) ||
		    (delta == 0) ||
		    (delta > efx_nic_cfg_get(sc->enp)->enc_rx_batch_max)) {
			evq->exception = B_TRUE;

			device_printf(sc->dev, "RX completion out of order"
				      " (id=%#x delta=%u flags=%#x); resetting\n",
				      id, delta, flags);
			sfxge_schedule_reset(sc);

			goto done;
		}
	}

	rx_desc = &rxq->queue[id];

	prefetch_read_many(rx_desc->mbuf);

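	/* Claim every descriptor covered by this completion. */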
	for (; id != stop; id = (id + 1) & rxq->ptr_mask) {
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->flags == EFX_DISCARD,
		    ("rx_desc->flags != EFX_DISCARD"));
		rx_desc->flags = flags;

		KASSERT(size < (1 << 16), ("size >= (1 << 16)"));
		rx_desc->size = (uint16_t)size;
	}

	evq->rx_done++;

	if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH)
		sfxge_ev_qcomplete(evq, B_FALSE);

done:
	return (evq->rx_done >= SFXGE_EV_BATCH);
}
Example #4
static void
sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int batch;
	unsigned int rxfill;
	unsigned int mblksize;
	int ntodo;
	efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];

	prefetch_read_many(sc->enp);
	prefetch_read_many(rxq->common);

	mtx_assert(&evq->lock, MA_OWNED);

	if (rxq->init_state != SFXGE_RXQ_STARTED)
		return;

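	/* Limit the fill to free ring space and the caller's target. */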
	rxfill = rxq->added - rxq->completed;
	KASSERT(rxfill <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
	    ("rxfill > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));
	ntodo = min(EFX_RXQ_LIMIT(SFXGE_NDESCS) - rxfill, target);
	KASSERT(ntodo <= EFX_RXQ_LIMIT(SFXGE_NDESCS),
	    ("ntodo > EFX_RXQ_LIMIT(SFXGE_NDESCS)"));

	if (ntodo == 0)
		return;

	batch = 0;
	mblksize = sc->rx_buffer_size;
	while (ntodo-- > 0) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;
		bus_dma_segment_t seg;
		struct mbuf *m;

		id = (rxq->added + batch) & (SFXGE_NDESCS - 1);
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));

		rx_desc->flags = EFX_DISCARD;
		m = rx_desc->mbuf = sfxge_rx_alloc_mbuf(sc);
		if (m == NULL)
			break;
		sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg);
		addr[batch++] = seg.ds_addr;

		if (batch == SFXGE_REFILL_BATCH) {
			efx_rx_qpost(rxq->common, addr, mblksize, batch,
			    rxq->completed, rxq->added);
			rxq->added += batch;
			batch = 0;
		}
	}

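	/* Allocation failed before reaching the target; schedule a retry. */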
	if (ntodo != 0)
		sfxge_rx_schedule_refill(rxq, retrying);

	if (batch != 0) {
		efx_rx_qpost(rxq->common, addr, mblksize, batch,
		    rxq->completed, rxq->added);
		rxq->added += batch;
	}

	/* Make the descriptors visible to the hardware */
	bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
			BUS_DMASYNC_PREWRITE);

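	/* Notify the hardware of the newly added descriptors. */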
	efx_rx_qpush(rxq->common, rxq->added);
}