Example 1
static boolean_t
sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	struct sfxge_rxq *rxq;
	unsigned int index;
	unsigned int label;
	uint16_t magic;

	evq = (struct sfxge_evq *)arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	sc = evq->sc;
	rxq = sc->rxq[rxq_index];

	KASSERT(rxq != NULL, ("rxq == NULL"));

	/* Resend a software event on the correct queue */
	index = rxq->index;
	evq = sc->evq[index];

	label = rxq_index;
	KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
	    ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
	magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;

	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq not started"));
	efx_ev_qpost(evq->common, magic);

	return (B_FALSE);
}
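The handler above re-posts a synthetic "software" event on the event queue that actually owns the RX queue, packing the queue label into the low bits of a 16-bit magic value. Below is a minimal, self-contained sketch of that encode/decode round trip; the 6-bit label width and the type code are assumptions for illustration, not the driver's actual definitions from sfxge.h.

/* Sketch of the magic/label packing used by the software events above.
 * The label width and the type code are hypothetical stand-ins for the
 * SFXGE_MAGIC_* constants.
 */
#include <assert.h>
#include <stdint.h>

#define DMAQ_LABEL_WIDTH	6
#define DMAQ_LABEL_MASK		((1 << DMAQ_LABEL_WIDTH) - 1)	/* 0x003f */
#define MAGIC_RX_QFLUSH_FAILED	(0x2 << DMAQ_LABEL_WIDTH)	/* hypothetical type code */

int
main(void)
{
	unsigned int label = 5;			/* RX queue label */
	uint16_t magic;

	/* Encode: the label must fit entirely inside the mask ... */
	assert((label & DMAQ_LABEL_MASK) == label);
	magic = MAGIC_RX_QFLUSH_FAILED | label;

	/* ... so the receiving side can split the event back apart. */
	assert((magic & DMAQ_LABEL_MASK) == label);
	assert((magic & ~DMAQ_LABEL_MASK) == MAGIC_RX_QFLUSH_FAILED);
	return (0);
}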
Example 2
static boolean_t
sfxge_ev_exception(void *arg, uint32_t code, uint32_t data)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;

	evq = (struct sfxge_evq *)arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	sc = evq->sc;

	DBGPRINT(sc->dev, "[%d] %s", evq->index,
			  (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
			  (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
			  (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
			  (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT) ? "UNKNOWN_SENSOREVT" :
			  (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
			  (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
			  (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
			  (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
			  (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
			  "UNKNOWN");

	evq->exception = B_TRUE;

	if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) {
		device_printf(sc->dev,
			      "hardware exception (code=%u); resetting\n",
			      code);
		sfxge_schedule_reset(sc);
	}

	return (B_FALSE);
}
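The chained ternaries inside DBGPRINT translate an exception code into a printable name. An equivalent, arguably easier-to-extend alternative is a small lookup table; the sketch below uses hypothetical enum values, not the real efx_exception_* constants, and is only an illustration of the design choice.

/* Hypothetical table-driven variant of the code-to-name mapping above. */
#include <stdio.h>

enum { EXC_RX_RECOVERY, EXC_RX_DSC_ERROR, EXC_TX_DSC_ERROR, EXC_COUNT };

static const char *const exc_names[EXC_COUNT] = {
	[EXC_RX_RECOVERY]	= "RX_RECOVERY",
	[EXC_RX_DSC_ERROR]	= "RX_DSC_ERROR",
	[EXC_TX_DSC_ERROR]	= "TX_DSC_ERROR",
};

static const char *
exc_name(unsigned int code)
{
	/* Fall back to "UNKNOWN" for anything outside the table. */
	return (code < EXC_COUNT && exc_names[code] != NULL) ?
	    exc_names[code] : "UNKNOWN";
}

int
main(void)
{
	printf("[%d] %s\n", 0, exc_name(EXC_TX_DSC_ERROR));
	return (0);
}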
Example 3
static boolean_t
sfxge_ev_link_change(void *arg, efx_link_mode_t	link_mode)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;

	evq = (struct sfxge_evq *)arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	sc = evq->sc;

	sfxge_mac_link_update(sc, link_mode);

	return (B_FALSE);
}
Example 4
static boolean_t
sfxge_ev_initialized(void *arg)
{
	struct sfxge_evq *evq;

	evq = (struct sfxge_evq *)arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	/* Init done events may be duplicated on 7xxx */
	KASSERT(evq->init_state == SFXGE_EVQ_STARTING ||
		evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq not starting"));

	evq->init_state = SFXGE_EVQ_STARTED;

	return (B_FALSE);
}
Example 5
static boolean_t
sfxge_ev_software(void *arg, uint16_t magic)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	unsigned int label;

	evq = (struct sfxge_evq *)arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	sc = evq->sc;

	label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK;
	magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;

	switch (magic) {
	case SFXGE_MAGIC_RX_QFLUSH_DONE:
		sfxge_rx_qflush_done(sfxge_get_rxq_by_label(evq, label));
		break;

	case SFXGE_MAGIC_RX_QFLUSH_FAILED:
		sfxge_rx_qflush_failed(sfxge_get_rxq_by_label(evq, label));
		break;

	case SFXGE_MAGIC_RX_QREFILL:
		sfxge_rx_qrefill(sfxge_get_rxq_by_label(evq, label));
		break;

	case SFXGE_MAGIC_TX_QFLUSH_DONE: {
		struct sfxge_txq *txq = sfxge_get_txq_by_label(evq, label);

		KASSERT(txq != NULL, ("txq == NULL"));
		KASSERT(evq->index == txq->evq_index,
		    ("evq->index != txq->evq_index"));

		sfxge_tx_qflush_done(txq);
		break;
	}
	default:
		break;
	}

	return (B_FALSE);
}
Example 6
static boolean_t
sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfxge_evq *evq;
	struct sfxge_txq *txq;
	unsigned int stop;
	unsigned int delta;

	evq = (struct sfxge_evq *)arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	txq = sfxge_get_txq_by_label(evq, label);

	KASSERT(txq != NULL, ("txq == NULL"));
	KASSERT(evq->index == txq->evq_index,
	    ("evq->index != txq->evq_index"));

	if (__predict_false(txq->init_state != SFXGE_TXQ_STARTED))
		goto done;

	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->entries - id + stop);
	txq->pending += delta;

	evq->tx_done++;

	if (txq->next == NULL &&
	    evq->txqs != &(txq->next)) {
		*(evq->txqs) = txq;
		evq->txqs = &(txq->next);
	}

	if (txq->pending - txq->completed >= SFXGE_TX_BATCH)
		sfxge_tx_qcomplete(txq, evq);

done:
	return (evq->tx_done >= SFXGE_EV_BATCH);
}
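sfxge_ev_tx works out how many descriptors completed by comparing the index reported by the hardware with the queue's pending index modulo the ring size. A standalone sketch of that wrap-around arithmetic follows; the ring size is assumed to be a power of two, as the ptr_mask usage implies, and the example values are made up.

/* Wrap-around delta on a power-of-two ring, as in sfxge_ev_tx() above.
 * entries and ptr_mask mirror the txq fields.
 */
#include <assert.h>

int
main(void)
{
	const unsigned int entries = 512;
	const unsigned int ptr_mask = entries - 1;
	unsigned int pending = 510;	/* software's completion index */
	unsigned int hw_id = 2;		/* last descriptor the NIC finished */
	unsigned int stop, id, delta;

	stop = (hw_id + 1) & ptr_mask;	/* one past the last completed entry */
	id = pending & ptr_mask;
	delta = (stop >= id) ? (stop - id) : (entries - id + stop);

	assert(delta == 5);	/* descriptors 510, 511, 0, 1, 2 completed */
	return (0);
}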
Example 7
static boolean_t
sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	struct sfxge_txq *txq;
	unsigned int label;
	uint16_t magic;

	evq = (struct sfxge_evq *)arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	sc = evq->sc;
	txq = sc->txq[txq_index];

	KASSERT(txq != NULL, ("txq == NULL"));
	KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
	    ("txq not initialized"));

	if (txq->evq_index == evq->index) {
		sfxge_tx_qflush_done(txq);
		return (B_FALSE);
	}

	/* Resend a software event on the correct queue */
	evq = sc->evq[txq->evq_index];

	label = txq->type;
	KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
	    ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
	magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;

	KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
	    ("evq not started"));
	efx_ev_qpost(evq->common, magic);

	return (B_FALSE);
}
Example 8
static void
sfxge_ev_qcomplete(struct sfxge_evq *evq, boolean_t eop)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_rxq *rxq;
	struct sfxge_txq *txq;

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	sc = evq->sc;
	index = evq->index;
	rxq = sc->rxq[index];

	if ((txq = evq->txq) != NULL) {
		evq->txq = NULL;
		evq->txqs = &(evq->txq);

		do {
			struct sfxge_txq *next;

			next = txq->next;
			txq->next = NULL;

			KASSERT(txq->evq_index == index,
			    ("txq->evq_index != index"));

			if (txq->pending != txq->completed)
				sfxge_tx_qcomplete(txq, evq);

			txq = next;
		} while (txq != NULL);
	}

	if (rxq->pending != rxq->completed)
		sfxge_rx_qcomplete(rxq, eop);
}
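sfxge_ev_tx and sfxge_ev_qcomplete together maintain a list of TX queues with completions pending on this event queue: evq->txq is the head and evq->txqs points at the next pointer to fill, so queues can be appended in O(1) without a separate tail field, and the whole list is detached and walked here. A minimal sketch of that pointer-to-pointer idiom, with hypothetical types standing in for struct sfxge_txq:

/* Sketch of the head + tail-pointer list used by evq->txq / evq->txqs. */
#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;
	int id;
};

struct list {
	struct node *head;	/* like evq->txq */
	struct node **tail;	/* like evq->txqs, points at the last 'next' */
};

int
main(void)
{
	struct node a = { NULL, 0 }, b = { NULL, 1 };
	struct list l = { NULL, &l.head };

	/* Append in O(1): write through the tail pointer, then advance it. */
	*l.tail = &a;
	l.tail = &a.next;
	*l.tail = &b;
	l.tail = &b.next;

	assert(l.head == &a && a.next == &b && b.next == NULL);
	return (0);
}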
Example 9
static boolean_t
sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
	    uint16_t flags)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	struct sfxge_rxq *rxq;
	unsigned int stop;
	unsigned int delta;
	struct sfxge_rx_sw_desc *rx_desc;

	evq = arg;
	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	sc = evq->sc;

	if (evq->exception)
		goto done;

	rxq = sc->rxq[label];
	KASSERT(rxq != NULL, ("rxq == NULL"));
	KASSERT(evq->index == rxq->index,
	    ("evq->index != rxq->index"));

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		goto done;

	stop = (id + 1) & rxq->ptr_mask;
	id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= id) ? (stop - id) : (rxq->entries - id + stop);
	rxq->pending += delta;

	if (delta != 1) {
		if ((!efx_nic_cfg_get(sc->enp)->enc_rx_batching_enabled) ||
		    (delta <= 0) ||
		    (delta > efx_nic_cfg_get(sc->enp)->enc_rx_batch_max)) {
			evq->exception = B_TRUE;

			device_printf(sc->dev, "RX completion out of order"
						  " (id=%#x delta=%u flags=%#x); resetting\n",
						  id, delta, flags);
			sfxge_schedule_reset(sc);

			goto done;
		}
	}

	rx_desc = &rxq->queue[id];

	prefetch_read_many(rx_desc->mbuf);

	for (; id != stop; id = (id + 1) & rxq->ptr_mask) {
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->flags == EFX_DISCARD,
				("rx_desc->flags != EFX_DISCARD"));
		rx_desc->flags = flags;

		KASSERT(size < (1 << 16), ("size >= (1 << 16)"));
		rx_desc->size = (uint16_t)size;
	}

	evq->rx_done++;

	if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH)
		sfxge_ev_qcomplete(evq, B_FALSE);

done:
	return (evq->rx_done >= SFXGE_EV_BATCH);
}
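When RX batching is enabled the NIC may report several completions in a single event, so a delta other than 1 is only acceptable up to the advertised batch maximum; anything else is treated as an out-of-order completion and triggers a reset. A sketch of that validation as a standalone helper, with made-up flags and a made-up limit in place of the enc_rx_batching_enabled and enc_rx_batch_max fields:

/* Sketch of the RX completion-delta check in sfxge_ev_rx() above. */
#include <assert.h>
#include <stdbool.h>

static bool
rx_delta_ok(unsigned int delta, bool rx_batching_enabled,
    unsigned int rx_batch_max)
{
	if (delta == 1)			/* the common, unbatched case */
		return (true);
	if (!rx_batching_enabled)	/* batching off: anything else is bogus */
		return (false);
	return (delta != 0 && delta <= rx_batch_max);
}

int
main(void)
{
	assert(rx_delta_ok(1, false, 0));
	assert(rx_delta_ok(4, true, 8));
	assert(!rx_delta_ok(4, false, 8));	/* batching disabled */
	assert(!rx_delta_ok(9, true, 8));	/* exceeds the batch maximum */
	return (0);
}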
Example 10
static void
sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int batch;
	unsigned int rxfill;
	unsigned int mblksize;
	int ntodo;
	efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];

	prefetch_read_many(sc->enp);
	prefetch_read_many(rxq->common);

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

	rxfill = rxq->added - rxq->completed;
	KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
	    ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
	ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
	KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
	    ("ntodo > EFX_RQX_LIMIT(rxq->entries)"));

	if (ntodo == 0)
		return;

	batch = 0;
	mblksize = sc->rx_buffer_size - sc->rx_buffer_align;
	while (ntodo-- > 0) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;
		bus_dma_segment_t seg;
		struct mbuf *m;

		id = (rxq->added + batch) & rxq->ptr_mask;
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));

		rx_desc->flags = EFX_DISCARD;
		m = rx_desc->mbuf = sfxge_rx_alloc_mbuf(sc);
		if (m == NULL)
			break;

		/* m_len specifies length of area to be mapped for DMA */
		m->m_len  = mblksize;
		m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data, CACHE_LINE_SIZE);
		m->m_data += sc->rx_buffer_align;

		sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg);
		addr[batch++] = seg.ds_addr;

		if (batch == SFXGE_REFILL_BATCH) {
			efx_rx_qpost(rxq->common, addr, mblksize, batch,
			    rxq->completed, rxq->added);
			rxq->added += batch;
			batch = 0;
		}
	}

	if (ntodo != 0)
		sfxge_rx_schedule_refill(rxq, retrying);

	if (batch != 0) {
		efx_rx_qpost(rxq->common, addr, mblksize, batch,
		    rxq->completed, rxq->added);
		rxq->added += batch;
	}

	/* Make the descriptors visible to the hardware */
	bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
			BUS_DMASYNC_PREWRITE);

	efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed);

	/*
	 * The queue could still be empty if no descriptors were actually
	 * pushed, in which case there will be no event to cause the next
	 * refill, so we must schedule a refill ourselves.
	 */
	if (rxq->pushed == rxq->completed) {
		sfxge_rx_schedule_refill(rxq, retrying);
	}
}
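Before posting a buffer, sfxge_rx_qfill rounds the mbuf data pointer up to a cache-line boundary and then offsets it by rx_buffer_align, which is why mblksize is the buffer size minus that alignment slack. A small sketch of the power-of-two round-up that P2ROUNDUP performs; the macro body written out below is an assumption for illustration, not the driver's definition.

/* Sketch of the P2ROUNDUP()-style alignment applied to m->m_data above.
 * align must be a power of two (e.g. CACHE_LINE_SIZE).
 */
#include <assert.h>
#include <stdint.h>

#define ROUNDUP_P2(x, align)	(((x) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

int
main(void)
{
	uintptr_t data = 0x1234567;		/* arbitrary unaligned address */
	uintptr_t aligned = ROUNDUP_P2(data, 64);

	assert((aligned & 63) == 0);		/* now cache-line aligned */
	assert(aligned >= data && aligned - data < 64);
	return (0);
}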