Example #1
static void
ffec_rxfinish_locked(struct ffec_softc *sc)
{
	struct ffec_hwdesc *desc;
	int len;
	boolean_t produced_empty_buffer;

	FFEC_ASSERT_LOCKED(sc);

	produced_empty_buffer = false;
	for (;;) {
		desc = &sc->rxdesc_ring[sc->rx_idx];
		if (desc->flags_len & FEC_RXDESC_EMPTY)
			break;
		produced_empty_buffer = true;
		len = (desc->flags_len & FEC_RXDESC_LEN_MASK);
		if (len < 64) {
			/*
			 * Runt frame (shorter than the 64-byte ethernet
			 * minimum); just recycle the descriptor and continue.
			 */
			ffec_setup_rxdesc(sc, sc->rx_idx,
			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
		} else if ((desc->flags_len & FEC_RXDESC_L) == 0) {
			/*
			 * The entire frame is not in this buffer.  Impossible.
			 * Recycle the descriptor and continue.
			 *
			 * XXX what's the right way to handle this? Probably we
			 * should stop/init the hardware because this should
			 * just really never happen when we have buffers bigger
			 * than the maximum frame size.
			 */
			device_printf(sc->dev,
			    "fec_rxfinish: received frame without LAST bit set\n");
			ffec_setup_rxdesc(sc, sc->rx_idx,
			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
		} else if (desc->flags_len & FEC_RXDESC_ERROR_BITS) {
			/*
			 * Something went wrong with receiving the frame; we
			 * don't care what (the hardware has already counted
			 * the error in the stats registers).  Just reuse the
			 * same mbuf, which is still dma-mapped, by resetting
			 * the rx descriptor.
			 */
			ffec_setup_rxdesc(sc, sc->rx_idx,
			    sc->rxdesc_ring[sc->rx_idx].buf_paddr);
		} else {
			/*
			 * Normal case: a good frame all in one buffer.
			 */
			ffec_rxfinish_onebuf(sc, len);
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}

	if (produced_empty_buffer) {
		WR4(sc, FEC_RDAR_REG, FEC_RDAR_RDAR);
	}
}
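
The loop above relies on two helpers that aren't shown: next_rxidx() advances a ring index with wraparound, and ffec_setup_rxdesc() hands a buffer back to the hardware. A minimal sketch of what they plausibly look like, assuming the RX_DESC_COUNT ring size and a FEC_RXDESC_WRAP flag (any name beyond those used above is an assumption, not the driver's confirmed code):

/*
 * Sketch (not the driver's verbatim code): ring-index advance and
 * descriptor re-arm helpers as used by ffec_rxfinish_locked().
 */
inline static uint32_t
next_rxidx(struct ffec_softc *sc, uint32_t curidx)
{

	return ((curidx == RX_DESC_COUNT - 1) ? 0 : curidx + 1);
}

static void
ffec_setup_rxdesc(struct ffec_softc *sc, int idx, bus_addr_t paddr)
{
	uint32_t nidx;

	/*
	 * Store the buffer's physical address, then mark the slot EMPTY so
	 * the hardware may fill it; the last slot also carries WRAP so the
	 * controller loops back to slot 0.  flags_len is written last,
	 * because setting EMPTY gives the descriptor to the hardware.
	 */
	nidx = next_rxidx(sc, idx);
	sc->rxdesc_ring[idx].buf_paddr = (uint32_t)paddr;
	sc->rxdesc_ring[idx].flags_len = FEC_RXDESC_EMPTY |
	    ((nidx == 0) ? FEC_RXDESC_WRAP : 0);
}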
Example #2
static void
ffec_txfinish_locked(struct ffec_softc *sc)
{
	struct ifnet *ifp;
	struct ffec_hwdesc *desc;
	struct ffec_bufmap *bmap;
	boolean_t retired_buffer;

	FFEC_ASSERT_LOCKED(sc);

	/* XXX Can't set PRE|POST right now, but we need both. */
	bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTREAD);
	ifp = sc->ifp;
	retired_buffer = false;
	while (sc->tx_idx_tail != sc->tx_idx_head) {
		desc = &sc->txdesc_ring[sc->tx_idx_tail];
		if (desc->flags_len & FEC_TXDESC_READY)
			break;
		retired_buffer = true;
		bmap = &sc->txbuf_map[sc->tx_idx_tail];
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		ffec_setup_txdesc(sc, sc->tx_idx_tail, 0, 0);
		sc->tx_idx_tail = next_txidx(sc, sc->tx_idx_tail);
	}

	/*
	 * If we retired any buffers, there will be open tx slots available in
	 * the descriptor ring; go try to start some new output.
	 */
	if (retired_buffer) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ffec_txstart_locked(sc);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_idx_tail == sc->tx_idx_head) {
		sc->tx_watchdog_count = 0;
	}
}
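
Here ffec_setup_txdesc(sc, idx, 0, 0) is evidently the "clear" form of the transmit descriptor setup: a zero address/length resets a retired slot and drops the in-flight count that the start path checks. next_txidx() would mirror next_rxidx() with TX_DESC_COUNT. A sketch consistent with those call sites (the exact FEC_TXDESC_* flag set and the txcount bookkeeping are assumptions):

/* Sketch: descriptor setup/clear for the tx ring (flag names assumed). */
static void
ffec_setup_txdesc(struct ffec_softc *sc, int idx, bus_addr_t paddr,
    uint32_t len)
{
	uint32_t flags;

	/* Addr/len of zero means we're clearing a retired descriptor. */
	if (paddr == 0 || len == 0) {
		flags = 0;
		--sc->txcount;
	} else {
		/* READY hands the slot to the hardware; L marks last buf. */
		flags = FEC_TXDESC_READY | FEC_TXDESC_L;
		++sc->txcount;
	}
	if (next_txidx(sc, idx) == 0)
		flags |= FEC_TXDESC_WRAP;	/* Last slot loops the ring. */

	sc->txdesc_ring[idx].buf_paddr = (uint32_t)paddr;
	/* flags_len last: setting READY gives the descriptor away. */
	sc->txdesc_ring[idx].flags_len = flags | len;
}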
Example #3
static void
ffec_txstart_locked(struct ffec_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m;
	int enqueued;

	FFEC_ASSERT_LOCKED(sc);

	if (!sc->link_is_up)
		return;

	ifp = sc->ifp;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	enqueued = 0;

	for (;;) {
		if (sc->txcount == (TX_DESC_COUNT-1)) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		if (ffec_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		BPF_MTAP(ifp, m);
		sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head);
		++enqueued;
	}

	if (enqueued != 0) {
		bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREWRITE);
		WR4(sc, FEC_TDAR_REG, FEC_TDAR_TDAR);
		bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTWRITE);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}
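
ffec_txstart_locked() only asserts the mutex, so the if_start entry point handed to the network stack would typically be a thin locking wrapper; a sketch, assuming the usual FFEC_LOCK/FFEC_UNLOCK mutex macros exist alongside FFEC_ASSERT_LOCKED:

/* Sketch: if_start entry point as a locking wrapper. */
static void
ffec_txstart(struct ifnet *ifp)
{
	struct ffec_softc *sc = ifp->if_softc;

	FFEC_LOCK(sc);
	ffec_txstart_locked(sc);
	FFEC_UNLOCK(sc);
}

Note also that the loop stops enqueueing at TX_DESC_COUNT - 1 outstanding buffers, keeping one slot permanently unused so that tx_idx_head == tx_idx_tail in ffec_txfinish_locked() unambiguously means an idle ring rather than a full one.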
Example #4
static void
ffec_tick(void *arg)
{
	struct ffec_softc *sc;
	struct ifnet *ifp;
	int link_was_up;

	sc = arg;

	FFEC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	/*
	 * Typical tx watchdog.  If this fires it indicates that we enqueued
	 * packets for output and never got a txdone interrupt for them.
	 * Maybe it's a missed interrupt somehow; just pretend we got one.
	 */
	if (sc->tx_watchdog_count > 0) {
		if (--sc->tx_watchdog_count == 0) {
			ffec_txfinish_locked(sc);
		}
	}

	/* Gather stats from hardware counters. */
	ffec_harvest_stats(sc);

	/* Check the media status. */
	link_was_up = sc->link_is_up;
	mii_tick(sc->mii_softc);
	if (sc->link_is_up && !link_was_up)
		ffec_txstart_locked(sc);

	/* Schedule another check one second from now. */
	callout_reset(&sc->ffec_callout, hz, ffec_tick, sc);
}
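
ffec_tick() asserts the lock but never takes it, and it re-arms itself via callout_reset(), so the callout must have been created against the same mutex and primed once when the interface was brought up. The conventional pattern would be something like this in the attach/init paths (sc->mtx is an assumed field name):

/*
 * Sketch: in attach, tie the callout to the driver mutex so the callout
 * subsystem holds the lock across every ffec_tick() invocation ...
 */
callout_init_mtx(&sc->ffec_callout, &sc->mtx, 0);

/* ... and at the end of the (locked) init path, arm the first tick. */
callout_reset(&sc->ffec_callout, hz, ffec_tick, sc);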
Example #5
static void
ffec_miibus_statchg(device_t dev)
{
	struct ffec_softc *sc;
	struct mii_data *mii;
	uint32_t ecr, rcr, tcr;

	/*
	 * Called by the MII bus driver when the PHY establishes link to set the
	 * MAC interface registers.
	 */

	sc = device_get_softc(dev);

	FFEC_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	ecr = RD4(sc, FEC_ECR_REG) & ~FEC_ECR_SPEED;
	rcr = RD4(sc, FEC_RCR_REG) & ~(FEC_RCR_RMII_10T | FEC_RCR_RMII_MODE |
	    FEC_RCR_RGMII_EN | FEC_RCR_DRT | FEC_RCR_FCE);
	tcr = RD4(sc, FEC_TCR_REG) & ~FEC_TCR_FDEN;

	rcr |= FEC_RCR_MII_MODE; /* Must always be on even for R[G]MII. */
	switch (sc->phy_conn_type) {
	case PHY_CONN_MII:
		break;
	case PHY_CONN_RMII:
		rcr |= FEC_RCR_RMII_MODE;
		break;
	case PHY_CONN_RGMII:
		rcr |= FEC_RCR_RGMII_EN;
		break;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		ecr |= FEC_ECR_SPEED;
		break;
	case IFM_100_TX:
		/* Neither FEC_ECR_SPEED nor FEC_RCR_RMII_10T set means 100TX. */
		break;
	case IFM_10_T:
		rcr |= FEC_RCR_RMII_10T;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		tcr |= FEC_TCR_FDEN;
	else
		rcr |= FEC_RCR_DRT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FLOW) != 0)
		rcr |= FEC_RCR_FCE;

	WR4(sc, FEC_RCR_REG, rcr);
	WR4(sc, FEC_TCR_REG, tcr);
	WR4(sc, FEC_ECR_REG, ecr);
}
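
All register access in these examples goes through RD4/WR4. In a driver of this shape they would typically be thin bus-space wrappers over the mapped register resource; a sketch, with mem_res as an assumed softc field:

/* Sketch: register accessors over the device's memory resource. */
#define	RD4(sc, off)		bus_read_4((sc)->mem_res, (off))
#define	WR4(sc, off, val)	bus_write_4((sc)->mem_res, (off), (val))

The read-modify-write sequences above depend on that: ECR, RCR, and TCR are each read, only the media-related bits are masked out, and everything else is written back unchanged across a media change.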