Example #1
/*
 * octeon_eth_tick_free
 *
 * => garbage collect send gather buffer / mbuf
 * => called at softclock
 */
void
octeon_eth_tick_free(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	int timo;
	int s;

	s = splnet();
	/* XXX */
	if (sc->sc_soft_req_cnt > 0) {
		octeon_eth_send_queue_flush_prefetch(sc);
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
		octeon_eth_send_queue_flush_sync(sc);
	}
	/* XXX */

	/* XXX ??? */
	timo = hz - (100 * sc->sc_ext_callback_cnt);
	if (timo < 10)
		timo = 10;
	timeout_add_msec(&sc->sc_tick_free_ch, 1000 * timo / hz);
	/* XXX */
	splx(s);
}
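The timo arithmetic above converts an adaptive tick count into the millisecond period passed to timeout_add_msec(): the more external-buffer callbacks are pending, the sooner the next garbage-collection run, clamped at a 10-tick floor. A minimal standalone sketch of that calculation, assuming hz = 100 (a common kernel tick rate); tick_free_period_msec() and the main() driver are illustrative only, not driver code:

#include <stdio.h>

/* Sketch only: mirrors the timo computation in octeon_eth_tick_free(). */
static int
tick_free_period_msec(int hz, int ext_callback_cnt)
{
	int timo = hz - (100 * ext_callback_cnt);

	if (timo < 10)
		timo = 10;		/* clamp: never faster than 10 ticks */
	return 1000 * timo / hz;	/* ticks -> milliseconds */
}

int
main(void)
{
	/*
	 * With hz = 100: no pending callbacks -> 1000 ms between GC runs;
	 * one pending callback already drives timo to the 10-tick floor,
	 * i.e. 100 ms.
	 */
	printf("%d ms\n", tick_free_period_msec(100, 0));	/* 1000 */
	printf("%d ms\n", tick_free_period_msec(100, 1));	/* 100 */
	return 0;
}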
Example #2
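/*
 * octeon_eth_send_queue_is_full
 *
 * => flush and report "full" once the count of un-freed transmit buffers
 *    (queue length + sc_hard_done_cnt) reaches GATHER_QUEUE_SIZE - 1
 * => compiles to "always 0" unless OCTEON_ETH_SEND_QUEUE_CHECK is defined
 * => descriptive header added for this listing in the style of example #1;
 *    it is not part of the driver source
 */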
int
octeon_eth_send_queue_is_full(struct octeon_eth_softc *sc)
{
#ifdef OCTEON_ETH_SEND_QUEUE_CHECK
	int64_t nofree_cnt;

	nofree_cnt = ml_len(&sc->sc_sendq) + sc->sc_hard_done_cnt;

	if (__predict_false(nofree_cnt == GATHER_QUEUE_SIZE - 1)) {
		octeon_eth_send_queue_flush(sc);
		return 1;
	}

#endif
	return 0;
}
Example #3
void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/*
	 * Performance tuning: pre-send the IOBDMA request.
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		goto last;

	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port)))
		goto last;

	for (;;) {
		octeon_eth_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent
		 * buffers and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			return;
		}
		/* XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/* XXX */
		if (ml_len(&sc->sc_sendq) > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed to transmit packet\n",
			    sc->sc_dev.dv_xname);
		}
		/* XXX */

		/*
		 * Pre-send the next IOBDMA request.
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

last:
	octeon_eth_send_queue_flush_fetch(sc);
}
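Every path through octeon_eth_start() pairs each octeon_eth_send_queue_flush_prefetch() with exactly one octeon_eth_send_queue_flush_fetch() before the next prefetch - including the early exits, which is what the goto last label guarantees. A minimal standalone sketch of that pairing invariant, with hypothetical names and asserts standing in for the real IOBDMA operations:

#include <assert.h>
#include <stdbool.h>

static bool prefetched;

static void
flush_prefetch(void)
{
	assert(!prefetched);	/* never post two prefetches in a row */
	prefetched = true;	/* IOBDMA request would be posted here */
}

static void
flush_fetch(void)
{
	assert(prefetched);	/* a prefetch must be outstanding */
	prefetched = false;	/* its result would be consumed here */
}

static void
start(int npackets)
{
	flush_prefetch();		/* posted on entry */
	while (npackets-- > 0) {
		flush_fetch();		/* consume at loop top */
		/* ... send one packet ... */
		flush_prefetch();	/* repost at loop bottom */
	}
	flush_fetch();			/* the "last:" label's job */
}

int
main(void)
{
	start(3);
	return 0;
}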
Example #4
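/*
 * octeon_eth_recv
 *
 * => receive one packet described by the POW work entry "work"
 * => on success, pass the mbuf to the stack via if_input() and return 0
 * => on error, count if_ierrors, free the work entry, and return 1
 * => descriptive header added for this listing in the style of example #1;
 *    it is not part of the driver source
 */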
int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;

	/* XXX */
	/*
	 * Performance tuning: pre-send the IOBDMA request.
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}

	/* XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	return 0;

drop:
	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX */

	return result;
}
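octeon_eth_recv() piggybacks transmit-side garbage collection on the receive path, but only once sc_soft_req_cnt exceeds sc_soft_req_thresh, so cheap receives stay cheap. A toy userland sketch of that amortized-flush pattern (all names hypothetical, not driver code):

#include <stdio.h>

struct softc {
	int soft_req_cnt;	/* outstanding transmit requests */
	int soft_req_thresh;	/* flush only past this point */
};

static void
flush(struct softc *sc)
{
	printf("flushing %d requests\n", sc->soft_req_cnt);
	sc->soft_req_cnt = 0;
}

static void
recv_one(struct softc *sc)
{
	/* ... receive work would happen here ... */
	if (sc->soft_req_cnt > sc->soft_req_thresh)
		flush(sc);	/* amortized transmit cleanup */
}

int
main(void)
{
	struct softc sc = { 0, 4 };
	int i;

	for (i = 0; i < 10; i++) {
		sc.soft_req_cnt++;	/* pretend transmits happen too */
		recv_one(&sc);		/* flushes only every 5th call */
	}
	return 0;
}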
Example #5
void
octeon_eth_start(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct mbuf *m;

	/*
	 * Performance tuning: pre-send the IOBDMA request.
	 */
	octeon_eth_send_queue_flush_prefetch(sc);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		goto last;

	/* XXX assume that OCTEON doesn't buffer packets */
	if (__predict_false(!cn30xxgmx_link_status(sc->sc_gmx_port))) {
		/* dequeue and drop them */
		while (1) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
#if 0
#ifdef DDB
			m_print(m, "cd", printf);
#endif
			printf("%s: drop\n", sc->sc_dev.dv_xname);
#endif
			m_freem(m);
			IF_DROP(&ifp->if_snd);
		}
		goto last;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (__predict_false(m == NULL))
			break;

		octeon_eth_send_queue_flush_fetch(sc); /* XXX */

		/*
		 * XXXSEIL
		 * If no free send buffer is available, free all the sent
		 * buffers and bail out.
		 */
		if (octeon_eth_send_queue_is_full(sc)) {
			return;
		}
		/* XXX */

		IFQ_DEQUEUE(&ifp->if_snd, m);

		OCTEON_ETH_TAP(ifp, m, BPF_DIRECTION_OUT);

		/* XXX */
		if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh)
			octeon_eth_send_queue_flush(sc);
		if (octeon_eth_send(sc, m)) {
			ifp->if_oerrors++;
			m_freem(m);
			log(LOG_WARNING,
			    "%s: failed to transmit packet\n",
			    sc->sc_dev.dv_xname);
		} else {
			sc->sc_soft_req_cnt++;
		}
		if (sc->sc_flush)
			octeon_eth_send_queue_flush_sync(sc);
		/* XXX */

		/*
		 * Pre-send the next IOBDMA request.
		 */
		octeon_eth_send_queue_flush_prefetch(sc);
	}

/*
 * XXXSEIL
 * Don't schedule the send-buffer-free callout every time - those buffers
 * are freed by the "free tick".  This makes some traffic, such as NFS,
 * slower, but that normally doesn't happen on SEIL.
 */
#ifdef OCTEON_ETH_USENFS
	if (__predict_false(sc->sc_ext_callback_cnt > 0)) {
		int timo;

		/* ??? */
		timo = hz - (100 * sc->sc_ext_callback_cnt);
		if (timo < 10)
			timo = 10;
		callout_schedule(&sc->sc_tick_free_ch, timo);
	}
#endif

last:
	octeon_eth_send_queue_flush_fetch(sc);
}
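Unlike example #3, this version of octeon_eth_start() uses IFQ_POLL before IFQ_DEQUEUE: it peeks at the next packet, checks that the hardware send queue has room, and only commits to dequeuing once transmission is certain, so nothing is dropped when octeon_eth_send_queue_is_full() bails out. A toy illustration of that peek-then-dequeue idiom (queue type and names hypothetical, not the kernel macros):

#include <stdio.h>

struct pktq {
	const char **pkts;
	int head, len;
};

static const char *
poll_pkt(struct pktq *q)		/* IFQ_POLL analogue: peek only */
{
	return q->head < q->len ? q->pkts[q->head] : NULL;
}

static const char *
dequeue_pkt(struct pktq *q)		/* IFQ_DEQUEUE analogue */
{
	return q->head < q->len ? q->pkts[q->head++] : NULL;
}

static int
send_queue_is_full(int inflight)
{
	return inflight >= 2;		/* pretend only 2 slots exist */
}

int
main(void)
{
	const char *names[] = { "a", "b", "c" };
	struct pktq q = { names, 0, 3 };
	int inflight = 0;

	for (;;) {
		if (poll_pkt(&q) == NULL)
			break;		/* nothing left to send */
		if (send_queue_is_full(inflight))
			break;		/* packet "c" stays queued, not lost */
		printf("sent %s\n", dequeue_pkt(&q));
		inflight++;
	}
	return 0;
}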