Example #1
int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	void *data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		data = &work[4 + sc->sc_ip_offset / sizeof(uint64_t)];
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_XKPHYS(word3 & PIP_WQE_WORD3_ADDR, CCA_CACHED);
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
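For illustration, here is a small standalone sketch of the work-queue-entry decoding that octeon_eth_recv_mbuf() relies on: the packet length sits in the upper bits of word1 and the receive buffer address in word3, rounded down to a 2 KB boundary to find the buffer start. The DEMO_* shift and mask values are assumptions inferred from the shifts used above, not the real PIP_WQE_* definitions, and the sample words are made up.

/*
 * Userland sketch of the word1/word3 decoding; values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_WORD1_LEN_SHIFT	48			/* assumed: length in bits 63:48 */
#define DEMO_WORD3_ADDR_MASK	0x000000ffffffffffULL	/* assumed: address field of word3 */

int
main(void)
{
	uint64_t word1 = (uint64_t)64 << DEMO_WORD1_LEN_SHIFT;	/* a 64-byte frame */
	uint64_t word3 = 0x0000000412345680ULL;			/* made-up buffer address */
	size_t len;
	uint64_t addr, start;

	len = (size_t)(word1 >> DEMO_WORD1_LEN_SHIFT);
	addr = word3 & DEMO_WORD3_ADDR_MASK;
	start = addr & ~(uint64_t)(2048 - 1);	/* 2 KB receive buffer base */

	printf("len=%zu addr=%#llx buffer start=%#llx\n",
	    len, (unsigned long long)addr, (unsigned long long)start);
	return 0;
}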
Example #2
void
octeon_eth_send_queue_flush_fetch(struct octeon_eth_softc *sc)
{
#ifndef OCTEON_ETH_DEBUG
	if (!sc->sc_prefetch)
		return;
#endif
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 1);
	sc->sc_hard_done_cnt = cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	OCTEON_ETH_KASSERT(sc->sc_hard_done_cnt <= 0);
	sc->sc_prefetch = 0;
}
Example #3
int
octeon_eth_send_makecmd_gbuf(struct octeon_eth_softc *sc, struct mbuf *m0,
    uint64_t *gbuf, int *rsegs)
{
	struct mbuf *m;
	int segs = 0;
	uint32_t laddr, rlen, nlen;

	for (m = m0; m != NULL; m = m->m_next) {

		if (__predict_false(m->m_len == 0))
			continue;

#if 0	
		OCTEON_ETH_KASSERT(((uint32_t)m->m_data & (PAGE_SIZE - 1))
		   == (kvtophys((vaddr_t)m->m_data) & (PAGE_SIZE - 1)));
#endif

		/*
		 * Compute the offset of this chunk within its 4 KB page.
		 */
		laddr = (uintptr_t)m->m_data & (PAGE_SIZE - 1);

		if (laddr + m->m_len > PAGE_SIZE) {
			/* XXX */
			rlen = PAGE_SIZE - laddr;
			nlen = m->m_len - rlen;
			*(gbuf + segs) = octeon_eth_send_makecmd_w1(rlen,
			    KVTOPHYS(m->m_data));
			segs++;
			if (segs > 63) {
				return 1;
			}
			/* XXX */
		} else {
			rlen = 0;
			nlen = m->m_len;
		}

		*(gbuf + segs) = octeon_eth_send_makecmd_w1(nlen,
		    KVTOPHYS((caddr_t)m->m_data + rlen));
		segs++;
		if (segs > 63) {
			return 1;
		}
	}

	OCTEON_ETH_KASSERT(m == NULL);

	*rsegs = segs;

	return 0;
}
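The page-split handling above is easier to see in isolation. The following standalone sketch reproduces the same arithmetic: a buffer whose virtual range crosses a 4 KB page boundary is emitted as two gather segments, because the two pages need not be physically contiguous. fake_phys() is only a placeholder for the kernel's KVTOPHYS(), and the addresses are invented.

/*
 * Standalone sketch of the page-boundary split; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096u

static uint64_t
fake_phys(uintptr_t va)
{
	return (uint64_t)va;	/* identity mapping, for illustration only */
}

static void
emit_segments(uintptr_t data, uint32_t len)
{
	uint32_t laddr = (uint32_t)(data & (DEMO_PAGE_SIZE - 1));
	uint32_t rlen = 0, nlen = len;

	if (laddr + len > DEMO_PAGE_SIZE) {
		/* first segment: up to the end of the current page */
		rlen = DEMO_PAGE_SIZE - laddr;
		nlen = len - rlen;
		printf("seg: pa=%#llx len=%u\n",
		    (unsigned long long)fake_phys(data), rlen);
	}
	/* remaining (or whole) segment */
	printf("seg: pa=%#llx len=%u\n",
	    (unsigned long long)fake_phys(data + rlen), nlen);
}

int
main(void)
{
	emit_segments(0x10000f00, 512);	/* crosses a page boundary: two segments */
	emit_segments(0x10002000, 512);	/* fits in one page: one segment */
	return 0;
}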
Example #4
void
octeon_eth_send_queue_del(struct octeon_eth_softc *sc, struct mbuf **rm,
    uint64_t **rgbuf)
{
	struct mbuf *m;
	m = ml_dequeue(&sc->sc_sendq);
	OCTEON_ETH_KASSERT(m != NULL);

	*rm = m;
	*rgbuf = m->m_pkthdr.ph_cookie;

	if (m->m_ext.ext_free != NULL) {
		sc->sc_ext_callback_cnt--;
		OCTEON_ETH_KASSERT(sc->sc_ext_callback_cnt >= 0);
	}
}
Example #5
void
octeon_eth_send_queue_flush_prefetch(struct octeon_eth_softc *sc)
{
	OCTEON_ETH_KASSERT(sc->sc_prefetch == 0);
	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, 0);
	sc->sc_prefetch = 1;
}
Example #6
void
octeon_eth_send_queue_flush_sync(struct octeon_eth_softc *sc)
{
	if (sc->sc_flush == 0)
		return;

	OCTEON_ETH_KASSERT(sc->sc_flush > 0);

	/* XXX */
	cn30xxfau_op_inc_read_8(&sc->sc_fau_done);
	sc->sc_soft_req_cnt -= sc->sc_flush;
	OCTEON_ETH_KASSERT(sc->sc_soft_req_cnt >= 0);
	/* XXX */

	sc->sc_flush = 0;
}
Example #7
int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (!(ifp->if_flags & IFF_RUNNING))
		goto drop;

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, &m->m_pkthdr.csum_flags);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	return 0;

drop:
	octeon_eth_buf_free_work(sc, work, word2);
	return 1;
}
Example #8
void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	m->m_pkthdr.ph_cookie = gbuf;
	ml_enqueue(&sc->sc_sendq, m);

	if (m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;
}
Example #9
void
octeon_eth_recv_intr(void *data, uint64_t *work)
{
	struct octeon_eth_softc *sc;
	int port;

	OCTEON_ETH_KASSERT(work != NULL);

	port = (work[1] & PIP_WQE_WORD1_IPRT) >> 42;

	OCTEON_ETH_KASSERT(port < GMX_PORT_NUNITS);

	sc = octeon_eth_gsc[port];

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(port == sc->sc_port);

	/* XXX process all work queue entries anyway */

	(void)octeon_eth_recv(sc, work);
}
Example #10
int
octeon_eth_send_cmd(struct octeon_eth_softc *sc, uint64_t pko_cmd_w0,
    uint64_t pko_cmd_w1)
{
	uint64_t *cmdptr;
	int result = 0;

	cmdptr = (uint64_t *)PHYS_TO_XKPHYS(sc->sc_cmdptr.cmdptr, CCA_CACHED);
	cmdptr += sc->sc_cmdptr.cmdptr_idx;

	OCTEON_ETH_KASSERT(cmdptr != NULL);

	*cmdptr++ = pko_cmd_w0;
	*cmdptr++ = pko_cmd_w1;

	OCTEON_ETH_KASSERT(sc->sc_cmdptr.cmdptr_idx + 2 <=
	    FPA_COMMAND_BUFFER_POOL_NWORDS - 1);

	if (sc->sc_cmdptr.cmdptr_idx + 2 == FPA_COMMAND_BUFFER_POOL_NWORDS - 1) {
		paddr_t buf;

		buf = cn30xxfpa_buf_get_paddr(octeon_eth_fb_cmd);
		if (buf == 0) {
			log(LOG_WARNING,
			    "%s: cannot allocate command buffer from free pool allocator\n",
			    sc->sc_dev.dv_xname);
			result = 1;
			goto done;
		}
		*cmdptr++ = buf;
		sc->sc_cmdptr.cmdptr = (uint64_t)buf;
		sc->sc_cmdptr.cmdptr_idx = 0;
	} else {
		sc->sc_cmdptr.cmdptr_idx += 2;
	}

	cn30xxpko_op_doorbell_write(sc->sc_port, sc->sc_port, 2);

done:
	return result;
}
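A quick way to see the command-buffer bookkeeping above: two 64-bit words are written per command, and when the next pair would leave only the final word free, that word is used to chain to a freshly allocated buffer and the index restarts at zero. The sketch below uses an arbitrary small DEMO_NWORDS chosen so the last word is reserved for the chain pointer; the real size comes from FPA_COMMAND_BUFFER_POOL_NWORDS.

/*
 * Toy model of the command-buffer index wrap; sizes are illustrative.
 */
#include <stdio.h>

#define DEMO_NWORDS	7	/* assumed: three command pairs plus one chain word */

int
main(void)
{
	unsigned idx = 0;
	int cmd;

	for (cmd = 0; cmd < 6; cmd++) {
		printf("cmd %d written at words %u-%u\n", cmd, idx, idx + 1);
		if (idx + 2 == DEMO_NWORDS - 1) {
			/* last word chains to a new buffer; restart at 0 */
			printf("  chain word at %u, switch to a new buffer\n",
			    idx + 2);
			idx = 0;
		} else {
			idx += 2;
		}
	}
	return 0;
}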
Example #11
void
octeon_eth_send_queue_flush(struct octeon_eth_softc *sc)
{
	const int64_t sent_count = sc->sc_hard_done_cnt;
	int i;

	OCTEON_ETH_KASSERT(sc->sc_flush == 0);
	OCTEON_ETH_KASSERT(sent_count <= 0);

	for (i = 0; i < 0 - sent_count; i++) {
		struct mbuf *m;
		uint64_t *gbuf;

		octeon_eth_send_queue_del(sc, &m, &gbuf);

		cn30xxfpa_buf_put_paddr(octeon_eth_fb_sg, XKPHYS_TO_PHYS(gbuf));

		m_freem(m);
	}

	cn30xxfau_op_inc_fetch_8(&sc->sc_fau_done, i);
	sc->sc_flush = i;
}
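The completion accounting above hinges on the FAU "done" counter going negative as the hardware finishes transmissions: -sent_count packets can be reclaimed, and the counter is then incremented back by that amount. This toy sketch mimics the bookkeeping with plain integers standing in for the FAU register and the send queue; the numbers are arbitrary.

/*
 * Toy model of the negative-counter reclaim loop; not driver code.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t fau_done = -3;	/* three transmissions completed */
	int queued = 5;		/* packets waiting on the software send queue */
	int64_t sent_count = fau_done;
	int i;

	for (i = 0; i < 0 - sent_count; i++)
		queued--;	/* dequeue and free one completed packet */

	fau_done += i;		/* give the credits back to the counter */
	printf("reclaimed %d, still queued %d, fau_done %lld\n",
	    i, queued, (long long)fau_done);
	return 0;
}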
Example #12
int
octeon_eth_recv_mbuf(struct octeon_eth_softc *sc, uint64_t *work,
    struct mbuf **rm)
{
	struct mbuf *m;
	void (*ext_free)(caddr_t, u_int, void *);
	void *ext_buf;
	size_t ext_size;
	caddr_t data;
	uint64_t word1 = work[1];
	uint64_t word2 = work[2];
	uint64_t word3 = work[3];

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return 1;
	OCTEON_ETH_KASSERT(m != NULL);

	if ((word2 & PIP_WQE_WORD2_IP_BUFS) == 0) {
		/* Dynamic short */
		ext_free = octeon_eth_buf_ext_free_m;
		ext_buf = &work[4];
		ext_size = 96;

		/*
		 * If the packet is IP, the hardware has padded it so that the
		 * IP source address starts on the next 64-bit word boundary.
		 */
		data = (caddr_t)&work[4] + ETHER_ALIGN;
		if (!ISSET(word2, PIP_WQE_WORD2_IP_NI) &&
		    !ISSET(word2, PIP_WQE_WORD2_IP_V6))
			data += 4;
	} else {
		vaddr_t addr;
		vaddr_t start_buffer;

		addr = PHYS_TO_XKPHYS(word3 & PIP_WQE_WORD3_ADDR, CCA_CACHED);
		start_buffer = addr & ~(2048 - 1);

		ext_free = octeon_eth_buf_ext_free_ext;
		ext_buf = (void *)start_buffer;
		ext_size = 2048;

		data = (void *)addr;
	}

	MEXTADD(m, ext_buf, ext_size, 0, ext_free, work);
	OCTEON_ETH_KASSERT(ISSET(m->m_flags, M_EXT));

	m->m_data = data;
	m->m_len = m->m_pkthdr.len = (word1 & PIP_WQE_WORD1_LEN) >> 48;
#if 0
	/*
	 * not readonly buffer
	 */
	m->m_flags |= M_EXT_RW;
#endif

	*rm = m;

	OCTEON_ETH_KASSERT(*rm != NULL);

	return 0;
}
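As a standalone illustration of the inline-data offset computed above: the payload begins ETHER_ALIGN bytes into the work-queue entry's inline area, plus four more bytes for IPv4 packets, because the hardware pads those so the IP source address lands on a 64-bit boundary. The DEMO_* flag bits below are placeholders, not the real PIP_WQE_WORD2_* values.

/*
 * Sketch of the inline-data offset computation; flag bits are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ETHER_ALIGN	2
#define DEMO_IP_NI		(1ULL << 0)	/* assumed: packet is not IP */
#define DEMO_IP_V6		(1ULL << 1)	/* assumed: packet is IPv6 */

static unsigned
inline_data_offset(uint64_t word2)
{
	unsigned off = DEMO_ETHER_ALIGN;

	/* IPv4 packets carry 4 extra pad bytes inserted by the hardware */
	if (!(word2 & DEMO_IP_NI) && !(word2 & DEMO_IP_V6))
		off += 4;
	return off;
}

int
main(void)
{
	printf("IPv4 offset:   %u\n", inline_data_offset(0));
	printf("IPv6 offset:   %u\n", inline_data_offset(DEMO_IP_V6));
	printf("non-IP offset: %u\n", inline_data_offset(DEMO_IP_NI));
	return 0;
}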
Example #13
int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;

	/* XXX */
	/*
	 * Performance tuning: issue the asynchronous IOBDMA request early
	 * so that its result is already available when the send queue is
	 * flushed below.
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}

	/* XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	return 0;

drop:
	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX */

	return result;
}
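This receive variant interleaves the send-queue bookkeeping with packet processing: the asynchronous FAU fetch is issued up front and its result is only consumed after the mbuf has been built, hiding the read latency. The sketch below shows the same pattern with stub functions; start_async_read() and finish_async_read() are placeholders standing in for cn30xxfau_op_inc_fetch_8() and cn30xxfau_op_inc_read_8(), not real APIs.

/*
 * Illustration of the prefetch/consume ordering; everything is stubbed.
 */
#include <stdio.h>

static void
start_async_read(void)
{
	printf("issue asynchronous counter fetch\n");
}

static long
finish_async_read(void)
{
	printf("consume fetched counter value\n");
	return -1;	/* pretend one transmission completed */
}

static void
process_packet(void)
{
	printf("build mbuf, check offload flags, enqueue for the stack\n");
}

int
main(void)
{
	start_async_read();	/* prefetch: kick off the slow read */
	process_packet();	/* overlap it with useful work */
	printf("completed transmissions: %ld\n", finish_async_read());
	return 0;
}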