Example #1
void
pairstart(struct ifnet *ifp)
{
	struct pair_softc	*sc = (struct pair_softc *)ifp->if_softc;
	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*pairedifp;
	struct mbuf		*m;

	pairedifp = if_get(sc->sc_pairedif);

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */

		ifp->if_opackets++;
		if (pairedifp != NULL) {
			if (m->m_flags & M_PKTHDR)
				m_resethdr(m);
			ml_enqueue(&ml, m);
		} else
			m_freem(m);
	}

	if (pairedifp != NULL) {
		if_input(pairedifp, &ml);
		if_put(pairedifp);
	}
}
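A note on the shape shared by these examples: completed packets are collected
on a stack-allocated struct mbuf_list with ml_enqueue(), and the whole batch
is handed to the network stack with a single if_input() call. Below is a
minimal sketch of that skeleton; myrx_softc and myrx_pull_packet() are
hypothetical stand-ins for the driver-specific parts.

void
myrx_intr(struct myrx_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;

	/* gather every completed packet off the hardware */
	while ((m = myrx_pull_packet(sc)) != NULL)
		ml_enqueue(&ml, m);

	/* deliver the whole batch to the stack in one call */
	if_input(ifp, &ml);
}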
Example #2
/*
 * Pass a packet up to the higher levels.
 */
void
egread(struct eg_softc *sc, caddr_t buf, int len)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHER_MAX_LEN) {
		printf("%s: invalid packet size %d; dropping\n",
		    sc->sc_dev.dv_xname, len);
		ifp->if_ierrors++;
		return;
	}

	/* Pull packet off interface. */
	m = egget(sc, buf, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);
}
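egget() (not shown) copies len bytes from the card's buffer into a fresh mbuf
chain. A helper with the same contract could plausibly be built on
m_devget(9), as the vnet example later in this section does; this is a sketch
under that assumption, not the real egget().

struct mbuf *
eg_get_sketch(caddr_t buf, int len)
{
	/*
	 * m_devget() copies a driver buffer into a new mbuf chain;
	 * the ETHER_ALIGN offset leaves the IP header 32-bit aligned.
	 * It returns NULL when mbufs are exhausted, which the caller
	 * counts as if_ierrors.
	 */
	return (m_devget(buf, len, ETHER_ALIGN));
}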
Example #3
void
octeon_eth_send_queue_add(struct octeon_eth_softc *sc, struct mbuf *m,
    uint64_t *gbuf)
{
	OCTEON_ETH_KASSERT(m->m_flags & M_PKTHDR);

	m->m_pkthdr.ph_cookie = gbuf;
	ml_enqueue(&sc->sc_sendq, m);

	if (m->m_ext.ext_free != NULL)
		sc->sc_ext_callback_cnt++;
}
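The queue built up here is drained elsewhere with the other half of the
mbuf_list(9) API. A minimal sketch of such a drain loop; the per-packet
completion work is left as a comment since it is driver-specific.

void
octeon_eth_send_queue_drain_sketch(struct octeon_eth_softc *sc)
{
	struct mbuf *m;

	while ((m = ml_dequeue(&sc->sc_sendq)) != NULL) {
		/* per-packet completion work goes here */
		m_freem(m);
	}
}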
Example #4
int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (!(ifp->if_flags & IFF_RUNNING))
		goto drop;

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, &m->m_pkthdr.csum_flags);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	return 0;

drop:
	octeon_eth_buf_free_work(sc, work, word2);
	return 1;
}
Example #5
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
#if NVLAN > 0
	uint32_t vtag;
#endif
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
#if NVLAN > 0
			vtag = letoh32(desc64->physaddr[1]);
#endif
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		mnew = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->map);

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);

			/* try to reload the old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
			    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* very unlikely that it will fail.. */
				panic("%s: could not load old rx mbuf",
				    sc->sc_dev.dv_xname);
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->map->dm_segs[0].ds_addr;

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NVLAN > 0
		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		/* return the descriptor to the chip */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(MCLBYTES);
			desc64->flags = htole16(NFE_RX_READY);
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(MCLBYTES);
			desc32->flags = htole16(NFE_RX_READY);
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}

	if_input(ifp, &ml);
}
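The long comment in the middle of this function describes a discipline most
of these receive rings share: allocate (and DMA-load) the replacement cluster
before surrendering the current one, so a failure costs one packet but never
a ring slot. Reduced to a sketch, with hypothetical mydrv_* names and the
DMA map handling elided:

int
mydrv_rx_slot(struct mydrv_rx_data *data, struct mbuf_list *ml, int len)
{
	struct mbuf *m, *mnew;

	/* allocate the replacement first */
	mnew = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (mnew == NULL)
		return (ENOBUFS);	/* drop; old buffer stays in the ring */
	mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;

	/* only now take the received mbuf out of the ring */
	m = data->m;
	data->m = mnew;

	m->m_pkthdr.len = m->m_len = len;
	ml_enqueue(ml, m);
	return (0);
}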
Example #6
void
cdcef_rxeof(struct usbf_xfer *xfer, void *priv,
    usbf_status status)
{
	struct cdcef_softc	*sc = priv;
	int total_len = 0;
	struct ifnet		*ifp = GET_IFP(sc);
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct mbuf		*m = NULL;
	int			s;

#if 0
	printf("cdcef_rxeof: xfer=%p, priv=%p, %s\n", xfer, priv,
	    usbf_errstr(status));
#endif

	if (status != USBF_NORMAL_COMPLETION) {
		if (status == USBF_NOT_STARTED || status == USBF_CANCELLED)
			return;
		if (sc->sc_rxeof_errors == 0)
			printf("%s: usb error on rx: %s\n",
			    DEVNAME(sc), usbf_errstr(status));
		/* XXX - no stalls on client */
		if (sc->sc_rxeof_errors++ > 10) {
			printf("%s: too many errors, disabling\n",
			    DEVNAME(sc));
		}
		goto done;
	}
	sc->sc_rxeof_errors = 0;

	/* upon first incoming packet we know the host is listening */
	if (sc->sc_listening == 0) {
		sc->sc_listening = 1;
	}

	usbf_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	/* total_len -= 4; Strip off CRC added for Zaurus - XXX */
	if (total_len <= 1)
		goto done;

	if (total_len < sizeof(struct ether_header)) {
		ifp->if_ierrors++;
		goto done;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		m = cdcef_newbuf();
		if (m == NULL) {
			/* message? */
			ifp->if_ierrors++;
			goto done;
		}

		m->m_pkthdr.len = m->m_len = total_len;
		bcopy(sc->sc_buffer_out, mtod(m, char *), total_len);

		ml_enqueue(&ml, m);
	}

	s = splnet();
	if_input(ifp, &ml);
	splx(s);

done:
	/* Setup the next receive transfer. */
	usbf_setup_xfer(xfer, sc->sc_pipe_out, sc, sc->sc_buffer_out,
	    CDCEF_BUFSZ, USBF_SHORT_XFER_OK, 0, cdcef_rxeof);
	usbf_transfer(xfer);
}
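cdcef_newbuf() (not shown) only needs to produce an empty cluster mbuf. A
plausible sketch using the usual MGETHDR/MCLGET pairing; the real function
may differ in detail.

struct mbuf *
cdcef_newbuf_sketch(void)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);		/* leave the IP header aligned */
	return (m);
}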
Example #7
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
ste_rxeof(struct ste_softc *sc)
{
	struct mbuf		*m;
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*ifp;
	struct ste_chain_onefrag	*cur_rx;
	int			total_len = 0, count = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	while ((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
	    & STE_RXSTAT_DMADONE) {
		if ((STE_RX_LIST_CNT - count) < 3)
			break;

		cur_rx = sc->ste_cdata.ste_rx_head;
		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set, which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & STE_RXSTAT_DMADONE)) {
			printf("%s: bad receive status -- packet dropped\n",
			    sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		m->m_pkthdr.len = m->m_len = total_len;

		ml_enqueue(&ml, m);

		cur_rx->ste_ptr->ste_status = 0;
		count++;
	}

	if_input(ifp, &ml);
}
Example #8
void
vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vnet_desc_msg *dm = (struct vnet_desc_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	caddr_t buf;
	paddr_t pa;
	psize_t nbytes;
	u_int cons;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		nbytes = roundup(dm->nbytes, 8);

		if (dm->nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
			pool_put(&sc->sc_pool, buf);
			ifp->if_ierrors++;
			goto skip;
		}

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
		    dm->cookie[0].addr, pa, nbytes, &nbytes);
		if (err != H_EOK) {
			pool_put(&sc->sc_pool, buf);
			ifp->if_ierrors++;
			goto skip;
		}

		/* Stupid OBP doesn't align properly. */
		m = m_devget(buf, dm->nbytes, ETHER_ALIGN);
		pool_put(&sc->sc_pool, buf);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* Pass it on. */
		ml_enqueue(&ml, m);
		if_input(ifp, &ml);

	skip:
		dm->tag.stype = VIO_SUBTYPE_ACK;
		dm->tag.sid = sc->sc_local_sid;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DESC_DATA\n"));

		if (dm->desc_handle != sc->sc_tx_cons) {
			printf("out of order\n");
			return;
		}

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);

		map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
		atomic_dec_int(&map->lm_count);

		pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
		ifp->if_opackets++;

		sc->sc_tx_cons++;
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DESC_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DESC_DATA\n", tag->stype));
		break;
	}
}
Example #9
void
vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	paddr_t pa;
	psize_t nbytes;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
	{
		struct vnet_desc desc;
		uint64_t cookie;
		paddr_t desc_pa;
		int idx, ack_end_idx = -1;
		struct mbuf_list ml = MBUF_LIST_INITIALIZER();

		idx = dm->start_idx;
		for (;;) {
			cookie = sc->sc_peer_dring_cookie.addr;
			cookie += idx * sc->sc_peer_desc_size;
			nbytes = sc->sc_peer_desc_size;
			pmap_extract(pmap_kernel(), (vaddr_t)&desc, &desc_pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("hv_ldc_copy_in %d\n", err);
				break;
			}

			if (desc.hdr.dstate != VIO_DESC_READY)
				break;

			if (desc.nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
				ifp->if_ierrors++;
				goto skip;
			}

			m = MCLGETI(NULL, M_DONTWAIT, NULL, desc.nbytes);
			if (!m)
				break;
			m->m_len = m->m_pkthdr.len = desc.nbytes;
			nbytes = roundup(desc.nbytes + VNET_ETHER_ALIGN, 8);

			pmap_extract(pmap_kernel(), (vaddr_t)m->m_data, &pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
			    desc.cookie[0].addr, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				m_freem(m);
				goto skip;
			}
			m->m_data += VNET_ETHER_ALIGN;

			ml_enqueue(&ml, m);

		skip:
			desc.hdr.dstate = VIO_DESC_DONE;
			nbytes = sc->sc_peer_desc_size;
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK)
				printf("hv_ldc_copy_out %d\n", err);

			ack_end_idx = idx;
			if (++idx == sc->sc_peer_dring_nentries)
				idx = 0;
		}

		if_input(ifp, &ml);

		if (ack_end_idx == -1) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
		} else {
			dm->tag.stype = VIO_SUBTYPE_ACK;
			dm->end_idx = ack_end_idx;
		}
		dm->tag.sid = sc->sc_local_sid;
		dm->proc_state = VIO_DP_STOPPED;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;
	}

	case VIO_SUBTYPE_ACK:
	{
		struct ldc_map *map = sc->sc_lm;
		u_int cons, count;

		sc->sc_peer_state = dm->proc_state;

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
			atomic_dec_int(&map->lm_count);

			pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
			ifp->if_opackets++;

			sc->sc_vd->vd_desc[cons].hdr.dstate = VIO_DESC_FREE;
			sc->sc_tx_cons++;
			cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		}

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count > 0 && sc->sc_peer_state != VIO_DP_ACTIVE)
			vnet_send_dring_data(sc, cons);

		KERNEL_LOCK();
		if (count < (sc->sc_vd->vd_nentries - 1))
			ifp->if_flags &= ~IFF_OACTIVE;
		if (count == 0)
			ifp->if_timer = 0;

		vnet_start(ifp);
		KERNEL_UNLOCK();
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		sc->sc_peer_state = VIO_DP_STOPPED;
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}
Example #10
int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;

	/* XXX */
	/*
	 * performance tuning
	 * presend iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}

	/* XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	return 0;

drop:
	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX */

	return result;
}
Example #11
int
bmac_rint(void *v)
{
	struct bmac_softc *sc = v;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	dbdma_command_t *cmd;
	int status, resid, count, datalen;
	int i, n;
	void *data;
#ifdef BMAC_DEBUG
	printf("bmac_rint() called\n");
#endif

	i = sc->sc_rxlast;
	for (n = 0; n < BMAC_RXBUFS; n++, i++) {
		if (i == BMAC_RXBUFS)
			i = 0;
		cmd = &sc->sc_rxcmd[i];
		status = dbdma_ld16(&cmd->d_status);
		resid = dbdma_ld16(&cmd->d_resid);

#ifdef BMAC_DEBUG
		if (status != 0 && status != 0x8440 && status != 0x9440)
			printf("bmac_rint status = 0x%x\n", status);
#endif

		if ((status & DBDMA_CNTRL_ACTIVE) == 0)	/* 0x9440 | 0x8440 */
			continue;
		count = dbdma_ld16(&cmd->d_count);
		datalen = count - resid;		/* 2 == framelen */
		if (datalen < sizeof(struct ether_header)) {
			printf("%s: short packet len = %d\n",
				ifp->if_xname, datalen);
			goto next;
		}
		DBDMA_BUILD_CMD(cmd, DBDMA_CMD_STOP, 0, 0, 0, 0);
		data = sc->sc_rxbuf + BMAC_BUFLEN * i;

		/* XXX Sometimes bmac reads one extra byte. */
		if (datalen == ETHER_MAX_LEN + 1)
			datalen--;

		/* Trim the CRC. */
		datalen -= ETHER_CRC_LEN;

		m = bmac_get(sc, data, datalen);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto next;
		}

		ml_enqueue(&ml, m);

next:
		DBDMA_BUILD_CMD(cmd, DBDMA_CMD_IN_LAST, 0, DBDMA_INT_ALWAYS,
			DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

		cmd->d_status = 0;
		cmd->d_resid = 0;
		sc->sc_rxlast = i + 1;
	}
	bmac_mediachange(ifp);

	dbdma_continue(sc->sc_rxdma);

	if_input(ifp, &ml);
	return (1);
}
Example #12
int
cpsw_rxintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ac.ac_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	bus_dmamap_t dm;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int i;
	u_int len, off;

	for (;;) {
		KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);

		i = sc->sc_rxhead;
		dm = rdp->rx_dm[i];
		m = rdp->rx_mb[i];

		KASSERT(dm != NULL);
		KASSERT(m != NULL);

		cpsw_get_rxdesc(sc, i, &bd);

		if (bd.flags & CPDMA_BD_OWNER)
			break;

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			sc->sc_rxrun = false;
			goto done;
		}

		if ((bd.flags & (CPDMA_BD_SOP|CPDMA_BD_EOP)) !=
		    (CPDMA_BD_SOP|CPDMA_BD_EOP)) {
			/* Debugger(); */
		}

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_bdt, dm);

		if (cpsw_new_rxbuf(sc, i) != 0) {
			/* drop current packet, reuse buffer for new */
			ifp->if_ierrors++;
			goto next;
		}

		off = bd.bufoff;
		len = bd.pktlen;

		if (bd.flags & CPDMA_BD_PASSCRC)
			len -= ETHER_CRC_LEN;

		m->m_pkthdr.len = m->m_len = len;
		m->m_data += off;

		ml_enqueue(&ml, m);

next:
		sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
		if (bd.flags & CPDMA_BD_EOQ) {
			sc->sc_rxeoq = true;
			break;
		} else {
			sc->sc_rxeoq = false;
		}
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(0),
		    cpsw_rxdesc_paddr(sc, i));
	}

	if (sc->sc_rxeoq) {
		printf("rxeoq\n");
		/* Debugger(); */
	}

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR,
	    CPSW_INTROFF_RX);

done:
	if_input(ifp, &ml);

	return 1;
}
Example #13
/*
 * Interrupt handler.
 */
int
epic_intr(void *arg)
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;
	u_int len;

	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.len = m->m_len = len;

			ml_enqueue(&ml, m);
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	if_input(ifp, &ml);

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			if (txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	return (claimed);
}
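The copy-small branch above is a technique worth isolating: a packet that
fits in a header mbuf is copied, so the already-loaded DMA buffer can be
recycled immediately instead of being swapped out. A reduced sketch with a
hypothetical helper name:

struct mbuf *
rx_copy_small_sketch(struct mbuf *ring_m, u_int len)
{
	struct mbuf *m;

	if (len > MHLEN)
		return (NULL);	/* too big; caller swaps ring buffers */

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);

	/* copy the frame; ring_m stays loaded for the next packet */
	memcpy(mtod(m, caddr_t), mtod(ring_m, caddr_t), len);
	m->m_pkthdr.len = m->m_len = len;
	return (m);
}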
Example #14
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
kue_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct kue_chain	*c = priv;
	struct kue_softc	*sc = c->kue_sc;
	struct ifnet		*ifp = GET_IFP(sc);
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct mbuf		*m;
	int			total_len = 0;
	int			s;

	DPRINTFN(10,("%s: %s: enter status=%d\n", sc->kue_dev.dv_xname,
		     __func__, status));

	if (usbd_is_dying(sc->kue_udev))
		return;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;
		sc->kue_rx_errs++;
		if (usbd_ratecheck(&sc->kue_rx_notice)) {
			printf("%s: %u usb errors on rx: %s\n",
			    sc->kue_dev.dv_xname, sc->kue_rx_errs,
			    usbd_errstr(status));
			sc->kue_rx_errs = 0;
		}
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->kue_ep[KUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	DPRINTFN(10,("%s: %s: total_len=%d len=%d\n", sc->kue_dev.dv_xname,
		     __func__, total_len,
		     UGETW(mtod(c->kue_mbuf, u_int8_t *))));

	if (total_len <= 1)
		goto done;

	m = c->kue_mbuf;
	/* copy data to mbuf */
	memcpy(mtod(m, char *), c->kue_buf, total_len);

	/* No errors; receive the packet. */
	total_len = UGETW(mtod(m, u_int8_t *));
	m_adj(m, sizeof(u_int16_t));

	if (total_len < sizeof(struct ether_header)) {
		ifp->if_ierrors++;
		goto done;
	}

	m->m_pkthdr.len = m->m_len = total_len;
	ml_enqueue(&ml, m);

	if (kue_newbuf(sc, c, NULL) == ENOBUFS) {
		ifp->if_ierrors++;
		goto done;
	}

	s = splnet();
	if_input(ifp, &ml);
	splx(s);

 done:

	/* Setup new transfer. */
	usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX],
	    c, c->kue_buf, KUE_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY,
	    USBD_NO_TIMEOUT, kue_rxeof);
	usbd_transfer(c->kue_xfer);

	DPRINTFN(10,("%s: %s: start rx\n", sc->kue_dev.dv_xname,
		    __func__));
}
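The KLSI adapter prepends each frame with a 16-bit little-endian length,
which kue_rxeof() reads with UGETW() and strips with m_adj(). Isolated as a
sketch, with a hypothetical helper name:

int
kue_strip_len_sketch(struct mbuf *m)
{
	int framelen;

	framelen = UGETW(mtod(m, u_int8_t *));	/* prefixed frame length */
	m_adj(m, sizeof(u_int16_t));		/* drop the 2-byte prefix */
	m->m_pkthdr.len = m->m_len = framelen;
	return (framelen);
}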