Example #1
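/*
 * Start routine for pair(4): dequeue each packet from our send queue
 * and feed it to the paired interface as received input.
 */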
void
pairstart(struct ifnet *ifp)
{
	struct pair_softc	*sc = (struct pair_softc *)ifp->if_softc;
	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*pairedifp;
	struct mbuf		*m;

	pairedifp = if_get(sc->sc_pairedif);

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */

		ifp->if_opackets++;
		if (pairedifp != NULL) {
			if (m->m_flags & M_PKTHDR)
				m_resethdr(m);
			ml_enqueue(&ml, m);
		} else
			m_freem(m);
	}

	if (pairedifp != NULL) {
		if_input(pairedifp, &ml);
		if_put(pairedifp);
	}
}
Example #2
/*
 * Pass a packet up to the higher levels.
 */
void
egread(struct eg_softc *sc, caddr_t buf, int len)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHER_MAX_LEN) {
		printf("%s: invalid packet size %d; dropping\n",
		    sc->sc_dev.dv_xname, len);
		ifp->if_ierrors++;
		return;
	}

	/* Pull packet off interface. */
	m = egget(sc, buf, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);
}
Example #3
File: cgdb.c Project: i4fumi/cgdb
/* user_input: This function will get a key from the user and process it.
 *
 *  Returns:  -1 on error, 0 on success
 */
static int user_input(void)
{
    static int key, val;

    /* Clear the current map sets. Give the KUI the map sets 
     * that should be used with the current focus.
     */
    val = kui_manager_clear_map_sets(kui_ctx);
    if (val == -1) {
        logger_write_pos(logger, __FILE__, __LINE__, "user_input error");
        return -1;
    }

    if (if_get_focus() == CGDB)
        val = kui_manager_add_map_set(kui_ctx, kui_map);
    else if (if_get_focus() == GDB)
        val = kui_manager_add_map_set(kui_ctx, kui_imap);

    key = kui_manager_getkey(kui_ctx);
    if (key == -1) {
        logger_write_pos(logger, __FILE__, __LINE__,
                "kui_manager_getkey error");
        return -1;
    }

    val = if_input(key);

    if (val == -1) {
        logger_write_pos(logger, __FILE__, __LINE__, "if_input error");
        return -1;
    } else if (val != 1 && val != 2)
        return 0;

    if (val == 1 && completion_ptr)
        return handle_tab_completion_request(completion_ptr, key);

    /* Process the key */
    if (kui_term_is_cgdb_key(key)) {
        char *seqbuf = kui_term_get_ascii_char_sequence_from_key(key);

        if (seqbuf == NULL) {
            logger_write_pos(logger, __FILE__, __LINE__,
                    "kui_term_get_ascii_char_sequence_from_key error %d", key);
            return -1;
        } else {
            int length = strlen(seqbuf), i;

            for (i = 0; i < length; i++)
                send_key(val, seqbuf[i]);
        }
    } else
        send_key(val, key);

    return 0;
}
Example #4
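/*
 * tap(4) write handler: one write(2) becomes one packet, which is
 * injected into the interface's input path.
 */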
static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;
	int s;

	if (sc == NULL)
		return (ENXIO);

	getnanotime(&sc->sc_mtime);
	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}
	m->m_pkthdr.len = uio->uio_resid;

	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = min(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return (error);
	}

	ifp->if_ipackets++;
	m_set_rcvif(m, ifp);

	bpf_mtap(ifp, m);
	s = splnet();
	if_input(ifp, m);
	splx(s);

	return (0);
}
Example #5
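/*
 * Receive one work-queue entry from the Octeon packet unit, convert
 * it to an mbuf and pass it up the stack with if_input().
 */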
int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (!(ifp->if_flags & IFF_RUNNING))
		goto drop;

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, &m->m_pkthdr.csum_flags);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	return 0;

drop:
	octeon_eth_buf_free_work(sc, work, word2);
	return 1;
}
Example #6
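/*
 * Receive thread for shmif(4): watch the shared-memory bus for new
 * packets and pass the ones addressed to us up the stack.
 */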
static void
shmif_rcv(void *arg)
{
	struct ifnet *ifp = arg;
	struct shmif_sc *sc = ifp->if_softc;
	struct shmif_mem *busmem;
	struct mbuf *m = NULL;
	struct ether_header *eth;
	uint32_t nextpkt;
	bool wrap, passup;
	int error;
	const int align
	    = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);

 reup:
	mutex_enter(&sc->sc_mtx);
	while ((ifp->if_flags & IFF_RUNNING) == 0 && !sc->sc_dying)
		cv_wait(&sc->sc_cv, &sc->sc_mtx);
	mutex_exit(&sc->sc_mtx);

	busmem = sc->sc_busmem;

	while (ifp->if_flags & IFF_RUNNING) {
		struct shmif_pkthdr sp;

		if (m == NULL) {
			m = m_gethdr(M_WAIT, MT_DATA);
			MCLGET(m, M_WAIT);
			m->m_data += align;
		}

		DPRINTF(("waiting %d/%" PRIu64 "\n",
		    sc->sc_nextpacket, sc->sc_devgen));
		KASSERT(m->m_flags & M_EXT);

		shmif_lockbus(busmem);
		KASSERT(busmem->shm_magic == SHMIF_MAGIC);
		KASSERT(busmem->shm_gen >= sc->sc_devgen);

		/* need more data? */
		if (sc->sc_devgen == busmem->shm_gen && 
		    shmif_nextpktoff(busmem, busmem->shm_last)
		     == sc->sc_nextpacket) {
			shmif_unlockbus(busmem);
			error = rumpcomp_shmif_watchwait(sc->sc_kq);
			if (__predict_false(error))
				printf("shmif_rcv: wait failed %d\n", error);
			membar_consumer();
			continue;
		}

		if (stillvalid_p(sc)) {
			nextpkt = sc->sc_nextpacket;
		} else {
			KASSERT(busmem->shm_gen > 0);
			nextpkt = busmem->shm_first;
			if (busmem->shm_first > busmem->shm_last)
				sc->sc_devgen = busmem->shm_gen - 1;
			else
				sc->sc_devgen = busmem->shm_gen;
			DPRINTF(("dev %p overrun, new data: %d/%" PRIu64 "\n",
			    sc, nextpkt, sc->sc_devgen));
		}

		/*
		 * If our read pointer is ahead of the bus's last write,
		 * our generation must be one behind.
		 */
		KASSERT(!(nextpkt > busmem->shm_last
		    && sc->sc_devgen == busmem->shm_gen));

		wrap = false;
		nextpkt = shmif_busread(busmem, &sp,
		    nextpkt, sizeof(sp), &wrap);
		KASSERT(sp.sp_len <= ETHERMTU + ETHER_HDR_LEN);
		nextpkt = shmif_busread(busmem, mtod(m, void *),
		    nextpkt, sp.sp_len, &wrap);

		DPRINTF(("shmif_rcv: read packet of length %d at %d\n",
		    sp.sp_len, nextpkt));

		sc->sc_nextpacket = nextpkt;
		shmif_unlockbus(sc->sc_busmem);

		if (wrap) {
			sc->sc_devgen++;
			DPRINTF(("dev %p generation now %" PRIu64 "\n",
			    sc, sc->sc_devgen));
		}

		/*
		 * Ignore packets too short to possibly be valid.
		 * This is hit at least for the first frame on a new bus.
		 */
		if (__predict_false(sp.sp_len < ETHER_HDR_LEN)) {
			DPRINTF(("shmif read packet len %d < ETHER_HDR_LEN\n",
			    sp.sp_len));
			continue;
		}

		m->m_len = m->m_pkthdr.len = sp.sp_len;
		m_set_rcvif(m, ifp);

		/*
		 * Test if we want to pass the packet upwards
		 */
		eth = mtod(m, struct ether_header *);
		if (sp.sp_sender == sc->sc_uuid) {
			passup = false;
		} else if (memcmp(eth->ether_dhost, CLLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN) == 0) {
			passup = true;
		} else if (ETHER_IS_MULTICAST(eth->ether_dhost)) {
			passup = true;
		} else if (ifp->if_flags & IFF_PROMISC) {
			m->m_flags |= M_PROMISC;
			passup = true;
		} else {
			passup = false;
		}

		if (passup) {
			int bound;
			ifp->if_ipackets++;
			KERNEL_LOCK(1, NULL);
			/* Prevent LWP migrations between CPUs for psref(9) */
			bound = curlwp_bind();
			bpf_mtap(ifp, m);
			if_input(ifp, m);
			curlwp_bindx(bound);
			KERNEL_UNLOCK_ONE(NULL);
			m = NULL;
		}
		/* else: reuse mbuf for a future packet */
	}
	m_freem(m);
	m = NULL;

	if (!sc->sc_dying)
		goto reup;

	kthread_exit(0);
}
Example #7
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
ste_rxeof(struct ste_softc *sc)
{
	struct mbuf		*m;
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct ifnet		*ifp;
	struct ste_chain_onefrag	*cur_rx;
	int			total_len = 0, count = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	while ((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
	    & STE_RXSTAT_DMADONE) {
		if ((STE_RX_LIST_CNT - count) < 3)
			break;

		cur_rx = sc->ste_cdata.ste_rx_head;
		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set, which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & STE_RXSTAT_DMADONE)) {
			printf("%s: bad receive status -- packet dropped",
				sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		m->m_pkthdr.len = m->m_len = total_len;

		ml_enqueue(&ml, m);

		cur_rx->ste_ptr->ste_status = 0;
		count++;
	}

	if_input(ifp, &ml);
}
Example #8
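/*
 * Handle a VIO DESC_DATA message: on INFO, copy the frame in over
 * LDC and input it; on ACK, reclaim the transmit buffer.
 */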
void
vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vnet_desc_msg *dm = (struct vnet_desc_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	caddr_t buf;
	paddr_t pa;
	psize_t nbytes;
	u_int cons;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		nbytes = roundup(dm->nbytes, 8);

		if (dm->nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
			ifp->if_ierrors++;
			goto skip;
		}

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
		    dm->cookie[0].addr, pa, nbytes, &nbytes);
		if (err != H_EOK) {
			pool_put(&sc->sc_pool, buf);
			ifp->if_ierrors++;
			goto skip;
		}

		/* Stupid OBP doesn't align properly. */
		m = m_devget(buf, dm->nbytes, ETHER_ALIGN);
		pool_put(&sc->sc_pool, buf);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* Pass it on. */
		ml_enqueue(&ml, m);
		if_input(ifp, &ml);

	skip:
		dm->tag.stype = VIO_SUBTYPE_ACK;
		dm->tag.sid = sc->sc_local_sid;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DESC_DATA\n"));

		if (dm->desc_handle != sc->sc_tx_cons) {
			printf("out of order\n");
			return;
		}

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);

		map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
		atomic_dec_int(&map->lm_count);

		pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
		ifp->if_opackets++;

		sc->sc_tx_cons++;
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DESC_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DESC_DATA\n", tag->stype));
		break;
	}
}
Example #9
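/*
 * Handle a VIO DRING_DATA message: on INFO, drain ready descriptors
 * from the peer's ring and input each frame; on ACK, reclaim
 * completed transmit slots and restart output.
 */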
void
vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	paddr_t pa;
	psize_t nbytes;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
	{
		struct vnet_desc desc;
		uint64_t cookie;
		paddr_t desc_pa;
		int idx, ack_end_idx = -1;
		struct mbuf_list ml = MBUF_LIST_INITIALIZER();

		idx = dm->start_idx;
		for (;;) {
			cookie = sc->sc_peer_dring_cookie.addr;
			cookie += idx * sc->sc_peer_desc_size;
			nbytes = sc->sc_peer_desc_size;
			pmap_extract(pmap_kernel(), (vaddr_t)&desc, &desc_pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("hv_ldc_copy_in %d\n", err);
				break;
			}

			if (desc.hdr.dstate != VIO_DESC_READY)
				break;

			if (desc.nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
				ifp->if_ierrors++;
				goto skip;
			}

			m = MCLGETI(NULL, M_DONTWAIT, NULL, desc.nbytes);
			if (!m)
				break;
			m->m_len = m->m_pkthdr.len = desc.nbytes;
			nbytes = roundup(desc.nbytes + VNET_ETHER_ALIGN, 8);

			pmap_extract(pmap_kernel(), (vaddr_t)m->m_data, &pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
			    desc.cookie[0].addr, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				m_freem(m);
				goto skip;
			}
			m->m_data += VNET_ETHER_ALIGN;

			ml_enqueue(&ml, m);

		skip:
			desc.hdr.dstate = VIO_DESC_DONE;
			nbytes = sc->sc_peer_desc_size;
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK)
				printf("hv_ldc_copy_out %d\n", err);

			ack_end_idx = idx;
			if (++idx == sc->sc_peer_dring_nentries)
				idx = 0;
		}

		if_input(ifp, &ml);

		if (ack_end_idx == -1) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
		} else {
			dm->tag.stype = VIO_SUBTYPE_ACK;
			dm->end_idx = ack_end_idx;
		}
		dm->tag.sid = sc->sc_local_sid;
		dm->proc_state = VIO_DP_STOPPED;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;
	}

	case VIO_SUBTYPE_ACK:
	{
		struct ldc_map *map = sc->sc_lm;
		u_int cons, count;

		sc->sc_peer_state = dm->proc_state;

		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
			atomic_dec_int(&map->lm_count);

			pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
			ifp->if_opackets++;

			sc->sc_vd->vd_desc[cons].hdr.dstate = VIO_DESC_FREE;
			sc->sc_tx_cons++;
			cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		}

		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count > 0 && sc->sc_peer_state != VIO_DP_ACTIVE)
			vnet_send_dring_data(sc, cons);

		KERNEL_LOCK();
		if (count < (sc->sc_vd->vd_nentries - 1))
			ifp->if_flags &= ~IFF_OACTIVE;
		if (count == 0)
			ifp->if_timer = 0;

		vnet_start(ifp);
		KERNEL_UNLOCK();
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		sc->sc_peer_state = VIO_DP_STOPPED;
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}
Example #10
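/*
 * Receive one work-queue entry from the Octeon packet unit,
 * interleaving transmit-queue flushes, and pass the resulting mbuf
 * up the stack with if_input().
 */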
int
octeon_eth_recv(struct octeon_eth_softc *sc, uint64_t *work)
{
	int result = 0;
	struct ifnet *ifp;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word2;

	/* XXX */
	/*
	 * performance tuning
	 * presend iobdma request
	 */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_prefetch(sc);
	}
	/* XXX */

	OCTEON_ETH_KASSERT(sc != NULL);
	OCTEON_ETH_KASSERT(work != NULL);

	word2 = work[2];
	ifp = &sc->sc_arpcom.ac_if;

	OCTEON_ETH_KASSERT(ifp != NULL);

	if (__predict_false(octeon_eth_recv_check(sc, word2) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	if (__predict_false(octeon_eth_recv_mbuf(sc, work, &m) != 0)) {
		ifp->if_ierrors++;
		result = 1;
		octeon_eth_buf_free_work(sc, work, word2);
		goto drop;
	}

	/* work[0] .. work[3] may not be valid any more */

	OCTEON_ETH_KASSERT(m != NULL);

	cn30xxipd_offload(word2, m->m_data, &m->m_pkthdr.csum_flags);

	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}

	/* XXX */
	if (sc->sc_flush)
		octeon_eth_send_queue_flush_sync(sc);

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);

	return 0;

drop:
	/* XXX */
	if (sc->sc_soft_req_cnt > sc->sc_soft_req_thresh) {
		octeon_eth_send_queue_flush_fetch(sc);
	}
	/* XXX */

	return result;
}
Example #11
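/*
 * bmac(4) receive interrupt: walk the DBDMA ring, trim the CRC and
 * pass each completed frame up the stack.
 */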
int
bmac_rint(void *v)
{
	struct bmac_softc *sc = v;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	dbdma_command_t *cmd;
	int status, resid, count, datalen;
	int i, n;
	void *data;
#ifdef BMAC_DEBUG
	printf("bmac_rint() called\n");
#endif

	i = sc->sc_rxlast;
	for (n = 0; n < BMAC_RXBUFS; n++, i++) {
		if (i == BMAC_RXBUFS)
			i = 0;
		cmd = &sc->sc_rxcmd[i];
		status = dbdma_ld16(&cmd->d_status);
		resid = dbdma_ld16(&cmd->d_resid);

#ifdef BMAC_DEBUG
		if (status != 0 && status != 0x8440 && status != 0x9440)
			printf("bmac_rint status = 0x%x\n", status);
#endif

		if ((status & DBDMA_CNTRL_ACTIVE) == 0)	/* 0x9440 | 0x8440 */
			continue;
		count = dbdma_ld16(&cmd->d_count);
		datalen = count - resid;		/* 2 == framelen */
		if (datalen < sizeof(struct ether_header)) {
			printf("%s: short packet len = %d\n",
				ifp->if_xname, datalen);
			goto next;
		}
		DBDMA_BUILD_CMD(cmd, DBDMA_CMD_STOP, 0, 0, 0, 0);
		data = sc->sc_rxbuf + BMAC_BUFLEN * i;

		/* XXX Sometimes bmac reads one extra byte. */
		if (datalen == ETHER_MAX_LEN + 1)
			datalen--;

		/* Trim the CRC. */
		datalen -= ETHER_CRC_LEN;

		m = bmac_get(sc, data, datalen);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto next;
		}

		ml_enqueue(&ml, m);

next:
		DBDMA_BUILD_CMD(cmd, DBDMA_CMD_IN_LAST, 0, DBDMA_INT_ALWAYS,
			DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

		cmd->d_status = 0;
		cmd->d_resid = 0;
		sc->sc_rxlast = i + 1;
	}
	bmac_mediachange(ifp);

	dbdma_continue(sc->sc_rxdma);

	if_input(ifp, &ml);
	return (1);
}
Example #12
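/*
 * cpsw(4) receive interrupt: process completed DMA descriptors,
 * replace their buffers and input the received frames.
 */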
int
cpsw_rxintr(void *arg)
{
	struct cpsw_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ac.ac_if;
	struct cpsw_ring_data * const rdp = sc->sc_rdp;
	struct cpsw_cpdma_bd bd;
	bus_dmamap_t dm;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int i;
	u_int len, off;

	for (;;) {
		KASSERT(sc->sc_rxhead < CPSW_NRXDESCS);

		i = sc->sc_rxhead;
		dm = rdp->rx_dm[i];
		m = rdp->rx_mb[i];

		KASSERT(dm != NULL);
		KASSERT(m != NULL);

		cpsw_get_rxdesc(sc, i, &bd);

		if (bd.flags & CPDMA_BD_OWNER)
			break;

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			sc->sc_rxrun = false;
			goto done;
		}

		if ((bd.flags & (CPDMA_BD_SOP|CPDMA_BD_EOP)) !=
		    (CPDMA_BD_SOP|CPDMA_BD_EOP)) {
			/* Debugger(); */
		}

		bus_dmamap_sync(sc->sc_bdt, dm, 0, dm->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_bdt, dm);

		if (cpsw_new_rxbuf(sc, i) != 0) {
			/* drop current packet, reuse buffer for new */
			ifp->if_ierrors++;
			goto next;
		}

		off = bd.bufoff;
		len = bd.pktlen;

		if (bd.flags & CPDMA_BD_PASSCRC)
			len -= ETHER_CRC_LEN;

		m->m_pkthdr.len = m->m_len = len;
		m->m_data += off;

		ml_enqueue(&ml, m);

next:
		sc->sc_rxhead = RXDESC_NEXT(sc->sc_rxhead);
		if (bd.flags & CPDMA_BD_EOQ) {
			sc->sc_rxeoq = true;
			break;
		} else {
			sc->sc_rxeoq = false;
		}
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_RX_CP(0),
		    cpsw_rxdesc_paddr(sc, i));
	}

	if (sc->sc_rxeoq) {
		printf("rxeoq\n");
		/* Debugger(); */
	}

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, CPSW_CPDMA_CPDMA_EOI_VECTOR,
	    CPSW_INTROFF_RX);

done:
	if_input(ifp, &ml);

	return 1;
}
Example #13
/*
 * Interrupt handler.
 */
int
epic_intr(void *arg)
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	u_int32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;
	u_int len;

	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.len = m->m_len = len;

			ml_enqueue(&ml, m);
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	if_input(ifp, &ml);

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			if (txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	return (claimed);
}
Example #14
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
kue_rxeof(struct usbd_xfer *xfer, void *priv, usbd_status status)
{
	struct kue_chain	*c = priv;
	struct kue_softc	*sc = c->kue_sc;
	struct ifnet		*ifp = GET_IFP(sc);
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
	struct mbuf		*m;
	int			total_len = 0;
	int			s;

	DPRINTFN(10,("%s: %s: enter status=%d\n", sc->kue_dev.dv_xname,
		     __func__, status));

	if (usbd_is_dying(sc->kue_udev))
		return;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;
		sc->kue_rx_errs++;
		if (usbd_ratecheck(&sc->kue_rx_notice)) {
			printf("%s: %u usb errors on rx: %s\n",
			    sc->kue_dev.dv_xname, sc->kue_rx_errs,
			    usbd_errstr(status));
			sc->kue_rx_errs = 0;
		}
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->kue_ep[KUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	DPRINTFN(10,("%s: %s: total_len=%d len=%d\n", sc->kue_dev.dv_xname,
		     __func__, total_len,
		     UGETW(mtod(c->kue_mbuf, u_int8_t *))));

	if (total_len <= 1)
		goto done;

	m = c->kue_mbuf;
	/* copy data to mbuf */
	memcpy(mtod(m, char *), c->kue_buf, total_len);

	/* No errors; receive the packet. */
	total_len = UGETW(mtod(m, u_int8_t *));
	m_adj(m, sizeof(u_int16_t));

	if (total_len < sizeof(struct ether_header)) {
		ifp->if_ierrors++;
		goto done;
	}

	m->m_pkthdr.len = m->m_len = total_len;
	ml_enqueue(&ml, m);

	if (kue_newbuf(sc, c, NULL) == ENOBUFS) {
		ifp->if_ierrors++;
		goto done;
	}

	s = splnet();
	if_input(ifp, &ml);
	splx(s);

 done:

	/* Setup new transfer. */
	usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX],
	    c, c->kue_buf, KUE_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY,
	    USBD_NO_TIMEOUT, kue_rxeof);
	usbd_transfer(c->kue_xfer);

	DPRINTFN(10,("%s: %s: start rx\n", sc->kue_dev.dv_xname,
		    __func__));
}