Example no. 1
void
atge_l1e_init_rx_pages(atge_t *atgep)
{
	atge_l1e_data_t *l1e;
	atge_dma_t *dma;
	int pages;

	ASSERT(atgep != NULL);
	l1e = atgep->atge_private_data;

	ASSERT(l1e != NULL);

	l1e->atge_l1e_proc_max = L1E_RX_PAGE_SZ / ETHERMIN;
	l1e->atge_l1e_rx_curp = 0;
	l1e->atge_l1e_rx_seqno = 0;

	for (pages = 0; pages < L1E_RX_PAGES; pages++) {
		l1e->atge_l1e_rx_page_cons = 0;
		l1e->atge_l1e_rx_page_prods[pages] = 0;


		dma = l1e->atge_l1e_rx_page[pages];
		ASSERT(dma != NULL);
		bzero(dma->addr, l1e->atge_l1e_pagesize);
		DMA_SYNC(dma, 0, l1e->atge_l1e_pagesize, DDI_DMA_SYNC_FORDEV);
	}

	dma = l1e->atge_l1e_rx_cmb;
	ASSERT(dma != NULL);
	bzero(dma->addr, L1E_RX_CMB_SZ * L1E_RX_PAGES);
	DMA_SYNC(dma, 0, L1E_RX_CMB_SZ * L1E_RX_PAGES, DDI_DMA_SYNC_FORDEV);
}
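For reference, the DMA_SYNC() calls used throughout these atge routines are typically a thin wrapper around the DDI routine ddi_dma_sync(). A minimal sketch, assuming atge_dma_t exposes its DMA handle in a field named hdl (the field name is an assumption, not taken from the driver):

/* Sketch only: the handle field name 'hdl' is assumed, not the real atge layout. */
#define	DMA_SYNC(dma, off, len, flag) \
	((void) ddi_dma_sync((dma)->hdl, (off_t)(off), (size_t)(len), (flag)))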
Example no. 2
/*
 * XXX: Poll a particular ring. The implementation is incomplete.
 * Once the ring interrupts are disabled, we need to do bge_recycle()
 * for the ring as well and re-enable the ring interrupt automatically
 * if the poll doesn't find any packets in the ring. We need to
 * have MSI-X interrupt support for this.
 *
 * The basic poll policy is that rings that are dealing with explicit
 * flows (like TCP or some service) and are marked as such should
 * have their own MSI-X interrupt per ring. bge_intr() should leave
 * that interrupt disabled after an upcall. The ring is in poll mode.
 * When a poll thread comes down and finds nothing, the MSI-X interrupt
 * is automatically enabled. The squeue needs to deal with the race of
 * a new interrupt firing and arriving before the poll thread returns.
 */
mblk_t *
bge_poll_ring(void *arg, int bytes_to_pickup)
{
    recv_ring_t *rrp = arg;
    bge_t *bgep = rrp->bgep;
    bge_rbd_t *hw_rbd_p;
    uint64_t slot;
    mblk_t *head;
    mblk_t **tail;
    mblk_t *mp;
    size_t sz = 0;

    mutex_enter(rrp->rx_lock);

    /*
     * Sync (all) the receive ring descriptors
     * before accepting the packets they describe
     */
    DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
    if (*rrp->prod_index_p >= rrp->desc.nslots) {
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
        mutex_exit(rrp->rx_lock);
        return (NULL);
    }
    if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
        rrp->rx_next = *rrp->prod_index_p;
        bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
        bgep->bge_dma_error = B_TRUE;
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        mutex_exit(rrp->rx_lock);
        return (NULL);
    }

    hw_rbd_p = DMA_VPTR(rrp->desc);
    head = NULL;
    tail = &head;
    slot = rrp->rx_next;

    /* Note: volatile */
    while ((slot != *rrp->prod_index_p) && (sz <= bytes_to_pickup)) {
        if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
                != NULL) {
            *tail = mp;
            sz += msgdsize(mp);
            tail = &mp->b_next;
        }
        rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
    }

    bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
        bgep->bge_chip_state = BGE_CHIP_ERROR;
    mutex_exit(rrp->rx_lock);
    return (head);
}
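A poll entry point like bge_poll_ring() is normally exported to the GLDv3 framework through the ring-info callback. A hedged sketch of that wiring, in which the helper name and its arguments are illustrative rather than the actual bge code (mac_ring_info_t and its mri_driver/mri_poll fields follow the GLDv3 convention):

/*
 * Illustrative only: hand the poll routine to the MAC layer via
 * mac_ring_info_t. The helper name and arguments are hypothetical.
 */
static void
bge_fill_rx_ring_info(recv_ring_t *rrp, mac_ring_info_t *infop)
{
    infop->mri_driver = (mac_ring_driver_t)rrp;
    infop->mri_poll = bge_poll_ring;	/* entry point shown above */
}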
Example no. 3
/*
 * igb_tx_copy
 *
 * Copy the mblk fragment to the pre-allocated tx buffer
 */
static int
igb_tx_copy(igb_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp,
    uint32_t len, boolean_t copy_done)
{
	dma_buffer_t *tx_buf;
	uint32_t desc_num;
	_NOTE(ARGUNUSED(tx_ring));

	tx_buf = &tcb->tx_buf;

	/*
	 * Copy the packet data of the mblk fragment into the
	 * pre-allocated tx buffer, which is maintained by the
	 * tx control block.
	 *
	 * Several mblk fragments can be copied into one tx buffer.
	 * The destination address of the current copied fragment in
	 * the tx buffer is next to the end of the previous copied
	 * fragment.
	 */
	if (len > 0) {
		bcopy(mp->b_rptr, tx_buf->address + tx_buf->len, len);

		tx_buf->len += len;
		tcb->frag_num++;
	}

	desc_num = 0;

	/*
	 * If it is the last fragment copied to the current tx buffer,
	 * in other words, if there's no remaining fragment or the remaining
	 * fragment requires a new tx control block to process, we need to
	 * complete the current copy processing by syncing up the current
	 * DMA buffer and saving the descriptor data.
	 */
	if (copy_done) {
		/*
		 * Sync the DMA buffer of the packet data
		 */
		DMA_SYNC(tx_buf, DDI_DMA_SYNC_FORDEV);

		tcb->tx_type = USE_COPY;

		/*
		 * Save the address and length to the private data structure
		 * of the tx control block, which will be used to fill the
		 * tx descriptor ring after all the fragments are processed.
		 */
		igb_save_desc(tcb, tx_buf->dma_address, tx_buf->len);
		desc_num++;
	}

	return (desc_num);
}
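For context, a caller drives igb_tx_copy() once per mblk fragment that is small enough to copy, and passes copy_done only for the last fragment destined for the current tx buffer, so the DMA sync and igb_save_desc() run exactly once per buffer. A hedged sketch of that calling pattern, in which every name other than igb_tx_copy is hypothetical:

/*
 * Hypothetical caller (only igb_tx_copy is from the driver above):
 * copy each fragment into the same tx buffer and signal copy_done
 * only on the last one.
 */
mblk_t *nmp;
int desc_total = 0;

for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
	boolean_t last = (nmp->b_cont == NULL);

	desc_total += igb_tx_copy(tx_ring, tcb, nmp,
	    (uint32_t)MBLKL(nmp), last);
}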
Example no. 4
void
atge_l1e_send_packet(atge_ring_t *r, int start, uint32_t pktlen)
{
	atge_l1e_tx_desc_t	*txd;
	uchar_t *c;
	uint32_t cflags = 0;

	c = (uchar_t *)r->r_desc_ring->addr;
	c += (sizeof (atge_l1e_tx_desc_t) * start);
	txd = (atge_l1e_tx_desc_t *)c;

	ATGE_PUT64(r->r_desc_ring, &txd->addr,
	    r->r_buf_tbl[start]->cookie.dmac_laddress);

	ATGE_PUT32(r->r_desc_ring, &txd->len, L1E_TX_BYTES(pktlen));

	cflags |= L1E_TD_EOP;
	ATGE_PUT32(r->r_desc_ring, &txd->flags, cflags);

	/*
	 * Sync buffer first.
	 */
	DMA_SYNC(r->r_buf_tbl[start], 0, pktlen, DDI_DMA_SYNC_FORDEV);

	/*
	 * Increment TX producer count by one.
	 */
	ATGE_DESC_INC(r->r_producer, L1E_TX_RING_CNT);

	/*
	 * Sync descriptor table.
	 */
	DMA_SYNC(r->r_desc_ring, 0, L1E_TX_RING_SZ, DDI_DMA_SYNC_FORDEV);

	/*
	 * Ask chip to send the packet now.
	 */
	OUTL(r->r_atge, ATGE_MBOX, r->r_producer);

	r->r_atge->atge_opackets++;
	r->r_atge->atge_obytes += pktlen;
}
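ATGE_DESC_INC() above advances the producer index with wrap-around at the ring size. A minimal sketch of such a macro, assuming a simple modulo definition (the actual driver macro may differ):

/* Assumed definition: advance a ring index modulo the ring size. */
#define	ATGE_DESC_INC(x, y)	((x) = ((x) + 1) % (y))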
Example no. 5
static mblk_t *
bge_receive_ring(bge_t *bgep, recv_ring_t *rrp)
{
    bge_rbd_t *hw_rbd_p;
    uint64_t slot;
    mblk_t *head;
    mblk_t **tail;
    mblk_t *mp;
    int recv_cnt = 0;

    ASSERT(mutex_owned(rrp->rx_lock));

    /*
     * Sync (all) the receive ring descriptors
     * before accepting the packets they describe
     */
    DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
    if (*rrp->prod_index_p >= rrp->desc.nslots) {
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
        return (NULL);
    }
    if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
        rrp->rx_next = *rrp->prod_index_p;
        bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
        bgep->bge_dma_error = B_TRUE;
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        return (NULL);
    }

    hw_rbd_p = DMA_VPTR(rrp->desc);
    head = NULL;
    tail = &head;
    slot = rrp->rx_next;

    while ((slot != *rrp->prod_index_p) && /* Note: volatile	*/
            (recv_cnt < BGE_MAXPKT_RCVED)) {
        if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
                != NULL) {
            *tail = mp;
            tail = &mp->b_next;
            recv_cnt++;
        }
        rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
    }

    bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
        bgep->bge_chip_state = BGE_CHIP_ERROR;
    return (head);
}
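Both this interrupt-path routine and bge_poll_ring() above walk the descriptor ring with NEXT(). A sketch of the usual wrap-around helper, assuming a simple conditional form (the real bge macro may differ):

/* Assumed definition: next ring index, wrapping to 0 at 'limit'. */
#define	NEXT(index, limit)	((index) + 1 == (limit) ? 0 : (index) + 1)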
Example no. 6
void
atge_l1e_init_tx_ring(atge_t *atgep)
{
	ASSERT(atgep != NULL);
	ASSERT(atgep->atge_tx_ring != NULL);
	ASSERT(atgep->atge_tx_ring->r_desc_ring != NULL);

	atgep->atge_tx_ring->r_producer = 0;
	atgep->atge_tx_ring->r_consumer = 0;
	atgep->atge_tx_ring->r_avail_desc = L1E_TX_RING_CNT;

	bzero(atgep->atge_tx_ring->r_desc_ring->addr, L1E_TX_RING_SZ);

	DMA_SYNC(atgep->atge_tx_ring->r_desc_ring, 0, L1E_TX_RING_SZ,
	    DDI_DMA_SYNC_FORDEV);
}
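The bzero()/DMA_SYNC() length here presumably covers the whole descriptor array. A sketch of the assumed relationship between the two ring constants used above (not taken from the driver headers):

/* Assumed relationship between ring count and ring byte size. */
#define	L1E_TX_RING_SZ	(L1E_TX_RING_CNT * sizeof (atge_l1e_tx_desc_t))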
Example no. 7
void
rge_hw_stats_dump(rge_t *rgep)
{
	int i = 0;

	while (rge_reg_get32(rgep, DUMP_COUNTER_REG_0) & DUMP_START) {
		drv_usecwait(100);
		if (++i > STATS_DUMP_LOOP) {
			RGE_DEBUG(("rge h/w statistics dump fail!"));
			rgep->rge_chip_state = RGE_CHIP_ERROR;
			return;
		}
	}
	DMA_SYNC(rgep->dma_area_stats, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Start H/W statistics dump for RTL8169 chip
	 */
	rge_reg_set32(rgep, DUMP_COUNTER_REG_0, DUMP_START);
}
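Note that with drv_usecwait(100) per iteration, the loop above bounds the wait for a previous dump to finish to roughly STATS_DUMP_LOOP * 100 microseconds before it gives up and marks the chip errored.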
Example no. 8
static mblk_t *
bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p, recv_ring_t *rrp)
{
    bge_rbd_t hw_rbd;
    buff_ring_t *brp;
    sw_rbd_t *srbdp;
    uchar_t *dp;
    mblk_t *mp;
    uint_t len;
    uint_t minsize;
    uint_t maxsize;
    uint32_t pflags;

    mp = NULL;
    hw_rbd = *hw_rbd_p;

    switch (hw_rbd.flags & (RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING)) {
    case RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING:
    default:
        /* error, this shouldn't happen */
        BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring flags!"));
        goto error;

    case RBD_FLAG_JUMBO_RING:
        brp = &bgep->buff[BGE_JUMBO_BUFF_RING];
        break;

#if	(BGE_BUFF_RINGS_USED > 2)
    case RBD_FLAG_MINI_RING:
        brp = &bgep->buff[BGE_MINI_BUFF_RING];
        break;
#endif	/* BGE_BUFF_RINGS_USED > 2 */

    case 0:
        brp = &bgep->buff[BGE_STD_BUFF_RING];
        break;
    }

    if (hw_rbd.index >= brp->desc.nslots) {
        /* error, this shouldn't happen */
        BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring index!"));
        goto error;
    }

    srbdp = &brp->sw_rbds[hw_rbd.index];
    if (hw_rbd.opaque != srbdp->pbuf.token) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "bad ring token"));
        goto refill;
    }

    if ((hw_rbd.flags & RBD_FLAG_PACKET_END) == 0) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "unterminated packet"));
        goto refill;
    }

    if (hw_rbd.flags & RBD_FLAG_FRAME_HAS_ERROR) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "errored packet"));
        goto refill;
    }

    len = hw_rbd.len;

#ifdef BGE_IPMI_ASF
    /*
     * When IPMI/ASF is enabled, VLAN tag must be stripped.
     */
    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
        maxsize = bgep->chipid.ethmax_size + ETHERFCSL;
    else
#endif
        /*
         * H/W will not strip the VLAN tag from incoming packet
         * now, as RECEIVE_MODE_KEEP_VLAN_TAG bit is set in
         * RECEIVE_MAC_MODE_REG register.
         */
        maxsize = bgep->chipid.ethmax_size + VLAN_TAGSZ + ETHERFCSL;
    if (len > maxsize) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "oversize packet"));
        goto refill;
    }

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
        minsize = ETHERMIN + ETHERFCSL - VLAN_TAGSZ;
    else
#endif
        minsize = ETHERMIN + ETHERFCSL;
    if (len < minsize) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "undersize packet"));
        goto refill;
    }

    /*
     * Packet looks good; get a buffer to copy it into.
     * We want to leave some space at the front of the allocated
     * buffer in case any upstream modules want to prepend some
     * sort of header.  This also has the side-effect of making
     * the packet *contents* 4-byte aligned, as required by NCA!
     */
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
        mp = allocb(BGE_HEADROOM + len + VLAN_TAGSZ, 0);
    } else {
#endif

        mp = allocb(BGE_HEADROOM + len, 0);
#ifdef BGE_IPMI_ASF
    }
#endif
    if (mp == NULL) {
        /* Nothing to do but drop the packet */
        goto refill;
    }

    /*
     * Sync the data and copy it to the STREAMS buffer.
     */
    DMA_SYNC(srbdp->pbuf, DDI_DMA_SYNC_FORKERNEL);
    if (bge_check_dma_handle(bgep, srbdp->pbuf.dma_hdl) != DDI_FM_OK) {
        bgep->bge_dma_error = B_TRUE;
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        return (NULL);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
        /*
         * As VLAN tag has been stripped from incoming packet in ASF
         * scenario, we insert it into this packet again.
         */
        struct ether_vlan_header *ehp;
        mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM - VLAN_TAGSZ;
        bcopy(DMA_VPTR(srbdp->pbuf), dp, 2 * ETHERADDRL);
        ehp = (void *)dp;
        ehp->ether_tpid = ntohs(ETHERTYPE_VLAN);
        ehp->ether_tci = ntohs(hw_rbd.vlan_tci);
        bcopy(((uchar_t *)(DMA_VPTR(srbdp->pbuf))) + 2 * ETHERADDRL,
              dp + 2 * ETHERADDRL + VLAN_TAGSZ,
              len - 2 * ETHERADDRL);
    } else {
#endif
        mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM;
        bcopy(DMA_VPTR(srbdp->pbuf), dp, len);
#ifdef BGE_IPMI_ASF
    }

    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
        mp->b_wptr = dp + len + VLAN_TAGSZ - ETHERFCSL;
    } else
#endif
        mp->b_wptr = dp + len - ETHERFCSL;

    /*
     * Special check for one specific type of data corruption;
     * in a good packet, the first 8 bytes are *very* unlikely
     * to be the same as the second 8 bytes ... but we let the
     * packet through just in case.
     */
    if (bcmp(dp, dp+8, 8) == 0)
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "stuttered packet?"));

    pflags = 0;
    if (hw_rbd.flags & RBD_FLAG_TCP_UDP_CHECKSUM)
        pflags |= HCK_FULLCKSUM;
    if (hw_rbd.flags & RBD_FLAG_IP_CHECKSUM)
        pflags |= HCK_IPV4_HDRCKSUM_OK;
    if (pflags != 0)
        mac_hcksum_set(mp, 0, 0, 0, hw_rbd.tcp_udp_cksum, pflags);

    /* Update per-ring rx statistics */
    rrp->rx_pkts++;
    rrp->rx_bytes += len;

refill:
    /*
     * Replace the buffer in the ring it came from ...
     */
    bge_refill(bgep, brp, srbdp);
    return (mp);

error:
    /*
     * We come here if the integrity of the ring descriptors
     * (rather than merely packet data) appears corrupted.
     * The factotum will attempt to reset-and-recover.
     */
    bgep->bge_chip_state = BGE_CHIP_ERROR;
    bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
    return (NULL);
}
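The VLAN re-insertion arithmetic above (copy 2 * ETHERADDRL bytes of addresses, write a 4-byte tag, then shift the rest of the frame by VLAN_TAGSZ) follows the standard tagged-Ethernet header layout; shown here for reference, assuming the usual illumos definition in <sys/ethernet.h>:

/*
 * Layout assumed by the tag re-insertion: dst(6) | src(6) | tpid(2) |
 * tci(2) | type(2), with VLAN_TAGSZ == 4 covering tpid + tci.
 */
struct ether_vlan_header {
	struct ether_addr	ether_dhost;
	struct ether_addr	ether_shost;
	ushort_t		ether_tpid;
	ushort_t		ether_tci;
	ushort_t		ether_type;
};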
Example no. 9
void
atge_l1e_rx_next_pkt(atge_t *atgep, uint32_t len)
{
	atge_l1e_data_t *l1e = atgep->atge_private_data;
	atge_dma_t *dma_rx_page;
	atge_dma_t *dma_rx_cmb;
	int curr = l1e->atge_l1e_rx_curp;
	uint32_t *p;

	/*
	 * Update consumer position.
	 */
	l1e->atge_l1e_rx_page_cons +=
	    ROUNDUP(len + sizeof (rx_rs_t), L1E_RX_PAGE_ALIGN);

	/*
	 * Check whether we need to flip to the other page. Note that we use
	 * only two pages.
	 */
	if (l1e->atge_l1e_rx_page_cons >= L1E_RX_PAGE_SZ) {
		ATGE_DB(("%s: %s() cons : %d, prod :%d, L1E_RX_PAGE_SZ : %d",
		    atgep->atge_name, __func__, l1e->atge_l1e_rx_page_cons,
		    l1e->atge_l1e_rx_page_prods[curr], L1E_RX_PAGE_SZ));

		/*
		 * Clear the producer.
		 */
		dma_rx_cmb = l1e->atge_l1e_rx_cmb;
		p = (void *)dma_rx_cmb->addr;
		p = p + curr;
		*p = 0;
		DMA_SYNC(dma_rx_cmb, curr * L1E_RX_CMB_SZ,
		    L1E_RX_CMB_SZ, DDI_DMA_SYNC_FORDEV);

		/*
		 * Notify the NIC that the current RX page is available again.
		 */
		OUTB(atgep, L1E_RXF0_PAGE0 + curr, RXF_VALID);

		/*
		 * End of Rx page reached, let hardware reuse this page.
		 */
		l1e->atge_l1e_rx_page_cons = 0;
		l1e->atge_l1e_rx_page_prods[curr] = 0;

		/*
		 * Switch to alternate Rx page.
		 */
		curr ^= 1;
		l1e->atge_l1e_rx_curp = curr;

		/*
		 * Page flipped, sync CMB and then Rx page.
		 */
		DMA_SYNC(dma_rx_cmb, 0, L1E_RX_PAGES * L1E_RX_CMB_SZ,
		    DDI_DMA_SYNC_FORKERNEL);
		p = (void *)dma_rx_cmb->addr;
		l1e->atge_l1e_rx_page_prods[curr] =
		    ATGE_GET32(dma_rx_cmb, p + curr);

		dma_rx_page = l1e->atge_l1e_rx_page[curr];
		DMA_SYNC(dma_rx_page, 0, l1e->atge_l1e_rx_page_prods[curr],
		    DDI_DMA_SYNC_FORKERNEL);

		ATGE_DB(("%s: %s() PAGE FLIPPED -> %d, producer[0,1]: %d, %d",
		    atgep->atge_name, __func__, curr,
		    ATGE_GET32(dma_rx_cmb, p), ATGE_GET32(dma_rx_cmb, p + 1)));
	}
}
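The consumer offset above is kept aligned with ROUNDUP(). A sketch of the usual definition, assuming L1E_RX_PAGE_ALIGN is a power of two (the real atge macro may differ):

/* Assumed definition: round x up to the next multiple of the power-of-two a. */
#define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))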
Example no. 10
mblk_t *
atge_l1e_receive(atge_t *atgep)
{
	atge_l1e_data_t *l1e;
	atge_dma_t *dma_rx_page;
	atge_dma_t *dma_rx_cmb;
	uint32_t *ptr;
	uint32_t cons, current_page;
	uchar_t *pageaddr, *bufp;
	rx_rs_t	*rs;
	int prog;
	uint32_t seqno, len, flags;
	mblk_t *mp = NULL, *rx_head, *rx_tail;
	static uint32_t gen = 0;

	l1e = atgep->atge_private_data;

	ASSERT(MUTEX_HELD(&atgep->atge_intr_lock));
	ASSERT(l1e != NULL);

	rx_tail = NULL;
	rx_head = NULL;

	current_page = l1e->atge_l1e_rx_curp;

	/* Sync CMB first */
	dma_rx_cmb = l1e->atge_l1e_rx_cmb;
	DMA_SYNC(dma_rx_cmb, 0, L1E_RX_CMB_SZ * L1E_RX_PAGES,
	    DDI_DMA_SYNC_FORKERNEL);

	dma_rx_page = l1e->atge_l1e_rx_page[current_page];

	/*
	 * Get the producer offset from CMB.
	 */
	ptr = (void *)dma_rx_cmb->addr;

	l1e->atge_l1e_rx_page_prods[current_page] =
	    ATGE_GET32(dma_rx_cmb, ptr + current_page);

	/* Sync current RX Page as well */
	DMA_SYNC(dma_rx_page, l1e->atge_l1e_rx_page_cons,
	    l1e->atge_l1e_rx_page_prods[current_page], DDI_DMA_SYNC_FORKERNEL);

	ATGE_DB(("%s: %s() prod : %d, cons : %d, curr page : %d, gen : (%d)"
	    " cmb[0,1] : %d, %d",
	    atgep->atge_name, __func__,
	    l1e->atge_l1e_rx_page_prods[current_page],
	    l1e->atge_l1e_rx_page_cons, l1e->atge_l1e_rx_curp, gen,
	    ATGE_GET32(dma_rx_cmb, ptr), ATGE_GET32(dma_rx_cmb, ptr + 1)));

	for (prog = 0; prog <= l1e->atge_l1e_proc_max; prog++) {
		cons = l1e->atge_l1e_rx_page_cons;
		if (cons >= l1e->atge_l1e_rx_page_prods[l1e->atge_l1e_rx_curp])
			break;

		dma_rx_page = l1e->atge_l1e_rx_page[l1e->atge_l1e_rx_curp];
		pageaddr = (uchar_t *)dma_rx_page->addr;
		pageaddr = pageaddr + cons;
		rs = (rx_rs_t *)pageaddr;

		seqno = ATGE_GET32(dma_rx_page, &(rs->seqno));
		seqno = L1E_RX_SEQNO(seqno);

		len = ATGE_GET32(dma_rx_page, &(rs->length));
		len = L1E_RX_BYTES(len);

		flags = ATGE_GET32(dma_rx_page, &(rs->flags));

		if (seqno != l1e->atge_l1e_rx_seqno) {
			/*
			 * We have not seen this happen, but we must
			 * restart the chip if it does.
			 */
			ATGE_DB(("%s: %s() MISS-MATCH in seqno :%d,"
			    " atge_l1e_rx_seqno : %d, length : %d, flags : %x",
			    atgep->atge_name, __func__, seqno,
			    l1e->atge_l1e_rx_seqno, len, flags));

			mutex_enter(&atgep->atge_tx_lock);
			atge_device_restart(atgep);
			mutex_exit(&atgep->atge_tx_lock);

			/*
			 * Return all the pkts received before restarting
			 * the chip.
			 */
			return (rx_head);
		} else {
			l1e->atge_l1e_rx_seqno++;
		}

		/*
		 * We will pass the pkt to the upper layer provided it is
		 * free of any errors.
		 */
		if ((flags & L1E_RD_ERROR) != 0) {
			if ((flags & (L1E_RD_CRC | L1E_RD_CODE |
			    L1E_RD_DRIBBLE | L1E_RD_RUNT | L1E_RD_OFLOW |
			    L1E_RD_TRUNC)) != 0) {
				ATGE_DB(("%s: %s() ERRORED PKT : %x",
				    atgep->atge_name, __func__, flags));
				atge_l1e_rx_next_pkt(atgep, len);
				atgep->atge_errrcv++;
				continue;
			}
		}

		/*
		 * So we have received a frame/pkt.
		 */
		if (len == 0 || len > atgep->atge_rx_buf_len) {
			ATGE_DB(("%s: %s() PKT len > error : %d",
			    atgep->atge_name, __func__, len));
			atge_l1e_rx_next_pkt(atgep, len);
			continue;
		}

		mp = allocb(len + VLAN_TAGSZ, BPRI_MED);
		if (mp != NULL) {
			mp->b_rptr += VLAN_TAGSZ;
			bufp = mp->b_rptr;
			mp->b_wptr = bufp + len;
			mp->b_next = NULL;

			bcopy(pageaddr + sizeof (rx_rs_t), bufp, len);

			if (rx_tail == NULL)
				rx_head = rx_tail = mp;
			else {
				rx_tail->b_next = mp;
				rx_tail = mp;
			}

			atgep->atge_ipackets++;
			atgep->atge_rbytes += len;
		} else {
			ATGE_DB(("%s: %s() PKT mp == NULL len : %d",
			    atgep->atge_name, __func__, len));

			if (len > atgep->atge_rx_buf_len) {
				atgep->atge_toolong_errors++;
			} else if (mp == NULL) {
				atgep->atge_norcvbuf++;
			}
		}

		atge_l1e_rx_next_pkt(atgep, len);

		ATGE_DB(("%s: %s() seqno :%d, atge_l1e_rx_seqno :"
		    " %d, length : %d,"
		    " flags : %x, cons : %d, prod : %d",
		    atgep->atge_name, __func__, seqno,
		    l1e->atge_l1e_rx_seqno, len, flags,
		    l1e->atge_l1e_rx_page_cons,
		    l1e->atge_l1e_rx_page_prods[l1e->atge_l1e_rx_curp]));
	}

	ATGE_DB(("%s: %s() receive completed (gen : %d) : cons : %d,"
	    " prod :%d, L1E_RX_PAGE_SZ : %d (prog:%d)",
	    atgep->atge_name, __func__, gen,
	    l1e->atge_l1e_rx_page_cons,
	    l1e->atge_l1e_rx_page_prods[l1e->atge_l1e_rx_curp],
	    L1E_RX_PAGE_SZ, prog));

	gen++;
	return (rx_head);
}
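For context, the chain returned here is normally collected by the interrupt handler while holding atge_intr_lock and then handed to GLDv3 with mac_rx() outside the lock. A hedged sketch of that consumer (the mac-handle field name atge_mh is an assumption):

/* Hypothetical caller: deliver the received chain to the MAC layer. */
mutex_enter(&atgep->atge_intr_lock);
rx_head = atge_l1e_receive(atgep);
mutex_exit(&atgep->atge_intr_lock);

if (rx_head != NULL)
	mac_rx(atgep->atge_mh, NULL, rx_head);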