Example #1
/*
 * XXX: Poll a particular ring. The implementation is incomplete.
 * Once the ring interrupts are disabled, we need to do bge_recycle()
 * for the ring as well and re-enable the ring interrupt automatically
 * if the poll doesn't find any packets in the ring. We need to
 * have MSI-X interrupts support for this.
 *
 * The basic poll policy is that rings that are dealing with explicit
 * flows (like TCP or some service) and are marked as such should
 * have their own MSI-X interrupt per ring. bge_intr() should leave
 * that interrupt disabled after an upcall. The ring is in poll mode.
 * When a poll thread comes down and finds nothing, the MSI-X interrupt
 * is automatically re-enabled. The squeue needs to handle the race of
 * a new interrupt firing and arriving before the poll thread returns.
 */
mblk_t *
bge_poll_ring(void *arg, int bytes_to_pickup)
{
    recv_ring_t *rrp = arg;
    bge_t *bgep = rrp->bgep;
    bge_rbd_t *hw_rbd_p;
    uint64_t slot;
    mblk_t *head;
    mblk_t **tail;
    mblk_t *mp;
    size_t sz = 0;

    mutex_enter(rrp->rx_lock);

    /*
     * Sync (all) the receive ring descriptors
     * before accepting the packets they describe
     */
    DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
    if (*rrp->prod_index_p >= rrp->desc.nslots) {
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
        mutex_exit(rrp->rx_lock);
        return (NULL);
    }
    if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
        rrp->rx_next = *rrp->prod_index_p;
        bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
        bgep->bge_dma_error = B_TRUE;
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        mutex_exit(rrp->rx_lock);
        return (NULL);
    }

    hw_rbd_p = DMA_VPTR(rrp->desc);
    head = NULL;
    tail = &head;
    slot = rrp->rx_next;

    while ((slot != *rrp->prod_index_p) && /* Note: volatile */
            (sz <= bytes_to_pickup)) {
        if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
                != NULL) {
            *tail = mp;
            sz += msgdsize(mp);
            tail = &mp->b_next;
        }
        rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
    }

    bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
        bgep->bge_chip_state = BGE_CHIP_ERROR;
    mutex_exit(rrp->rx_lock);
    return (head);
}
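
The loop above advances slot with the driver's own NEXT() macro and relies on DMA_SYNC() having made the descriptor ring visible to the CPU; neither definition appears in these excerpts. As a point of reference only, here is a minimal sketch of the assumed shape of the ring-advance macro (an assumption, not the driver's actual header):

/*
 * Sketch only: assumed shape of the ring-index helper used by the
 * receive loops.  The driver's real definition lives in its private
 * header and may differ in detail.
 */
#define NEXT(index, limit)	((index) + 1 < (limit) ? (index) + 1 : 0)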
Example #2
static mblk_t *
bge_receive_ring(bge_t *bgep, recv_ring_t *rrp)
{
    bge_rbd_t *hw_rbd_p;
    uint64_t slot;
    mblk_t *head;
    mblk_t **tail;
    mblk_t *mp;
    int recv_cnt = 0;

    ASSERT(mutex_owned(rrp->rx_lock));

    /*
     * Sync (all) the receive ring descriptors
     * before accepting the packets they describe
     */
    DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
    if (*rrp->prod_index_p >= rrp->desc.nslots) {
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
        return (NULL);
    }
    if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
        rrp->rx_next = *rrp->prod_index_p;
        bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
        bgep->bge_dma_error = B_TRUE;
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        return (NULL);
    }

    hw_rbd_p = DMA_VPTR(rrp->desc);
    head = NULL;
    tail = &head;
    slot = rrp->rx_next;

    while ((slot != *rrp->prod_index_p) && /* Note: volatile	*/
            (recv_cnt < BGE_MAXPKT_RCVED)) {
        if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
                != NULL) {
            *tail = mp;
            tail = &mp->b_next;
            recv_cnt++;
        }
        rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
    }

    bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
    if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
        bgep->bge_chip_state = BGE_CHIP_ERROR;
    return (head);
}
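
bge_receive_ring() asserts that the caller already holds rx_lock and returns an mblk chain rather than delivering it itself. The sketch below shows the usual calling pattern, assuming a GLDv3 driver in which bgep->mh is the mac handle registered at attach time; the helper name is hypothetical, and the error and poll-mode checks a real caller would perform are omitted.

/*
 * Sketch only: drain one receive ring and pass the chain upstream.
 * mac_rx() is the generic GLDv3 delivery entry point; bgep->mh is
 * assumed to hold the mac handle obtained at attach time.
 */
static void
bge_drain_ring_sketch(bge_t *bgep, recv_ring_t *rrp)
{
    mblk_t *mp;

    mutex_enter(rrp->rx_lock);          /* bge_receive_ring() asserts this */
    mp = bge_receive_ring(bgep, rrp);
    mutex_exit(rrp->rx_lock);

    if (mp != NULL)
        mac_rx(bgep->mh, NULL, mp);     /* hand the whole chain to MAC */
}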
Example #3
static int
bge_restart_copper(bge_t *bgep, boolean_t powerdown)
{
	uint16_t phy_status;
	boolean_t reset_ok;
	uint16_t extctrl, auxctrl;

	BGE_TRACE(("bge_restart_copper($%p, %d)", (void *)bgep, powerdown));

	ASSERT(mutex_owned(bgep->genlock));

	switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) {
	default:
		/*
		 * Shouldn't happen; it means we don't recognise this chip.
		 * It's probably a new one, so we'll try our best anyway ...
		 */
	case MHCR_CHIP_ASIC_REV_5703:
	case MHCR_CHIP_ASIC_REV_5704:
	case MHCR_CHIP_ASIC_REV_5705:
	case MHCR_CHIP_ASIC_REV_5752:
	case MHCR_CHIP_ASIC_REV_5714:
	case MHCR_CHIP_ASIC_REV_5715:
		reset_ok = bge_phy_reset_and_check(bgep);
		break;

	case MHCR_CHIP_ASIC_REV_5906:
	case MHCR_CHIP_ASIC_REV_5700:
	case MHCR_CHIP_ASIC_REV_5701:
	case MHCR_CHIP_ASIC_REV_5723:
	case MHCR_CHIP_ASIC_REV_5721_5751:
		/*
		 * Just a plain reset; the "check" code breaks these chips
		 */
		reset_ok = bge_phy_reset(bgep);
		if (!reset_ok)
			bge_fm_ereport(bgep, DDI_FM_DEVICE_NO_RESPONSE);
		break;
	}
	if (!reset_ok) {
		BGE_REPORT((bgep, "PHY failed to reset correctly"));
		return (DDI_FAILURE);
	}

	/*
	 * Step 5: disable WOL (not required after RESET)
	 *
	 * Step 6: refer to errata
	 */
	switch (bgep->chipid.asic_rev) {
	default:
		break;

	case MHCR_CHIP_REV_5704_A0:
		bge_phy_tweak_gmii(bgep);
		break;
	}

	switch (MHCR_CHIP_ASIC_REV(bgep->chipid.asic_rev)) {
	case MHCR_CHIP_ASIC_REV_5705:
	case MHCR_CHIP_ASIC_REV_5721_5751:
		bge_phy_bit_err_fix(bgep);
		break;
	}

	if (!(bgep->chipid.flags & CHIP_FLAG_NO_JUMBO) &&
	    (bgep->chipid.default_mtu > BGE_DEFAULT_MTU)) {
		/* Set the GMII Fifo Elasticity to high latency */
		extctrl = bge_mii_get16(bgep, 0x10);
		bge_mii_put16(bgep, 0x10, extctrl | 0x1);

		/* Allow reception of extended length packets */
		bge_mii_put16(bgep, MII_AUX_CONTROL, 0x0007);
		auxctrl = bge_mii_get16(bgep, MII_AUX_CONTROL);
		auxctrl |= 0x4000;
		bge_mii_put16(bgep, MII_AUX_CONTROL, auxctrl);
	}

	/*
	 * Step 7: read the MII_INTR_STATUS register twice,
	 * in order to clear any sticky bits (but they should
	 * have been cleared by the RESET, I think), and we're
	 * not using PHY interrupts anyway.
	 *
	 * Step 8: enable the PHY to interrupt on link status
	 * change (not required)
	 *
	 * Step 9: configure PHY LED Mode - not applicable?
	 *
	 * Step 10: read the MII_STATUS register twice, in
	 * order to clear any sticky bits (but they should
	 * have been cleared by the RESET, I think).
	 */
	phy_status = bge_mii_get16(bgep, MII_STATUS);
	phy_status = bge_mii_get16(bgep, MII_STATUS);
	BGE_DEBUG(("bge_restart_copper: status 0x%x", phy_status));

	/*
	 * Finally, shut down the PHY, if required
	 */
	if (powerdown)
		bge_phy_powerdown(bgep);
	return (DDI_SUCCESS);
}
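
Steps 7 and 10 above rely on the MII convention that latched status bits are cleared by reading the register, which is why MII_STATUS is read twice back to back and only the second value is kept. Purely as an illustration (the helper name is hypothetical), the idiom can be written as:

/*
 * Sketch only: read a latch-on-read MII status register twice.
 * The first read returns and clears any latched events; the second
 * read reflects the current state.
 */
static uint16_t
bge_mii_status_sketch(bge_t *bgep)
{
	(void) bge_mii_get16(bgep, MII_STATUS);		/* discard latched bits */
	return (bge_mii_get16(bgep, MII_STATUS));	/* current status */
}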
Example #4
/*
 * Special-case code to reset the PHY on the 5702/5703/5704C/5705/5782.
 * Tries up to 5 times to recover from failure to reset or PHY lockup.
 *
 * Returns TRUE on success, FALSE if there's an unrecoverable problem
 */
static boolean_t
bge_phy_reset_and_check(bge_t *bgep)
{
	boolean_t reset_success;
	boolean_t phy_locked;
	uint16_t extctrl;
	uint16_t gigctrl;
	uint_t retries;

	for (retries = 0; retries < 5; ++retries) {
		/* Issue a PHY reset, and wait for the reset to complete */
		/* Assume the reset succeeded for now; we verify below */
		reset_success = bge_phy_reset(bgep);

		/*
		 * Now go check the DFE TAPs to see if locked up, but
		 * first, we need to set up PHY so we can read DFE
		 * TAPs.
		 */

		/*
		 * Disable Transmitter and Interrupt, while we play
		 * with the PHY registers, so the link partner won't
		 * see any strange data and the Driver won't see any
		 * interrupts.
		 */
		extctrl = bge_mii_get16(bgep, 0x10);
		bge_mii_put16(bgep, 0x10, extctrl | 0x3000);

		/* Setup Full-Duplex, 1000 mbps */
		bge_mii_put16(bgep, 0x0, 0x0140);

		/* Set to Master mode */
		gigctrl = bge_mii_get16(bgep, 0x9);
		bge_mii_put16(bgep, 0x9, 0x1800);

		/* Enable SM_DSP_CLOCK & 6dB */
		bge_mii_put16(bgep, 0x18, 0x0c00);	/* "the ADC fix" */

		/* Work-arounds */
		bge_mii_put16(bgep, 0x17, 0x201f);
		bge_mii_put16(bgep, 0x15, 0x2aaa);

		/* More workarounds */
		bge_mii_put16(bgep, 0x17, 0x000a);
		bge_mii_put16(bgep, 0x15, 0x0323);	/* "the Gamma fix" */

		/* Blocks the PHY control access */
		bge_mii_put16(bgep, 0x17, 0x8005);
		bge_mii_put16(bgep, 0x15, 0x0800);

		/* Test whether PHY locked up ;-( */
		phy_locked = bge_phy_locked_up(bgep);
		if (reset_success && !phy_locked)
			break;

		/*
		 * Some problem here ... log it & retry
		 */
		if (!reset_success)
			BGE_REPORT((bgep, "PHY didn't reset!"));
		if (phy_locked)
			BGE_REPORT((bgep, "PHY locked up!"));
	}

	/* Remove block phy control */
	bge_mii_put16(bgep, 0x17, 0x8005);
	bge_mii_put16(bgep, 0x15, 0x0000);

	/* Unfreeze DFE TAP filter for all channels */
	bge_mii_put16(bgep, 0x17, 0x8200);
	bge_mii_put16(bgep, 0x16, 0x0000);

	/* Restore PHY back to operating state */
	bge_mii_put16(bgep, 0x18, 0x0400);

	/* Restore 1000BASE-T Control Register */
	bge_mii_put16(bgep, 0x9, gigctrl);

	/* Enable transmitter and interrupt */
	extctrl = bge_mii_get16(bgep, 0x10);
	bge_mii_put16(bgep, 0x10, extctrl & ~0x3000);

	if (DEVICE_5906_SERIES_CHIPSETS(bgep))
		(void) bge_adj_volt_5906(bgep);

	if (!reset_success)
		bge_fm_ereport(bgep, DDI_FM_DEVICE_NO_RESPONSE);
	else if (phy_locked)
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
	return (reset_success && !phy_locked);
}
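
The DFE TAP probing above is bracketed by writes to PHY register 0x10, which this driver treats as an extended control register: setting the bits in the 0x3000 mask disables the transmitter and interrupt while the workaround registers are written, and clearing them afterwards restores normal operation. A hedged sketch of that bracket, with a hypothetical helper name:

/*
 * Sketch only: quiesce or restore the PHY around register workarounds.
 * Per the comments above, the bits covered by 0x3000 in register 0x10
 * gate the transmitter and the PHY interrupt.
 */
static void
bge_phy_quiesce_sketch(bge_t *bgep, boolean_t quiesce)
{
	uint16_t extctrl = bge_mii_get16(bgep, 0x10);

	if (quiesce)
		bge_mii_put16(bgep, 0x10, extctrl | 0x3000);
	else
		bge_mii_put16(bgep, 0x10, extctrl & ~0x3000);
}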
Example #5
static mblk_t *
bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p, recv_ring_t *rrp)
{
    bge_rbd_t hw_rbd;
    buff_ring_t *brp;
    sw_rbd_t *srbdp;
    uchar_t *dp;
    mblk_t *mp;
    uint_t len;
    uint_t minsize;
    uint_t maxsize;
    uint32_t pflags;

    mp = NULL;
    hw_rbd = *hw_rbd_p;

    switch (hw_rbd.flags & (RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING)) {
    case RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING:
    default:
        /* error, this shouldn't happen */
        BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring flags!"));
        goto error;

    case RBD_FLAG_JUMBO_RING:
        brp = &bgep->buff[BGE_JUMBO_BUFF_RING];
        break;

#if	(BGE_BUFF_RINGS_USED > 2)
    case RBD_FLAG_MINI_RING:
        brp = &bgep->buff[BGE_MINI_BUFF_RING];
        break;
#endif	/* BGE_BUFF_RINGS_USED > 2 */

    case 0:
        brp = &bgep->buff[BGE_STD_BUFF_RING];
        break;
    }

    if (hw_rbd.index >= brp->desc.nslots) {
        /* error, this shouldn't happen */
        BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring index!"));
        goto error;
    }

    srbdp = &brp->sw_rbds[hw_rbd.index];
    if (hw_rbd.opaque != srbdp->pbuf.token) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "bad ring token"));
        goto refill;
    }

    if ((hw_rbd.flags & RBD_FLAG_PACKET_END) == 0) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "unterminated packet"));
        goto refill;
    }

    if (hw_rbd.flags & RBD_FLAG_FRAME_HAS_ERROR) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "errored packet"));
        goto refill;
    }

    len = hw_rbd.len;

#ifdef BGE_IPMI_ASF
    /*
     * When IPMI/ASF is enabled, VLAN tag must be stripped.
     */
    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
        maxsize = bgep->chipid.ethmax_size + ETHERFCSL;
    else
#endif
        /*
         * H/W will not strip the VLAN tag from incoming packet
         * now, as RECEIVE_MODE_KEEP_VLAN_TAG bit is set in
         * RECEIVE_MAC_MODE_REG register.
         */
        maxsize = bgep->chipid.ethmax_size + VLAN_TAGSZ + ETHERFCSL;
    if (len > maxsize) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "oversize packet"));
        goto refill;
    }

#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
        minsize = ETHERMIN + ETHERFCSL - VLAN_TAGSZ;
    else
#endif
        minsize = ETHERMIN + ETHERFCSL;
    if (len < minsize) {
        /* bogus, drop the packet */
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "undersize packet"));
        goto refill;
    }

    /*
     * Packet looks good; get a buffer to copy it into.
     * We want to leave some space at the front of the allocated
     * buffer in case any upstream modules want to prepend some
     * sort of header.  This also has the side-effect of making
     * the packet *contents* 4-byte aligned, as required by NCA!
     */
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
        mp = allocb(BGE_HEADROOM + len + VLAN_TAGSZ, 0);
    } else {
#endif

        mp = allocb(BGE_HEADROOM + len, 0);
#ifdef BGE_IPMI_ASF
    }
#endif
    if (mp == NULL) {
        /* Nothing to do but drop the packet */
        goto refill;
    }

    /*
     * Sync the data and copy it to the STREAMS buffer.
     */
    DMA_SYNC(srbdp->pbuf, DDI_DMA_SYNC_FORKERNEL);
    if (bge_check_dma_handle(bgep, srbdp->pbuf.dma_hdl) != DDI_FM_OK) {
        bgep->bge_dma_error = B_TRUE;
        bgep->bge_chip_state = BGE_CHIP_ERROR;
        return (NULL);
    }
#ifdef BGE_IPMI_ASF
    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
        /*
         * As VLAN tag has been stripped from incoming packet in ASF
         * scenario, we insert it into this packet again.
         */
        struct ether_vlan_header *ehp;
        mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM - VLAN_TAGSZ;
        bcopy(DMA_VPTR(srbdp->pbuf), dp, 2 * ETHERADDRL);
        ehp = (void *)dp;
        ehp->ether_tpid = ntohs(ETHERTYPE_VLAN);
        ehp->ether_tci = ntohs(hw_rbd.vlan_tci);
        bcopy(((uchar_t *)(DMA_VPTR(srbdp->pbuf))) + 2 * ETHERADDRL,
              dp + 2 * ETHERADDRL + VLAN_TAGSZ,
              len - 2 * ETHERADDRL);
    } else {
#endif
        mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM;
        bcopy(DMA_VPTR(srbdp->pbuf), dp, len);
#ifdef BGE_IPMI_ASF
    }

    if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
        mp->b_wptr = dp + len + VLAN_TAGSZ - ETHERFCSL;
    } else
#endif
        mp->b_wptr = dp + len - ETHERFCSL;

    /*
     * Special check for one specific type of data corruption;
     * in a good packet, the first 8 bytes are *very* unlikely
     * to be the same as the second 8 bytes ... but we let the
     * packet through just in case.
     */
    if (bcmp(dp, dp+8, 8) == 0)
        BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "stuttered packet?"));

    pflags = 0;
    if (hw_rbd.flags & RBD_FLAG_TCP_UDP_CHECKSUM)
        pflags |= HCK_FULLCKSUM;
    if (hw_rbd.flags & RBD_FLAG_IP_CHECKSUM)
        pflags |= HCK_IPV4_HDRCKSUM_OK;
    if (pflags != 0)
        mac_hcksum_set(mp, 0, 0, 0, hw_rbd.tcp_udp_cksum, pflags);

    /* Update per-ring rx statistics */
    rrp->rx_pkts++;
    rrp->rx_bytes += len;

refill:
    /*
     * Replace the buffer in the ring it came from ...
     */
    bge_refill(bgep, brp, srbdp);
    return (mp);

error:
    /*
     * We come here if the integrity of the ring descriptors
     * (rather than merely packet data) appears corrupted.
     * The factotum will attempt to reset-and-recover.
     */
    bgep->bge_chip_state = BGE_CHIP_ERROR;
    bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
    return (NULL);
}
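
Both ring walkers in examples #1 and #2 accumulate the packets returned by bge_receive_packet() with a tail-pointer chain: tail always points at the b_next slot the next accepted packet should occupy, so appends are O(1) and head stays NULL when nothing is accepted. A sketch of that idiom factored into a hypothetical helper (the function name and the start/end parameters are illustrative only):

/*
 * Sketch only: walk descriptors [start, end) and chain the accepted
 * packets.  Mirrors the loop bodies of bge_poll_ring() and
 * bge_receive_ring() without their byte/packet budgets.
 */
static mblk_t *
bge_chain_sketch(bge_t *bgep, recv_ring_t *rrp, bge_rbd_t *hw_rbd_p,
    uint64_t start, uint64_t end)
{
    mblk_t *head = NULL;
    mblk_t **tail = &head;
    mblk_t *mp;
    uint64_t slot;

    for (slot = start; slot != end; slot = NEXT(slot, rrp->desc.nslots)) {
        mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp);
        if (mp != NULL) {
            *tail = mp;             /* hook onto the previous b_next */
            tail = &mp->b_next;
        }
    }
    return (head);
}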