Example 1
/*
 * Reset the physical layer
 */
void
bge_phys_reset(bge_t *bgep)
{
	BGE_TRACE(("bge_phys_reset($%p)", (void *)bgep));

	mutex_enter(bgep->genlock);
	if ((*bgep->physops->phys_restart)(bgep, B_FALSE) != DDI_SUCCESS)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_UNAFFECTED);
	mutex_exit(bgep->genlock);
}
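
Every example in this collection repeats the same fault-management idiom: perform a hardware access, validate the access or DMA handle with the driver's check routine, and report a service impact when the check fails. As a minimal sketch, the idiom could be factored into a convenience macro; the macro below is hypothetical and appears in none of these drivers:

/*
 * Hypothetical helper macro illustrating the recurring FMA idiom.
 * "check" is a driver-specific handle check that returns DDI_FM_OK
 * on success; "impact" is one of the DDI_SERVICE_* severity levels
 * passed through to ddi_fm_service_impact(9F).
 */
#define	XX_FM_REPORT(dip, check, impact)			\
	do {							\
		if ((check) != DDI_FM_OK)			\
			ddi_fm_service_impact((dip), (impact));	\
	} while (0)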
Example 2
/*
 * function to trigger a POST on the device
 *
 * dev - software handle to the device
 *
 */
int
oce_POST(struct oce_dev *dev)
{
	mpu_ep_semaphore_t post_status;
	clock_t tmo;
	clock_t earlier = ddi_get_lbolt();

	/* read semaphore CSR */
	post_status.dw0 = OCE_CSR_READ32(dev, MPU_EP_SEMAPHORE);
	if (oce_fm_check_acc_handle(dev, dev->csr_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		return (DDI_FAILURE);
	}
	/* if the chip has not gone past awaiting-host-ready, trigger a POST */
	if (post_status.bits.stage <= POST_STAGE_AWAITING_HOST_RDY) {
		post_status.bits.stage = POST_STAGE_CHIP_RESET;
		OCE_CSR_WRITE32(dev, MPU_EP_SEMAPHORE, post_status.dw0);
		if (oce_fm_check_acc_handle(dev, dev->csr_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
			return (DDI_FAILURE);
		}
	}

	/* wait for FW ready */
	tmo = drv_usectohz(60000000); /* 1.0min */
	for (;;) {
		if ((ddi_get_lbolt() - earlier) > tmo) {
			tmo = 0;
			break;
		}

		post_status.dw0 = OCE_CSR_READ32(dev, MPU_EP_SEMAPHORE);
		if (oce_fm_check_acc_handle(dev, dev->csr_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
			return (DDI_FAILURE);
		}
		if (post_status.bits.error) {
			oce_log(dev, CE_WARN, MOD_CONFIG,
			    "0x%x POST ERROR!!", post_status.dw0);
			return (DDI_FAILURE);
		}
		if (post_status.bits.stage == POST_STAGE_ARMFW_READY)
			return (DDI_SUCCESS);

		drv_usecwait(100);
	}
	return (DDI_FAILURE);
} /* oce_POST */
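
The wait loop above is a common DDI polling idiom: convert a microsecond budget to lbolt ticks with drv_usectohz(9F), then spin with drv_usecwait(9F) until the deadline passes. A stripped-down sketch of the same pattern, with hypothetical names:

/*
 * Sketch of the bounded-poll idiom used by oce_POST (names are
 * illustrative).  Returns DDI_SUCCESS once the device-specific
 * readiness test passes, DDI_FAILURE on timeout.
 */
static int
xx_poll_ready(struct xx_dev *dev, uint32_t timeout_us)
{
	clock_t start = ddi_get_lbolt();
	clock_t budget = drv_usectohz(timeout_us);

	while ((ddi_get_lbolt() - start) <= budget) {
		if (xx_hw_ready(dev))	/* hypothetical readiness test */
			return (DDI_SUCCESS);
		drv_usecwait(100);	/* spin ~100us between reads */
	}
	return (DDI_FAILURE);
}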
Example 3
/*
 * Set the promiscuity of the device.
 */
int
igb_m_promisc(void *arg, boolean_t on)
{
	igb_t *igb = (igb_t *)arg;
	uint32_t reg_val;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);

	if (on)
		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	else
		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));

	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);

	mutex_exit(&igb->gen_lock);

	/*
	 * Validate the register access handle now that the lock is
	 * dropped; a failed access is reported as degraded service.
	 */
	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
Example 4
static void BnxeTimer(void * pArg)
{
    um_device_t * pUM = (um_device_t *)pArg;
    lm_device_t * pLM = &pUM->lm_dev;

    BNXE_LOCK_ENTER_TIMER(pUM);

    if (pUM->timerEnabled != B_TRUE)
    {
        BNXE_LOCK_EXIT_TIMER(pUM);
        return;
    }

    lm_stats_on_timer(pLM);

    if (pUM->fmCapabilities &&
        BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK)
    {
        ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_UNAFFECTED);
    }

    if (pUM->phyInitialized)
    {
        BNXE_LOCK_ENTER_PHY(pUM);
        lm_link_on_timer(pLM);
        BNXE_LOCK_EXIT_PHY(pUM);
    }

    pUM->timerID = timeout(BnxeTimer, (void *)pUM,
                           drv_usectohz(BNXE_TIMER_INTERVAL));

    BNXE_LOCK_EXIT_TIMER(pUM);
}
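
BnxeTimer rearms itself through timeout(9F), so the matching teardown must both clear the enable flag under the timer lock and cancel any pending callout with untimeout(9F). A sketch of that teardown, hypothetical beyond the fields the example already shows:

/*
 * Sketch of the stop side of a self-rearming timeout(9F) callout.
 * Clearing timerEnabled under the lock prevents a racing BnxeTimer
 * invocation from rearming after we cancel.
 */
static void
BnxeTimerStopSketch(um_device_t * pUM)
{
    timeout_id_t tid;

    BNXE_LOCK_ENTER_TIMER(pUM);
    pUM->timerEnabled = B_FALSE;
    tid = pUM->timerID;
    pUM->timerID = 0;
    BNXE_LOCK_EXIT_TIMER(pUM);

    if (tid != 0)
        (void) untimeout(tid);  /* waits out a running callout */
}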
Example 5
/*
 * Copy the packet into a preallocated Tx buffer.
 *
 * wq - pointer to WQ
 * wqed - pointer to WQE descriptor
 * mp - pointer to packet chain
 * pkt_len - size of the packet
 *
 * return 0 on success, error code otherwise
 */
static int
oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
    uint32_t pkt_len)
{
	oce_wq_bdesc_t *wqbd;
	caddr_t buf_va;
	struct oce_dev *dev = wq->parent;
	int len = 0;

	wqbd = oce_wqb_alloc(wq);
	if (wqbd == NULL) {
		atomic_inc_32(&dev->tx_noxmtbuf);
		oce_log(dev, CE_WARN, MOD_TX, "%s",
		    "wqb pool empty");
		return (ENOMEM);
	}

	/* create a fragment wqe for the packet */
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
	wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
	buf_va = DBUF_VA(wqbd->wqb);

	/* copy pkt into buffer */
	for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
		bcopy(mp->b_rptr, buf_va, MBLKL(mp));
		buf_va += MBLKL(mp);
		len += MBLKL(mp);
	}

	(void) ddi_dma_sync(DBUF_DHDL(wqbd->wqb), 0, pkt_len,
	    DDI_DMA_SYNC_FORDEV);

	if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		/* Free the buffer */
		oce_wqb_free(wq, wqbd);
		return (EIO);
	}
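	/*
	 * Record the fragment length and remember the buffer handle so
	 * it can be released when the transmit completes.
	 */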
	wqed->frag[wqed->frag_idx].u0.s.frag_len   =  pkt_len;
	wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
	wqed->hdesc[wqed->nhdl].type = COPY_WQE;
	wqed->frag_cnt++;
	wqed->frag_idx++;
	wqed->nhdl++;
	return (0);
} /* oce_bcopy_wqe */
Example 6
/*
 * function to map the device memory
 *
 * dev - handle to device private data structure
 *
 */
int
oce_pci_init(struct oce_dev *dev)
{
	int ret = 0;

	ret = oce_map_regs(dev);

	if (ret != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	dev->fn = OCE_PCI_FUNC(dev);

	ret = oce_fm_check_acc_handle(dev, dev->dev_cfg_handle);
	if (ret != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		oce_pci_fini(dev);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
} /* oce_pci_init */
Example 7
/*
 * e1000g_receive - main receive routine
 *
 * This routine will process packets received in an interrupt
 */
mblk_t *
e1000g_receive(e1000g_rx_ring_t *rx_ring, mblk_t **tail, uint_t sz)
{
	struct e1000_hw *hw;
	mblk_t *nmp;
	mblk_t *ret_mp;
	mblk_t *ret_nmp;
	struct e1000_rx_desc *current_desc;
	struct e1000_rx_desc *last_desc;
	p_rx_sw_packet_t packet;
	p_rx_sw_packet_t newpkt;
	uint16_t length;
	uint32_t pkt_count;
	uint32_t desc_count;
	boolean_t accept_frame;
	boolean_t end_of_packet;
	boolean_t need_copy;
	struct e1000g *Adapter;
	dma_buffer_t *rx_buf;
	uint16_t cksumflags;
	uint_t chain_sz = 0;
	e1000g_rx_data_t *rx_data;
	uint32_t max_size;
	uint32_t min_size;

	ret_mp = NULL;
	ret_nmp = NULL;
	pkt_count = 0;
	desc_count = 0;
	cksumflags = 0;

	Adapter = rx_ring->adapter;
	rx_data = rx_ring->rx_data;
	hw = &Adapter->shared;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORKERNEL);

	if (e1000g_check_dma_handle(rx_data->rbd_dma_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
		return (NULL);
	}

	current_desc = rx_data->rbd_next;
	if (!(current_desc->status & E1000_RXD_STAT_DD)) {
		/*
		 * don't send anything up. just clear the RFD
		 */
		E1000G_DEBUG_STAT(rx_ring->stat_none);
		return (NULL);
	}

	/*
	 * The default maximum excludes the FCS and a possible VLAN
	 * tag; it is widened again below once a frame is known to be
	 * tagged.
	 */
	max_size = Adapter->max_frame_size - ETHERFCSL - VLAN_TAGSZ;
	min_size = ETHERMIN;

	/*
	 * Loop through the receive descriptors starting at the last known
	 * descriptor owned by the hardware that begins a packet.
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (pkt_count < Adapter->rx_limit_onintr) &&
	    ((sz == E1000G_CHAIN_NO_LIMIT) || (chain_sz <= sz))) {

		desc_count++;
		/*
		 * A descriptor without EOP can happen with jumbo
		 * frames, where a packet spans multiple descriptors.
		 */
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* packet has EOP set */
			end_of_packet = B_TRUE;
		} else {
			/*
			 * If this received buffer does not have the
			 * End-Of-Packet bit set, the received packet
			 * will consume multiple buffers. We won't send this
			 * packet upstack till we get all the related buffers.
			 */
			end_of_packet = B_FALSE;
		}

		/*
		 * Get a pointer to the actual receive buffer; the
		 * mblk's b_rptr maps to the current descriptor's
		 * buffer address.
		 */
		packet =
		    (p_rx_sw_packet_t)QUEUE_GET_HEAD(&rx_data->recv_list);
		ASSERT(packet != NULL);

		rx_buf = packet->rx_buf;

		length = current_desc->length;

#ifdef __sparc
		if (packet->dma_type == USE_DVMA)
			dvma_sync(rx_buf->dma_handle, 0,
			    DDI_DMA_SYNC_FORKERNEL);
		else
			(void) ddi_dma_sync(rx_buf->dma_handle,
			    E1000G_IPALIGNROOM, length,
			    DDI_DMA_SYNC_FORKERNEL);
#else
		(void) ddi_dma_sync(rx_buf->dma_handle,
		    E1000G_IPALIGNROOM, length,
		    DDI_DMA_SYNC_FORKERNEL);
#endif

		if (e1000g_check_dma_handle(
		    rx_buf->dma_handle) != DDI_FM_OK) {
			ddi_fm_service_impact(Adapter->dip,
			    DDI_SERVICE_DEGRADED);
			Adapter->e1000g_state |= E1000G_ERROR;

			goto rx_drop;
		}

		accept_frame = (current_desc->errors == 0) ||
		    ((current_desc->errors &
		    (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) != 0);

		if (hw->mac.type == e1000_82543) {
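			/*
			 * 82543 TBI compatibility workaround: a frame
			 * rejected only for a carrier-extension error
			 * may still be acceptable; let the shared
			 * TBI_ACCEPT heuristic decide and adjust the
			 * stats if so.
			 */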
			unsigned char last_byte;

			last_byte =
			    *((unsigned char *)rx_buf->address + length - 1);

			if (TBI_ACCEPT(hw,
			    current_desc->status, current_desc->errors,
			    current_desc->length, last_byte,
			    Adapter->min_frame_size, Adapter->max_frame_size)) {

				e1000_tbi_adjust_stats(Adapter,
				    length, hw->mac.addr);

				length--;
				accept_frame = B_TRUE;
			} else if (e1000_tbi_sbp_enabled_82543(hw) &&
			    (current_desc->errors == E1000_RXD_ERR_CE)) {
				accept_frame = B_TRUE;
			}
		}

		/*
		 * Indicate the packet to the NOS if it was good.
		 * Normally, hardware will discard bad packets for us.
		 * Check for the packet to be a valid Ethernet packet
		 */
		if (!accept_frame) {
			/*
			 * Error in the incoming packet: either it is
			 * not a valid Ethernet-sized packet, or it
			 * carries an error. In either case it is
			 * simply discarded.
			 */
			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Process Receive Interrupts: Error in Packet\n");

			E1000G_STAT(rx_ring->stat_error);
			/*
			 * Bail out now; there is no point in letting
			 * the loop run to completion only to undo the
			 * work already done.
			 */
			goto rx_drop;
		}

		/*
		 * If the Ethernet CRC is not stripped by the hardware,
		 * we need to strip it before sending it up to the stack.
		 */
		if (end_of_packet && !Adapter->strip_crc) {
			if (length > ETHERFCSL) {
				length -= ETHERFCSL;
			} else {
				/*
				 * If the fragment is smaller than the CRC,
				 * drop it and finish processing the end
				 * of the packet.
				 */
				ASSERT(rx_data->rx_mblk_tail != NULL);
				rx_data->rx_mblk_tail->b_wptr -=
				    ETHERFCSL - length;
				rx_data->rx_mblk_len -=
				    ETHERFCSL - length;

				QUEUE_POP_HEAD(&rx_data->recv_list);

				goto rx_end_of_packet;
			}
		}

		need_copy = B_TRUE;

		if (length <= Adapter->rx_bcopy_thresh)
			goto rx_copy;

		/*
		 * Get the pre-constructed mblk that was associated
		 * to the receive data buffer.
		 */
		if (packet->mp == NULL) {
			packet->mp = desballoc((unsigned char *)
			    rx_buf->address, length,
			    BPRI_MED, &packet->free_rtn);
		}

		if (packet->mp != NULL) {
			/*
			 * We have two buffer pools: one associated with
			 * the Rx descriptors and a separate freelist
			 * pool. Each time we get a good packet, try to
			 * get a buffer from the freelist pool using
			 * e1000g_get_buf. If we get one, replace the
			 * descriptor buffer address with the free
			 * buffer we just got, and pass the
			 * pre-constructed mblk upstack (note: no
			 * copying).
			 *
			 * If we failed to get a free buffer, allocate a
			 * new buffer (mp) and copy the receive buffer
			 * content into it, leaving the descriptor
			 * buffer address undisturbed (note: copying).
			 */
			newpkt = e1000g_get_buf(rx_data);

			if (newpkt != NULL) {
				/*
				 * Get the mblk associated to the data,
				 * and strip it off the sw packet.
				 */
				nmp = packet->mp;
				packet->mp = NULL;
				atomic_inc_32(&packet->ref_cnt);

				/*
				 * Now replace old buffer with the new
				 * one we got from free list
				 * Both the RxSwPacket as well as the
				 * Receive Buffer Descriptor will now
				 * point to this new packet.
				 */
				packet = newpkt;

				current_desc->buffer_addr =
				    newpkt->rx_buf->dma_address;

				need_copy = B_FALSE;
			} else {
				/* EMPTY */
				E1000G_DEBUG_STAT(rx_ring->stat_no_freepkt);
			}
		}

rx_copy:
		if (need_copy) {
			/*
			 * No buffers available on the free list, so
			 * bcopy the data out and keep the original
			 * buffer. Not ideal, but there is no other way.
			 */
			if ((nmp = allocb(length + E1000G_IPALIGNROOM,
			    BPRI_MED)) == NULL) {
				/*
				 * The system has no buffers available
				 * to send up the incoming packet, hence
				 * the packet will have to be processed
				 * when there're more buffers available.
				 */
				E1000G_STAT(rx_ring->stat_allocb_fail);
				goto rx_drop;
			}
			nmp->b_rptr += E1000G_IPALIGNROOM;
			nmp->b_wptr += E1000G_IPALIGNROOM;
			/*
			 * The free list did not have any buffers
			 * available, so, the received packet will
			 * have to be copied into a mp and the original
			 * buffer will have to be retained for future
			 * packet reception.
			 */
			bcopy(rx_buf->address, nmp->b_wptr, length);
		}

		/*
		 * The rx_sw_packet MUST be popped off the
		 * RxSwPacketList before either a putnext or freemsg
		 * is done on the mp that has now been created by the
		 * desballoc. If not, it is possible that the free
		 * routine will get called from the interrupt context
		 * and try to put this packet on the free list
		 */
		(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

		ASSERT(nmp != NULL);
		nmp->b_wptr += length;

		if (rx_data->rx_mblk == NULL) {
			/*
			 *  TCP/UDP checksum offload and
			 *  IP checksum offload
			 */
			if (!(current_desc->status & E1000_RXD_STAT_IXSM)) {
				/*
				 * Check TCP/UDP checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_TCPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_TCPE))
					cksumflags |= HCK_FULLCKSUM |
					    HCK_FULLCKSUM_OK;
				/*
				 * Check IP Checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_IPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_IPE))
					cksumflags |= HCK_IPV4_HDRCKSUM;
			}
		}

		/*
		 * We need to maintain our packet chain in the global
		 * Adapter structure, since Rx processing can end with
		 * a fragment that has no EOP set.
		 */
		if (rx_data->rx_mblk == NULL) {
			/* Get the head of the message chain */
			rx_data->rx_mblk = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len = length;
		} else {	/* Not the first packet */
			/* Continue adding buffers */
			rx_data->rx_mblk_tail->b_cont = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len += length;
		}
		ASSERT(rx_data->rx_mblk != NULL);
		ASSERT(rx_data->rx_mblk_tail != NULL);
		ASSERT(rx_data->rx_mblk_tail->b_cont == NULL);

		/*
		 * This mp is ready to travel upwards, but more
		 * fragments may still be coming. We send the packet
		 * upwards as soon as we see EOP set on it.
		 */
		if (!end_of_packet) {
			/*
			 * continue to get the next descriptor,
			 * Tail would be advanced at the end
			 */
			goto rx_next_desc;
		}

rx_end_of_packet:
		if (E1000G_IS_VLAN_PACKET(rx_data->rx_mblk->b_rptr))
			max_size = Adapter->max_frame_size - ETHERFCSL;

		if ((rx_data->rx_mblk_len > max_size) ||
		    (rx_data->rx_mblk_len < min_size)) {
			E1000G_STAT(rx_ring->stat_size_error);
			goto rx_drop;
		}

		/*
		 * Found packet with EOP
		 * Process the last fragment.
		 */
		if (cksumflags != 0) {
			(void) hcksum_assoc(rx_data->rx_mblk,
			    NULL, NULL, 0, 0, 0, 0, cksumflags, 0);
			cksumflags = 0;
		}

		/*
		 * Count packets that span multi-descriptors
		 */
		E1000G_DEBUG_STAT_COND(rx_ring->stat_multi_desc,
		    (rx_data->rx_mblk->b_cont != NULL));

		/*
		 * Append to list to send upstream
		 */
		if (ret_mp == NULL) {
			ret_mp = ret_nmp = rx_data->rx_mblk;
		} else {
			ret_nmp->b_next = rx_data->rx_mblk;
			ret_nmp = rx_data->rx_mblk;
		}
		ret_nmp->b_next = NULL;
		*tail = ret_nmp;
		chain_sz += length;

		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;

		pkt_count++;

rx_next_desc:
		/*
		 * Zero out the receive descriptors status
		 */
		current_desc->status = 0;

		if (current_desc == rx_data->rbd_last)
			rx_data->rbd_next = rx_data->rbd_first;
		else
			rx_data->rbd_next++;

		last_desc = current_desc;
		current_desc = rx_data->rbd_next;

		/*
		 * Put the buffer that we just indicated back
		 * at the end of our list
		 */
		QUEUE_PUSH_TAIL(&rx_data->recv_list,
		    &packet->Link);
	}	/* while loop */

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	Adapter->rx_pkt_cnt = pkt_count;

	return (ret_mp);

rx_drop:
	/*
	 * Zero out the receive descriptors status
	 */
	current_desc->status = 0;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (current_desc == rx_data->rbd_last)
		rx_data->rbd_next = rx_data->rbd_first;
	else
		rx_data->rbd_next++;

	last_desc = current_desc;

	(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

	QUEUE_PUSH_TAIL(&rx_data->recv_list, &packet->Link);
	/*
	 * Reclaim any buffers already chained up during an incomplete
	 * jumbo-frame reception.
	 */
	if (rx_data->rx_mblk != NULL) {
		freemsg(rx_data->rx_mblk);
		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;
	}
	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	return (ret_mp);
}
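
The zero-copy path in e1000g_receive hinges on desballoc(9F): the mblk wraps the driver's own receive buffer, and the frtn_t free routine returns that buffer to the pool when the stack eventually frees the message. A minimal sketch of the pairing, with hypothetical types and names:

/*
 * Sketch of the desballoc(9F) loaned-buffer pattern (hypothetical
 * names).  The free routine runs when the stack frees the mblk and
 * must hand the buffer back to the driver's pool.
 */
static void
xx_rx_recycle(caddr_t arg)
{
	xx_rx_buf_t *buf = (xx_rx_buf_t *)(void *)arg;

	xx_buf_return(buf->pool, buf);
}

static mblk_t *
xx_rx_wrap(xx_rx_buf_t *buf, size_t len)
{
	buf->free_rtn.free_func = xx_rx_recycle;
	buf->free_rtn.free_arg = (caddr_t)buf;

	/* may return NULL; the caller must then fall back to bcopy */
	return (desballoc((unsigned char *)buf->address, len,
	    BPRI_MED, &buf->free_rtn));
}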
Example 8
/*
 * Update driver private statistics.
 */
static int
igb_update_stats(kstat_t *ks, int rw)
{
	igb_t *igb;
	struct e1000_hw *hw;
	igb_stat_t *igb_ks;
	uint32_t val_low, val_high;
#ifdef IGB_DEBUG
	int i;
#endif

	if (rw == KSTAT_WRITE)
		return (EACCES);

	igb = (igb_t *)ks->ks_private;
	igb_ks = (igb_stat_t *)ks->ks_data;
	hw = &igb->hw;

	mutex_enter(&igb->gen_lock);

	/*
	 * Basic information.
	 */
	igb_ks->reset_count.value.ui64 = igb->reset_count;
	igb_ks->dout_sync.value.ui64 = igb->dout_sync;

#ifdef IGB_DEBUG
	igb_ks->rx_frame_error.value.ui64 = 0;
	igb_ks->rx_cksum_error.value.ui64 = 0;
	igb_ks->rx_exceed_pkt.value.ui64 = 0;
	for (i = 0; i < igb->num_rx_rings; i++) {
		igb_ks->rx_frame_error.value.ui64 +=
		    igb->rx_rings[i].stat_frame_error;
		igb_ks->rx_cksum_error.value.ui64 +=
		    igb->rx_rings[i].stat_cksum_error;
		igb_ks->rx_exceed_pkt.value.ui64 +=
		    igb->rx_rings[i].stat_exceed_pkt;
	}

	igb_ks->tx_overload.value.ui64 = 0;
	igb_ks->tx_fail_no_tbd.value.ui64 = 0;
	igb_ks->tx_fail_no_tcb.value.ui64 = 0;
	igb_ks->tx_fail_dma_bind.value.ui64 = 0;
	igb_ks->tx_reschedule.value.ui64 = 0;
	for (i = 0; i < igb->num_tx_rings; i++) {
		igb_ks->tx_overload.value.ui64 +=
		    igb->tx_rings[i].stat_overload;
		igb_ks->tx_fail_no_tbd.value.ui64 +=
		    igb->tx_rings[i].stat_fail_no_tbd;
		igb_ks->tx_fail_no_tcb.value.ui64 +=
		    igb->tx_rings[i].stat_fail_no_tcb;
		igb_ks->tx_fail_dma_bind.value.ui64 +=
		    igb->tx_rings[i].stat_fail_dma_bind;
		igb_ks->tx_reschedule.value.ui64 +=
		    igb->tx_rings[i].stat_reschedule;
	}

	/*
	 * Hardware calculated statistics.
	 */
	igb_ks->gprc.value.ul += E1000_READ_REG(hw, E1000_GPRC);
	igb_ks->gptc.value.ul += E1000_READ_REG(hw, E1000_GPTC);
	igb_ks->prc64.value.ul += E1000_READ_REG(hw, E1000_PRC64);
	igb_ks->prc127.value.ul += E1000_READ_REG(hw, E1000_PRC127);
	igb_ks->prc255.value.ul += E1000_READ_REG(hw, E1000_PRC255);
	igb_ks->prc511.value.ul += E1000_READ_REG(hw, E1000_PRC511);
	igb_ks->prc1023.value.ul += E1000_READ_REG(hw, E1000_PRC1023);
	igb_ks->prc1522.value.ul += E1000_READ_REG(hw, E1000_PRC1522);
	igb_ks->ptc64.value.ul += E1000_READ_REG(hw, E1000_PTC64);
	igb_ks->ptc127.value.ul += E1000_READ_REG(hw, E1000_PTC127);
	igb_ks->ptc255.value.ul += E1000_READ_REG(hw, E1000_PTC255);
	igb_ks->ptc511.value.ul += E1000_READ_REG(hw, E1000_PTC511);
	igb_ks->ptc1023.value.ul += E1000_READ_REG(hw, E1000_PTC1023);
	igb_ks->ptc1522.value.ul += E1000_READ_REG(hw, E1000_PTC1522);

	/*
	 * The 64-bit register will reset whenever the upper
	 * 32 bits are read. So we need to read the lower
	 * 32 bits first, then read the upper 32 bits.
	 */
	val_low = E1000_READ_REG(hw, E1000_GORCL);
	val_high = E1000_READ_REG(hw, E1000_GORCH);
	igb_ks->gor.value.ui64 += (uint64_t)val_high << 32 | (uint64_t)val_low;

	val_low = E1000_READ_REG(hw, E1000_GOTCL);
	val_high = E1000_READ_REG(hw, E1000_GOTCH);
	igb_ks->got.value.ui64 += (uint64_t)val_high << 32 | (uint64_t)val_low;
#endif
	igb_ks->symerrs.value.ui64 += E1000_READ_REG(hw, E1000_SYMERRS);
	igb_ks->mpc.value.ui64 += E1000_READ_REG(hw, E1000_MPC);
	igb_ks->rlec.value.ui64 += E1000_READ_REG(hw, E1000_RLEC);
	igb_ks->fcruc.value.ui64 += E1000_READ_REG(hw, E1000_FCRUC);
	igb_ks->rfc.value.ul += E1000_READ_REG(hw, E1000_RFC);
	igb_ks->tncrs.value.ul += E1000_READ_REG(hw, E1000_TNCRS);
	igb_ks->tsctc.value.ul += E1000_READ_REG(hw, E1000_TSCTC);
	igb_ks->tsctfc.value.ul += E1000_READ_REG(hw, E1000_TSCTFC);
	igb_ks->xonrxc.value.ui64 += E1000_READ_REG(hw, E1000_XONRXC);
	igb_ks->xontxc.value.ui64 += E1000_READ_REG(hw, E1000_XONTXC);
	igb_ks->xoffrxc.value.ui64 += E1000_READ_REG(hw, E1000_XOFFRXC);
	igb_ks->xofftxc.value.ui64 += E1000_READ_REG(hw, E1000_XOFFTXC);

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
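
The split 64-bit counter reads above follow a strict order: the low half first, because reading the high half resets the register. A sketch of a helper that captures the idiom (hypothetical; the driver open-codes it):

/*
 * Read a 64-bit statistics register as two 32-bit halves.  The low
 * half must be read first since reading the high half resets the
 * register (hypothetical helper).
 */
static uint64_t
xx_read_stat64(struct e1000_hw *hw, uint32_t lo_reg, uint32_t hi_reg)
{
	uint32_t lo = E1000_READ_REG(hw, lo_reg);
	uint32_t hi = E1000_READ_REG(hw, hi_reg);

	return ((uint64_t)hi << 32 | (uint64_t)lo);
}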
Example 9
int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb_ks->mprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPRC);
		*val = igb_ks->mprc.value.ui64;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb_ks->bprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPRC);
		*val = igb_ks->bprc.value.ui64;
		break;

	case MAC_STAT_MULTIXMT:
		igb_ks->mptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPTC);
		*val = igb_ks->mptc.value.ui64;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb_ks->bptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPTC);
		*val = igb_ks->bptc.value.ui64;
		break;

	case MAC_STAT_NORCVBUF:
		igb_ks->rnbc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RNBC);
		*val = igb_ks->rnbc.value.ui64;
		break;

	case MAC_STAT_IERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->rxerrc.value.ui64 +
		    igb_ks->algnerrc.value.ui64 +
		    igb_ks->rlec.value.ui64 +
		    igb_ks->crcerrs.value.ui64 +
		    igb_ks->cexterr.value.ui64;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case MAC_STAT_COLLISIONS:
		igb_ks->colc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_COLC);
		*val = igb_ks->colc.value.ui64;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb_ks->tor.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tor.value.ui64;
		break;

	case MAC_STAT_IPACKETS:
		igb_ks->tpr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPR);
		*val = igb_ks->tpr.value.ui64;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb_ks->tot.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tot.value.ui64;
		break;

	case MAC_STAT_OPACKETS:
		igb_ks->tpt.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPT);
		*val = igb_ks->tpt.value.ui64;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb_ks->algnerrc.value.ui64;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb_ks->crcerrs.value.ui64;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb_ks->scc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SCC);
		*val = igb_ks->scc.value.ui64;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb_ks->mcc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MCC);
		*val = igb_ks->mcc.value.ui64;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb_ks->sec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SEC);
		*val = igb_ks->sec.value.ui64;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb_ks->dc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_DC);
		*val = igb_ks->dc.value.ui64;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb_ks->latecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb_ks->latecol.value.ui64;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->cexterr.value.ui64;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb_ks->roc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ROC);
		*val = igb_ks->roc.value.ui64;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb_ks->rxerrc.value.ui64;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = hw->phy.id | hw->phy.revision;
		break;

	case ETHER_STAT_XCVR_INUSE:
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			if (hw->phy.media_type != e1000_media_type_copper)
				*val = XCVR_100X;
			else if (igb->param_100t4_cap == 1)
				*val = XCVR_100T4;
			else
				*val = XCVR_100T2;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb_ks->ruc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RUC);
		*val = igb_ks->ruc.value.ui64;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb_ks->rjc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RJC);
		*val = igb_ks->rjc.value.ui64;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
Example 10
/*
 * Transmit a single packet over the wire.
 *
 * wq - pointer to WQ
 * mp - pointer to packet chain
 *
 * return NULL when the packet is consumed, or mp if it could not
 * be sent and should be retried
 */
mblk_t *
oce_send_packet(struct oce_wq *wq, mblk_t *mp)
{
	struct oce_nic_hdr_wqe *wqeh;
	struct oce_dev *dev;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	int32_t num_wqes;
	uint16_t etype;
	uint32_t ip_offset;
	uint32_t csum_flags = 0;
	boolean_t use_copy = B_FALSE;
	boolean_t tagged   = B_FALSE;
	uint16_t  vlan_tag;
	uint32_t  reg_value = 0;
	oce_wqe_desc_t *wqed = NULL;
	mblk_t *nmp = NULL;
	mblk_t *tmp = NULL;
	uint32_t pkt_len = 0;
	int num_mblks = 0;
	int ret = 0;
	uint32_t mss = 0;
	uint32_t flags = 0;
	int len = 0;

	/* retrieve the adap priv struct ptr */
	dev = wq->parent;

	/* check if we have enough free slots */
	if (wq->wq_free < dev->tx_reclaim_threshold) {
		(void) oce_process_tx_compl(wq, B_FALSE);
	}
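	/*
	 * Still not enough slots for a worst-case fragment count;
	 * return the chain unconsumed so the caller can retry later.
	 */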
	if (wq->wq_free < OCE_MAX_TX_HDL) {
		return (mp);
	}

	/* check if we should copy */
	for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
		pkt_len += MBLKL(tmp);
		num_mblks++;
	}

	if (pkt_len == 0 || num_mblks == 0) {
		freemsg(mp);
		return (NULL);
	}

	/* retrieve LSO information */
	mac_lso_get(mp, &mss, &flags);

	/* get the offload flags */
	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);

	/* restrict the number of mapped segments to what we support */
	if (num_mblks  > OCE_MAX_TX_HDL) {
		nmp = msgpullup(mp, -1);
		if (nmp == NULL) {
			atomic_inc_32(&wq->pkt_drops);
			freemsg(mp);
			return (NULL);
		}
		/* Reset it to new collapsed mp */
		freemsg(mp);
		mp = nmp;
	}

	/* Get the packet descriptor for Tx */
	wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
	if (wqed == NULL) {
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}
	eh = (struct ether_header *)(void *)mp->b_rptr;
	if (ntohs(eh->ether_type) == VLAN_TPID) {
		evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
		tagged = B_TRUE;
		etype = ntohs(evh->ether_type);
		ip_offset = sizeof (struct ether_vlan_header);
		pkt_len -= VTAG_SIZE;
		vlan_tag = ntohs(evh->ether_tci);
		oce_remove_vtag(mp);
	} else {
		etype = ntohs(eh->ether_type);
		ip_offset = sizeof (struct ether_header);
	}

	/* Save the WQ pointer */
	wqed->wq = wq;
	wqed->frag_idx = 1; /* index zero is always header */
	wqed->frag_cnt = 0;
	wqed->nhdl = 0;
	wqed->mp = NULL;
	OCE_LIST_LINK_INIT(&wqed->link);

	/* If entire packet is less than the copy limit  just do copy */
	if (pkt_len < dev->tx_bcopy_limit) {
		use_copy = B_TRUE;
		ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
	} else {
		/* copy or dma map the individual fragments */
		for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
			len = MBLKL(nmp);
			if (len == 0) {
				continue;
			}
			if (len < dev->tx_bcopy_limit) {
				ret = oce_bcopy_wqe(wq, wqed, nmp, len);
			} else {
				ret = oce_map_wqe(wq, wqed, nmp, len);
			}
			if (ret != 0)
				break;
		}
	}

	/*
	 * Any failure other than insufficient Q entries
	 * drop the packet
	 */
	if (ret != 0) {
		oce_free_wqed(wq, wqed);
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}

	wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
	bzero(wqeh, sizeof (struct oce_nic_hdr_wqe));

	/* fill rest of wqe header fields based on packet */
	if (flags & HW_LSO) {
		wqeh->u0.s.lso = B_TRUE;
		wqeh->u0.s.lso_mss = mss;
	}
	if (csum_flags & HCK_FULLCKSUM) {
		uint8_t *proto;
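		/* byte 9 of an IPv4 header is the protocol field */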
		if (etype == ETHERTYPE_IP) {
			proto = (uint8_t *)(void *)
			    (mp->b_rptr + ip_offset);
			if (proto[9] == 6)
				/* IPPROTO_TCP */
				wqeh->u0.s.tcpcs = B_TRUE;
			else if (proto[9] == 17)
				/* IPPROTO_UDP */
				wqeh->u0.s.udpcs = B_TRUE;
		}
	}

	if (csum_flags & HCK_IPV4_HDRCKSUM)
		wqeh->u0.s.ipcs = B_TRUE;
	if (tagged) {
		wqeh->u0.s.vlan = B_TRUE;
		wqeh->u0.s.vlan_tag = vlan_tag;
	}

	wqeh->u0.s.complete = B_TRUE;
	wqeh->u0.s.event = B_TRUE;
	wqeh->u0.s.crc = B_TRUE;
	wqeh->u0.s.total_length = pkt_len;

	num_wqes = wqed->frag_cnt + 1;

	/* h/w expects even no. of WQEs */
	if (num_wqes & 0x1) {
		bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
		num_wqes++;
	}
	wqed->wqe_cnt = (uint16_t)num_wqes;
	wqeh->u0.s.num_wqe = num_wqes;
	DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));

	mutex_enter(&wq->tx_lock);
	if (num_wqes > wq->wq_free) {
		atomic_inc_32(&wq->tx_deferd);
		mutex_exit(&wq->tx_lock);
		goto wqe_fail;
	}
	atomic_add_32(&wq->wq_free, -num_wqes);

	/* fill the wq for adapter */
	oce_fill_ring_descs(wq, wqed);

	/* Set the mp pointer in the wqe descriptor */
	if (use_copy == B_FALSE) {
		wqed->mp = mp;
	}
	/* Add the packet desc to list to be retrieved during cmpl */
	OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list,  wqed);
	(void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/* ring the tx doorbell */
	reg_value = (num_wqes << 16) | wq->wq_id;
	OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
	mutex_exit(&wq->tx_lock);
	if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
	}

	/* free mp if copied or packet chain collapsed */
	if (use_copy == B_TRUE) {
		freemsg(mp);
	}
	return (NULL);

wqe_fail:

	if (tagged) {
		oce_insert_vtag(mp, vlan_tag);
	}
	oce_free_wqed(wq, wqed);
	return (mp);
} /* oce_send_packet */
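
oce_send_packet follows the GLDv3 transmit convention: return NULL once the message is consumed, or hand the original mblk back (with the VLAN tag restored) so the framework can retry when the ring is full. A sketch of an mc_tx entry point layered on top of it; the wrapper and its single-ring assumption are hypothetical:

/*
 * Hypothetical GLDv3 mc_tx entry built on oce_send_packet.  Whatever
 * part of the chain comes back unconsumed is returned to the MAC
 * layer, which will call us again when resources free up.
 */
static mblk_t *
xx_m_tx(void *arg, mblk_t *mp)
{
	struct oce_dev *dev = arg;
	struct oce_wq *wq = dev->wq[0];	/* assumed single-ring layout */
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if ((mp = oce_send_packet(wq, mp)) != NULL) {
			/* ring full: relink and let the MAC retry */
			mp->b_next = next;
			break;
		}
		mp = next;
	}
	return (mp);
}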
Example 11
int
bge_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	bge_t *bgep = arg;
	bge_statistics_t *bstp;
	bge_statistics_reg_t *pstats;

	if (bgep->bge_chip_state != BGE_CHIP_RUNNING) {
		return (EINVAL);
	}

	if (bgep->chipid.statistic_type == BGE_STAT_BLK)
		bstp = DMA_VPTR(bgep->statistics);
	else {
		pstats = bgep->pstats;
		pstats->ifHCOutOctets +=
		    bge_reg_get32(bgep, STAT_IFHCOUT_OCTETS_REG);
		pstats->etherStatsCollisions +=
		    bge_reg_get32(bgep, STAT_ETHER_COLLIS_REG);
		pstats->outXonSent +=
		    bge_reg_get32(bgep, STAT_OUTXON_SENT_REG);
		pstats->outXoffSent +=
		    bge_reg_get32(bgep, STAT_OUTXOFF_SENT_REG);
		pstats->dot3StatsInternalMacTransmitErrors +=
		    bge_reg_get32(bgep, STAT_DOT3_INTMACTX_ERR_REG);
		pstats->dot3StatsSingleCollisionFrames +=
		    bge_reg_get32(bgep, STAT_DOT3_SCOLLI_FRAME_REG);
		pstats->dot3StatsMultipleCollisionFrames +=
		    bge_reg_get32(bgep, STAT_DOT3_MCOLLI_FRAME_REG);
		pstats->dot3StatsDeferredTransmissions +=
		    bge_reg_get32(bgep, STAT_DOT3_DEFERED_TX_REG);
		pstats->dot3StatsExcessiveCollisions +=
		    bge_reg_get32(bgep, STAT_DOT3_EXCE_COLLI_REG);
		pstats->dot3StatsLateCollisions +=
		    bge_reg_get32(bgep, STAT_DOT3_LATE_COLLI_REG);
		pstats->ifHCOutUcastPkts +=
		    bge_reg_get32(bgep, STAT_IFHCOUT_UPKGS_REG);
		pstats->ifHCOutMulticastPkts +=
		    bge_reg_get32(bgep, STAT_IFHCOUT_MPKGS_REG);
		pstats->ifHCOutBroadcastPkts +=
		    bge_reg_get32(bgep, STAT_IFHCOUT_BPKGS_REG);
		pstats->ifHCInOctets +=
		    bge_reg_get32(bgep, STAT_IFHCIN_OCTETS_REG);
		pstats->etherStatsFragments +=
		    bge_reg_get32(bgep, STAT_ETHER_FRAGMENT_REG);
		pstats->ifHCInUcastPkts +=
		    bge_reg_get32(bgep, STAT_IFHCIN_UPKGS_REG);
		pstats->ifHCInMulticastPkts +=
		    bge_reg_get32(bgep, STAT_IFHCIN_MPKGS_REG);
		pstats->ifHCInBroadcastPkts +=
		    bge_reg_get32(bgep, STAT_IFHCIN_BPKGS_REG);
		pstats->dot3StatsFCSErrors +=
		    bge_reg_get32(bgep, STAT_DOT3_FCS_ERR_REG);
		pstats->dot3StatsAlignmentErrors +=
		    bge_reg_get32(bgep, STAT_DOT3_ALIGN_ERR_REG);
		pstats->xonPauseFramesReceived +=
		    bge_reg_get32(bgep, STAT_XON_PAUSE_RX_REG);
		pstats->xoffPauseFramesReceived +=
		    bge_reg_get32(bgep, STAT_XOFF_PAUSE_RX_REG);
		pstats->macControlFramesReceived +=
		    bge_reg_get32(bgep, STAT_MAC_CTRL_RX_REG);
		pstats->xoffStateEntered +=
		    bge_reg_get32(bgep, STAT_XOFF_STATE_ENTER_REG);
		pstats->dot3StatsFrameTooLongs +=
		    bge_reg_get32(bgep, STAT_DOT3_FRAME_TOOLONG_REG);
		pstats->etherStatsJabbers +=
		    bge_reg_get32(bgep, STAT_ETHER_JABBERS_REG);
		pstats->etherStatsUndersizePkts +=
		    bge_reg_get32(bgep, STAT_ETHER_UNDERSIZE_REG);
		mutex_enter(bgep->genlock);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		}
		mutex_exit(bgep->genlock);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = (bgep->link_state != LINK_STATE_UNKNOWN) ?
		           (bgep->param_link_speed * 1000000ull) : 0;
		break;

	case MAC_STAT_MULTIRCV:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifHCInMulticastPkts;
		else
			*val = pstats->ifHCInMulticastPkts;
		break;

	case MAC_STAT_BRDCSTRCV:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifHCInBroadcastPkts;
		else
			*val = pstats->ifHCInBroadcastPkts;
		break;

	case MAC_STAT_MULTIXMT:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifHCOutMulticastPkts;
		else
			*val = pstats->ifHCOutMulticastPkts;
		break;

	case MAC_STAT_BRDCSTXMT:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifHCOutBroadcastPkts;
		else
			*val = pstats->ifHCOutBroadcastPkts;
		break;

	case MAC_STAT_NORCVBUF:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifInDiscards;
		else
			*val = 0;
		break;

	case MAC_STAT_IERRORS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK) {
			*val = bstp->s.dot3StatsFCSErrors +
			    bstp->s.dot3StatsAlignmentErrors +
			    bstp->s.dot3StatsFrameTooLongs +
			    bstp->s.etherStatsUndersizePkts +
			    bstp->s.etherStatsJabbers;
		} else {
			*val = pstats->dot3StatsFCSErrors +
			    pstats->dot3StatsAlignmentErrors +
			    pstats->dot3StatsFrameTooLongs +
			    pstats->etherStatsUndersizePkts +
			    pstats->etherStatsJabbers;
		}
		break;

	case MAC_STAT_NOXMTBUF:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifOutDiscards;
		else
			*val = 0;
		break;

	case MAC_STAT_OERRORS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifOutDiscards;
		else
			*val = 0;
		break;

	case MAC_STAT_COLLISIONS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.etherStatsCollisions;
		else
			*val = pstats->etherStatsCollisions;
		break;

	case MAC_STAT_RBYTES:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifHCInOctets;
		else
			*val = pstats->ifHCInOctets;
		break;

	case MAC_STAT_IPACKETS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifHCInUcastPkts +
			    bstp->s.ifHCInMulticastPkts +
			    bstp->s.ifHCInBroadcastPkts;
		else
			*val = pstats->ifHCInUcastPkts +
			    pstats->ifHCInMulticastPkts +
			    pstats->ifHCInBroadcastPkts;
		break;

	case MAC_STAT_OBYTES:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifHCOutOctets;
		else
			*val = pstats->ifHCOutOctets;
		break;

	case MAC_STAT_OPACKETS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.ifHCOutUcastPkts +
			    bstp->s.ifHCOutMulticastPkts +
			    bstp->s.ifHCOutBroadcastPkts;
		else
			*val = pstats->ifHCOutUcastPkts +
			    pstats->ifHCOutMulticastPkts +
			    pstats->ifHCOutBroadcastPkts;
		break;

	case ETHER_STAT_ALIGN_ERRORS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsAlignmentErrors;
		else
			*val = pstats->dot3StatsAlignmentErrors;
		break;

	case ETHER_STAT_FCS_ERRORS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsFCSErrors;
		else
			*val = pstats->dot3StatsFCSErrors;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsSingleCollisionFrames;
		else
			*val = pstats->dot3StatsSingleCollisionFrames;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsMultipleCollisionFrames;
		else
			*val = pstats->dot3StatsMultipleCollisionFrames;
		break;

	case ETHER_STAT_DEFER_XMTS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsDeferredTransmissions;
		else
			*val = pstats->dot3StatsDeferredTransmissions;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsLateCollisions;
		else
			*val = pstats->dot3StatsLateCollisions;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsExcessiveCollisions;
		else
			*val = pstats->dot3StatsExcessiveCollisions;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsInternalMacTransmitErrors;
		else
			*val = bgep->pstats->dot3StatsInternalMacTransmitErrors;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsCarrierSenseErrors;
		else
			*val = 0;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.dot3StatsFrameTooLongs;
		else
			*val = pstats->dot3StatsFrameTooLongs;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_TOOSHORT_ERRORS:
		if (bgep->chipid.statistic_type == BGE_STAT_BLK)
			*val = bstp->s.etherStatsUndersizePkts;
		else
			*val = pstats->etherStatsUndersizePkts;
		break;
#endif

	case ETHER_STAT_XCVR_ADDR:
		*val = bgep->phy_mii_addr;
		break;

	case ETHER_STAT_XCVR_ID:
		mutex_enter(bgep->genlock);
		*val = bge_mii_get16(bgep, MII_PHYIDH);
		*val <<= 16;
		*val |= bge_mii_get16(bgep, MII_PHYIDL);
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_UNAFFECTED);
		}
		mutex_exit(bgep->genlock);
		break;

	case ETHER_STAT_XCVR_INUSE:
		if (bgep->chipid.flags & CHIP_FLAG_SERDES)
			*val = XCVR_1000X;
		else
			*val = XCVR_1000T;
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = 1;
		break;

	case ETHER_STAT_CAP_100FDX:
		if (bgep->chipid.flags & CHIP_FLAG_SERDES)
			*val = 0;
		else
			*val = 1;
		break;

	case ETHER_STAT_CAP_100HDX:
		if (bgep->chipid.flags & CHIP_FLAG_SERDES)
			*val = 0;
		else
			*val = 1;
		break;

	case ETHER_STAT_CAP_10FDX:
		if (bgep->chipid.flags & CHIP_FLAG_SERDES)
			*val = 0;
		else
			*val = 1;
		break;

	case ETHER_STAT_CAP_10HDX:
		if (bgep->chipid.flags & CHIP_FLAG_SERDES)
			*val = 0;
		else
			*val = 1;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = 1;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = 1;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_CAP_REMFAULT:
		*val = 1;
		break;
#endif

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = bgep->param_adv_1000fdx;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = bgep->param_adv_1000hdx;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = bgep->param_adv_100fdx;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = bgep->param_adv_100hdx;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = bgep->param_adv_10fdx;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = bgep->param_adv_10hdx;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = bgep->param_adv_asym_pause;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = bgep->param_adv_pause;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = bgep->param_adv_autoneg;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_ADV_REMFAULT:
		if (bgep->chipid.flags & CHIP_FLAG_SERDES)
			*val = 0;
		else {
			mutex_enter(bgep->genlock);
			*val = bge_mii_get16(bgep, MII_AN_ADVERT) &
			    MII_AN_ADVERT_REMFAULT ? 1 : 0;
			if (bge_check_acc_handle(bgep, bgep->io_handle) !=
			    DDI_FM_OK) {
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_UNAFFECTED);
			}
			mutex_exit(bgep->genlock);
		}
		break;
#endif

	case ETHER_STAT_LP_CAP_1000FDX:
		*val = bgep->param_lp_1000fdx;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = bgep->param_lp_1000hdx;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = bgep->param_lp_100fdx;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = bgep->param_lp_100hdx;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = bgep->param_lp_10fdx;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = bgep->param_lp_10hdx;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = bgep->param_lp_asym_pause;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = bgep->param_lp_pause;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = bgep->param_lp_autoneg;
		break;

#if (MAC_VERSION > 1)
	case ETHER_STAT_LP_REMFAULT:
		if (bgep->chipid.flags & CHIP_FLAG_SERDES)
			*val = 0;
		else {
			mutex_enter(bgep->genlock);
			*val = bge_mii_get16(bgep, MII_AN_LPABLE) &
			    MII_AN_ADVERT_REMFAULT ? 1 : 0;
			if (bge_check_acc_handle(bgep, bgep->io_handle) !=
			    DDI_FM_OK) {
				ddi_fm_service_impact(bgep->devinfo,
				    DDI_SERVICE_UNAFFECTED);
			}
			mutex_exit(bgep->genlock);
		}
		break;
#endif

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = bgep->param_adv_asym_pause &&
		    bgep->param_lp_asym_pause &&
		    bgep->param_adv_pause != bgep->param_lp_pause;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = bgep->param_link_rx_pause;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = bgep->param_link_autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (bgep->link_state != LINK_STATE_UNKNOWN) ?
		           bgep->param_link_duplex : LINK_DUPLEX_UNKNOWN;
		break;

	default:
		return (ENOTSUP);
	}

	return (0);
}
Example 12
static int
bge_phydata_update(kstat_t *ksp, int flag)
{
	bge_t *bgep;
	kstat_named_t *knp;
	const bge_ksindex_t *ksip;

	if (flag != KSTAT_READ)
		return (EACCES);

	bgep = ksp->ks_private;
	if (bgep->bge_chip_state == BGE_CHIP_FAULT)
		return (EIO);

	knp = ksp->ks_data;

	/*
	 * Read the PHY registers & update the kstats ...
	 *
	 * We need to hold the mutex while performing MII reads, but
	 * we don't want to hold it across the entire sequence of reads.
	 * So we grab and release it on each iteration, because it
	 * doesn't really matter if the kstats are less than 100%
	 * consistent ...
	 */
	for (ksip = bge_phydata; ksip->name != NULL; ++knp, ++ksip) {
		mutex_enter(bgep->genlock);
		switch (ksip->index) {
		case MII_STATUS:
			knp->value.ui64 = bgep->phy_gen_status;
			break;

		case MII_PHYIDH:
			knp->value.ui64 = bge_mii_get16(bgep, MII_PHYIDH);
			knp->value.ui64 <<= 16;
			knp->value.ui64 |= bge_mii_get16(bgep, MII_PHYIDL);
			break;

		case EEE_MODE_REG:
			knp->value.ui64 = 0;
			if (bgep->link_state == LINK_STATE_UP)
			{
				knp->value.ui64 =
				    (bge_reg_get32(bgep, EEE_MODE_REG) & 0x80) ?
				        1 : 0;
			}
			break;

		default:
			knp->value.ui64 = bge_mii_get16(bgep, ksip->index);
			break;
		}
		if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
			ddi_fm_service_impact(bgep->devinfo,
			    DDI_SERVICE_DEGRADED);
			mutex_exit(bgep->genlock);
			return (EIO);
		}
		mutex_exit(bgep->genlock);
	}

	return (0);
}
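
bge_phydata_update is a kstat ks_update callback; it only runs when a consumer reads the kstat. A sketch of how such a callback is wired up at attach time, with hypothetical names and counts (the real driver derives the entry count from its bge_ksindex_t table):

/*
 * Sketch of ks_update registration ("instance" and "nstats" are
 * placeholders supplied by the attach path).
 */
kstat_t *ksp;

ksp = kstat_create(BGE_DRIVER_NAME, instance, "phydata", "net",
    KSTAT_TYPE_NAMED, nstats, 0);
if (ksp != NULL) {
	ksp->ks_private = bgep;
	ksp->ks_update = bge_phydata_update;
	kstat_install(ksp);
}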
Example 13
static int
bge_driverinfo_update(kstat_t *ksp, int flag)
{
	bge_t *bgep;
	kstat_named_t *knp;
	ddi_acc_handle_t handle;

	if (flag != KSTAT_READ)
		return (EACCES);

	bgep = ksp->ks_private;
	if (bgep->bge_chip_state == BGE_CHIP_FAULT)
		return (EIO);

	knp = ksp->ks_data;

	/* each (knp++) walks the named-kstat array in creation order */
	(knp++)->value.ui64 = bgep->rx_buff[0].cookie.dmac_laddress;
	(knp++)->value.ui64 = bgep->tx_buff[0].cookie.dmac_laddress;
	(knp++)->value.ui64 = bgep->rx_desc[0].cookie.dmac_laddress;
	(knp++)->value.ui64 = bgep->tx_desc.cookie.dmac_laddress;

	(knp++)->value.ui64 = bgep->send[0].tx_free;
	(knp++)->value.ui64 = bgep->send[0].tx_array;
	(knp++)->value.ui64 = bgep->send[0].tc_next;
	(knp++)->value.ui64 = bgep->send[0].tx_next;
	(knp++)->value.ui64 = bgep->send[0].txfill_next;
	(knp++)->value.ui64 = bgep->send[0].txpkt_next;
	(knp++)->value.ui64 = bgep->send[0].txbuf_pop_queue->count +
	    bgep->send[0].txbuf_push_queue->count;
	(knp++)->value.ui64 = bgep->send[0].tx_flow;
	(knp++)->value.ui64 = bgep->tx_resched_needed;
	(knp++)->value.ui64 = bgep->tx_resched;
	(knp++)->value.ui64 = bgep->send[0].tx_nobuf;
	(knp++)->value.ui64 = bgep->send[0].tx_nobd;
	(knp++)->value.ui64 = bgep->send[0].tx_block;
	(knp++)->value.ui64 = bgep->send[0].tx_alloc_fail;

	(knp++)->value.ui64 = bgep->watchdog;
	(knp++)->value.ui64 = bgep->chip_resets;
	(knp++)->value.ui64 = bgep->missed_dmas;
	(knp++)->value.ui64 = bgep->missed_updates;

	/*
	 * Hold the mutex while accessing the chip registers
	 * just in case the factotum is trying to reset it!
	 */
	handle = bgep->cfg_handle;
	mutex_enter(bgep->genlock);
	(knp++)->value.ui64 = pci_config_get32(handle, PCI_CONF_BGE_MHCR);
	(knp++)->value.ui64 = pci_config_get32(handle, PCI_CONF_BGE_PDRWCR);
	(knp++)->value.ui64 = pci_config_get32(handle, PCI_CONF_BGE_PCISTATE);
	if (bge_check_acc_handle(bgep, bgep->cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}

	(knp++)->value.ui64 = bge_reg_get32(bgep, BUFFER_MANAGER_STATUS_REG);
	(knp++)->value.ui64 = bge_reg_get32(bgep, RCV_INITIATOR_STATUS_REG);
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(bgep->devinfo, DDI_SERVICE_DEGRADED);
		mutex_exit(bgep->genlock);
		return (EIO);
	}
	mutex_exit(bgep->genlock);

	return (0);
}