/*
 * e1000g_receive - main receive routine
 *
 * This routine processes packets received in interrupt context
 */
mblk_t *
e1000g_receive(e1000g_rx_ring_t *rx_ring, mblk_t **tail, uint_t sz)
{
	struct e1000_hw *hw;
	mblk_t *nmp;
	mblk_t *ret_mp;
	mblk_t *ret_nmp;
	struct e1000_rx_desc *current_desc;
	struct e1000_rx_desc *last_desc;
	p_rx_sw_packet_t packet;
	p_rx_sw_packet_t newpkt;
	uint16_t length;
	uint32_t pkt_count;
	uint32_t desc_count;
	boolean_t accept_frame;
	boolean_t end_of_packet;
	boolean_t need_copy;
	struct e1000g *Adapter;
	dma_buffer_t *rx_buf;
	uint16_t cksumflags;
	uint_t chain_sz = 0;
	e1000g_rx_data_t *rx_data;
	uint32_t max_size;
	uint32_t min_size;

	ret_mp = NULL;
	ret_nmp = NULL;
	pkt_count = 0;
	desc_count = 0;
	cksumflags = 0;

	Adapter = rx_ring->adapter;
	rx_data = rx_ring->rx_data;
	hw = &Adapter->shared;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORKERNEL);

	if (e1000g_check_dma_handle(rx_data->rbd_dma_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
		return (NULL);
	}

	current_desc = rx_data->rbd_next;
	if (!(current_desc->status & E1000_RXD_STAT_DD)) {
		/*
		 * The next descriptor has not been written back by the
		 * hardware yet; nothing to send up.
		 */
		E1000G_DEBUG_STAT(rx_ring->stat_none);
		return (NULL);
	}

	max_size = Adapter->max_frame_size - ETHERFCSL - VLAN_TAGSZ;
	min_size = ETHERMIN;

	/*
	 * Loop through the receive descriptors starting at the last known
	 * descriptor owned by the hardware that begins a packet.
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (pkt_count < Adapter->rx_limit_onintr) &&
	    ((sz == E1000G_CHAIN_NO_LIMIT) || (chain_sz <= sz))) {

		desc_count++;
		/*
		 * A descriptor without EOP set can occur when a jumbo
		 * frame spans multiple receive buffers.
		 */
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* packet has EOP set */
			end_of_packet = B_TRUE;
		} else {
			/*
			 * If this received buffer does not have the
			 * End-Of-Packet bit set, the received packet
			 * will consume multiple buffers. We won't send this
			 * packet upstack till we get all the related buffers.
			 */
			end_of_packet = B_FALSE;
		}

		/*
		 * Get a pointer to the actual receive buffer.
		 * The mp->b_rptr is mapped to the current descriptor's
		 * buffer address.
		 */
		packet =
		    (p_rx_sw_packet_t)QUEUE_GET_HEAD(&rx_data->recv_list);
		ASSERT(packet != NULL);

		rx_buf = packet->rx_buf;

		length = current_desc->length;

#ifdef __sparc
		if (packet->dma_type == USE_DVMA)
			dvma_sync(rx_buf->dma_handle, 0,
			    DDI_DMA_SYNC_FORKERNEL);
		else
			(void) ddi_dma_sync(rx_buf->dma_handle,
			    E1000G_IPALIGNROOM, length,
			    DDI_DMA_SYNC_FORKERNEL);
#else
		(void) ddi_dma_sync(rx_buf->dma_handle,
		    E1000G_IPALIGNROOM, length,
		    DDI_DMA_SYNC_FORKERNEL);
#endif

		if (e1000g_check_dma_handle(
		    rx_buf->dma_handle) != DDI_FM_OK) {
			ddi_fm_service_impact(Adapter->dip,
			    DDI_SERVICE_DEGRADED);
			Adapter->e1000g_state |= E1000G_ERROR;

			goto rx_drop;
		}

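		/*
		 * Accept the frame if the descriptor reports no errors, or
		 * if it reports a TCP/UDP or IP checksum error; checksum
		 * failures are conveyed to the stack through the offload
		 * flags below rather than dropped here.
		 */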
		accept_frame = (current_desc->errors == 0) ||
		    ((current_desc->errors &
		    (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) != 0);

		if (hw->mac.type == e1000_82543) {
			unsigned char last_byte;

			last_byte =
			    *((unsigned char *)rx_buf->address + length - 1);

			if (TBI_ACCEPT(hw,
			    current_desc->status, current_desc->errors,
			    current_desc->length, last_byte,
			    Adapter->min_frame_size, Adapter->max_frame_size)) {

				e1000_tbi_adjust_stats(Adapter,
				    length, hw->mac.addr);

				length--;
				accept_frame = B_TRUE;
			} else if (e1000_tbi_sbp_enabled_82543(hw) &&
			    (current_desc->errors == E1000_RXD_ERR_CE)) {
				accept_frame = B_TRUE;
			}
		}

		/*
		 * Indicate the packet to the network stack only if it is
		 * good.  Normally, hardware will discard bad packets for
		 * us, but we still check that the packet is a valid
		 * Ethernet frame.
		 */
		if (!accept_frame) {
			/*
			 * Error in the incoming packet: either it is not an
			 * Ethernet-sized packet or it carries an error.  In
			 * either case, the packet is simply discarded.
			 */
			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Process Receive Interrupts: Error in Packet\n");

			E1000G_STAT(rx_ring->stat_error);
			/*
			 * Return right away; there is no point in letting
			 * the rest of the loop run for a packet that is
			 * being dropped.
			 */
			goto rx_drop;
		}

		/*
		 * If the Ethernet CRC is not stripped by the hardware,
		 * we need to strip it before sending it up to the stack.
		 */
		if (end_of_packet && !Adapter->strip_crc) {
			if (length > ETHERFCSL) {
				length -= ETHERFCSL;
			} else {
				/*
				 * The final fragment is smaller than the
				 * CRC; trim the CRC remainder off the
				 * previous fragment and finish the
				 * end-of-packet processing.
				 */
				ASSERT(rx_data->rx_mblk_tail != NULL);
				rx_data->rx_mblk_tail->b_wptr -=
				    ETHERFCSL - length;
				rx_data->rx_mblk_len -=
				    ETHERFCSL - length;

				QUEUE_POP_HEAD(&rx_data->recv_list);

				goto rx_end_of_packet;
			}
		}

		need_copy = B_TRUE;

		if (length <= Adapter->rx_bcopy_thresh)
			goto rx_copy;

		/*
		 * Get the pre-constructed mblk that was associated
		 * with the receive data buffer.
		 */
		if (packet->mp == NULL) {
			packet->mp = desballoc((unsigned char *)
			    rx_buf->address, length,
			    BPRI_MED, &packet->free_rtn);
		}

		if (packet->mp != NULL) {
			/*
			 * We have two buffer pools: one associated with the
			 * Rx descriptors and a separate free-list pool.
			 * Each time we get a good packet, try to get a
			 * buffer from the free-list pool using
			 * e1000g_get_buf.  If we get one, replace the
			 * descriptor's buffer address with the free buffer
			 * we just got and pass the pre-constructed mblk
			 * upstack (no copying).
			 *
			 * If we fail to get a free buffer, allocate a new
			 * mblk and copy the receive buffer contents into it,
			 * leaving the descriptor's buffer address untouched
			 * (copying).
			 */
			newpkt = e1000g_get_buf(rx_data);

			if (newpkt != NULL) {
				/*
				 * Get the mblk associated with the data
				 * and detach it from the sw packet.
				 */
				nmp = packet->mp;
				packet->mp = NULL;
				atomic_inc_32(&packet->ref_cnt);

				/*
				 * Now replace the old buffer with the one
				 * we got from the free list.  Both the
				 * rx_sw_packet and the receive buffer
				 * descriptor will now point to this new
				 * buffer.
				 */
				packet = newpkt;

				current_desc->buffer_addr =
				    newpkt->rx_buf->dma_address;

				need_copy = B_FALSE;
			} else {
				/* EMPTY */
				E1000G_DEBUG_STAT(rx_ring->stat_no_freepkt);
			}
		}

rx_copy:
		if (need_copy) {
			/*
			 * Either the packet is below the bcopy threshold or
			 * no free-list buffer was available: bcopy the data
			 * out of the receive buffer and keep the original
			 * buffer in place.
			 */
			if ((nmp = allocb(length + E1000G_IPALIGNROOM,
			    BPRI_MED)) == NULL) {
				/*
				 * The system has no mblks available to
				 * carry the incoming packet upstream, so
				 * drop it.
				 */
				E1000G_STAT(rx_ring->stat_allocb_fail);
				goto rx_drop;
			}
			nmp->b_rptr += E1000G_IPALIGNROOM;
			nmp->b_wptr += E1000G_IPALIGNROOM;
			/*
			 * The free list had no buffers available, so the
			 * received packet is copied into the new mblk and
			 * the original buffer is retained for future packet
			 * reception.
			 */
			bcopy(rx_buf->address, nmp->b_wptr, length);
		}

		/*
		 * The rx_sw_packet MUST be popped off the
		 * RxSwPacketList before either a putnext or freemsg
		 * is done on the mp that has now been created by the
		 * desballoc. If not, it is possible that the free
		 * routine will get called from the interrupt context
		 * and try to put this packet on the free list
		 */
		(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

		ASSERT(nmp != NULL);
		nmp->b_wptr += length;

		if (rx_data->rx_mblk == NULL) {
			/*
			 *  TCP/UDP checksum offload and
			 *  IP checksum offload
			 */
			if (!(current_desc->status & E1000_RXD_STAT_IXSM)) {
				/*
				 * Check TCP/UDP checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_TCPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_TCPE))
					cksumflags |= HCK_FULLCKSUM |
					    HCK_FULLCKSUM_OK;
				/*
				 * Check IP Checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_IPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_IPE))
					cksumflags |= HCK_IPV4_HDRCKSUM;
			}
		}

		/*
		 * We need to maintain our packet chain in the rx_data
		 * structure, because Rx processing can end with a fragment
		 * that has no EOP set.
		 */
		if (rx_data->rx_mblk == NULL) {
			/* Get the head of the message chain */
			rx_data->rx_mblk = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len = length;
		} else {	/* Not the first packet */
			/* Continue adding buffers */
			rx_data->rx_mblk_tail->b_cont = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len += length;
		}
		ASSERT(rx_data->rx_mblk != NULL);
		ASSERT(rx_data->rx_mblk_tail != NULL);
		ASSERT(rx_data->rx_mblk_tail->b_cont == NULL);

		/*
		 * This mblk is ready to travel upstream, but more
		 * fragments may still be coming.  The packet is sent
		 * upstream only once we see a descriptor with EOP set.
		 */
		if (!end_of_packet) {
			/*
			 * Continue to the next descriptor; the hardware
			 * tail pointer is advanced after the loop.
			 */
			goto rx_next_desc;
		}

rx_end_of_packet:
		if (E1000G_IS_VLAN_PACKET(rx_data->rx_mblk->b_rptr))
			max_size = Adapter->max_frame_size - ETHERFCSL;

		if ((rx_data->rx_mblk_len > max_size) ||
		    (rx_data->rx_mblk_len < min_size)) {
			E1000G_STAT(rx_ring->stat_size_error);
			goto rx_drop;
		}

		/*
		 * Found a packet with EOP set.
		 * Process the last fragment.
		 */
		if (cksumflags != 0) {
			(void) hcksum_assoc(rx_data->rx_mblk,
			    NULL, NULL, 0, 0, 0, 0, cksumflags, 0);
			cksumflags = 0;
		}

		/*
		 * Count packets that span multiple descriptors.
		 */
		E1000G_DEBUG_STAT_COND(rx_ring->stat_multi_desc,
		    (rx_data->rx_mblk->b_cont != NULL));

		/*
		 * Append to list to send upstream
		 */
		if (ret_mp == NULL) {
			ret_mp = ret_nmp = rx_data->rx_mblk;
		} else {
			ret_nmp->b_next = rx_data->rx_mblk;
			ret_nmp = rx_data->rx_mblk;
		}
		ret_nmp->b_next = NULL;
		*tail = ret_nmp;
		chain_sz += length;

		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;

		pkt_count++;

rx_next_desc:
		/*
		 * Zero out the receive descriptor's status.
		 */
		current_desc->status = 0;

		if (current_desc == rx_data->rbd_last)
			rx_data->rbd_next = rx_data->rbd_first;
		else
			rx_data->rbd_next++;

		last_desc = current_desc;
		current_desc = rx_data->rbd_next;

		/*
		 * Put the buffer that we just indicated back
		 * at the end of our list
		 */
		QUEUE_PUSH_TAIL(&rx_data->recv_list,
		    &packet->Link);
	}	/* while loop */

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	Adapter->rx_pkt_cnt = pkt_count;

	return (ret_mp);

rx_drop:
	/*
	 * Zero out the receive descriptor's status.
	 */
	current_desc->status = 0;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (current_desc == rx_data->rbd_last)
		rx_data->rbd_next = rx_data->rbd_first;
	else
		rx_data->rbd_next++;

	last_desc = current_desc;

	(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

	QUEUE_PUSH_TAIL(&rx_data->recv_list, &packet->Link);
	/*
	 * Reclaim any buffers already chained for an incomplete
	 * (multi-descriptor jumbo) reception.
	 */
	if (rx_data->rx_mblk != NULL) {
		freemsg(rx_data->rx_mblk);
		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;
	}
	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	return (ret_mp);
}
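
/*
 * Illustration only (not part of the driver source): a minimal sketch of
 * how an interrupt path might consume the chain returned by
 * e1000g_receive() and hand it to the MAC layer via mac_rx().  The
 * Adapter->mh field name and the absence of ring locking are simplifying
 * assumptions here.
 */
static void
example_rx_intr_path(struct e1000g *Adapter)
{
	e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
	mblk_t *mp;
	mblk_t *tail;

	/* Collect all completed frames, with no chain-size limit */
	mp = e1000g_receive(rx_ring, &tail, E1000G_CHAIN_NO_LIMIT);

	/* Pass the whole b_next chain up to the MAC layer */
	if (mp != NULL)
		mac_rx(Adapter->mh, NULL, mp);
}
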
/**
 *  e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled
 *  @hw: pointer to the HW structure
 *  @stats: Struct containing statistic register values
 *  @frame_len: The length of the frame in question
 *  @mac_addr: The Ethernet destination address of the frame in question
 *  @max_frame_size: The maximum frame size
 *
 *  Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
 **/
void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
                                  struct e1000_hw_stats *stats, u32 frame_len,
                                  u8 *mac_addr, u32 max_frame_size)
{
	if (!(e1000_tbi_sbp_enabled_82543(hw)))
		goto out;

	/* First adjust the frame length. */
	frame_len--;
	/*
	 * We need to adjust the statistics counters, since the hardware
	 * counters overcount this packet as a CRC error and undercount
	 * the packet as a good packet
	 */
	/* This packet should not be counted as a CRC error.    */
	stats->crcerrs--;
	/* This packet does count as a Good Packet Received.    */
	stats->gprc++;

	/* Adjust the Good Octets received counters             */
	stats->gorc += frame_len;

	/*
	 * Is this a broadcast or multicast?  Check broadcast first,
	 * since the test for a multicast frame will test positive on
	 * a broadcast frame.
	 */
	if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
		/* Broadcast packet */
		stats->bprc++;
	else if (*mac_addr & 0x01)
		/* Multicast packet */
		stats->mprc++;

	/*
	 * In this case, the hardware has overcounted the number of
	 * oversize frames.
	 */
	if ((frame_len == max_frame_size) && (stats->roc > 0))
		stats->roc--;

	/*
	 * Adjust the bin counters when the extra byte put the frame in the
	 * wrong bin. Remember that the frame_len was adjusted above.
	 */
	if (frame_len == 64) {
		stats->prc64++;
		stats->prc127--;
	} else if (frame_len == 127) {
		stats->prc127++;
		stats->prc255--;
	} else if (frame_len == 255) {
		stats->prc255++;
		stats->prc511--;
	} else if (frame_len == 511) {
		stats->prc511++;
		stats->prc1023--;
	} else if (frame_len == 1023) {
		stats->prc1023++;
		stats->prc1522--;
	} else if (frame_len == 1522) {
		stats->prc1522++;
	}

out:
	return;
}
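
/*
 * Illustration only (not part of the shared code): a worked example of the
 * counter adjustments above.  Assuming store-bad-packets is enabled on this
 * hw, a 65-byte broadcast frame (64 bytes plus the extra TBI byte) that the
 * hardware logged as a CRC error in the 65..127 bin is re-counted as a good
 * 64-byte broadcast frame.
 */
static void
example_tbi_stats_fixup(struct e1000_hw *hw)
{
	struct e1000_hw_stats stats = { 0 };
	u8 dest[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	stats.crcerrs = 1;	/* hardware counted a CRC error ... */
	stats.prc127 = 1;	/* ... and binned the frame as 65..127 bytes */

	e1000_tbi_adjust_stats_82543(hw, &stats, 65, dest, 1522);

	/*
	 * Expected result: crcerrs 0, gprc 1, gorc 64, bprc 1,
	 * prc64 1, prc127 0.
	 */
}
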
/*
 * e1000g_rx_setup - setup rx data structures
 *
 * This routine initializes all of the receive related
 * structures. This includes the receive descriptors, the
 * actual receive buffers, and the rx_sw_packet software
 * structures.
 */
void
e1000g_rx_setup(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	p_rx_sw_packet_t packet;
	struct e1000_rx_desc *descriptor;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	uint32_t rctl;
	uint32_t rxdctl;
	uint32_t ert;
	uint16_t phy_data;
	int i;
	int size;
	e1000g_rx_data_t *rx_data;

	hw = &Adapter->shared;
	rx_data = Adapter->rx_ring->rx_data;

	/*
	 * Zero out all of the receive buffer descriptor memory to ensure
	 * any previous data or status is erased.
	 */
	bzero(rx_data->rbd_area,
	    sizeof (struct e1000_rx_desc) * Adapter->rx_desc_num);

	if (!Adapter->rx_buffer_setup) {
		/* Init the list of "Receive Buffer" */
		QUEUE_INIT_LIST(&rx_data->recv_list);

		/* Init the list of "Free Receive Buffer" */
		QUEUE_INIT_LIST(&rx_data->free_list);

		/* Init the list of "Recycled Receive Buffer" */
		QUEUE_INIT_LIST(&rx_data->recycle_list);
		/*
		 * Set up the receive list and the free list.  Note that
		 * both were allocated in one packet area.
		 */
		packet = rx_data->packet_area;
		descriptor = rx_data->rbd_first;

		for (i = 0; i < Adapter->rx_desc_num;
		    i++, packet = packet->next, descriptor++) {
			ASSERT(packet != NULL);
			ASSERT(descriptor != NULL);
			descriptor->buffer_addr =
			    packet->rx_buf->dma_address;

			/* Add this rx_sw_packet to the receive list */
			QUEUE_PUSH_TAIL(&rx_data->recv_list,
			    &packet->Link);
		}

		for (i = 0; i < Adapter->rx_freelist_num;
		    i++, packet = packet->next) {
			ASSERT(packet != NULL);
			/* Add this rx_sw_packet to the free list */
			QUEUE_PUSH_TAIL(&rx_data->free_list,
			    &packet->Link);
		}
		rx_data->avail_freepkt = Adapter->rx_freelist_num;
		rx_data->recycle_freepkt = 0;

		Adapter->rx_buffer_setup = B_TRUE;
	} else {
		/* Setup the initial pointer to the first rx descriptor */
		packet = (p_rx_sw_packet_t)
		    QUEUE_GET_HEAD(&rx_data->recv_list);
		descriptor = rx_data->rbd_first;

		for (i = 0; i < Adapter->rx_desc_num; i++) {
			ASSERT(packet != NULL);
			ASSERT(descriptor != NULL);
			descriptor->buffer_addr =
			    packet->rx_buf->dma_address;

			/* Get next rx_sw_packet */
			packet = (p_rx_sw_packet_t)
			    QUEUE_GET_NEXT(&rx_data->recv_list, &packet->Link);
			descriptor++;
		}
	}

	E1000_WRITE_REG(&Adapter->shared, E1000_RDTR, Adapter->rx_intr_delay);
	E1000G_DEBUGLOG_1(Adapter, E1000G_INFO_LEVEL,
	    "E1000_RDTR: 0x%x\n", Adapter->rx_intr_delay);
	if (hw->mac.type >= e1000_82540) {
		E1000_WRITE_REG(&Adapter->shared, E1000_RADV,
		    Adapter->rx_intr_abs_delay);
		E1000G_DEBUGLOG_1(Adapter, E1000G_INFO_LEVEL,
		    "E1000_RADV: 0x%x\n", Adapter->rx_intr_abs_delay);
	}

	/*
	 * Setup our descriptor pointers
	 */
	rx_data->rbd_next = rx_data->rbd_first;

	size = Adapter->rx_desc_num * sizeof (struct e1000_rx_desc);
	E1000_WRITE_REG(hw, E1000_RDLEN(0), size);
	size = E1000_READ_REG(hw, E1000_RDLEN(0));

	/* To get lower order bits */
	buf_low = (uint32_t)rx_data->rbd_dma_addr;
	/* To get the higher order bits */
	buf_high = (uint32_t)(rx_data->rbd_dma_addr >> 32);

	E1000_WRITE_REG(hw, E1000_RDBAH(0), buf_high);
	E1000_WRITE_REG(hw, E1000_RDBAL(0), buf_low);

	/*
	 * Setup our HW Rx Head & Tail descriptor pointers
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(rx_data->rbd_last - rx_data->rbd_first));
	E1000_WRITE_REG(hw, E1000_RDH(0), 0);

	/*
	 * Setup the Receive Control Register (RCTL), and ENABLE the
	 * receiver.  The initial configuration is to: enable the receiver,
	 * accept broadcasts, enable long packet reception, disable VLAN
	 * filter checking, set the receive descriptor minimum threshold
	 * size to 1/2, and the receive buffer size to 2k.
	 */
	rctl = E1000_RCTL_EN |		/* Enable Receive Unit */
	    E1000_RCTL_BAM |		/* Accept Broadcast Packets */
	    E1000_RCTL_LPE |		/* Large Packet Enable bit */
	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT) |
	    E1000_RCTL_RDMTS_HALF |
	    E1000_RCTL_LBM_NO;		/* Loopback Mode = none */

	if (Adapter->strip_crc)
		rctl |= E1000_RCTL_SECRC;	/* Strip Ethernet CRC */

	if (Adapter->mem_workaround_82546 &&
	    ((hw->mac.type == e1000_82545) ||
	    (hw->mac.type == e1000_82546) ||
	    (hw->mac.type == e1000_82546_rev_3))) {
		rctl |= E1000_RCTL_SZ_2048;
	} else {
		if ((Adapter->max_frame_size > FRAME_SIZE_UPTO_2K) &&
		    (Adapter->max_frame_size <= FRAME_SIZE_UPTO_4K))
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		else if ((Adapter->max_frame_size > FRAME_SIZE_UPTO_4K) &&
		    (Adapter->max_frame_size <= FRAME_SIZE_UPTO_8K))
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		else if ((Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) &&
		    (Adapter->max_frame_size <= FRAME_SIZE_UPTO_16K))
			rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX;
		else
			rctl |= E1000_RCTL_SZ_2048;
	}

	if (e1000_tbi_sbp_enabled_82543(hw))
		rctl |= E1000_RCTL_SBP;

	/*
	 * Enable Early Receive Threshold (ERT) on supported devices.
	 * It only takes effect when the packet size is equal to or larger
	 * than the specified value (in 8-byte units), e.g. when using
	 * jumbo frames.
	 */
	if ((hw->mac.type == e1000_82573) ||
	    (hw->mac.type == e1000_82574) ||
	    (hw->mac.type == e1000_ich9lan) ||
	    (hw->mac.type == e1000_ich10lan)) {

		ert = E1000_ERT_2048;

		/*
		 * Special modification when ERT and
		 * jumbo frames are enabled
		 */
		if (Adapter->default_mtu > ETHERMTU) {
			rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
			E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 0x3);
			ert |= (1 << 13);
		}

		E1000_WRITE_REG(hw, E1000_ERT, ert);
	}

	/* Work around errata on 82577/8 adapters with large frames */
	if ((hw->mac.type == e1000_pchlan) &&
	    (Adapter->default_mtu > ETHERMTU)) {

		(void) e1000_read_phy_reg(hw, PHY_REG(770, 26), &phy_data);
		phy_data &= 0xfff8;
		phy_data |= (1 << 2);
		(void) e1000_write_phy_reg(hw, PHY_REG(770, 26), phy_data);

		if (hw->phy.type == e1000_phy_82577) {
			(void) e1000_read_phy_reg(hw, 22, &phy_data);
			phy_data &= 0x0fff;
			phy_data |= (1 << 14);
			(void) e1000_write_phy_reg(hw, 0x10, 0x2823);
			(void) e1000_write_phy_reg(hw, 0x11, 0x0003);
			(void) e1000_write_phy_reg(hw, 22, phy_data);
		}
	}

	reg_val =
	    E1000_RXCSUM_TUOFL |	/* TCP/UDP checksum offload Enable */
	    E1000_RXCSUM_IPOFL;		/* IP checksum offload Enable */

	E1000_WRITE_REG(hw, E1000_RXCSUM, reg_val);

	/*
	 * Workaround: Set bit 16 (IPv6_ExDIS) to disable the
	 * processing of received IPV6 extension headers
	 */
	if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
		reg_val = E1000_READ_REG(hw, E1000_RFCTL);
		reg_val |= (E1000_RFCTL_IPV6_EX_DIS |
		    E1000_RFCTL_NEW_IPV6_EXT_DIS);
		E1000_WRITE_REG(hw, E1000_RFCTL, reg_val);
	}

	/* Write to enable the receive unit */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
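
/*
 * Illustration only (not part of the driver source): a condensed sketch of
 * the descriptor-ring register programming performed by e1000g_rx_setup()
 * above.  The helper name and parameters are hypothetical.
 */
static void
example_program_rx_ring_regs(struct e1000_hw *hw, uint64_t ring_base_pa,
    uint32_t desc_num)
{
	/* Ring length in bytes; each legacy Rx descriptor is 16 bytes */
	E1000_WRITE_REG(hw, E1000_RDLEN(0),
	    desc_num * sizeof (struct e1000_rx_desc));

	/* 64-bit ring base split across the high/low base registers */
	E1000_WRITE_REG(hw, E1000_RDBAH(0), (uint32_t)(ring_base_pa >> 32));
	E1000_WRITE_REG(hw, E1000_RDBAL(0), (uint32_t)ring_base_pa);

	/*
	 * Head at 0 and tail at the last descriptor hands every
	 * descriptor except one to the hardware.
	 */
	E1000_WRITE_REG(hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(hw, E1000_RDT(0), desc_num - 1);
}
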
/**
 *  e1000_check_for_copper_link_82543 - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks the phy for link, if link exists, do the following:
 *   - check for downshift
 *   - do polarity workaround (if necessary)
 *   - configure collision distance
 *   - configure flow control after link up
 *   - configure tbi compatibility
 **/
static s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 icr, rctl;
	s32 ret_val;
	u16 speed, duplex;
	bool link;

	DEBUGFUNC("e1000_check_for_copper_link_82543");

	if (!mac->get_link_status) {
		ret_val = E1000_SUCCESS;
		goto out;
	}

	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = FALSE;

	e1000_check_downshift_generic(hw);

	/*
	 * If we are forcing speed/duplex, then we can return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		/*
		 * If speed and duplex are forced to 10H or 10F, then we
		 * will implement the polarity reversal workaround.  We
		 * disable interrupts first, and upon returning, restore the
		 * device's interrupt state to its previous value, except
		 * for the link status change interrupt, which will happen
		 * due to the execution of this workaround.
		 */
		if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) {
			E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
			ret_val = e1000_polarity_reversal_workaround_82543(hw);
			icr = E1000_READ_REG(hw, E1000_ICR);
			E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC));
			E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
		}

		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/*
	 * We have an M88E1000 PHY and Auto-Neg is enabled.  If we
	 * have silicon on board that is 82544 or newer, Auto
	 * Speed Detection takes care of MAC speed/duplex
	 * configuration.  So we only need to configure Collision
	 * Distance in the MAC.  Otherwise, we need to force
	 * speed/duplex on the MAC to the current PHY speed/duplex
	 * settings.
	 */
	if (mac->type == e1000_82544)
		e1000_config_collision_dist_generic(hw);
	else {
		ret_val = e1000_config_mac_to_phy_82543(hw);
		if (ret_val) {
			DEBUGOUT("Error configuring MAC to PHY settings\n");
			goto out;
		}
	}

	/*
	 * Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000_config_fc_after_link_up_generic(hw);
	if (ret_val) {
		DEBUGOUT("Error configuring flow control\n");
	}

	/*
	 * At this point we know that we are on copper and we have
	 * auto-negotiated link.  These are conditions for checking the link
	 * partner capability register.  We use the link speed to determine if
	 * TBI compatibility needs to be turned on or off.  If the link is not
	 * at gigabit speed, then TBI compatibility is not needed.  If we are
	 * at gigabit speed, we turn on TBI compatibility.
	 */
	if (e1000_tbi_compatibility_enabled_82543(hw)) {
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			DEBUGOUT("Error getting link speed and duplex\n");
			return ret_val;
		}
		if (speed != SPEED_1000) {
			/*
			 * If link speed is not set to gigabit speed,
			 * we do not need to enable TBI compatibility.
			 */
			if (e1000_tbi_sbp_enabled_82543(hw)) {
				/*
				 * If we previously were in the mode,
				 * turn it off.
				 */
				e1000_set_tbi_sbp_82543(hw, FALSE);
				rctl = E1000_READ_REG(hw, E1000_RCTL);
				rctl &= ~E1000_RCTL_SBP;
				E1000_WRITE_REG(hw, E1000_RCTL, rctl);
			}
		} else {
			/*
			 * If TBI compatibility was previously off,
			 * turn it on.  For compatibility with a TBI link
			 * partner, we will store bad packets.  Some
			 * frames have an additional byte on the end and
			 * will look like CRC errors to the hardware.
			 */
			if (!e1000_tbi_sbp_enabled_82543(hw)) {
				e1000_set_tbi_sbp_82543(hw, TRUE);
				rctl = E1000_READ_REG(hw, E1000_RCTL);
				rctl |= E1000_RCTL_SBP;
				E1000_WRITE_REG(hw, E1000_RCTL, rctl);
			}
		}
	}
out:
	return ret_val;
}
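
/*
 * Illustration only (not part of the shared code): the read-modify-write
 * pattern used above to toggle the Store Bad Packets (SBP) bit in RCTL
 * when TBI compatibility is switched on or off.  The helper name is
 * hypothetical.
 */
static void
example_set_rctl_sbp(struct e1000_hw *hw, bool enable)
{
	u32 rctl = E1000_READ_REG(hw, E1000_RCTL);

	if (enable)
		rctl |= E1000_RCTL_SBP;		/* store bad packets */
	else
		rctl &= ~E1000_RCTL_SBP;

	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}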