Example #1
static bool dna_e1000e_clean_rx_irq(struct e1000_adapter *adapter) {
  bool ret;
  int i, debug = 0;
  struct e1000_ring *rx_ring = adapter->rx_ring;
  union e1000_rx_desc_extended *rx_desc;
  struct e1000_buffer *buffer_info;
  struct e1000_hw *hw = &adapter->hw;
  u32 staterr;

  /* The register contains the last packet that we have read */
  i = E1000_READ_REG(hw, E1000_RDT(0));
  if(++i == rx_ring->count)
    i = 0;

  rx_ring->next_to_clean = i; 
  rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
  buffer_info = &rx_ring->buffer_info[i];
  
  if(unlikely(debug))
    printk(KERN_INFO
	   "DNA: dna_e1000_clean_rx_irq(%s)[id=%d][status=%d][rx_reg=%u]\n",
	   adapter->netdev->name, i, staterr,
	   E1000_READ_REG(&adapter->hw, E1000_RDT(0)));

  if(staterr & E1000_RXD_STAT_DD) {
    if(!adapter->dna.interrupt_received) {
      if(waitqueue_active(&adapter->dna.packet_waitqueue)) {
	wake_up_interruptible(&adapter->dna.packet_waitqueue);
	adapter->dna.interrupt_received = 1;

	if(unlikely(debug))
	  printk(KERN_WARNING "DNA: dna_e1000e_clean_rx_irq(%s): "
		 "woken up [slot=%d] XXXX\n", adapter->netdev->name, i);

      }
    }

    if(unlikely(debug))
      printk(KERN_WARNING "DNA: dna_e1000_clean_rx_irq(%s): "
	     "woken up [slot=%d][interrupt_received=%d]\n",
	     adapter->netdev->name, i, adapter->dna.interrupt_received);

    ret = true;
  } else
    ret = false;

  return(ret);		
}
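
The index arithmetic above recurs throughout these DNA examples: RDT holds the last slot the consumer has read, so the next slot to poll is one past it, wrapping at the ring size. A minimal standalone sketch of that idiom (next_slot is an illustrative name, not part of the driver):

#include <stdint.h>

/* Hypothetical helper mirroring the "if(++i == rx_ring->count) i = 0;"
 * pattern above: advance a ring index by one, wrapping to zero at the
 * end of the ring. */
static inline uint16_t next_slot(uint16_t last_read, uint16_t ring_size)
{
	uint16_t i = last_read + 1;
	return (i == ring_size) ? 0 : i;
}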
Example #2
/**
 * e1000e_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000e_configure_rx ( struct e1000_adapter *adapter )
{
	struct e1000_hw *hw = &adapter->hw;
	uint32_t rctl;

	DBGP ( "e1000_configure_rx\n" );

	/* disable receives while setting up the descriptors */
	rctl = E1000_READ_REG ( hw, E1000_RCTL );
	E1000_WRITE_REG ( hw, E1000_RCTL, rctl & ~E1000_RCTL_EN );
	e1e_flush();
	mdelay(10);

	adapter->rx_curr = 0;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */

	E1000_WRITE_REG ( hw, E1000_RDBAL(0), virt_to_bus ( adapter->rx_base ) );
	E1000_WRITE_REG ( hw, E1000_RDBAH(0), 0 );
	E1000_WRITE_REG ( hw, E1000_RDLEN(0), adapter->rx_ring_size );

	E1000_WRITE_REG ( hw, E1000_RDH(0), 0 );
	E1000_WRITE_REG ( hw, E1000_RDT(0), NUM_RX_DESC - 1 );

	/* Enable Receives */
	rctl |=	 E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		 E1000_RCTL_MPE;
	E1000_WRITE_REG ( hw, E1000_RCTL, rctl );
	e1e_flush();

	DBG ( "E1000_RDBAL(0): %#08x\n",  E1000_READ_REG ( hw, E1000_RDBAL(0) ) );
	DBG ( "E1000_RDLEN(0): %d\n",	  E1000_READ_REG ( hw, E1000_RDLEN(0) ) );
	DBG ( "E1000_RCTL:  %#08x\n",  E1000_READ_REG ( hw, E1000_RCTL ) );
}
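
Note the head/tail initialization above: RDH starts at 0 while RDT is written with NUM_RX_DESC - 1, so the hardware owns every descriptor except one. Keeping one slot permanently empty is what lets head == tail mean "ring empty" rather than "ring full". A hedged sketch of that invariant (hw_owned is an illustrative name):

#include <stdint.h>

/* Hypothetical helper: how many descriptors the NIC currently owns,
 * given head (RDH), tail (RDT) and the ring size. With head = 0 and
 * tail = size - 1 the NIC owns size - 1 slots; one stays empty. */
static inline uint32_t hw_owned(uint32_t head, uint32_t tail, uint32_t size)
{
	return (tail >= head) ? (tail - head) : (size - head + tail);
}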
Example #3
void dna_cleanup_rx_ring(struct igb_ring *rx_ring) {
  struct igb_adapter	  *adapter = netdev_priv(rx_ring->netdev);
  struct e1000_hw	  *hw = &adapter->hw;
  union e1000_adv_rx_desc *rx_desc, *shadow_rx_desc;
  u32 head = E1000_READ_REG(hw, E1000_RDH(rx_ring->reg_idx));
  u32 tail, i;
  
  /*
  tail = E1000_READ_REG(hw, E1000_RDT(rx_ring->reg_idx))
  u32 count = rx_ring->count;
  
  if(unlikely(enable_debug))
    printk("[DNA] dna_cleanup_rx_ring(%d): [head=%u][tail=%u]\n", rx_ring->queue_index, head, tail);

  // We now point to the next slot where packets will be received
  if(++tail == rx_ring->count) tail = 0;

  while(count > 0) {
    if(tail == head) break; // Do not go beyond head

    rx_desc = IGB_RX_DESC(rx_ring, tail);
    shadow_rx_desc = IGB_RX_DESC(rx_ring, tail + rx_ring->count);
    
    if(rx_desc->wb.upper.status_error != 0) {
      print_adv_rx_descr(rx_desc);
      break;
    }

    // Writeback
    rx_desc->wb.upper.status_error = 0;
    rx_desc->read.hdr_addr = shadow_rx_desc->read.hdr_addr, rx_desc->read.pkt_addr = shadow_rx_desc->read.pkt_addr;
    E1000_WRITE_REG(hw, E1000_RDT(rx_ring->reg_idx), tail);

    if(unlikely(enable_debug))
      printk("[DNA] dna_cleanup_rx_ring(%d): idx=%d\n", rx_ring->queue_index, tail);

    if(++tail == rx_ring->count) tail = 0;
    count--;
  }
  */


  /* resetting all */

  for (i=0; i<rx_ring->count; i++) {
    rx_desc = IGB_RX_DESC(rx_ring, i);
    shadow_rx_desc = IGB_RX_DESC(rx_ring, i + rx_ring->count);

    rx_desc->wb.upper.status_error = 0;
    rx_desc->read.hdr_addr = shadow_rx_desc->read.hdr_addr;
    rx_desc->read.pkt_addr = shadow_rx_desc->read.pkt_addr;
  }

  if (head == 0) tail = rx_ring->count - 1;
  else tail = head - 1;

  E1000_WRITE_REG(hw, E1000_RDT(rx_ring->reg_idx), tail);
}
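
This cleanup relies on the DNA "shadow" descriptor trick (populated in Example 9): a pristine copy of each receive descriptor is kept at index i + count in the same descriptor area, so the ring can be re-armed without re-mapping any DMA buffers. A minimal sketch of the restore step, assuming desc_area holds 2 * count descriptors (the struct here is a simplified stand-in, not the real union e1000_adv_rx_desc):

#include <string.h>

struct rx_desc { unsigned long long pkt_addr, hdr_addr; };

/* Hypothetical restore: copy the shadow (pristine) descriptor stored
 * at slot i + count back over the live descriptor at slot i. */
static void restore_from_shadow(struct rx_desc *desc_area,
				unsigned int i, unsigned int count)
{
	memcpy(&desc_area[i], &desc_area[i + count], sizeof(desc_area[i]));
}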
Example #4
int wait_packet_function_ptr(void *data, int mode) {
  struct e1000_adapter *adapter = (struct e1000_adapter*)data;

  if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] called [mode=%d]\n", mode);

  if(mode == 1) {
    struct e1000_ring *rx_ring = adapter->rx_ring;
    union e1000_rx_desc_extended *rx_desc;

    u16 i = E1000_READ_REG(&adapter->hw, E1000_RDT(0));
    
    /* Very important: re-read the value from the register, which is set
     * from userland. Here i is the last slot I've read (zero-copy
     * implementation) */
    if(++i == rx_ring->count) i = 0;
    /* Here i is the next I have to read */
    
    rx_ring->next_to_clean = i;

    rx_desc = E1000_RX_DESC_EXT(*rx_ring, rx_ring->next_to_clean);
    if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Check if a packet is arrived\n");

    prefetch(rx_desc);

    if(!(le32_to_cpu(rx_desc->wb.upper.status_error) & E1000_RXD_STAT_DD)) {
      adapter->dna.interrupt_received = 0;

#if 0
      if(!adapter->dna.interrupt_enabled) {
	e1000_irq_enable(adapter), adapter->dna.interrupt_enabled = 1;
	if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Packet not arrived yet: enabling interrupts\n");
      }
#endif
    } else
      adapter->dna.interrupt_received = 1;

    return(le32_to_cpu(rx_desc->wb.upper.status_error) & E1000_RXD_STAT_DD);
  } else {
    if(adapter->dna.interrupt_enabled) {
      e1000_irq_disable(adapter);
      adapter->dna.interrupt_enabled = 0;
      if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Disabled interrupts\n");
    }
    return(0);
  }
}
Example #5
static int dna_igb_clean_rx_irq(struct igb_q_vector *q_vector,
				  struct igb_ring *rx_ring, int budget) {
  union e1000_adv_rx_desc	*rx_desc;
  u32				staterr;
  u16				i;
  struct igb_adapter	        *adapter = q_vector->adapter;
  struct e1000_hw		*hw = &adapter->hw;

  i = E1000_READ_REG(hw, E1000_RDT(rx_ring->reg_idx));
  if(++i == rx_ring->count)
    i = 0;

  rx_ring->next_to_clean = i;

  //i = E1000_READ_REG(hw, E1000_RDT(rx_ring->reg_idx));
  rx_desc = IGB_RX_DESC(rx_ring, i);
  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

  if(rx_ring->dna.queue_in_use) {
    /*
      A userland application is using the queue so it's not time to
      mess up with indexes but just to wakeup apps (if waiting)
    */

    if(staterr & E1000_RXD_STAT_DD) {
      if(unlikely(enable_debug))
	printk(KERN_INFO "DNA: got a packet [index=%d]!\n", i);

      if(waitqueue_active(&rx_ring->dna.rx_tx.rx.packet_waitqueue)) {
	wake_up_interruptible(&rx_ring->dna.rx_tx.rx.packet_waitqueue);
	rx_ring->dna.rx_tx.rx.interrupt_received = 1;

	if(unlikely(enable_debug))
	  printk("%s(%s): woken up ring=%d, [slot=%d] XXX\n",
		 __FUNCTION__, rx_ring->netdev->name,
		 rx_ring->reg_idx, i);
      }
    }

    // goto dump_stats;
  }

  return(budget);
}
Example #6
/**
 * e1000e_refill_rx_ring - allocate Rx io_buffers
 *
 * @v adapter	e1000 private structure
 *
 * @ret rc	 Returns 0 on success, negative on failure
 **/
static int e1000e_refill_rx_ring ( struct e1000_adapter *adapter )
{
	int i, rx_curr;
	int rc = 0;
	struct e1000_rx_desc *rx_curr_desc;
	struct e1000_hw *hw = &adapter->hw;
	struct io_buffer *iob;

	DBGP ("e1000_refill_rx_ring\n");

	for ( i = 0; i < NUM_RX_DESC; i++ ) {
		rx_curr = ( ( adapter->rx_curr + i ) % NUM_RX_DESC );
		rx_curr_desc = adapter->rx_base + rx_curr;

		if ( rx_curr_desc->status & E1000_RXD_STAT_DD )
			continue;

		if ( adapter->rx_iobuf[rx_curr] != NULL )
			continue;

		DBG2 ( "Refilling rx desc %d\n", rx_curr );

		iob = alloc_iob ( MAXIMUM_ETHERNET_VLAN_SIZE );
		adapter->rx_iobuf[rx_curr] = iob;

		if ( ! iob ) {
			DBG ( "alloc_iob failed\n" );
			rc = -ENOMEM;
			break;
		} else {
			rx_curr_desc->buffer_addr = virt_to_bus ( iob->data );

			E1000_WRITE_REG ( hw, E1000_RDT(0), rx_curr );
		}
	}
	return rc;
}
Example #7
/*
 * e1000g_receive - main receive routine
 *
 * This routine will process packets received in an interrupt
 */
mblk_t *
e1000g_receive(e1000g_rx_ring_t *rx_ring, mblk_t **tail, uint_t sz)
{
	struct e1000_hw *hw;
	mblk_t *nmp;
	mblk_t *ret_mp;
	mblk_t *ret_nmp;
	struct e1000_rx_desc *current_desc;
	struct e1000_rx_desc *last_desc;
	p_rx_sw_packet_t packet;
	p_rx_sw_packet_t newpkt;
	uint16_t length;
	uint32_t pkt_count;
	uint32_t desc_count;
	boolean_t accept_frame;
	boolean_t end_of_packet;
	boolean_t need_copy;
	struct e1000g *Adapter;
	dma_buffer_t *rx_buf;
	uint16_t cksumflags;
	uint_t chain_sz = 0;
	e1000g_rx_data_t *rx_data;
	uint32_t max_size;
	uint32_t min_size;

	ret_mp = NULL;
	ret_nmp = NULL;
	pkt_count = 0;
	desc_count = 0;
	cksumflags = 0;

	Adapter = rx_ring->adapter;
	rx_data = rx_ring->rx_data;
	hw = &Adapter->shared;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORKERNEL);

	if (e1000g_check_dma_handle(rx_data->rbd_dma_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
		return (NULL);
	}

	current_desc = rx_data->rbd_next;
	if (!(current_desc->status & E1000_RXD_STAT_DD)) {
		/*
		 * don't send anything up. just clear the RFD
		 */
		E1000G_DEBUG_STAT(rx_ring->stat_none);
		return (NULL);
	}

	max_size = Adapter->max_frame_size - ETHERFCSL - VLAN_TAGSZ;
	min_size = ETHERMIN;

	/*
	 * Loop through the receive descriptors starting at the last known
	 * descriptor owned by the hardware that begins a packet.
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (pkt_count < Adapter->rx_limit_onintr) &&
	    ((sz == E1000G_CHAIN_NO_LIMIT) || (chain_sz <= sz))) {

		desc_count++;
		/*
		 * This can happen in a jumbo frame situation.
		 */
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* packet has EOP set */
			end_of_packet = B_TRUE;
		} else {
			/*
			 * If this received buffer does not have the
			 * End-Of-Packet bit set, the received packet
			 * will consume multiple buffers. We won't send this
			 * packet upstack till we get all the related buffers.
			 */
			end_of_packet = B_FALSE;
		}

		/*
		 * Get a pointer to the actual receive buffer.
		 * The mp->b_rptr is mapped to the current descriptor's
		 * buffer address.
		 */
		packet =
		    (p_rx_sw_packet_t)QUEUE_GET_HEAD(&rx_data->recv_list);
		ASSERT(packet != NULL);

		rx_buf = packet->rx_buf;

		length = current_desc->length;

#ifdef __sparc
		if (packet->dma_type == USE_DVMA)
			dvma_sync(rx_buf->dma_handle, 0,
			    DDI_DMA_SYNC_FORKERNEL);
		else
			(void) ddi_dma_sync(rx_buf->dma_handle,
			    E1000G_IPALIGNROOM, length,
			    DDI_DMA_SYNC_FORKERNEL);
#else
		(void) ddi_dma_sync(rx_buf->dma_handle,
		    E1000G_IPALIGNROOM, length,
		    DDI_DMA_SYNC_FORKERNEL);
#endif

		if (e1000g_check_dma_handle(
		    rx_buf->dma_handle) != DDI_FM_OK) {
			ddi_fm_service_impact(Adapter->dip,
			    DDI_SERVICE_DEGRADED);
			Adapter->e1000g_state |= E1000G_ERROR;

			goto rx_drop;
		}

		accept_frame = (current_desc->errors == 0) ||
		    ((current_desc->errors &
		    (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) != 0);

		if (hw->mac.type == e1000_82543) {
			unsigned char last_byte;

			last_byte =
			    *((unsigned char *)rx_buf->address + length - 1);

			if (TBI_ACCEPT(hw,
			    current_desc->status, current_desc->errors,
			    current_desc->length, last_byte,
			    Adapter->min_frame_size, Adapter->max_frame_size)) {

				e1000_tbi_adjust_stats(Adapter,
				    length, hw->mac.addr);

				length--;
				accept_frame = B_TRUE;
			} else if (e1000_tbi_sbp_enabled_82543(hw) &&
			    (current_desc->errors == E1000_RXD_ERR_CE)) {
				accept_frame = B_TRUE;
			}
		}

		/*
		 * Indicate the packet to the NOS if it was good.
		 * Normally, hardware will discard bad packets for us.
		 * Check for the packet to be a valid Ethernet packet
		 */
		if (!accept_frame) {
			/*
			 * Error in the incoming packet: either the packet is
			 * not an Ethernet-sized packet, or the packet has an
			 * error. In either case, the packet will simply be
			 * discarded.
			 */
			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Process Receive Interrupts: Error in Packet\n");

			E1000G_STAT(rx_ring->stat_error);
			/*
			 * Returning here as we are done: there is no point
			 * in letting the while loop run to completion and
			 * then undoing work already done. More efficient
			 * and less error prone...
			 */
			goto rx_drop;
		}

		/*
		 * If the Ethernet CRC is not stripped by the hardware,
		 * we need to strip it before sending it up to the stack.
		 */
		if (end_of_packet && !Adapter->strip_crc) {
			if (length > ETHERFCSL) {
				length -= ETHERFCSL;
			} else {
				/*
				 * If the fragment is smaller than the CRC,
				 * drop this fragment, do the processing of
				 * the end of the packet.
				 */
				ASSERT(rx_data->rx_mblk_tail != NULL);
				rx_data->rx_mblk_tail->b_wptr -=
				    ETHERFCSL - length;
				rx_data->rx_mblk_len -=
				    ETHERFCSL - length;

				QUEUE_POP_HEAD(&rx_data->recv_list);

				goto rx_end_of_packet;
			}
		}

		need_copy = B_TRUE;

		if (length <= Adapter->rx_bcopy_thresh)
			goto rx_copy;

		/*
		 * Get the pre-constructed mblk that was associated
		 * to the receive data buffer.
		 */
		if (packet->mp == NULL) {
			packet->mp = desballoc((unsigned char *)
			    rx_buf->address, length,
			    BPRI_MED, &packet->free_rtn);
		}

		if (packet->mp != NULL) {
			/*
			 * We have two sets of buffer pools. One is associated
			 * with the Rx descriptors, the other is a freelist
			 * buffer pool. Each time we get a good packet, try to
			 * get a buffer from the freelist pool using
			 * e1000g_get_buf. If we get a free buffer, replace the
			 * descriptor buffer address with the free buffer we
			 * just got, and pass the pre-constructed mblk upstack.
			 * (note no copying)
			 *
			 * If we failed to get a free buffer, then try to
			 * allocate a new buffer(mp) and copy the recv buffer
			 * content to our newly allocated buffer(mp). Don't
			 * disturb the descriptor buffer address. (note copying)
			 */
			newpkt = e1000g_get_buf(rx_data);

			if (newpkt != NULL) {
				/*
				 * Get the mblk associated to the data,
				 * and strip it off the sw packet.
				 */
				nmp = packet->mp;
				packet->mp = NULL;
				atomic_inc_32(&packet->ref_cnt);

				/*
				 * Now replace old buffer with the new
				 * one we got from free list
				 * Both the RxSwPacket as well as the
				 * Receive Buffer Descriptor will now
				 * point to this new packet.
				 */
				packet = newpkt;

				current_desc->buffer_addr =
				    newpkt->rx_buf->dma_address;

				need_copy = B_FALSE;
			} else {
				/* EMPTY */
				E1000G_DEBUG_STAT(rx_ring->stat_no_freepkt);
			}
		}

rx_copy:
		if (need_copy) {
			/*
			 * No buffers available on the free list, so
			 * bcopy the data from the buffer and keep the
			 * original buffer. We don't want to do this...
			 * yuck, but there is no other way.
			 */
			if ((nmp = allocb(length + E1000G_IPALIGNROOM,
			    BPRI_MED)) == NULL) {
				/*
				 * The system has no buffers available
				 * to send up the incoming packet, hence
				 * the packet will have to be processed
				 * when there're more buffers available.
				 */
				E1000G_STAT(rx_ring->stat_allocb_fail);
				goto rx_drop;
			}
			nmp->b_rptr += E1000G_IPALIGNROOM;
			nmp->b_wptr += E1000G_IPALIGNROOM;
			/*
			 * The free list did not have any buffers
			 * available, so, the received packet will
			 * have to be copied into a mp and the original
			 * buffer will have to be retained for future
			 * packet reception.
			 */
			bcopy(rx_buf->address, nmp->b_wptr, length);
		}

		/*
		 * The rx_sw_packet MUST be popped off the
		 * RxSwPacketList before either a putnext or freemsg
		 * is done on the mp that has now been created by the
		 * desballoc. If not, it is possible that the free
		 * routine will get called from the interrupt context
		 * and try to put this packet on the free list
		 */
		(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

		ASSERT(nmp != NULL);
		nmp->b_wptr += length;

		if (rx_data->rx_mblk == NULL) {
			/*
			 *  TCP/UDP checksum offload and
			 *  IP checksum offload
			 */
			if (!(current_desc->status & E1000_RXD_STAT_IXSM)) {
				/*
				 * Check TCP/UDP checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_TCPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_TCPE))
					cksumflags |= HCK_FULLCKSUM |
					    HCK_FULLCKSUM_OK;
				/*
				 * Check IP Checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_IPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_IPE))
					cksumflags |= HCK_IPV4_HDRCKSUM;
			}
		}

		/*
		 * We need to maintain our packet chain in the global
		 * Adapter structure, since Rx processing can end
		 * with a fragment that has no EOP set.
		 */
		if (rx_data->rx_mblk == NULL) {
			/* Get the head of the message chain */
			rx_data->rx_mblk = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len = length;
		} else {	/* Not the first packet */
			/* Continue adding buffers */
			rx_data->rx_mblk_tail->b_cont = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len += length;
		}
		ASSERT(rx_data->rx_mblk != NULL);
		ASSERT(rx_data->rx_mblk_tail != NULL);
		ASSERT(rx_data->rx_mblk_tail->b_cont == NULL);

		/*
		 * Now this MP is ready to travel upwards but some more
		 * fragments are coming.
		 * We will send packet upwards as soon as we get EOP
		 * set on the packet.
		 */
		if (!end_of_packet) {
			/*
			 * continue to get the next descriptor,
			 * Tail would be advanced at the end
			 */
			goto rx_next_desc;
		}

rx_end_of_packet:
		if (E1000G_IS_VLAN_PACKET(rx_data->rx_mblk->b_rptr))
			max_size = Adapter->max_frame_size - ETHERFCSL;

		if ((rx_data->rx_mblk_len > max_size) ||
		    (rx_data->rx_mblk_len < min_size)) {
			E1000G_STAT(rx_ring->stat_size_error);
			goto rx_drop;
		}

		/*
		 * Found packet with EOP
		 * Process the last fragment.
		 */
		if (cksumflags != 0) {
			(void) hcksum_assoc(rx_data->rx_mblk,
			    NULL, NULL, 0, 0, 0, 0, cksumflags, 0);
			cksumflags = 0;
		}

		/*
		 * Count packets that span multi-descriptors
		 */
		E1000G_DEBUG_STAT_COND(rx_ring->stat_multi_desc,
		    (rx_data->rx_mblk->b_cont != NULL));

		/*
		 * Append to list to send upstream
		 */
		if (ret_mp == NULL) {
			ret_mp = ret_nmp = rx_data->rx_mblk;
		} else {
			ret_nmp->b_next = rx_data->rx_mblk;
			ret_nmp = rx_data->rx_mblk;
		}
		ret_nmp->b_next = NULL;
		*tail = ret_nmp;
		chain_sz += length;

		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;

		pkt_count++;

rx_next_desc:
		/*
		 * Zero out the receive descriptors status
		 */
		current_desc->status = 0;

		if (current_desc == rx_data->rbd_last)
			rx_data->rbd_next = rx_data->rbd_first;
		else
			rx_data->rbd_next++;

		last_desc = current_desc;
		current_desc = rx_data->rbd_next;

		/*
		 * Put the buffer that we just indicated back
		 * at the end of our list
		 */
		QUEUE_PUSH_TAIL(&rx_data->recv_list,
		    &packet->Link);
	}	/* while loop */

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	Adapter->rx_pkt_cnt = pkt_count;

	return (ret_mp);

rx_drop:
	/*
	 * Zero out the receive descriptors status
	 */
	current_desc->status = 0;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (current_desc == rx_data->rbd_last)
		rx_data->rbd_next = rx_data->rbd_first;
	else
		rx_data->rbd_next++;

	last_desc = current_desc;

	(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

	QUEUE_PUSH_TAIL(&rx_data->recv_list, &packet->Link);
	/*
	 * Reclaim all old buffers already allocated during
	 * Jumbo receives.....for incomplete reception
	 */
	if (rx_data->rx_mblk != NULL) {
		freemsg(rx_data->rx_mblk);
		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;
	}
	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	return (ret_mp);
}
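
The tail update in this routine derives the hardware index by pointer arithmetic: last_desc - rx_data->rbd_first is the slot number of the last processed descriptor, because the ring is one contiguous array of descriptors. A sketch of that conversion (desc_index and the struct layout are illustrative):

#include <stdint.h>

struct rx_desc { uint64_t buffer_addr; uint16_t length; uint16_t csum;
		 uint8_t status; uint8_t errors; uint16_t special; };

/* Hypothetical helper: slot number of a descriptor within the ring,
 * as (uint32_t)(last_desc - rx_data->rbd_first) computes above. */
static uint32_t desc_index(const struct rx_desc *ring_base,
			   const struct rx_desc *d)
{
	return (uint32_t)(d - ring_base);
}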
Example #8
/*
 * e1000g_rx_setup - setup rx data structures
 *
 * This routine initializes all of the receive related
 * structures. This includes the receive descriptors, the
 * actual receive buffers, and the rx_sw_packet software
 * structures.
 */
void
e1000g_rx_setup(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	p_rx_sw_packet_t packet;
	struct e1000_rx_desc *descriptor;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	uint32_t rctl;
	uint32_t rxdctl;
	uint32_t ert;
	uint16_t phy_data;
	int i;
	int size;
	e1000g_rx_data_t *rx_data;

	hw = &Adapter->shared;
	rx_data = Adapter->rx_ring->rx_data;

	/*
	 * zero out all of the receive buffer descriptor memory
	 * assures any previous data or status is erased
	 */
	bzero(rx_data->rbd_area,
	    sizeof (struct e1000_rx_desc) * Adapter->rx_desc_num);

	if (!Adapter->rx_buffer_setup) {
		/* Init the list of "Receive Buffer" */
		QUEUE_INIT_LIST(&rx_data->recv_list);

		/* Init the list of "Free Receive Buffer" */
		QUEUE_INIT_LIST(&rx_data->free_list);

		/* Init the list of "Recycle Receive Buffer" */
		QUEUE_INIT_LIST(&rx_data->recycle_list);
		/*
		 * Setup the Receive list and the Free list. Note that
		 * both were allocated in one packet area.
		 */
		packet = rx_data->packet_area;
		descriptor = rx_data->rbd_first;

		for (i = 0; i < Adapter->rx_desc_num;
		    i++, packet = packet->next, descriptor++) {
			ASSERT(packet != NULL);
			ASSERT(descriptor != NULL);
			descriptor->buffer_addr =
			    packet->rx_buf->dma_address;

			/* Add this rx_sw_packet to the receive list */
			QUEUE_PUSH_TAIL(&rx_data->recv_list,
			    &packet->Link);
		}

		for (i = 0; i < Adapter->rx_freelist_num;
		    i++, packet = packet->next) {
			ASSERT(packet != NULL);
			/* Add this rx_sw_packet to the free list */
			QUEUE_PUSH_TAIL(&rx_data->free_list,
			    &packet->Link);
		}
		rx_data->avail_freepkt = Adapter->rx_freelist_num;
		rx_data->recycle_freepkt = 0;

		Adapter->rx_buffer_setup = B_TRUE;
	} else {
		/* Setup the initial pointer to the first rx descriptor */
		packet = (p_rx_sw_packet_t)
		    QUEUE_GET_HEAD(&rx_data->recv_list);
		descriptor = rx_data->rbd_first;

		for (i = 0; i < Adapter->rx_desc_num; i++) {
			ASSERT(packet != NULL);
			ASSERT(descriptor != NULL);
			descriptor->buffer_addr =
			    packet->rx_buf->dma_address;

			/* Get next rx_sw_packet */
			packet = (p_rx_sw_packet_t)
			    QUEUE_GET_NEXT(&rx_data->recv_list, &packet->Link);
			descriptor++;
		}
	}

	E1000_WRITE_REG(&Adapter->shared, E1000_RDTR, Adapter->rx_intr_delay);
	E1000G_DEBUGLOG_1(Adapter, E1000G_INFO_LEVEL,
	    "E1000_RDTR: 0x%x\n", Adapter->rx_intr_delay);
	if (hw->mac.type >= e1000_82540) {
		E1000_WRITE_REG(&Adapter->shared, E1000_RADV,
		    Adapter->rx_intr_abs_delay);
		E1000G_DEBUGLOG_1(Adapter, E1000G_INFO_LEVEL,
		    "E1000_RADV: 0x%x\n", Adapter->rx_intr_abs_delay);
	}

	/*
	 * Setup our descriptor pointers
	 */
	rx_data->rbd_next = rx_data->rbd_first;

	size = Adapter->rx_desc_num * sizeof (struct e1000_rx_desc);
	E1000_WRITE_REG(hw, E1000_RDLEN(0), size);
	size = E1000_READ_REG(hw, E1000_RDLEN(0));

	/* To get lower order bits */
	buf_low = (uint32_t)rx_data->rbd_dma_addr;
	/* To get the higher order bits */
	buf_high = (uint32_t)(rx_data->rbd_dma_addr >> 32);

	E1000_WRITE_REG(hw, E1000_RDBAH(0), buf_high);
	E1000_WRITE_REG(hw, E1000_RDBAL(0), buf_low);

	/*
	 * Setup our HW Rx Head & Tail descriptor pointers
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(rx_data->rbd_last - rx_data->rbd_first));
	E1000_WRITE_REG(hw, E1000_RDH(0), 0);

	/*
	 * Setup the Receive Control Register (RCTL), and ENABLE the
	 * receiver. The initial configuration is to: Enable the receiver,
	 * accept broadcasts, discard bad packets (and long packets),
	 * disable VLAN filter checking, set the receive descriptor
	 * minimum threshold size to 1/2, and the receive buffer size to
	 * 2k.
	 */
	rctl = E1000_RCTL_EN |		/* Enable Receive Unit */
	    E1000_RCTL_BAM |		/* Accept Broadcast Packets */
	    E1000_RCTL_LPE |		/* Large Packet Enable bit */
	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT) |
	    E1000_RCTL_RDMTS_HALF |
	    E1000_RCTL_LBM_NO;		/* Loopback Mode = none */

	if (Adapter->strip_crc)
		rctl |= E1000_RCTL_SECRC;	/* Strip Ethernet CRC */

	if (Adapter->mem_workaround_82546 &&
	    ((hw->mac.type == e1000_82545) ||
	    (hw->mac.type == e1000_82546) ||
	    (hw->mac.type == e1000_82546_rev_3))) {
		rctl |= E1000_RCTL_SZ_2048;
	} else {
		if ((Adapter->max_frame_size > FRAME_SIZE_UPTO_2K) &&
		    (Adapter->max_frame_size <= FRAME_SIZE_UPTO_4K))
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		else if ((Adapter->max_frame_size > FRAME_SIZE_UPTO_4K) &&
		    (Adapter->max_frame_size <= FRAME_SIZE_UPTO_8K))
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		else if ((Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) &&
		    (Adapter->max_frame_size <= FRAME_SIZE_UPTO_16K))
			rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX;
		else
			rctl |= E1000_RCTL_SZ_2048;
	}

	if (e1000_tbi_sbp_enabled_82543(hw))
		rctl |= E1000_RCTL_SBP;

	/*
	 * Enable Early Receive Threshold (ERT) on supported devices.
	 * Only takes effect when packet size is equal or larger than the
	 * specified value (in 8 byte units), e.g. using jumbo frames.
	 */
	if ((hw->mac.type == e1000_82573) ||
	    (hw->mac.type == e1000_82574) ||
	    (hw->mac.type == e1000_ich9lan) ||
	    (hw->mac.type == e1000_ich10lan)) {

		ert = E1000_ERT_2048;

		/*
		 * Special modification when ERT and
		 * jumbo frames are enabled
		 */
		if (Adapter->default_mtu > ETHERMTU) {
			rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
			E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 0x3);
			ert |= (1 << 13);
		}

		E1000_WRITE_REG(hw, E1000_ERT, ert);
	}

	/* Workaround errata on 82577/8 adapters with large frames */
	if ((hw->mac.type == e1000_pchlan) &&
	    (Adapter->default_mtu > ETHERMTU)) {

		(void) e1000_read_phy_reg(hw, PHY_REG(770, 26), &phy_data);
		phy_data &= 0xfff8;
		phy_data |= (1 << 2);
		(void) e1000_write_phy_reg(hw, PHY_REG(770, 26), phy_data);

		if (hw->phy.type == e1000_phy_82577) {
			(void) e1000_read_phy_reg(hw, 22, &phy_data);
			phy_data &= 0x0fff;
			phy_data |= (1 << 14);
			(void) e1000_write_phy_reg(hw, 0x10, 0x2823);
			(void) e1000_write_phy_reg(hw, 0x11, 0x0003);
			(void) e1000_write_phy_reg(hw, 22, phy_data);
		}
	}

	reg_val =
	    E1000_RXCSUM_TUOFL |	/* TCP/UDP checksum offload Enable */
	    E1000_RXCSUM_IPOFL;		/* IP checksum offload Enable */

	E1000_WRITE_REG(hw, E1000_RXCSUM, reg_val);

	/*
	 * Workaround: Set bit 16 (IPv6_ExDIS) to disable the
	 * processing of received IPV6 extension headers
	 */
	if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
		reg_val = E1000_READ_REG(hw, E1000_RFCTL);
		reg_val |= (E1000_RFCTL_IPV6_EX_DIS |
		    E1000_RFCTL_NEW_IPV6_EXT_DIS);
		E1000_WRITE_REG(hw, E1000_RFCTL, reg_val);
	}

	/* Write to enable the receive unit */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
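
Programming the descriptor base above splits the 64-bit DMA address into RDBAL/RDBAH halves. A sketch of the split, equivalent to the buf_low/buf_high computation (split64 is an illustrative name):

#include <stdint.h>

/* Split a 64-bit bus address into the two 32-bit register halves,
 * as the buf_low/buf_high assignments do above. */
static void split64(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)addr;
	*hi = (uint32_t)(addr >> 32);
}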
Example #9
void dna_igb_alloc_rx_buffers(struct igb_ring *rx_ring, struct pfring_hooks *hook) {
  union e1000_adv_rx_desc *rx_desc, *shadow_rx_desc;
  struct igb_rx_buffer *bi;
  u16 i;
  struct igb_adapter 	*adapter = netdev_priv(rx_ring->netdev);
  struct e1000_hw	*hw = &adapter->hw;
  u16			cache_line_size;
  struct igb_ring	*tx_ring = adapter->tx_ring[rx_ring->queue_index];
  mem_ring_info         rx_info = {0};
  mem_ring_info         tx_info = {0};
  int                   num_slots_per_page;

  /* Check if the memory has been already allocated */
  if(rx_ring->dna.memory_allocated) return;

  /* nothing to do or no valid netdev defined */
  if (!netdev_ring(rx_ring))
    return;

  if (!hook) {
    printk("[DNA] WARNING The PF_RING module is NOT loaded.\n");
    printk("[DNA] WARNING Please load it, before loading this module\n");
    return;
  }

  init_waitqueue_head(&rx_ring->dna.rx_tx.rx.packet_waitqueue);

  cache_line_size = cpu_to_le16(igb_read_pci_cfg_word(hw, IGB_PCI_DEVICE_CACHE_LINE_SIZE));
  cache_line_size &= 0x00FF;
  cache_line_size *= PCI_DEVICE_CACHE_LINE_SIZE_BYTES;
  if(cache_line_size == 0) cache_line_size = 64;

  if(unlikely(enable_debug))
    printk("%s(): pci cache line size %d\n",__FUNCTION__, cache_line_size);

  rx_ring->dna.packet_slot_len  = ALIGN(rx_ring->rx_buffer_len, cache_line_size);
  rx_ring->dna.packet_num_slots = rx_ring->count;

  rx_ring->dna.tot_packet_memory = PAGE_SIZE << DNA_MAX_CHUNK_ORDER;

  num_slots_per_page = rx_ring->dna.tot_packet_memory / rx_ring->dna.packet_slot_len;

  rx_ring->dna.num_memory_pages = (rx_ring->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

  for(i=0; i<rx_ring->dna.num_memory_pages; i++) {
    rx_ring->dna.rx_tx.rx.packet_memory[i] =
      alloc_contiguous_memory(&rx_ring->dna.tot_packet_memory, &rx_ring->dna.mem_order);
  
    if (rx_ring->dna.rx_tx.rx.packet_memory[i] == 0) {
      printk("\n\n%s() ERROR: not enough memory for RX DMA ring!!\n\n\n",
	     __FUNCTION__);
      return;
    }

    /*
    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully allocated RX %u@%u bytes at 0x%08lx [slot_len=%d]\n",
	     __FUNCTION__, rx_ring->dna.tot_packet_memory, i,
	     rx_ring->dna.rx_tx.rx.packet_memory[i], rx_ring->dna.packet_slot_len);
    */
  }

  for(i=0; i < rx_ring->count; i++) {
    u_int offset, page_index;
    char *pkt;
    
    page_index = i / num_slots_per_page;
    offset = (i % num_slots_per_page) * rx_ring->dna.packet_slot_len;
    pkt = (char *)(rx_ring->dna.rx_tx.rx.packet_memory[page_index] + offset);

    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully allocated RX %u@%u bytes at 0x%08lx [slot_len=%d][page_index=%u][offset=%u]\n",
	     __FUNCTION__, rx_ring->dna.tot_packet_memory, i,
	     rx_ring->dna.rx_tx.rx.packet_memory[page_index],
	     rx_ring->dna.packet_slot_len, page_index, offset);

    bi      = &rx_ring->rx_buffer_info[i];
    bi->skb = NULL;
    rx_desc = IGB_RX_DESC(rx_ring, i);

    /*
    if(unlikely(enable_debug))
      printk("%s(): [%s@%d] Mapping RX slot %d of %d [pktaddr=%p][rx_desc=%p][offset=%u]\n",
	     __FUNCTION__, 
	     rx_ring->netdev->name, rx_ring->queue_index,
	     i, rx_ring->dna.packet_num_slots,
	     pkt, rx_desc, offset);
    */

    bi->dma = pci_map_single(to_pci_dev(rx_ring->dev), pkt,
			     rx_ring->dna.packet_slot_len,
			     PCI_DMA_FROMDEVICE);

    /* Standard MTU */
    rx_desc->read.hdr_addr = 0;
    rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
    rx_desc->wb.upper.status_error = 0;

    shadow_rx_desc = IGB_RX_DESC(rx_ring, i + rx_ring->count);
    memcpy(shadow_rx_desc, rx_desc, sizeof(union e1000_adv_rx_desc));

    if(unlikely(enable_debug)) {
      print_adv_rx_descr(rx_desc);
      print_adv_rx_descr(shadow_rx_desc);
    }

    igb_release_rx_desc(rx_ring, i);
  } /* for */

    /* Shadow */
  rx_desc = IGB_RX_DESC(rx_ring, 0);

  dna_reset_rx_ring(rx_ring);

  if(unlikely(enable_debug))
    printk("[DNA] next_to_clean=%u/next_to_use=%u [register=%d]\n",
	   rx_ring->next_to_clean, rx_ring->next_to_use,
	   E1000_READ_REG(hw, E1000_RDT(rx_ring->reg_idx)));

  /* Allocate TX memory */
  
  tx_ring->dna.tot_packet_memory = rx_ring->dna.tot_packet_memory;
  tx_ring->dna.packet_slot_len   = rx_ring->dna.packet_slot_len;
  tx_ring->dna.packet_num_slots  = tx_ring->count;
  tx_ring->dna.mem_order         = rx_ring->dna.mem_order;
  tx_ring->dna.num_memory_pages  = (tx_ring->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

  dna_igb_alloc_tx_buffers(tx_ring, hook);

  rx_info.packet_memory_num_chunks    = rx_ring->dna.num_memory_pages;
  rx_info.packet_memory_chunk_len     = rx_ring->dna.tot_packet_memory;
  rx_info.packet_memory_num_slots     = rx_ring->dna.packet_num_slots;
  rx_info.packet_memory_slot_len      = rx_ring->dna.packet_slot_len;
  rx_info.descr_packet_memory_tot_len = 2 * rx_ring->size;
  
  tx_info.packet_memory_num_chunks    = tx_ring->dna.num_memory_pages;
  tx_info.packet_memory_chunk_len     = tx_ring->dna.tot_packet_memory;
  tx_info.packet_memory_num_slots     = tx_ring->dna.packet_num_slots;
  tx_info.packet_memory_slot_len      = tx_ring->dna.packet_slot_len;
  tx_info.descr_packet_memory_tot_len = 2 * tx_ring->size;

  /* Register with PF_RING */

  hook->ring_dna_device_handler(add_device_mapping,
				dna_v1,
  				&rx_info,
				&tx_info,
				rx_ring->dna.rx_tx.rx.packet_memory,
				rx_ring->desc, /* Packet descriptors */
				tx_ring->dna.rx_tx.tx.packet_memory,
				tx_ring->desc, /* Packet descriptors */
				(void*)rx_ring->netdev->mem_start,
				rx_ring->netdev->mem_end - rx_ring->netdev->mem_start,
				rx_ring->queue_index, /* Channel Id */
				rx_ring->netdev,
				rx_ring->dev,
				dna_model(hw),
				rx_ring->netdev->dev_addr,
				&rx_ring->dna.rx_tx.rx.packet_waitqueue,
				&rx_ring->dna.rx_tx.rx.interrupt_received,
				(void*)rx_ring, (void*)tx_ring,
				wait_packet_function_ptr,
				notify_function_ptr);

  if(unlikely(enable_debug))
    printk("[DNA] igb: %s: Enabled DNA on queue %d [RX][size=%u][count=%d] [TX][size=%u][count=%d]\n",
	   rx_ring->netdev->name, rx_ring->queue_index, rx_ring->size, rx_ring->count, tx_ring->size, tx_ring->count);

  rx_ring->dna.memory_allocated = 1;
}
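
The slot sizing in this allocator rounds each packet buffer up to the PCI cache line size so that adjacent slots never share a cache line. A sketch of that round-up, equivalent to the kernel's ALIGN() used above (align must be a power of two):

#include <stdint.h>

/* Round len up to a multiple of align, mirroring
 * ALIGN(rx_ring->rx_buffer_len, cache_line_size) above. */
static inline uint32_t align_up(uint32_t len, uint32_t align)
{
	return (len + align - 1) & ~(align - 1);
}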
Example #10
int wait_packet_function_ptr(void *data, int mode)
{
  struct igb_ring        *rx_ring = (struct igb_ring*)data;
  struct igb_adapter	 *adapter = netdev_priv(rx_ring->netdev);
  struct e1000_hw	      *hw = &adapter->hw;

  if(unlikely(enable_debug))
    printk("%s(): enter [mode=%d/%s][queueId=%d][next_to_clean=%u][next_to_use=%d]\n",
	   __FUNCTION__, mode, mode == 1 ? "enable int" : "disable int",
	   rx_ring->queue_index, rx_ring->next_to_clean, rx_ring->next_to_use);

  if(!rx_ring->dna.memory_allocated) return(0);

  if(mode == 1 /* Enable interrupt */) {
    union e1000_adv_rx_desc *rx_desc;
    u32	staterr;
    u8	reg_idx = rx_ring->reg_idx;
    u16	i = E1000_READ_REG(hw, E1000_RDT(reg_idx));

    /* Very important: re-read the value from the register, which is set
     * from userland. Here i is the last slot I've read (zero-copy
     * implementation) */
    if(++i == rx_ring->count)
      i = 0;
    /* Here i is the next I have to read */

    rx_ring->next_to_clean = i;

    rx_desc = IGB_RX_DESC(rx_ring, i);
    prefetch(rx_desc);
    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

    if(unlikely(enable_debug)) {
      printk("%s(): Check if a packet is arrived [idx=%d][staterr=%d][len=%d]\n",
	     __FUNCTION__, i, staterr, rx_desc->wb.upper.length);

      print_adv_rx_descr(rx_desc);
    }

    if(!(staterr & E1000_RXD_STAT_DD)) {
      rx_ring->dna.rx_tx.rx.interrupt_received = 0;

      if(!rx_ring->dna.rx_tx.rx.interrupt_enabled) {
	igb_irq_enable_queues(adapter, rx_ring->queue_index);

	if(unlikely(enable_debug))
	  printk("%s(): Enabled interrupts, queue = %d\n", __FUNCTION__, rx_ring->queue_index);

	rx_ring->dna.rx_tx.rx.interrupt_enabled = 1;

	if(unlikely(enable_debug))
	  printk("%s(): Packet not arrived yet: enabling "
		 "interrupts, queue=%d, i=%d\n",
		 __FUNCTION__,rx_ring->queue_index, i);
      }

      /* Refresh the value */
      staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    } else {
      rx_ring->dna.rx_tx.rx.interrupt_received = 1; 
    }

    if(unlikely(enable_debug))
      printk("%s(): Packet received: %d\n", __FUNCTION__, staterr & E1000_RXD_STAT_DD);

    return(staterr & E1000_RXD_STAT_DD);
  } else {
    /* Disable interrupts */

    igb_irq_disable_queues(adapter, ((u64)1 << rx_ring->queue_index));

    rx_ring->dna.rx_tx.rx.interrupt_enabled = 0;

    if(unlikely(enable_debug))
      printk("%s(): Disabled interrupts, queue = %d\n", __FUNCTION__, rx_ring->queue_index);

    return(0);
  }
}
Example #11
/**
 *  e1000_translate_register_82542 - Translate the proper register offset
 *  @reg: e1000 register to be read
 *
 *  Registers in 82542 are located in different offsets than other adapters
 *  even though they function in the same manner.  This function takes in
 *  the name of the register to read and returns the correct offset for
 *  82542 silicon.
 **/
u32 e1000_translate_register_82542(u32 reg)
{
    /*
     * Some of the 82542 registers are located at different
     * offsets than they are in newer adapters.
     * Despite the difference in location, the registers
     * function in the same manner.
     */
    switch (reg) {
    case E1000_RA:
        reg = 0x00040;
        break;
    case E1000_RDTR:
        reg = 0x00108;
        break;
    case E1000_RDBAL(0):
        reg = 0x00110;
        break;
    case E1000_RDBAH(0):
        reg = 0x00114;
        break;
    case E1000_RDLEN(0):
        reg = 0x00118;
        break;
    case E1000_RDH(0):
        reg = 0x00120;
        break;
    case E1000_RDT(0):
        reg = 0x00128;
        break;
    case E1000_RDBAL(1):
        reg = 0x00138;
        break;
    case E1000_RDBAH(1):
        reg = 0x0013C;
        break;
    case E1000_RDLEN(1):
        reg = 0x00140;
        break;
    case E1000_RDH(1):
        reg = 0x00148;
        break;
    case E1000_RDT(1):
        reg = 0x00150;
        break;
    case E1000_FCRTH:
        reg = 0x00160;
        break;
    case E1000_FCRTL:
        reg = 0x00168;
        break;
    case E1000_MTA:
        reg = 0x00200;
        break;
    case E1000_TDBAL(0):
        reg = 0x00420;
        break;
    case E1000_TDBAH(0):
        reg = 0x00424;
        break;
    case E1000_TDLEN(0):
        reg = 0x00428;
        break;
    case E1000_TDH(0):
        reg = 0x00430;
        break;
    case E1000_TDT(0):
        reg = 0x00438;
        break;
    case E1000_TIDV:
        reg = 0x00440;
        break;
    case E1000_VFTA:
        reg = 0x00600;
        break;
    case E1000_TDFH:
        reg = 0x08010;
        break;
    case E1000_TDFT:
        reg = 0x08018;
        break;
    default:
        break;
    }

    return reg;
}
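
A caller applies this translation before every MMIO access on 82542 silicon; a hedged sketch of such a wrapper (e1000_read_reg_82542 and mmio_base are illustrative, not the driver's actual accessor):

#include <stdint.h>

/* Prototype for the function above (u32 == uint32_t). */
uint32_t e1000_translate_register_82542(uint32_t reg);

/* Hypothetical wrapper: remap the offset for 82542 parts, then do a
 * plain 32-bit MMIO read at the translated offset. */
static uint32_t e1000_read_reg_82542(volatile uint8_t *mmio_base, uint32_t reg)
{
	reg = e1000_translate_register_82542(reg);
	return *(volatile uint32_t *)(mmio_base + reg);
}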
Example #12
File: ethtool.c Project: A-K/linux
static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	u32 value;
	u32 before;
	u32 after;
	u32 i;
	u32 toggle;
	u32 mask;
	u32 wlock_mac = 0;

	/*
	 * The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */
	switch (mac->type) {
	/* there are several bits on newer hardware that are r/w */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		toggle = 0x7FFFF3FF;
		break;
	default:
		toggle = 0x7FFFF033;
		break;
	}

	before = er32(STATUS);
	value = (er32(STATUS) & toggle);
	ew32(STATUS, toggle);
	after = er32(STATUS) & toggle;
	if (value != after) {
		e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ew32(STATUS, before);

	if (!(adapter->flags & FLAG_IS_ICH)) {
		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
	}

	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);

	before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);

	REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
	if (!(adapter->flags & FLAG_IS_ICH))
		REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
	mask = 0x8003FFFF;
	switch (mac->type) {
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
		mask |= (1 << 18);
		break;
	default:
		break;
	}

	if (mac->type == e1000_pch_lpt)
		wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
		    E1000_FWSM_WLOCK_MAC_SHIFT;

	for (i = 0; i < mac->rar_entry_count; i++) {
		/* Cannot test write-protected SHRAL[n] registers */
		if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
			continue;

		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
				       mask, 0xFFFFFFFF);
	}

	for (i = 0; i < mac->mta_reg_count; i++)
		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;

	return 0;
}
Example #13
void alloc_dna_memory(struct e1000_adapter *adapter) {
  struct net_device *netdev = adapter->netdev;
  struct pci_dev *pdev = adapter->pdev;
  struct e1000_ring *rx_ring = adapter->rx_ring;
  struct e1000_ring *tx_ring = adapter->tx_ring;
  struct e1000_tx_desc *tx_desc, *shadow_tx_desc;
  struct pfring_hooks *hook = (struct pfring_hooks*)netdev->pfring_ptr;
  union e1000_rx_desc_extended *rx_desc, *shadow_rx_desc;
  struct e1000_buffer *buffer_info;
  u16 cache_line_size;
  struct sk_buff *skb;
  unsigned int i;
  int cleaned_count = rx_ring->count; /* Allocate all slots in one shot */
  unsigned int bufsz = adapter->rx_buffer_len;
  mem_ring_info         rx_info = {0}; 
  mem_ring_info         tx_info = {0};
  int                   num_slots_per_page;

  /* Buffers are allocated all in one shot so we'll pass here once */
#if 0
  printk("[DNA] e1000_alloc_rx_buffers(cleaned_count=%d)[%s][slot len=%u/%u]\n",
	 cleaned_count, adapter->netdev->name,
	 bufsz, adapter->max_hw_frame_size);
#endif

  if(hook && (hook->magic == PF_RING)) {
    if(adapter->dna.rx_packet_memory[0] == 0) {
      pci_read_config_word(adapter->pdev,
			   0x0C /* Conf. Space Cache Line Size offset */,
			   &cache_line_size);
      cache_line_size *= 2; /* word (2-byte) to bytes */

      if(cache_line_size == 0) cache_line_size = 64;

      if (0)
	printk("[DNA] Cache line size is %u bytes\n", cache_line_size);

      adapter->dna.packet_slot_len  = ALIGN(bufsz, cache_line_size);
      adapter->dna.packet_num_slots = cleaned_count;

      adapter->dna.tot_packet_memory = PAGE_SIZE << DNA_MAX_CHUNK_ORDER;

      num_slots_per_page = adapter->dna.tot_packet_memory / adapter->dna.packet_slot_len;

      adapter->dna.num_memory_pages = (adapter->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

      if(0)
	printk("[DNA] Allocating memory [%u slots][%u memory pages][tot_packet_memory %u bytes]\n",
	       adapter->dna.packet_num_slots, adapter->dna.num_memory_pages, adapter->dna.tot_packet_memory);

      for(i=0; i<adapter->dna.num_memory_pages; i++) {
	adapter->dna.rx_packet_memory[i] =
	  alloc_contiguous_memory(&adapter->dna.tot_packet_memory, &adapter->dna.mem_order);

	if(adapter->dna.rx_packet_memory[i] != 0) {
	  if(0)
	    printk("[DNA]  Successfully allocated %lu bytes at "
		   "0x%08lx [slot_len=%d]\n",
		   (unsigned long) adapter->dna.tot_packet_memory,
		   (unsigned long) adapter->dna.rx_packet_memory[i],
		   adapter->dna.packet_slot_len);
	} else {
	  printk("[DNA]  ERROR: not enough memory for DMA ring\n");
	  return;
	}
      }

      for(i=0; i<cleaned_count; i++) {
	u_int page_index, offset;

	page_index = i / num_slots_per_page;
	offset = (i % num_slots_per_page) * adapter->dna.packet_slot_len;
	skb = (struct sk_buff *)(adapter->dna.rx_packet_memory[page_index] + offset);

	if(0)
	  printk("[DNA] Allocating slot %d of %d [addr=%p][page_index=%u][offset=%u]\n",
		 i, adapter->dna.packet_num_slots, skb, page_index, offset);

	buffer_info = &rx_ring->buffer_info[i];
	buffer_info->skb = skb;
	buffer_info->length = adapter->rx_buffer_len;
	buffer_info->dma = pci_map_single(pdev, skb,
					  buffer_info->length,
					  PCI_DMA_FROMDEVICE);

#if 0
	printk("[DNA] Mapping buffer %d [ptr=%p][len=%d]\n",
	       i, (void*)buffer_info->dma, adapter->dna.packet_slot_len);
#endif

	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

	shadow_rx_desc = E1000_RX_DESC_EXT(*rx_ring, i + rx_ring->count);
	memcpy(shadow_rx_desc, rx_desc, sizeof(union e1000_rx_desc_extended));
      }

      wmb();

      /* The statement below syncs the value of tail (next to read) to 
       * count-1 instead of 0 for zero-copy (one slot back) */
      E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rx_ring->count-1);

      e1000_irq_disable(adapter);
      //e1000_irq_enable(adapter);

      /* TX */

      if(adapter->dna.tx_packet_memory[0] == 0) {

	for(i=0; i<adapter->dna.num_memory_pages; i++) {
	  adapter->dna.tx_packet_memory[i] =
	    alloc_contiguous_memory(&adapter->dna.tot_packet_memory, &adapter->dna.mem_order);

	  if(adapter->dna.tx_packet_memory[i] != 0) {
	    if(0)
	      printk("[DNA] [TX] Successfully allocated %lu bytes at "
		     "0x%08lx [slot_len=%d]\n",
		     (unsigned long) adapter->dna.tot_packet_memory,
		     (unsigned long) adapter->dna.rx_packet_memory[i],
		     adapter->dna.packet_slot_len);
	  } else {
	    printk("[DNA]  ERROR: not enough memory for DMA ring\n");
	    return;
	  }
	}

	for(i=0; i<cleaned_count; i++) {
	  u_int page_index, offset;

	  page_index = i / num_slots_per_page;
	  offset = (i % num_slots_per_page) * adapter->dna.packet_slot_len;
	  skb = (struct sk_buff *)(adapter->dna.tx_packet_memory[page_index] + offset);

	  if(0)
	    printk("[DNA] [TX] Allocating slot %d of %d [addr=%p][page_index=%u][offset=%u]\n",
		   i, adapter->dna.packet_num_slots, skb, page_index, offset);

	  buffer_info = &tx_ring->buffer_info[i];
	  buffer_info->skb = skb;
	  buffer_info->length = adapter->rx_buffer_len;
	  buffer_info->dma = pci_map_single(pdev, skb,
					    buffer_info->length,
					    PCI_DMA_TODEVICE);

#if 0
	  printk("[DNA] Mapping buffer %d [ptr=%p][len=%d]\n",
		 i, (void*)buffer_info->dma, adapter->dna.packet_slot_len);
#endif

	  tx_desc = E1000_TX_DESC(*tx_ring, i);
	  tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

	  /* Note that shadows are useless for e1000e with standard DNA, but used by libzero */
	  shadow_tx_desc = E1000_TX_DESC(*tx_ring, i + tx_ring->count); 
	  memcpy(shadow_tx_desc, tx_desc, sizeof(struct e1000_tx_desc));
	}
      }

      rx_info.packet_memory_num_chunks    = adapter->dna.num_memory_pages;
      rx_info.packet_memory_chunk_len     = adapter->dna.tot_packet_memory;
      rx_info.packet_memory_num_slots     = adapter->dna.packet_num_slots;
      rx_info.packet_memory_slot_len      = adapter->dna.packet_slot_len;
      rx_info.descr_packet_memory_tot_len = 2 * rx_ring->size;
  
      tx_info.packet_memory_num_chunks    = adapter->dna.num_memory_pages;
      tx_info.packet_memory_chunk_len     = adapter->dna.tot_packet_memory;
      tx_info.packet_memory_num_slots     = adapter->dna.packet_num_slots;
      tx_info.packet_memory_slot_len      = adapter->dna.packet_slot_len;
      tx_info.descr_packet_memory_tot_len = 2 * tx_ring->size;

      /* Register with PF_RING */
      hook->ring_dna_device_handler(add_device_mapping,
				    dna_v1,
  				    &rx_info,
				    &tx_info,
				    adapter->dna.rx_packet_memory,
				    rx_ring->desc,
				    adapter->dna.tx_packet_memory,
				    tx_ring->desc, /* Packet descriptors */
				    (void*)netdev->mem_start,
				    netdev->mem_end-netdev->mem_start,
				    0, /* Channel Id */
				    netdev,
				    &pdev->dev,
				    intel_e1000e,
				    adapter->netdev->dev_addr,
				    &adapter->dna.packet_waitqueue,
				    &adapter->dna.interrupt_received,
				    (void*)adapter,
				    wait_packet_function_ptr,
				    notify_function_ptr);

      if(1) printk("[DNA] Enabled DNA on %s (rx len=%u, tx len=%u)\n", 
                   adapter->netdev->name, rx_ring->size, tx_ring->size);
    } else {
      printk("WARNING e1000_alloc_rx_buffers(cleaned_count=%d)"
	     "[%s][%lu] already allocated\n",
	     cleaned_count, adapter->netdev->name,
	     adapter->dna.rx_packet_memory[0]);

    }
  }
}
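
Both DNA allocators size their memory the same way: slots per chunk is a plain integer division, and the number of chunks is a ceiling division of total slots by slots-per-chunk. A sketch of that arithmetic (chunks_needed is an illustrative name):

#include <stdint.h>

/* Chunks needed to hold num_slots slots of slot_len bytes each, given
 * chunk_len bytes per chunk; mirrors the num_memory_pages computation
 * in the two allocators above. */
static uint32_t chunks_needed(uint32_t num_slots, uint32_t slot_len,
			      uint32_t chunk_len)
{
	uint32_t slots_per_chunk = chunk_len / slot_len;
	return (num_slots + slots_per_chunk - 1) / slots_per_chunk;
}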
Example #14
/*
 * mac_dump - dump important mac registers
 */
void
mac_dump(void *instance)
{
    struct e1000g *Adapter = (struct e1000g *)instance;
    struct e1000_hw *hw = &Adapter->shared;
    int i;

    /* {name, offset} for each mac register */
    Regi_t macreg[NUM_REGS] = {
        {"CTRL",	E1000_CTRL},	{"STATUS",	E1000_STATUS},
        {"EECD",	E1000_EECD},	{"EERD",	E1000_EERD},
        {"CTRL_EXT", E1000_CTRL_EXT}, {"FLA",	E1000_FLA},
        {"MDIC",	E1000_MDIC},	{"SCTL",	E1000_SCTL},
        {"FCAL",	E1000_FCAL},	{"FCAH",	E1000_FCAH},
        {"FCT",	E1000_FCT},	{"VET",		E1000_VET},
        {"ICR",	E1000_ICR},	{"ITR",		E1000_ITR},
        {"ICS",	E1000_ICS},	{"IMS",		E1000_IMS},
        {"IMC",	E1000_IMC},	{"IAM",		E1000_IAM},
        {"RCTL",	E1000_RCTL},	{"FCTTV",	E1000_FCTTV},
        {"TXCW",	E1000_TXCW},	{"RXCW",	E1000_RXCW},
        {"TCTL",	E1000_TCTL},	{"TIPG",	E1000_TIPG},
        {"AIT",	E1000_AIT},	{"LEDCTL",	E1000_LEDCTL},
        {"PBA",	E1000_PBA},	{"PBS",		E1000_PBS},
        {"EEMNGCTL", E1000_EEMNGCTL}, {"ERT",	E1000_ERT},
        {"FCRTL",	E1000_FCRTL},	{"FCRTH",	E1000_FCRTH},
        {"PSRCTL",	E1000_PSRCTL},	{"RDBAL(0)",	E1000_RDBAL(0)},
        {"RDBAH(0)", E1000_RDBAH(0)}, {"RDLEN(0)",	E1000_RDLEN(0)},
        {"RDH(0)",	E1000_RDH(0)},	{"RDT(0)",	E1000_RDT(0)},
        {"RDTR",	E1000_RDTR},	{"RXDCTL(0)",	E1000_RXDCTL(0)},
        {"RADV",	E1000_RADV},	{"RDBAL(1)",	E1000_RDBAL(1)},
        {"RDBAH(1)", E1000_RDBAH(1)}, {"RDLEN(1)",	E1000_RDLEN(1)},
        {"RDH(1)",	E1000_RDH(1)},	{"RDT(1)",	E1000_RDT(1)},
        {"RXDCTL(1)", E1000_RXDCTL(1)}, {"RSRPD",	E1000_RSRPD},
        {"RAID",	E1000_RAID},	{"CPUVEC",	E1000_CPUVEC},
        {"TDFH",	E1000_TDFH},	{"TDFT",	E1000_TDFT},
        {"TDFHS",	E1000_TDFHS},	{"TDFTS",	E1000_TDFTS},
        {"TDFPC",	E1000_TDFPC},	{"TDBAL(0)",	E1000_TDBAL(0)},
        {"TDBAH(0)", E1000_TDBAH(0)}, {"TDLEN(0)",	E1000_TDLEN(0)},
        {"TDH(0)",	E1000_TDH(0)},	{"TDT(0)",	E1000_TDT(0)},
        {"TIDV",	E1000_TIDV},	{"TXDCTL(0)",	E1000_TXDCTL(0)},
        {"TADV",	E1000_TADV},	{"TARC(0)",	E1000_TARC(0)},
        {"TDBAL(1)", E1000_TDBAL(1)}, {"TDBAH(1)",	E1000_TDBAH(1)},
        {"TDLEN(1)", E1000_TDLEN(1)}, {"TDH(1)",	E1000_TDH(1)},
        {"TDT(1)",	E1000_TDT(1)},	{"TXDCTL(1)",	E1000_TXDCTL(1)},
        {"TARC(1)",	E1000_TARC(1)},	{"ALGNERRC",	E1000_ALGNERRC},
        {"RXERRC",	E1000_RXERRC},	{"MPC",		E1000_MPC},
        {"SCC",	E1000_SCC},	{"ECOL",	E1000_ECOL},
        {"MCC",	E1000_MCC},	{"LATECOL",	E1000_LATECOL},
        {"COLC",	E1000_COLC},	{"DC",		E1000_DC},
        {"TNCRS",	E1000_TNCRS},	{"SEC",		E1000_SEC},
        {"CEXTERR",	E1000_CEXTERR},	{"RLEC",	E1000_RLEC},
        {"XONRXC",	E1000_XONRXC},	{"XONTXC",	E1000_XONTXC},
        {"XOFFRXC",	E1000_XOFFRXC},	{"XOFFTXC",	E1000_XOFFTXC},
        {"FCRUC",	E1000_FCRUC},	{"PRC64",	E1000_PRC64},
        {"PRC127",	E1000_PRC127},	{"PRC255",	E1000_PRC255},
        {"PRC511",	E1000_PRC511},	{"PRC1023",	E1000_PRC1023},
        {"PRC1522",	E1000_PRC1522},	{"GPRC",	E1000_GPRC},
        {"BPRC",	E1000_BPRC},	{"MPRC",	E1000_MPRC},
        {"GPTC",	E1000_GPTC},	{"GORCL",	E1000_GORCL},
        {"GORCH",	E1000_GORCH},	{"GOTCL",	E1000_GOTCL},
        {"GOTCH",	E1000_GOTCH},	{"RNBC",	E1000_RNBC},
        {"RUC",	E1000_RUC},	{"RFC",		E1000_RFC},
        {"ROC",	E1000_ROC},	{"RJC",		E1000_RJC},
        {"MGTPRC",	E1000_MGTPRC},	{"MGTPDC",	E1000_MGTPDC},
        {"MGTPTC",	E1000_MGTPTC},	{"TORL",	E1000_TORL},
        {"TORH",	E1000_TORH},	{"TOTL",	E1000_TOTL},
        {"TOTH",	E1000_TOTH},	{"TPR",		E1000_TPR},
        {"TPT",	E1000_TPT},	{"PTC64",	E1000_PTC64},
        {"PTC127",	E1000_PTC127},	{"PTC255",	E1000_PTC255},
        {"PTC511",	E1000_PTC511},	{"PTC1023",	E1000_PTC1023},
        {"PTC1522",	E1000_PTC1522},	{"MPTC",	E1000_MPTC},
        {"BPTC",	E1000_BPTC},	{"TSCTC",	E1000_TSCTC},
        {"TSCTFC",	E1000_TSCTFC},	{"IAC",		E1000_IAC},
        {"ICRXPTC",	E1000_ICRXPTC},	{"ICRXATC",	E1000_ICRXATC},
        {"ICTXPTC",	E1000_ICTXPTC},	{"ICTXATC",	E1000_ICTXATC},
        {"ICTXQEC",	E1000_ICTXQEC},	{"ICTXQMTC",	E1000_ICTXQMTC},
        {"ICRXDMTC", E1000_ICRXDMTC}, {"ICRXOC",	E1000_ICRXOC},
        {"RXCSUM",	E1000_RXCSUM},	{"RFCTL",	E1000_RFCTL},
        {"WUC",	E1000_WUC},	{"WUFC",	E1000_WUFC},
        {"WUS",	E1000_WUS},	{"MRQC",	E1000_MRQC},
        {"MANC",	E1000_MANC},	{"IPAV",	E1000_IPAV},
        {"MANC2H",	E1000_MANC2H},	{"RSSIM",	E1000_RSSIM},
        {"RSSIR",	E1000_RSSIR},	{"WUPL",	E1000_WUPL},
        {"GCR",	E1000_GCR},	{"GSCL_1",	E1000_GSCL_1},
        {"GSCL_2",	E1000_GSCL_2},	{"GSCL_3",	E1000_GSCL_3},
        {"GSCL_4",	E1000_GSCL_4},	{"FACTPS",	E1000_FACTPS},
        {"FWSM",	E1000_FWSM},
    };

    e1000g_log(Adapter, CE_CONT, "Begin MAC dump\n");

    for (i = 0; i < NUM_REGS; i++) {
        e1000g_log(Adapter, CE_CONT,
                   "macreg %10s offset: 0x%x   value: 0x%x\n",
                   macreg[i].name, macreg[i].offset,
                   e1000_read_reg(hw, macreg[i].offset));
    }
}
Example #15
/*------------------------------------------------------------------------
 * _82545EM_configure_rx - Configure Receive Unit after Reset
 *------------------------------------------------------------------------
 */
local void _82545EM_configure_rx(
	struct 	ether *ethptr
	)
{
	uint32 rctl, rxcsum;

	rctl = e1000_io_readl(ethptr->iobase, E1000_RCTL);

	/* Disable receiver while configuring. */

	e1000_io_writel(ethptr->iobase, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/* Enable receiver, accept broadcast packets, no loopback, and 	*/
	/* 	free buffer threshold is set to 1/2 RDLEN. 		*/

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN 	  | 
		E1000_RCTL_BAM 	  |
		E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF;

	/* Do not store bad packets, do not pass MAC control frame, 	*/
	/* 	disable long packet receive and CRC strip 		*/
	
	rctl &= ~(E1000_RCTL_SBP   |
		  E1000_RCTL_LPE   |
		  E1000_RCTL_SECRC |
		  E1000_RCTL_PMCF);
	
	/* Setup buffer sizes */

	rctl &= ~(E1000_RCTL_BSEX |
		  E1000_RCTL_SZ_4096);
	rctl |= E1000_RCTL_SZ_2048;

	/* Set the Receive Delay Timer Register, let driver be notified */
	/* 	immediately each time a new packet has been stored in 	*/
	/* 	memory 							*/

	e1000_io_writel(ethptr->iobase, E1000_RDTR, E1000_RDTR_DEFAULT);
	e1000_io_writel(ethptr->iobase, E1000_RADV, E1000_RADV_DEFAULT);

	/* IRQ moderation */

	e1000_io_writel(ethptr->iobase, E1000_ITR, 
			1000000000 / (E1000_ITR_DEFAULT * 256));

	/* Setup the HW Rx Head and Tail Descriptor Pointers, the Base 	*/
	/* 	and Length of the Rx Descriptor Ring 			*/

	e1000_io_writel(ethptr->iobase, E1000_RDBAL(0), 
			(uint32)ethptr->rxRing);
	e1000_io_writel(ethptr->iobase, E1000_RDBAH(0), 0);
	e1000_io_writel(ethptr->iobase, E1000_RDLEN(0), 
			E1000_RDSIZE * ethptr->rxRingSize);
	e1000_io_writel(ethptr->iobase, E1000_RDH(0), 0);
	e1000_io_writel(ethptr->iobase, E1000_RDT(0), 
			ethptr->rxRingSize - E1000_RING_BOUNDARY);

	/* Disable Receive Checksum Offload for IPv4, TCP and UDP. */

	rxcsum = e1000_io_readl(ethptr->iobase, E1000_RXCSUM);
	rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL);
	e1000_io_writel(ethptr->iobase, E1000_RXCSUM, rxcsum);

	/* Enable receiver. */

	e1000_io_writel(ethptr->iobase, E1000_RCTL, rctl);
}
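
The ITR write in this example folds two conversions into one expression: a target interrupt rate becomes an interval in nanoseconds (10^9 / rate), and the ITR register counts that interval in 256 ns units. A sketch of the same arithmetic (itr_from_rate is an illustrative name):

#include <stdint.h>

/* ITR counts in 256 ns increments; convert a desired interrupts/sec
 * rate into the register value, exactly as
 * 1000000000 / (E1000_ITR_DEFAULT * 256) does above in one step. */
static inline uint32_t itr_from_rate(uint32_t irqs_per_sec)
{
	return 1000000000u / (irqs_per_sec * 256u);
}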