Example #1
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}
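For orientation, here is a minimal standalone sketch of the same wrap-around bookkeeping, using a hypothetical toy_ring type instead of the driver's structures and omitting the descriptor prefetch; it only assumes that count holds the ring size.

#include <assert.h>
#include <stdint.h>

struct toy_ring {
	uint16_t count;          /* number of descriptors in the ring */
	uint32_t next_to_clean;  /* index of the next descriptor to examine */
};

/* Advance next_to_clean by one slot, folding back to 0 at the end of the ring. */
static void toy_inc_ntc(struct toy_ring *r)
{
	uint32_t ntc = r->next_to_clean + 1;

	r->next_to_clean = (ntc < r->count) ? ntc : 0;
}

int main(void)
{
	struct toy_ring r = { .count = 4, .next_to_clean = 2 };

	toy_inc_ntc(&r);                 /* 2 -> 3 */
	assert(r.next_to_clean == 3);
	toy_inc_ntc(&r);                 /* 3 -> 0, wrapped at count */
	assert(r.next_to_clean == 0);
	return 0;
}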
Example #2
static int dna_ixgbe_rx_dump(struct ixgbe_ring *rx_ring) {
  int j, found=0;

  for(j=0; j<rx_ring->count; j++) {
    union ixgbe_adv_rx_desc *rx_desc = IXGBE_RX_DESC(rx_ring, j);

    if(rx_desc->wb.upper.status_error) {
      printk("[%d][status=%u]\n", j, le32_to_cpu(rx_desc->wb.upper.status_error));
      // for(i=0; i<16; i++) printf("%02X ", ptr[i+offset] & 0xFF);
      found++;
    }
  }

  return(found);
}
Example #3
static bool dna_ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
				  struct ixgbe_ring *rx_ring, int budget) {
  union ixgbe_adv_rx_desc	*rx_desc, *shadow_rx_desc;
  u32				staterr;
  u16				i, num_laps = 0, last_cleaned_idx;
  struct ixgbe_adapter	        *adapter = q_vector->adapter;
  struct ixgbe_hw		*hw = &adapter->hw;
  unsigned int total_rx_packets = 0;

  last_cleaned_idx  = i = IXGBE_READ_REG(hw, IXGBE_RDT(rx_ring->reg_idx));
  if(++i == rx_ring->count)
    i = 0;

  rx_ring->next_to_clean = i;

  //i = IXGBE_READ_REG(hw, IXGBE_RDT(rx_ring->reg_idx));
  rx_desc = IXGBE_RX_DESC(rx_ring, i);
  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

  if(rx_ring->dna.queue_in_use) {
    /*
      A userland application is using the queue so it's not time to
      mess up with indexes but just to wakeup apps (if waiting)
    */

    if(staterr & IXGBE_RXD_STAT_DD) {
      if(unlikely(enable_debug))
	printk(KERN_INFO "DNA: got a packet [index=%d]!\n", i);

      if(waitqueue_active(&rx_ring->dna.rx_tx.rx.packet_waitqueue)) {
	wake_up_interruptible(&rx_ring->dna.rx_tx.rx.packet_waitqueue);
	rx_ring->dna.rx_tx.rx.interrupt_received = 1;

	if(unlikely(enable_debug))
	  printk("%s(%s): woken up ring=%d, [slot=%d] XXX\n",
		 __FUNCTION__, rx_ring->netdev->name,
		 rx_ring->reg_idx, i);
      }
    }

    // goto dump_stats;
    return(!!budget);
  }

  /* Only the 82598 needs kernel housekeeping here: on the 82598 the drop
     flag does not seem to work, whereas the 82599 can rely on the drop bit
  */
  if(adapter->hw.mac.type != ixgbe_mac_82598EB)
    return(!!budget);

  if( /* staterr || */ enable_debug) {
    if(strcmp(rx_ring->netdev->name, "eth7") == 0)
      printk("[DNA] %s(): %s@%d [used=%d][idx=%d][next_to_use=%u][#unused=%d][staterr=%d][full=%d][pkt_ptr=%llu]\n", __FUNCTION__,
	     rx_ring->netdev->name, rx_ring->queue_index,
	     rx_ring->dna.queue_in_use, i, rx_ring->next_to_use,
	     ixgbe_desc_unused(rx_ring), staterr, dna_ixgbe_rx_dump(rx_ring), rx_desc->read.pkt_addr);
  }

  /*
    This RX queue is not in use

    IMPORTANT
    We need to poll queues not in use as otherwise they will stop the operations
    also on queues where there is an application running that consumes the packets
  */
  while(staterr & IXGBE_RXD_STAT_DD) {
    shadow_rx_desc = IXGBE_RX_DESC(rx_ring, i+rx_ring->count);
    rx_desc->wb.upper.status_error = 0, last_cleaned_idx = i;
    rx_desc->read.hdr_addr = shadow_rx_desc->read.hdr_addr, rx_desc->read.pkt_addr = shadow_rx_desc->read.pkt_addr;

    rmb();

    // REMOVE BELOW
    // ixgbe_release_rx_desc(rx_ring, i); /* Not needed */

    i++, num_laps++, budget--;
    if(i == rx_ring->count)
      i = 0;

    rx_desc = IXGBE_RX_DESC(rx_ring, i);
    prefetch(rx_desc);
    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

    if(budget == 0) break;
  }

  rx_ring->stats.packets += total_rx_packets;
  // rx_ring->stats.bytes += total_rx_bytes;
  q_vector->rx.total_packets += total_rx_packets;
  // q_vector->rx.total_bytes += total_rx_bytes;

  /* Update register */
  rx_ring->next_to_clean = i, IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rx_ring->reg_idx), last_cleaned_idx);

  if(unlikely(enable_debug)) {
    int j=0, full = 0, other = 0, null_dma = 0;
    struct ixgbe_rx_buffer *bi;

    for(j=0; j<rx_ring->count; j++) {
      rx_desc = IXGBE_RX_DESC(rx_ring, j);
      prefetch(rx_desc);
      staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

      bi = &rx_ring->rx_buffer_info[j]; /* scan the whole ring, not just slot i */

      if(staterr & IXGBE_RXD_STAT_DD)
	full++;
      else if(staterr)
	other++;

      if(bi->dma == 0) null_dma++;
    }

    printk("[DNA] %s(): %s@%d [laps=%d][budget=%d][full=%d/other=%d][next_to_clean=%u][next_to_use=%d][#unused=%d][null_dma=%d]\n",
	   __FUNCTION__,
	   rx_ring->netdev->name, rx_ring->queue_index,
	   num_laps, budget, full, other,
	   rx_ring->next_to_clean, rx_ring->next_to_use,
	   ixgbe_desc_unused(rx_ring), null_dma);
  }

  return(!!budget);
}
Example #4
void dna_ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring) {
  union ixgbe_adv_rx_desc *rx_desc, *shadow_rx_desc;
  struct ixgbe_rx_buffer *bi;
  u16 i;
  struct ixgbe_adapter 	*adapter = netdev_priv(rx_ring->netdev);
  struct ixgbe_hw       *hw = &adapter->hw;
  u16	                cache_line_size;
  struct ixgbe_ring     *tx_ring = adapter->tx_ring[rx_ring->queue_index];
  struct pfring_hooks   *hook = (struct pfring_hooks*)rx_ring->netdev->pfring_ptr;
  mem_ring_info         rx_info = {0};
  mem_ring_info         tx_info = {0};
  int                   num_slots_per_page;

  /* Check if the memory has been already allocated */
  if(rx_ring->dna.memory_allocated) return;

  /* nothing to do or no valid netdev defined */
  if (!netdev_ring(rx_ring))
    return;

  if (!hook) {
    printk("[DNA] WARNING The PF_RING module is NOT loaded.\n");
    printk("[DNA] WARNING Please load it, before loading this module\n");
    return;
  }

  init_waitqueue_head(&rx_ring->dna.rx_tx.rx.packet_waitqueue);

  cache_line_size = cpu_to_le16(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CACHE_LINE_SIZE));
  cache_line_size &= 0x00FF;
  cache_line_size *= PCI_DEVICE_CACHE_LINE_SIZE_BYTES;
  if(cache_line_size == 0) cache_line_size = 64;

  if(unlikely(enable_debug))
    printk("%s(): pci cache line size %d\n",__FUNCTION__, cache_line_size);

  rx_ring->dna.packet_slot_len  = ALIGN(rx_ring->rx_buf_len, cache_line_size);
  rx_ring->dna.packet_num_slots = rx_ring->count;

  rx_ring->dna.tot_packet_memory = PAGE_SIZE << DNA_MAX_CHUNK_ORDER;

  num_slots_per_page = rx_ring->dna.tot_packet_memory / rx_ring->dna.packet_slot_len;

  rx_ring->dna.num_memory_pages = (rx_ring->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

  /* Packet Split disabled in DNA mode */
  //if (ring_is_ps_enabled(rx_ring)) {
    /* data will be put in this buffer */
    /* The original function allocates PAGE_SIZE/2 for this buffer */
  //  rx_ring->dna.packet_slot_len  += PAGE_SIZE/2;
  //}

  if(unlikely(enable_debug))
    printk("%s(): RX dna.packet_slot_len=%d tot_packet_memory=%d num_memory_pages=%u num_slots_per_page=%d\n",
	   __FUNCTION__, 
	   rx_ring->dna.packet_slot_len,
	   rx_ring->dna.tot_packet_memory,
	   rx_ring->dna.num_memory_pages,
	   num_slots_per_page);

  for(i=0; i<rx_ring->dna.num_memory_pages; i++) {
    rx_ring->dna.rx_tx.rx.packet_memory[i] =
      alloc_contiguous_memory(&rx_ring->dna.tot_packet_memory, 
      			      &rx_ring->dna.mem_order, 
      			      rx_ring->q_vector->numa_node);

    if (rx_ring->dna.rx_tx.rx.packet_memory[i] == 0) {
      printk("\n\n%s() ERROR: not enough memory for RX DMA ring!!\n\n\n",
	     __FUNCTION__);
      return;
    }

    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully allocated RX %u@%u bytes at 0x%08lx [slot_len=%d]\n",
	     __FUNCTION__, rx_ring->dna.tot_packet_memory, i,
	     rx_ring->dna.rx_tx.rx.packet_memory[i], rx_ring->dna.packet_slot_len);
  }

  if(unlikely(enable_debug))
    printk("[DNA] %s(): %s@%d ptr=%p memory allocated on node %d\n", __FUNCTION__, 
      rx_ring->netdev->name, rx_ring->queue_index, rx_ring, rx_ring->q_vector->numa_node);

  for(i=0; i < rx_ring->count; i++) {
    u_int offset, page_index;
    char *pkt;

    page_index = i / num_slots_per_page;
    offset = (i % num_slots_per_page) * rx_ring->dna.packet_slot_len;
    pkt = (char *)(rx_ring->dna.rx_tx.rx.packet_memory[page_index] + offset);

    /*
    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully remapped RX %u@%u bytes at 0x%08lx [slot_len=%d][page_index=%u][offset=%u]\n",
	     __FUNCTION__, rx_ring->dna.tot_packet_memory, i,
	     rx_ring->dna.rx_tx.rx.packet_memory[i],
	     rx_ring->dna.packet_slot_len, page_index, offset);
    */

    bi      = &rx_ring->rx_buffer_info[i];
    bi->skb = NULL;
    rx_desc = IXGBE_RX_DESC(rx_ring, i);

    if(unlikely(enable_debug))
      printk("%s(): Mapping RX slot %d of %d [pktaddr=%p][rx_desc=%p][offset=%u]\n",
	     __FUNCTION__, i, rx_ring->dna.packet_num_slots,
	     pkt, rx_desc, offset);

    bi->dma = pci_map_single(to_pci_dev(rx_ring->dev), pkt,
			     rx_ring->dna.packet_slot_len,
			     PCI_DMA_BIDIRECTIONAL /* PCI_DMA_FROMDEVICE */ );

    /* Packet Split disabled in DNA mode */
    //if (!ring_is_ps_enabled(rx_ring)) {
      rx_desc->read.hdr_addr = 0;
      rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
    //} else {
    //  rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
    //  rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + rx_ring->dna.packet_slot_len);
    //}

    rx_desc->wb.upper.status_error = 0;

    shadow_rx_desc = IXGBE_RX_DESC(rx_ring, i + rx_ring->count);
    memcpy(shadow_rx_desc, rx_desc, sizeof(union ixgbe_adv_rx_desc));

    if(unlikely(enable_debug)) {
      print_adv_rx_descr(rx_desc);
      print_adv_rx_descr(shadow_rx_desc);
    }

    ixgbe_release_rx_desc(rx_ring, i);
  } /* for */

  /* Shadow */
  rx_desc = IXGBE_RX_DESC(rx_ring, 0);

  /* Resetting index
     rx_ring->next_to_use   = the last slot where the next incoming packets can be copied (tail) */
  ixgbe_release_rx_desc(rx_ring, rx_ring->count-1);
  /* rx_ring->next_to_clean = the slot where the next incoming packet will be read (head) */
  rx_ring->next_to_clean = 0;

  /* Register with PF_RING */

  if(unlikely(enable_debug))
    printk("[DNA] next_to_clean=%u/next_to_use=%u [register=%d]\n",
	   rx_ring->next_to_clean, rx_ring->next_to_use, IXGBE_READ_REG(hw, IXGBE_RDT(rx_ring->reg_idx)));

  /* Allocate TX memory */
  tx_ring->dna.tot_packet_memory = rx_ring->dna.tot_packet_memory;
  tx_ring->dna.packet_slot_len   = rx_ring->dna.packet_slot_len;
  tx_ring->dna.packet_num_slots  = tx_ring->count;
  tx_ring->dna.mem_order         = rx_ring->dna.mem_order;
  tx_ring->dna.num_memory_pages  = (tx_ring->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

  dna_ixgbe_alloc_tx_buffers(tx_ring, hook);

  rx_info.packet_memory_num_chunks    = rx_ring->dna.num_memory_pages;
  rx_info.packet_memory_chunk_len     = rx_ring->dna.tot_packet_memory;
  rx_info.packet_memory_num_slots     = rx_ring->dna.packet_num_slots;
  rx_info.packet_memory_slot_len      = rx_ring->dna.packet_slot_len;
  rx_info.descr_packet_memory_tot_len = 2 * rx_ring->size;
  
  tx_info.packet_memory_num_chunks    = tx_ring->dna.num_memory_pages;
  tx_info.packet_memory_chunk_len     = tx_ring->dna.tot_packet_memory;
  tx_info.packet_memory_num_slots     = tx_ring->dna.packet_num_slots;
  tx_info.packet_memory_slot_len      = tx_ring->dna.packet_slot_len;
  tx_info.descr_packet_memory_tot_len = 2 * tx_ring->size;

  hook->ring_dna_device_handler(add_device_mapping,
				dna_v1,
  				&rx_info,
				&tx_info,
				rx_ring->dna.rx_tx.rx.packet_memory,
				rx_ring->desc, /* Packet descriptors */
				tx_ring->dna.rx_tx.tx.packet_memory,
				tx_ring->desc, /* Packet descriptors */
				(void*)rx_ring->netdev->mem_start,
				rx_ring->netdev->mem_end - rx_ring->netdev->mem_start,
				rx_ring->queue_index, /* Channel Id */
				rx_ring->netdev,
				rx_ring->dev, /* for DMA mapping */
				dna_model(hw),
				rx_ring->netdev->dev_addr,
				&rx_ring->dna.rx_tx.rx.packet_waitqueue,
				&rx_ring->dna.rx_tx.rx.interrupt_received,
				(void*)rx_ring,
				wait_packet_function_ptr,
				notify_function_ptr);

  rx_ring->dna.memory_allocated = 1;

  if(unlikely(enable_debug))
    printk("[DNA] ixgbe: %s: Enabled DNA on queue %d [RX][size=%u][count=%d] [TX][size=%u][count=%d]\n",
	   rx_ring->netdev->name, rx_ring->queue_index, rx_ring->size, rx_ring->count, tx_ring->size, tx_ring->count);
#if 0
  if(adapter->hw.mac.type != ixgbe_mac_82598EB)
    ixgbe_irq_disable_queues(rx_ring->q_vector->adapter, ((u64)1 << rx_ring->queue_index));
#endif
}
Example #5
int wait_packet_function_ptr(void *data, int mode)
{
  struct ixgbe_ring		*rx_ring = (struct ixgbe_ring*)data;
  struct ixgbe_adapter	*adapter = netdev_priv(rx_ring->netdev);
  struct ixgbe_hw		*hw = &adapter->hw;
  struct ixgbe_q_vector	*q_vector = rx_ring->q_vector;

  if(unlikely(enable_debug))
    printk("%s(): enter [mode=%d/%s][queueId=%d][next_to_clean=%u][next_to_use=%d]\n",
	   __FUNCTION__, mode, mode == 1 ? "enable int" : "disable int",
	   rx_ring->queue_index, rx_ring->next_to_clean, rx_ring->next_to_use);

  if(!rx_ring->dna.memory_allocated) return(0);

  if(mode == 1 /* Enable interrupt */) {
    union ixgbe_adv_rx_desc *rx_desc;
    u32	staterr;
    u8	reg_idx = rx_ring->reg_idx;
    u16	i = IXGBE_READ_REG(hw, IXGBE_RDT(reg_idx));

    /* Very important: re-read the value from the register, which is updated
     * from userland. Here i is the last index that was read (zero-copy implementation) */
    if(++i == rx_ring->count)
      i = 0;
    /* Here i is the next I have to read */

    rx_ring->next_to_clean = i;

    rx_desc = IXGBE_RX_DESC(rx_ring, i);
    prefetch(rx_desc);
    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

    if(unlikely(enable_debug)) {
      printk("%s(): Check if a packet is arrived [idx=%d][staterr=%d][len=%d]\n",
	     __FUNCTION__, i, staterr, rx_desc->wb.upper.length);

      print_adv_rx_descr(rx_desc);
    }

    if(!(staterr & IXGBE_RXD_STAT_DD)) {
      rx_ring->dna.rx_tx.rx.interrupt_received = 0;

      if(!rx_ring->dna.rx_tx.rx.interrupt_enabled) {
	if(adapter->hw.mac.type != ixgbe_mac_82598EB)
	  ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));

	if(unlikely(enable_debug)) printk("%s(): Enabled interrupts, queue = %d\n", __FUNCTION__, q_vector->v_idx);
	rx_ring->dna.rx_tx.rx.interrupt_enabled = 1;

	if(unlikely(enable_debug))
	  printk("%s(): Packet not arrived yet: enabling "
		 "interrupts, queue=%d, i=%d\n",
		 __FUNCTION__,q_vector->v_idx, i);
      }

      /* Refresh the value */
      staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    } else {
      rx_ring->dna.rx_tx.rx.interrupt_received = 1;
    }

    if(unlikely(enable_debug))
      printk("%s(): Packet received: %d\n", __FUNCTION__, staterr & IXGBE_RXD_STAT_DD);

    return(staterr & IXGBE_RXD_STAT_DD);
  } else {
    /* Disable interrupts */

    if(adapter->hw.mac.type != ixgbe_mac_82598EB)
      ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));

    rx_ring->dna.rx_tx.rx.interrupt_enabled = 0;

    if(unlikely(enable_debug))
      printk("%s(): Disabled interrupts, queue = %d\n", __FUNCTION__, q_vector->v_idx);

    return(0);
  }
}
Example #6
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
								 cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->skb = ERR_PTR(-EINVAL);
			continue;
		}

		if (unlikely(bi->skb)) {
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
				bi->skb = NULL;
			} else {
				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			}
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : (int)total_rx_packets;
}
Example #7
static __always_inline bool
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
			    bool alloc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi))
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	bool ok = true;

	/* nothing to do */
	if (!cleaned_count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			break;
		}

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}
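The i -= rx_ring->count before the loop and the matching i += rx_ring->count after it keep a biased index, so the wrap test inside the loop is just a check for zero after the unsigned increment. Below is a minimal standalone sketch of that index arithmetic alone (plain userspace C, made-up ring size and starting slot, none of the descriptor work):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t count = 512;  /* ring size */
	uint16_t next_to_use = 509;  /* slot where refilling starts */
	uint16_t refill = 8;         /* number of descriptors to refill */
	uint16_t i = next_to_use;

	i -= count;                  /* bias: i hits 0 exactly when the real index reaches count */
	do {
		/* ... the real loop fills the descriptor at index (u16)(i + count) here ... */
		i++;
		if (!i)              /* real index reached count: wrap to the start of the ring */
			i -= count;
	} while (--refill);
	i += count;                  /* remove the bias to recover the real ring index */

	printf("new next_to_use = %u\n", (unsigned)i);  /* (509 + 8) % 512 == 5 */
	assert(i == 5);
	return 0;
}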