Example #1
0
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	u32 len, cmd_type;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}

		/* pull the next frame (DMA address + length) queued by the
		 * application on the AF_XDP umem TX ring
		 */
		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
			break;

		/* make the frame contents visible to the device */
		dma_sync_single_for_device(xdp_ring->dev, dma, len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = len;
		tx_bi->xdpf = NULL;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		/* advance the producer index, wrapping at the end of the ring */
		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		/* one tail write per batch, then tell the AF_XDP socket that
		 * the TX descriptors have been consumed
		 */
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}
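
The function above batches its work: it consumes at most `budget` frames from the AF_XDP umem, advances a producer index that wraps at the end of the ring, and touches the hardware tail register only once after the loop. The stand-alone sketch below models just that pattern in plain user-space C; `fake_ring`, `fake_consume_tx` and `fake_xmit` are invented names used for illustration and are not part of the ixgbe driver or the XDP API.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct fake_ring {
	unsigned int count;		/* number of descriptors in the ring */
	unsigned int next_to_use;	/* producer index, wraps at count */
	unsigned long tail_writes;	/* how often we "kicked" the NIC */
};

/* stand-in for xsk_umem_consume_tx(): pretend five frames are queued */
static bool fake_consume_tx(unsigned int *len)
{
	static int pending = 5;

	if (pending <= 0)
		return false;
	pending--;
	*len = 64;
	return true;
}

static bool fake_xmit(struct fake_ring *ring, unsigned int budget)
{
	unsigned int len;
	bool posted = false;

	while (budget-- > 0) {
		if (!fake_consume_tx(&len))
			break;

		printf("descriptor %u: len=%u\n", ring->next_to_use, len);
		posted = true;

		/* advance the producer index, wrapping at the ring size */
		if (++ring->next_to_use == ring->count)
			ring->next_to_use = 0;
	}

	/* a single tail write per batch keeps MMIO traffic low */
	if (posted)
		ring->tail_writes++;

	return !!budget;
}

int main(void)
{
	struct fake_ring ring = { .count = RING_SIZE };

	fake_xmit(&ring, 16);
	printf("next_to_use=%u tail_writes=%lu\n",
	       ring.next_to_use, ring.tail_writes);
	return 0;
}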
Example #2
0
static bool dna_ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
				  struct ixgbe_ring *rx_ring, int budget) {
  union ixgbe_adv_rx_desc	*rx_desc, *shadow_rx_desc;
  u32				staterr;
  u16				i, num_laps = 0, last_cleaned_idx;
  struct ixgbe_adapter	        *adapter = q_vector->adapter;
  struct ixgbe_hw		*hw = &adapter->hw;
  unsigned int total_rx_packets = 0;

  last_cleaned_idx  = i = IXGBE_READ_REG(hw, IXGBE_RDT(rx_ring->reg_idx));
  if(++i == rx_ring->count)
    i = 0;

  rx_ring->next_to_clean = i;

  //i = IXGBE_READ_REG(hw, IXGBE_RDT(rx_ring->reg_idx));
  rx_desc = IXGBE_RX_DESC(rx_ring, i);
  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

  if(rx_ring->dna.queue_in_use) {
    /*
      A userland application is using this queue, so do not touch the
      ring indexes here; just wake up the application if it is waiting.
    */

    if(staterr & IXGBE_RXD_STAT_DD) {
      if(unlikely(enable_debug))
	printk(KERN_INFO "DNA: got a packet [index=%d]!\n", i);

      if(waitqueue_active(&rx_ring->dna.rx_tx.rx.packet_waitqueue)) {
	wake_up_interruptible(&rx_ring->dna.rx_tx.rx.packet_waitqueue);
	rx_ring->dna.rx_tx.rx.interrupt_received = 1;

	if(unlikely(enable_debug))
	  printk("%s(%s): woken up ring=%d, [slot=%d] XXX\n",
		 __FUNCTION__, rx_ring->netdev->name,
		 rx_ring->reg_idx, i);
      }
    }

    // goto dump_stats;
    return(!!budget);
  }

  /* Only the 82598 needs kernel housekeeping here (the 82599 does not,
     thanks to the drop bit), as the drop flag does not seem to work.
  */
  if(adapter->hw.mac.type != ixgbe_mac_82598EB)
    return(!!budget);

  if( /* staterr || */ enable_debug) {
    if(strcmp(rx_ring->netdev->name, "eth7") == 0)
      printk("[DNA] %s(): %s@%d [used=%d][idx=%d][next_to_use=%u][#unused=%d][staterr=%d][full=%d][pkt_ptr=%llu]\n", __FUNCTION__,
	     rx_ring->netdev->name, rx_ring->queue_index,
	     rx_ring->dna.queue_in_use, i, rx_ring->next_to_use,
	     ixgbe_desc_unused(rx_ring), staterr, dna_ixgbe_rx_dump(rx_ring), rx_desc->read.pkt_addr);
  }

  /*
    This RX queue is not in use.

    IMPORTANT
    We still need to poll queues that are not in use, as otherwise they
    would stall operation on the queues where an application is actively
    consuming packets.
  */
  while(staterr & IXGBE_RXD_STAT_DD) {
    shadow_rx_desc = IXGBE_RX_DESC(rx_ring, i + rx_ring->count);
    rx_desc->wb.upper.status_error = 0;
    last_cleaned_idx = i;
    rx_desc->read.hdr_addr = shadow_rx_desc->read.hdr_addr;
    rx_desc->read.pkt_addr = shadow_rx_desc->read.pkt_addr;

    rmb();

    // REMOVE BELOW
    // ixgbe_release_rx_desc(rx_ring, i); /* Not needed */

    i++;
    num_laps++;
    budget--;
    if(i == rx_ring->count)
      i = 0;

    rx_desc = IXGBE_RX_DESC(rx_ring, i);
    prefetch(rx_desc);
    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

    if(budget == 0) break;
  }

  rx_ring->stats.packets += total_rx_packets;
  // rx_ring->stats.bytes += total_rx_bytes;
  q_vector->rx.total_packets += total_rx_packets;
  // q_vector->rx.total_bytes += total_rx_bytes;

  /* Update register */
  rx_ring->next_to_clean = i;
  IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rx_ring->reg_idx), last_cleaned_idx);

  if(unlikely(enable_debug)) {
    int j=0, full = 0, other = 0, null_dma = 0;
    struct ixgbe_rx_buffer *bi;

    for(j=0; j<rx_ring->count; j++) {
      rx_desc = IXGBE_RX_DESC(rx_ring, j);
      prefetch(rx_desc);
      staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

      bi = &rx_ring->rx_buffer_info[j];

      if(staterr & IXGBE_RXD_STAT_DD)
	full++;
      else if(staterr)
	other++;

      if(bi->dma == 0) null_dma++;
    }

    printk("[DNA] %s(): %s@%d [laps=%d][budget=%d][full=%d/other=%d][next_to_clean=%u][next_to_use=%d][#unused=%d][null_dma=%d]\n",
	   __FUNCTION__,
	   rx_ring->netdev->name, rx_ring->queue_index,
	   num_laps, budget, full, other,
	   rx_ring->next_to_clean, rx_ring->next_to_use,
	   ixgbe_desc_unused(rx_ring), null_dma);
  }

  return(!!budget);
}
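
The housekeeping loop above is a classic "descriptor done" scan: walk the RX ring while the DD status bit is set, clear each descriptor, remember the last index handed back to the NIC, and stop once the budget is exhausted. The stand-alone sketch below reproduces only that index arithmetic in plain C; `fake_rx_desc` and `fake_clean` are invented names and do not exist in the driver.

#include <stdio.h>

#define RING_SIZE 8
#define STAT_DD   0x01		/* "descriptor done", as in IXGBE_RXD_STAT_DD */

struct fake_rx_desc {
	unsigned int status;
};

/* Clean descriptors starting at `start` while the DD bit is set,
 * honouring `budget` and wrapping the index at RING_SIZE.
 */
static unsigned int fake_clean(struct fake_rx_desc *ring, unsigned int start,
			       int budget, unsigned int *last_cleaned)
{
	unsigned int i = start, laps = 0;

	while (ring[i].status & STAT_DD) {
		ring[i].status = 0;	/* hand the descriptor back */
		*last_cleaned = i;

		laps++;
		budget--;
		if (++i == RING_SIZE)
			i = 0;

		if (budget == 0)
			break;
	}

	return laps;
}

int main(void)
{
	struct fake_rx_desc ring[RING_SIZE] = {
		[2] = { STAT_DD }, [3] = { STAT_DD }, [4] = { STAT_DD },
	};
	unsigned int last = 0;
	unsigned int laps = fake_clean(ring, 2, 64, &last);

	printf("cleaned %u descriptors, last index %u\n", laps, last);
	return 0;
}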
Example #3
0
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
								 cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

		/* Frames spanning multiple buffers are not supported on the
		 * zero-copy path: recycle this buffer and flag the next one
		 * so the rest of the frame is dropped as well.
		 */
		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			next_bi =
			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->skb = ERR_PTR(-EINVAL);
			continue;
		}

		/* buffer was flagged while dropping a multi-buffer frame */
		if (unlikely(bi->skb)) {
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* build the xdp_buff around this umem frame */
		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
				bi->skb = NULL;
			} else {
				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			}
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : (int)total_rx_packets;
}
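
Two conventions in the function above are worth isolating: RX buffers are handed back to hardware in batches of IXGBE_RX_BUFFER_WRITE rather than one at a time, and the routine returns the full `budget` when it hit an allocation failure (so the NAPI caller polls again) or the number of packets actually processed otherwise. The sketch below models only that bookkeeping in plain C; `fake_alloc_rx_buffers` and `fake_poll` are invented names, not driver functions.

#include <stdbool.h>
#include <stdio.h>

#define RX_BUFFER_WRITE 16	/* refill threshold, as in IXGBE_RX_BUFFER_WRITE */

/* stand-in allocator: pretend the refill always succeeds */
static bool fake_alloc_rx_buffers(unsigned int n)
{
	printf("refilling %u buffers\n", n);
	return true;
}

/* Process up to `budget` packets, refilling buffers in batches and using
 * the "return budget on failure, packet count otherwise" convention.
 */
static int fake_poll(int budget, unsigned int available_packets)
{
	unsigned int total_rx_packets = 0, cleaned_count = 0;
	bool failure = false;

	while (total_rx_packets < (unsigned int)budget && available_packets) {
		/* return buffers to hardware, one at a time is too slow */
		if (cleaned_count >= RX_BUFFER_WRITE) {
			failure = failure ||
				  !fake_alloc_rx_buffers(cleaned_count);
			cleaned_count = 0;
		}

		available_packets--;
		cleaned_count++;
		total_rx_packets++;
	}

	return failure ? budget : (int)total_rx_packets;
}

int main(void)
{
	printf("poll returned %d\n", fake_poll(64, 40));
	return 0;
}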