Example #1
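/* AF_XDP zero-copy transmit from the ixgbe driver: pulls frames that user
 * space has posted on an XSK umem Tx ring and queues them on the XDP Tx
 * descriptor ring, at most @budget frames per call.
 */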
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	u32 len, cmd_type;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}

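		/* grab the next frame user space posted on the XSK Tx ring */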
		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
			break;

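		/* sync the frame buffer for device access before it is DMA'd */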
		dma_sync_single_for_device(xdp_ring->dev, dma, len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = len;
		tx_bi->xdpf = NULL;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);

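		/* advance the producer index, wrapping at the end of the ring */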
		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

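	/* a single tail (doorbell) write after the loop publishes everything
	 * that was queued; xsk_umem_consume_tx_done() then signals the umem
	 * that this batch of Tx entries has been consumed
	 */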
	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}
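For reference, the ring-occupancy check at the top of the loop comes from the
driver's ixgbe.h. To the best of my knowledge it is the following small
inline (a sketch reproduced from memory, not a verified copy):

static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	/* one slot is deliberately left unused so that a full ring and an
	 * empty ring do not look the same
	 */
	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}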
Example #2
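/* PF_RING DNA (ixgbe patch): allocates the TX packet memory for a ring up
 * front and DMA-maps every slot once at setup time, so the transmit fast
 * path never has to allocate or map.
 */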
void dna_ixgbe_alloc_tx_buffers(struct ixgbe_ring *tx_ring, struct pfring_hooks *hook) {
  union ixgbe_adv_tx_desc *tx_desc, *shadow_tx_desc;
  struct ixgbe_tx_buffer *bi;
  u16 i;
  int num_slots_per_page = tx_ring->dna.tot_packet_memory / tx_ring->dna.packet_slot_len;
  // struct ixgbe_adapter 	*adapter = netdev_priv(tx_ring->netdev);

  /* Check if the memory has been already allocated */
  if(tx_ring->dna.memory_allocated) return;

  /* nothing to do or no valid netdev defined */
  if (!netdev_ring(tx_ring))
    return;

  /* We assume that RX and TX are in sync */

  if(unlikely(enable_debug))
    printk("%s(): tx_ring->dna.tot_packet_memory=%d dna.num_memory_pages=%d\n",
	   __FUNCTION__, tx_ring->dna.tot_packet_memory, tx_ring->dna.num_memory_pages);

  for(i=0; i<tx_ring->dna.num_memory_pages; i++) {
    tx_ring->dna.rx_tx.tx.packet_memory[i] =
      alloc_contiguous_memory(&tx_ring->dna.tot_packet_memory,
			      &tx_ring->dna.mem_order,
			      tx_ring->q_vector->numa_node);

    if (tx_ring->dna.rx_tx.tx.packet_memory[i] == 0) {
      printk("\n\n%s() ERROR: not enough memory for TX DMA ring!!\n\n\n",
	     __FUNCTION__);
      return;
    }

    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully allocated TX %u@%u bytes at "
	     "0x%08lx [slot_len=%d]\n",__FUNCTION__,
	     tx_ring->dna.tot_packet_memory, i,
	     tx_ring->dna.rx_tx.tx.packet_memory[i],
	     tx_ring->dna.packet_slot_len);
  }

  if(unlikely(enable_debug))
    printk("[DNA] %s(): %s@%d ptr=%p memory allocated on node %d\n", __FUNCTION__, 
      tx_ring->netdev->name, tx_ring->queue_index, tx_ring, tx_ring->q_vector->numa_node);

  for(i=0; i < tx_ring->count; i++) {
    u_int offset, page_index;
    char *pkt;

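    /* each contiguous page holds num_slots_per_page fixed-size slots:
     * derive which page slot i lives in and its offset inside that page */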
    page_index = i / num_slots_per_page;
    offset = (i % num_slots_per_page) * tx_ring->dna.packet_slot_len;
    pkt = (char *)(tx_ring->dna.rx_tx.tx.packet_memory[page_index] + offset);

    bi      = &tx_ring->tx_buffer_info[i];
    bi->skb = NULL;
    tx_desc = IXGBE_TX_DESC(tx_ring, i);

    if(unlikely(enable_debug))
      printk("%s(): Mapping TX slot %d of %d [pktaddr=%p][tx_desc=%p][offset=%u]\n",
	     __FUNCTION__, i, tx_ring->dna.packet_num_slots,
	     pkt, tx_desc, offset);

    bi->dma = pci_map_single(to_pci_dev(tx_ring->dev), pkt,
			     tx_ring->dna.packet_slot_len,
			     PCI_DMA_BIDIRECTIONAL /* PCI_DMA_TODEVICE */ );
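    /* note: the return value is not checked with pci_dma_mapping_error() */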

    tx_desc->read.buffer_addr = cpu_to_le64(bi->dma);
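    /* keep a shadow copy of the freshly initialized descriptor in the area
     * past the end of the ring (presumably so the initial state can be
     * restored later) */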
    shadow_tx_desc = IXGBE_TX_DESC(tx_ring, i + tx_ring->count);
    memcpy(shadow_tx_desc, tx_desc, sizeof(union ixgbe_adv_tx_desc));
  } /* for */

  tx_ring->dna.memory_allocated = 1;
}
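The page/offset arithmetic in the mapping loop is the core of the slot
layout. As a reading aid, here is the same computation factored into a
hypothetical helper (dna_tx_slot and its exact shape are mine, not part of
the PF_RING sources):

static inline char *dna_tx_slot(struct ixgbe_ring *tx_ring, u16 i)
{
  /* hypothetical helper: mirrors the page/offset math used above */
  int num_slots_per_page =
    tx_ring->dna.tot_packet_memory / tx_ring->dna.packet_slot_len;
  u_int page_index = i / num_slots_per_page;
  u_int offset = (i % num_slots_per_page) * tx_ring->dna.packet_slot_len;

  return (char *)(tx_ring->dna.rx_tx.tx.packet_memory[page_index] + offset);
}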
Example #3
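/* Completion side of the ixgbe AF_XDP zero-copy path: walks descriptors the
 * hardware has written back, returns completed frames to the umem, then
 * calls ixgbe_xmit_zc() to send any frames user space queued meanwhile.
 */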
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	unsigned int total_packets = 0, total_bytes = 0;
	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	bool xmit_done;

	tx_bi = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
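	/* offset the index by -count so the end-of-ring wrap test in the
	 * loop is a cheap check against zero instead of a compare
	 */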
	i -= tx_ring->count;

	do {
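		/* DD is set by the hardware on descriptor write-back; stop at
		 * the first descriptor it has not completed yet
		 */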
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
	return budget > 0 && xmit_done;
}
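All three examples locate descriptors through IXGBE_TX_DESC(). In the driver
headers this is, as far as I recall, a plain array access into the ring's
descriptor memory (a sketch, not a verified copy):

#define IXGBE_TX_DESC(R, i)	\
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))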