Code example #1
File: igb_dna.c  Project: StevenLeRoux/PF_RING
void dna_igb_alloc_rx_buffers(struct igb_ring *rx_ring, struct pfring_hooks *hook) {
  union e1000_adv_rx_desc *rx_desc, *shadow_rx_desc;
  struct igb_rx_buffer *bi;
  u16 i;
  struct igb_adapter    *adapter = netdev_priv(rx_ring->netdev);
  struct e1000_hw       *hw = &adapter->hw;
  u16                   cache_line_size;
  struct igb_ring       *tx_ring = adapter->tx_ring[rx_ring->queue_index];
  mem_ring_info         rx_info = {0};
  mem_ring_info         tx_info = {0};
  int                   num_slots_per_page;

  /* Check whether the memory has already been allocated */
  if(rx_ring->dna.memory_allocated) return;

  /* Nothing to do if no valid netdev is defined */
  if (!netdev_ring(rx_ring))
    return;

  if (!hook) {
    printk("[DNA] WARNING The PF_RING module is NOT loaded.\n");
    printk("[DNA] WARNING Please load it before loading this module.\n");
    return;
  }

  init_waitqueue_head(&rx_ring->dna.rx_tx.rx.packet_waitqueue);

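  /* The PCI Cache Line Size config register is expressed in 32-bit words,
   * so it is scaled to bytes here (via PCI_DEVICE_CACHE_LINE_SIZE_BYTES,
   * presumably 4), with a fallback to 64 bytes when the register reads 0. */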
  cache_line_size = cpu_to_le16(igb_read_pci_cfg_word(hw, IGB_PCI_DEVICE_CACHE_LINE_SIZE));
  cache_line_size &= 0x00FF;
  cache_line_size *= PCI_DEVICE_CACHE_LINE_SIZE_BYTES;
  if(cache_line_size == 0) cache_line_size = 64;

  if(unlikely(enable_debug))
    printk("%s(): pci cache line size %d\n",__FUNCTION__, cache_line_size);

  rx_ring->dna.packet_slot_len  = ALIGN(rx_ring->rx_buffer_len, cache_line_size);
  rx_ring->dna.packet_num_slots = rx_ring->count;

  rx_ring->dna.tot_packet_memory = PAGE_SIZE << DNA_MAX_CHUNK_ORDER;

  num_slots_per_page = rx_ring->dna.tot_packet_memory / rx_ring->dna.packet_slot_len;

  rx_ring->dna.num_memory_pages = (rx_ring->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

  for(i=0; i<rx_ring->dna.num_memory_pages; i++) {
    rx_ring->dna.rx_tx.rx.packet_memory[i] =
      alloc_contiguous_memory(&rx_ring->dna.tot_packet_memory, &rx_ring->dna.mem_order);
  
    if (rx_ring->dna.rx_tx.rx.packet_memory[i] == 0) {
      printk("\n\n%s() ERROR: not enough memory for RX DMA ring!!\n\n\n",
	     __FUNCTION__);
      return;
    }

    /*
    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully allocated RX %u@%u bytes at 0x%08lx [slot_len=%d]\n",
	     __FUNCTION__, rx_ring->dna.tot_packet_memory, i,
	     rx_ring->dna.rx_tx.rx.packet_memory[i], rx_ring->dna.packet_slot_len);
    */
  }

  for(i=0; i < rx_ring->count; i++) {
    u_int offset, page_index;
    char *pkt;
    
    page_index = i / num_slots_per_page;
    offset = (i % num_slots_per_page) * rx_ring->dna.packet_slot_len;
    pkt = (char *)(rx_ring->dna.rx_tx.rx.packet_memory[page_index] + offset);

    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully allocated RX %u@%u bytes at 0x%08lx [slot_len=%d][page_index=%u][offset=%u]\n",
             __FUNCTION__, rx_ring->dna.tot_packet_memory, i,
             rx_ring->dna.rx_tx.rx.packet_memory[page_index], /* not [i]: i indexes slots, not memory pages */
             rx_ring->dna.packet_slot_len, page_index, offset);

    bi      = &rx_ring->rx_buffer_info[i];
    bi->skb = NULL;
    rx_desc = IGB_RX_DESC(rx_ring, i);

    /*
    if(unlikely(enable_debug))
      printk("%s(): [%s@%d] Mapping RX slot %d of %d [pktaddr=%p][rx_desc=%p][offset=%u]\n",
	     __FUNCTION__, 
	     rx_ring->netdev->name, rx_ring->queue_index,
	     i, rx_ring->dna.packet_num_slots,
	     pkt, rx_desc, offset);
    */

    bi->dma = pci_map_single(to_pci_dev(rx_ring->dev), pkt,
			     rx_ring->dna.packet_slot_len,
			     PCI_DMA_FROMDEVICE);

    /* Standard MTU */
    rx_desc->read.hdr_addr = 0;
    rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
    rx_desc->wb.upper.status_error = 0;

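    /* Keep a pristine copy of each descriptor in the second half of the
     * descriptor area (allocated at twice the ring size; see the
     * descr_packet_memory_tot_len = 2 * size fields below), presumably so
     * the original descriptor can be restored after the NIC's write-back
     * overwrites it. */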
    shadow_rx_desc = IGB_RX_DESC(rx_ring, i + rx_ring->count);
    memcpy(shadow_rx_desc, rx_desc, sizeof(union e1000_adv_rx_desc));

    if(unlikely(enable_debug)) {
      print_adv_rx_descr(rx_desc);
      print_adv_rx_descr(shadow_rx_desc);
    }

    igb_release_rx_desc(rx_ring, i);
  } /* for */

  /* Shadow */
  rx_desc = IGB_RX_DESC(rx_ring, 0);

  dna_reset_rx_ring(rx_ring);

  if(unlikely(enable_debug))
    printk("[DNA] next_to_clean=%u/next_to_use=%u [register=%d]\n",
           rx_ring->next_to_clean, rx_ring->next_to_use,
           E1000_READ_REG(hw, E1000_RDT(rx_ring->reg_idx)));

  /* Allocate TX memory */
  
  tx_ring->dna.tot_packet_memory = rx_ring->dna.tot_packet_memory;
  tx_ring->dna.packet_slot_len   = rx_ring->dna.packet_slot_len;
  tx_ring->dna.packet_num_slots  = tx_ring->count;
  tx_ring->dna.mem_order         = rx_ring->dna.mem_order;
  tx_ring->dna.num_memory_pages  = (tx_ring->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

  dna_igb_alloc_tx_buffers(tx_ring, hook);

  rx_info.packet_memory_num_chunks    = rx_ring->dna.num_memory_pages;
  rx_info.packet_memory_chunk_len     = rx_ring->dna.tot_packet_memory;
  rx_info.packet_memory_num_slots     = rx_ring->dna.packet_num_slots;
  rx_info.packet_memory_slot_len      = rx_ring->dna.packet_slot_len;
  rx_info.descr_packet_memory_tot_len = 2 * rx_ring->size;
  
  tx_info.packet_memory_num_chunks    = tx_ring->dna.num_memory_pages;
  tx_info.packet_memory_chunk_len     = tx_ring->dna.tot_packet_memory;
  tx_info.packet_memory_num_slots     = tx_ring->dna.packet_num_slots;
  tx_info.packet_memory_slot_len      = tx_ring->dna.packet_slot_len;
  tx_info.descr_packet_memory_tot_len = 2 * tx_ring->size;

  /* Register with PF_RING */

  hook->ring_dna_device_handler(add_device_mapping,
				dna_v1,
				&rx_info,
				&tx_info,
				rx_ring->dna.rx_tx.rx.packet_memory,
				rx_ring->desc, /* Packet descriptors */
				tx_ring->dna.rx_tx.tx.packet_memory,
				tx_ring->desc, /* Packet descriptors */
				(void*)rx_ring->netdev->mem_start,
				rx_ring->netdev->mem_end - rx_ring->netdev->mem_start,
				rx_ring->queue_index, /* Channel Id */
				rx_ring->netdev,
				rx_ring->dev,
				dna_model(hw),
				rx_ring->netdev->dev_addr,
				&rx_ring->dna.rx_tx.rx.packet_waitqueue,
				&rx_ring->dna.rx_tx.rx.interrupt_received,
				(void*)rx_ring, (void*)tx_ring,
				wait_packet_function_ptr,
				notify_function_ptr);

  if(unlikely(enable_debug))
    printk("[DNA] igb: %s: Enabled DNA on queue %d [RX][size=%u][count=%d] [TX][size=%u][count=%d]\n",
	   rx_ring->netdev->name, rx_ring->queue_index, rx_ring->size, rx_ring->count, tx_ring->size, tx_ring->count);

  rx_ring->dna.memory_allocated = 1;
}
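
The page/slot geometry above is plain ceiling-division arithmetic: each physically contiguous chunk of tot_packet_memory bytes holds num_slots_per_page fixed-size slots, and slot i lives in chunk i / num_slots_per_page at byte offset (i % num_slots_per_page) * packet_slot_len. Below is a minimal stand-alone sketch of the same computation; it is a hypothetical user-space program with illustrative constants, not part of the driver:

#include <stdio.h>

int main(void) {
  /* Illustrative stand-ins for the driver's values */
  unsigned int slot_len        = 2048;      /* ALIGN(rx_buffer_len, cache_line_size) */
  unsigned int chunk_len       = 4096 << 5; /* PAGE_SIZE << DNA_MAX_CHUNK_ORDER */
  unsigned int num_slots       = 512;       /* rx_ring->count */
  unsigned int slots_per_chunk = chunk_len / slot_len;
  /* Ceiling division, as in dna_igb_alloc_rx_buffers() */
  unsigned int num_chunks      = (num_slots + slots_per_chunk - 1) / slots_per_chunk;
  unsigned int i;

  for (i = 0; i < num_slots; i++) {
    unsigned int page_index = i / slots_per_chunk;
    unsigned int offset     = (i % slots_per_chunk) * slot_len;
    if (i < 2 || i == num_slots - 1) /* print a few samples */
      printf("slot %u -> chunk %u, offset %u\n", i, page_index, offset);
  }
  printf("%u chunk(s) of %u bytes cover %u slots\n", num_chunks, chunk_len, num_slots);
  return 0;
}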
Code example #2
File: ixgbe_dna.c  Project: a5216652166/ss
void dna_ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring) {
  union ixgbe_adv_rx_desc *rx_desc, *shadow_rx_desc;
  struct ixgbe_rx_buffer *bi;
  u16 i;
  struct ixgbe_adapter  *adapter = netdev_priv(rx_ring->netdev);
  struct ixgbe_hw       *hw = &adapter->hw;
  u16                   cache_line_size;
  struct ixgbe_ring     *tx_ring = adapter->tx_ring[rx_ring->queue_index];
  struct pfring_hooks   *hook = (struct pfring_hooks*)rx_ring->netdev->pfring_ptr;
  mem_ring_info         rx_info = {0};
  mem_ring_info         tx_info = {0};
  int                   num_slots_per_page;

  /* Check whether the memory has already been allocated */
  if(rx_ring->dna.memory_allocated) return;

  /* Nothing to do if no valid netdev is defined */
  if (!netdev_ring(rx_ring))
    return;

  if (!hook) {
    printk("[DNA] WARNING The PF_RING module is NOT loaded.\n");
    printk("[DNA] WARNING Please load it before loading this module.\n");
    return;
  }

  init_waitqueue_head(&rx_ring->dna.rx_tx.rx.packet_waitqueue);

  cache_line_size = cpu_to_le16(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CACHE_LINE_SIZE));
  cache_line_size &= 0x00FF;
  cache_line_size *= PCI_DEVICE_CACHE_LINE_SIZE_BYTES;
  if(cache_line_size == 0) cache_line_size = 64;

  if(unlikely(enable_debug))
    printk("%s(): pci cache line size %d\n",__FUNCTION__, cache_line_size);

  rx_ring->dna.packet_slot_len  = ALIGN(rx_ring->rx_buf_len, cache_line_size);
  rx_ring->dna.packet_num_slots = rx_ring->count;

  rx_ring->dna.tot_packet_memory = PAGE_SIZE << DNA_MAX_CHUNK_ORDER;

  num_slots_per_page = rx_ring->dna.tot_packet_memory / rx_ring->dna.packet_slot_len;

  rx_ring->dna.num_memory_pages = (rx_ring->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

  /* Packet Split disabled in DNA mode */
  //if (ring_is_ps_enabled(rx_ring)) {
    /* data will be put in this buffer */
    /* The original function allocates PAGE_SIZE/2 for this buffer */
  //  rx_ring->dna.packet_slot_len  += PAGE_SIZE/2;
  //}

  if(unlikely(enable_debug))
    printk("%s(): RX dna.packet_slot_len=%d tot_packet_memory=%d num_memory_pages=%u num_slots_per_page=%d\n",
	   __FUNCTION__, 
	   rx_ring->dna.packet_slot_len,
	   rx_ring->dna.tot_packet_memory,
	   rx_ring->dna.num_memory_pages,
	   num_slots_per_page);

  for(i=0; i<rx_ring->dna.num_memory_pages; i++) {
    rx_ring->dna.rx_tx.rx.packet_memory[i] =
      alloc_contiguous_memory(&rx_ring->dna.tot_packet_memory,
                              &rx_ring->dna.mem_order,
                              rx_ring->q_vector->numa_node);

    if (rx_ring->dna.rx_tx.rx.packet_memory[i] == 0) {
      printk("\n\n%s() ERROR: not enough memory for RX DMA ring!!\n\n\n",
	     __FUNCTION__);
      return;
    }

    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully allocated RX %u@%u bytes at 0x%08lx [slot_len=%d]\n",
	     __FUNCTION__, rx_ring->dna.tot_packet_memory, i,
	     rx_ring->dna.rx_tx.rx.packet_memory[i], rx_ring->dna.packet_slot_len);
  }

  if(unlikely(enable_debug))
    printk("[DNA] %s(): %s@%d ptr=%p memory allocated on node %d\n", __FUNCTION__, 
      rx_ring->netdev->name, rx_ring->queue_index, rx_ring, rx_ring->q_vector->numa_node);

  for(i=0; i < rx_ring->count; i++) {
    u_int offset, page_index;
    char *pkt;

    page_index = i / num_slots_per_page;
    offset = (i % num_slots_per_page) * rx_ring->dna.packet_slot_len;
    pkt = (char *)(rx_ring->dna.rx_tx.rx.packet_memory[page_index] + offset);

    /*
    if(unlikely(enable_debug))
      printk("[DNA] %s(): Successfully remapped RX %u@%u bytes at 0x%08lx [slot_len=%d][page_index=%u][offset=%u]\n",
	     __FUNCTION__, rx_ring->dna.tot_packet_memory, i,
	     rx_ring->dna.rx_tx.rx.packet_memory[i],
	     rx_ring->dna.packet_slot_len, page_index, offset);
    */

    bi      = &rx_ring->rx_buffer_info[i];
    bi->skb = NULL;
    rx_desc = IXGBE_RX_DESC(rx_ring, i);

    if(unlikely(enable_debug))
      printk("%s(): Mapping RX slot %d of %d [pktaddr=%p][rx_desc=%p][offset=%u]\n",
	     __FUNCTION__, i, rx_ring->dna.packet_num_slots,
	     pkt, rx_desc, offset);

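    /* Unlike the igb example above (PCI_DMA_FROMDEVICE), this maps the slot
     * bidirectionally, presumably because in DNA mode user space can also
     * write into the mapped packet buffers. */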
    bi->dma = pci_map_single(to_pci_dev(rx_ring->dev), pkt,
			     rx_ring->dna.packet_slot_len,
			     PCI_DMA_BIDIRECTIONAL /* PCI_DMA_FROMDEVICE */ );

    /* Packet Split disabled in DNA mode */
    //if (!ring_is_ps_enabled(rx_ring)) {
      rx_desc->read.hdr_addr = 0;
      rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
    //} else {
    //  rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
    //  rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + rx_ring->dna.packet_slot_len);
    //}

    rx_desc->wb.upper.status_error = 0;

    shadow_rx_desc = IXGBE_RX_DESC(rx_ring, i + rx_ring->count);
    memcpy(shadow_rx_desc, rx_desc, sizeof(union ixgbe_adv_rx_desc));

    if(unlikely(enable_debug)) {
      print_adv_rx_descr(rx_desc);
      print_adv_rx_descr(shadow_rx_desc);
    }

    ixgbe_release_rx_desc(rx_ring, i);
  } /* for */

  /* Shadow */
  rx_desc = IXGBE_RX_DESC(rx_ring, 0);

  /* Reset the ring indexes:
     rx_ring->next_to_use   = the last slot where incoming packets can be written (tail) */
  ixgbe_release_rx_desc(rx_ring, rx_ring->count-1);
  /* rx_ring->next_to_clean = the slot from which the next received packet will be read (head) */
  rx_ring->next_to_clean = 0;

  if(unlikely(enable_debug))
    printk("[DNA] next_to_clean=%u/next_to_use=%u [register=%d]\n",
	   rx_ring->next_to_clean, rx_ring->next_to_use, IXGBE_READ_REG(hw, IXGBE_RDT(rx_ring->reg_idx)));

  /* Allocate TX memory */
  tx_ring->dna.tot_packet_memory = rx_ring->dna.tot_packet_memory;
  tx_ring->dna.packet_slot_len   = rx_ring->dna.packet_slot_len;
  tx_ring->dna.packet_num_slots  = tx_ring->count;
  tx_ring->dna.mem_order         = rx_ring->dna.mem_order;
  tx_ring->dna.num_memory_pages  = (tx_ring->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

  dna_ixgbe_alloc_tx_buffers(tx_ring, hook);

  rx_info.packet_memory_num_chunks    = rx_ring->dna.num_memory_pages;
  rx_info.packet_memory_chunk_len     = rx_ring->dna.tot_packet_memory;
  rx_info.packet_memory_num_slots     = rx_ring->dna.packet_num_slots;
  rx_info.packet_memory_slot_len      = rx_ring->dna.packet_slot_len;
  rx_info.descr_packet_memory_tot_len = 2 * rx_ring->size;
  
  tx_info.packet_memory_num_chunks    = tx_ring->dna.num_memory_pages;
  tx_info.packet_memory_chunk_len     = tx_ring->dna.tot_packet_memory;
  tx_info.packet_memory_num_slots     = tx_ring->dna.packet_num_slots;
  tx_info.packet_memory_slot_len      = tx_ring->dna.packet_slot_len;
  tx_info.descr_packet_memory_tot_len = 2 * tx_ring->size;

  /* Register with PF_RING */

  hook->ring_dna_device_handler(add_device_mapping,
				dna_v1,
				&rx_info,
				&tx_info,
				rx_ring->dna.rx_tx.rx.packet_memory,
				rx_ring->desc, /* Packet descriptors */
				tx_ring->dna.rx_tx.tx.packet_memory,
				tx_ring->desc, /* Packet descriptors */
				(void*)rx_ring->netdev->mem_start,
				rx_ring->netdev->mem_end - rx_ring->netdev->mem_start,
				rx_ring->queue_index, /* Channel Id */
				rx_ring->netdev,
				rx_ring->dev, /* for DMA mapping */
				dna_model(hw),
				rx_ring->netdev->dev_addr,
				&rx_ring->dna.rx_tx.rx.packet_waitqueue,
				&rx_ring->dna.rx_tx.rx.interrupt_received,
				(void*)rx_ring,
				wait_packet_function_ptr,
				notify_function_ptr);

  rx_ring->dna.memory_allocated = 1;

  if(unlikely(enable_debug))
    printk("[DNA] ixgbe: %s: Enabled DNA on queue %d [RX][size=%u][count=%d] [TX][size=%u][count=%d]\n",
	   rx_ring->netdev->name, rx_ring->queue_index, rx_ring->size, rx_ring->count, tx_ring->size, tx_ring->count);
#if 0
  if(adapter->hw.mac.type != ixgbe_mac_82598EB)
    ixgbe_irq_disable_queues(rx_ring->q_vector->adapter, ((u64)1 << rx_ring->queue_index));
#endif
}
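
Both examples rely on alloc_contiguous_memory() to obtain each physically contiguous chunk (the ixgbe variant additionally passes the queue's NUMA node, and both treat a return value of 0 as failure). PF_RING defines the real helper elsewhere in its tree; the following is only a hypothetical sketch of such a helper, built from stock kernel allocators, to show the expected contract:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Hypothetical sketch, NOT PF_RING's actual implementation: round the
 * requested length up to a power-of-two page order, allocate on the given
 * NUMA node, and report the (possibly larger) usable size back through the
 * in/out parameters. Returns the kernel virtual address, or 0 on failure,
 * matching how the callers above check the result. */
static unsigned long alloc_contiguous_memory(u_int *tot_mem_len,
                                             u_int *mem_order,
                                             int node)
{
  struct page *pg;

  *mem_order   = get_order(*tot_mem_len);
  *tot_mem_len = PAGE_SIZE << *mem_order;

  pg = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, *mem_order);
  if (pg == NULL)
    return 0;

  return (unsigned long)page_address(pg);
}

Allocating whole power-of-two page blocks is why tot_packet_memory and mem_order would be in/out parameters: the caller asks for a size and learns the rounded-up size, plus the order it must later hand to free_pages() when tearing the ring down.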