Code Example #1
File: e1000e_dna.c Project: a5216652166/ss
void alloc_dna_memory(struct e1000_adapter *adapter) {
  struct net_device *netdev = adapter->netdev;
  struct pci_dev *pdev = adapter->pdev;
  struct e1000_ring *rx_ring = adapter->rx_ring;
  struct e1000_ring *tx_ring = adapter->tx_ring;
  struct e1000_tx_desc *tx_desc, *shadow_tx_desc;
  struct pfring_hooks *hook = (struct pfring_hooks*)netdev->pfring_ptr;
  union e1000_rx_desc_extended *rx_desc, *shadow_rx_desc;
  struct e1000_buffer *buffer_info;
  u16 cache_line_size;
  struct sk_buff *skb;
  unsigned int i;
  int cleaned_count = rx_ring->count; /* Allocate all slots in one shot */
  unsigned int bufsz = adapter->rx_buffer_len;
  mem_ring_info         rx_info = {0}; 
  mem_ring_info         tx_info = {0};
  int                   num_slots_per_page;

  /* Buffers are allocated all in one shot so we'll pass here once */
#if 0
  printk("[DNA] e1000_alloc_rx_buffers(cleaned_count=%d)[%s][slot len=%u/%u]\n",
	 cleaned_count, adapter->netdev->name,
	 bufsz, adapter->max_hw_frame_size);
#endif

  if(hook && (hook->magic == PF_RING)) {
    if(adapter->dna.rx_packet_memory[0] == 0) {
      pci_read_config_word(adapter->pdev,
			   0x0C /* Conf. Space Cache Line Size offset */,
			   &cache_line_size);
      cache_line_size &= 0xff; /* low byte is the Cache Line Size register; the high byte is the latency timer */
      cache_line_size *= 4;    /* the register counts 32-bit dwords, convert to bytes */

      if(cache_line_size == 0) cache_line_size = 64;

      if (0)
	printk("[DNA] Cache line size is %u bytes\n", cache_line_size);

      adapter->dna.packet_slot_len  = ALIGN(bufsz, cache_line_size);
      adapter->dna.packet_num_slots = cleaned_count;

      adapter->dna.tot_packet_memory = PAGE_SIZE << DNA_MAX_CHUNK_ORDER;

      num_slots_per_page = adapter->dna.tot_packet_memory / adapter->dna.packet_slot_len;

      adapter->dna.num_memory_pages = (adapter->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

      if(0)
	printk("[DNA] Allocating memory [%u slots][%u memory pages][tot_packet_memory %u bytes]\n",
	       adapter->dna.packet_num_slots, adapter->dna.num_memory_pages, adapter->dna.tot_packet_memory);

      for(i=0; i<adapter->dna.num_memory_pages; i++) {
	adapter->dna.rx_packet_memory[i] =
	  alloc_contiguous_memory(&adapter->dna.tot_packet_memory, &adapter->dna.mem_order);

	if(adapter->dna.rx_packet_memory[i] != 0) {
	  if(0)
	    printk("[DNA]  Successfully allocated %lu bytes at "
		   "0x%08lx [slot_len=%d]\n",
		   (unsigned long) adapter->dna.tot_packet_memory,
		   (unsigned long) adapter->dna.rx_packet_memory[i],
		   adapter->dna.packet_slot_len);
	} else {
	  printk("[DNA]  ERROR: not enough memory for DMA ring\n");
	  return;
	}
      }

      for(i=0; i<cleaned_count; i++) {
	u_int page_index, offset;

	page_index = i / num_slots_per_page;
	offset = (i % num_slots_per_page) * adapter->dna.packet_slot_len;
	/* Not a real sk_buff: each "skb" is just the raw address of a packet slot inside the preallocated DNA memory */
	skb = (struct sk_buff *)(adapter->dna.rx_packet_memory[page_index] + offset);

	if(0)
	  printk("[DNA] Allocating slot %d of %d [addr=%p][page_index=%u][offset=%u]\n",
		 i, adapter->dna.packet_num_slots, skb, page_index, offset);

	buffer_info = &rx_ring->buffer_info[i];
	buffer_info->skb = skb;
	buffer_info->length = adapter->rx_buffer_len;
	buffer_info->dma = pci_map_single(pdev, skb,
					  buffer_info->length,
					  PCI_DMA_FROMDEVICE);

#if 0
	printk("[DNA] Mapping buffer %d [ptr=%p][len=%d]\n",
	       i, (void*)buffer_info->dma, adapter->dna.packet_slot_len);
#endif

	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

	shadow_rx_desc = E1000_RX_DESC_EXT(*rx_ring, i + rx_ring->count);
	memcpy(shadow_rx_desc, rx_desc, sizeof(union e1000_rx_desc_extended));
      }

      wmb();

      /* The statement below syncs the value of tail (next to read) to 
       * count-1 instead of 0 for zero-copy (one slot back) */
      E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rx_ring->count-1);

      e1000_irq_disable(adapter);
      //e1000_irq_enable(adapter);

      /* TX */

      if(adapter->dna.tx_packet_memory[0] == 0) {

	for(i=0; i<adapter->dna.num_memory_pages; i++) {
	  adapter->dna.tx_packet_memory[i] =
	    alloc_contiguous_memory(&adapter->dna.tot_packet_memory, &adapter->dna.mem_order);

	  if(adapter->dna.tx_packet_memory[i] != 0) {
	    if(0)
	      printk("[DNA] [TX] Successfully allocated %lu bytes at "
		     "0x%08lx [slot_len=%d]\n",
		     (unsigned long) adapter->dna.tot_packet_memory,
		     (unsigned long) adapter->dna.tx_packet_memory[i],
		     adapter->dna.packet_slot_len);
	  } else {
	    printk("[DNA]  ERROR: not enough memory for DMA ring\n");
	    return;
	  }
	}

	for(i=0; i<cleaned_count; i++) {
	  u_int page_index, offset;

	  page_index = i / num_slots_per_page;
	  offset = (i % num_slots_per_page) * adapter->dna.packet_slot_len;
	  skb = (struct sk_buff *)(adapter->dna.tx_packet_memory[page_index] + offset);

	  if(0)
	    printk("[DNA] [TX] Allocating slot %d of %d [addr=%p][page_index=%u][offset=%u]\n",
		   i, adapter->dna.packet_num_slots, skb, page_index, offset);

	  buffer_info = &tx_ring->buffer_info[i];
	  buffer_info->skb = skb;
	  buffer_info->length = adapter->rx_buffer_len;
	  buffer_info->dma = pci_map_single(pdev, skb,
					    buffer_info->length,
					    PCI_DMA_TODEVICE);

#if 0
	  printk("[DNA] Mapping buffer %d [ptr=%p][len=%d]\n",
		 i, (void*)buffer_info->dma, adapter->dna.packet_slot_len);
#endif

	  tx_desc = E1000_TX_DESC(*tx_ring, i);
	  tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

	  /* Note that shadows are useless for e1000e with standard DNA, but used by libzero */
	  shadow_tx_desc = E1000_TX_DESC(*tx_ring, i + tx_ring->count); 
	  memcpy(shadow_tx_desc, tx_desc, sizeof(struct e1000_tx_desc));
	}
      }

      rx_info.packet_memory_num_chunks    = adapter->dna.num_memory_pages;
      rx_info.packet_memory_chunk_len     = adapter->dna.tot_packet_memory;
      rx_info.packet_memory_num_slots     = adapter->dna.packet_num_slots;
      rx_info.packet_memory_slot_len      = adapter->dna.packet_slot_len;
      rx_info.descr_packet_memory_tot_len = 2 * rx_ring->size;
  
      tx_info.packet_memory_num_chunks    = adapter->dna.num_memory_pages;
      tx_info.packet_memory_chunk_len     = adapter->dna.tot_packet_memory;
      tx_info.packet_memory_num_slots     = adapter->dna.packet_num_slots;
      tx_info.packet_memory_slot_len      = adapter->dna.packet_slot_len;
      tx_info.descr_packet_memory_tot_len = 2 * tx_ring->size;

      /* Register with PF_RING */
      hook->ring_dna_device_handler(add_device_mapping,
				    dna_v1,
				    &rx_info,
				    &tx_info,
				    adapter->dna.rx_packet_memory,
				    rx_ring->desc,
				    adapter->dna.tx_packet_memory,
				    tx_ring->desc, /* Packet descriptors */
				    (void*)netdev->mem_start,
				    netdev->mem_end-netdev->mem_start,
				    0, /* Channel Id */
				    netdev,
				    &pdev->dev,
				    intel_e1000e,
				    adapter->netdev->dev_addr,
				    &adapter->dna.packet_waitqueue,
				    &adapter->dna.interrupt_received,
				    (void*)adapter,
				    wait_packet_function_ptr,
				    notify_function_ptr);

      if(1) printk("[DNA] Enabled DNA on %s (rx len=%u, tx len=%u)\n", 
                   adapter->netdev->name, rx_ring->size, tx_ring->size);
    } else {
      printk("WARNING e1000_alloc_rx_buffers(cleaned_count=%d)"
	     "[%s][%lu] already allocated\n",
	     cleaned_count, adapter->netdev->name,
	     adapter->dna.rx_packet_memory[0]);

    }
  }
}
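
The layout logic above packs fixed-size, cache-line-aligned slots into large physically contiguous chunks and then addresses slot i by a (page_index, offset) pair. Below is a standalone sketch of that arithmetic only; the concrete values are hypothetical stand-ins for rx_buffer_len, the cache line size, rx_ring->count, and PAGE_SIZE << DNA_MAX_CHUNK_ORDER.

#include <stdio.h>

/* Same idea as the kernel's ALIGN(): round x up to a multiple of a (a is a power of two) */
#define SLOT_ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int bufsz           = 1522;        /* stand-in for adapter->rx_buffer_len */
	unsigned int cache_line_size = 64;
	unsigned int slot_len        = SLOT_ALIGN(bufsz, cache_line_size);
	unsigned int chunk_len       = 4096 << 5;   /* stand-in for PAGE_SIZE << DNA_MAX_CHUNK_ORDER */
	unsigned int slots_per_page  = chunk_len / slot_len;
	unsigned int num_slots       = 256;         /* stand-in for rx_ring->count */
	unsigned int num_pages       = (num_slots + slots_per_page - 1) / slots_per_page;
	unsigned int i;

	printf("slot_len=%u slots_per_page=%u num_pages=%u\n",
	       slot_len, slots_per_page, num_pages);

	/* Mirror the page_index/offset computation from the allocation loops above */
	for (i = 0; i < num_slots; i += 100) {
		unsigned int page_index = i / slots_per_page;
		unsigned int offset     = (i % slots_per_page) * slot_len;
		printf("slot %3u -> page %u, offset %u\n", i, page_index, offset);
	}
	return 0;
}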
Code Example #2
static int
e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	uint32_t rctl;
	int size, i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	txdr->count = 80;

	size = txdr->count * sizeof(struct e1000_buffer);
	if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
		ret_val = 1;
		goto err_nomem;
	}
	memset(txdr->buffer_info, 0, size);

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	E1000_ROUNDUP(txdr->size, 4096);
	if(!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
		ret_val = 2;
		goto err_nomem;
	}
	memset(txdr->desc, 0, txdr->size);
	txdr->next_to_use = txdr->next_to_clean = 0;

	E1000_WRITE_REG(&adapter->hw, TDBAL,
			((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
	E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			txdr->count * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);
	E1000_WRITE_REG(&adapter->hw, TCTL,
			E1000_TCTL_PSP | E1000_TCTL_EN |
			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
			E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	for(i = 0; i < txdr->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
		struct sk_buff *skb;
		unsigned int skb_size = 1024;

		if(!(skb = alloc_skb(skb_size, GFP_KERNEL))) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, skb_size);
		txdr->buffer_info[i].skb = skb;
		txdr->buffer_info[i].length = skb->len;
		txdr->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RPS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	rxdr->count = 80;

	size = rxdr->count * sizeof(struct e1000_buffer);
	if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
		ret_val = 4;
		goto err_nomem;
	}
	memset(rxdr->buffer_info, 0, size);

	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
	if(!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
		ret_val = 5;
		goto err_nomem;
	}
	memset(rxdr->desc, 0, rxdr->size);
	rxdr->next_to_use = rxdr->next_to_clean = 0;

	rctl = E1000_READ_REG(&adapter->hw, RCTL);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
	E1000_WRITE_REG(&adapter->hw, RDBAL,
			((uint64_t) rxdr->dma & 0xFFFFFFFF));
	E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
	E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
	E1000_WRITE_REG(&adapter->hw, RDH, 0);
	E1000_WRITE_REG(&adapter->hw, RDT, 0);
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);

	for(i = 0; i < rxdr->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
		struct sk_buff *skb;

		if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, 
				GFP_KERNEL))) {
			ret_val = 6;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		memset(skb->data, 0x00, E1000_RXBUFFER_2048); /* zero the whole buffer; skb->len is still 0 at this point */
		rxdr->buffer_info[i].skb = skb;
		rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
		rxdr->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
				       PCI_DMA_FROMDEVICE);
		rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
	}

	return 0;

err_nomem:
	e1000_free_desc_rings(adapter);
	return ret_val;
}
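
Both setup paths unwind through e1000_free_desc_rings() on error, but that function is not part of this listing. Here is a minimal sketch of the cleanup it has to perform, assuming the same test_tx_ring/test_rx_ring fields used above; it illustrates the unwind order and is not the driver's actual implementation.

/* Sketch only: release everything e1000_setup_desc_rings() may have built,
 * in reverse order, tolerating the partially-initialized state reached when
 * the setup path jumps to err_nomem. */
static void e1000_free_desc_rings_sketch(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	/* Unmap and free per-slot Tx buffers (buffer_info was memset to 0,
	 * so unfilled slots are safely skipped) */
	if (txdr->desc && txdr->buffer_info) {
		for (i = 0; i < txdr->count; i++) {
			if (txdr->buffer_info[i].dma)
				pci_unmap_single(pdev, txdr->buffer_info[i].dma,
						 txdr->buffer_info[i].length,
						 PCI_DMA_TODEVICE);
			if (txdr->buffer_info[i].skb)
				dev_kfree_skb(txdr->buffer_info[i].skb);
		}
	}

	/* Same for the Rx side */
	if (rxdr->desc && rxdr->buffer_info) {
		for (i = 0; i < rxdr->count; i++) {
			if (rxdr->buffer_info[i].dma)
				pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
						 rxdr->buffer_info[i].length,
						 PCI_DMA_FROMDEVICE);
			if (rxdr->buffer_info[i].skb)
				dev_kfree_skb(rxdr->buffer_info[i].skb);
		}
	}

	/* Free the descriptor rings, then the bookkeeping arrays
	 * (kfree(NULL) is a no-op, so early failures are fine) */
	if (txdr->desc) {
		pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
		txdr->desc = NULL;
	}
	if (rxdr->desc) {
		pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
		rxdr->desc = NULL;
	}
	kfree(txdr->buffer_info);
	txdr->buffer_info = NULL;
	kfree(rxdr->buffer_info);
	rxdr->buffer_info = NULL;
}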
Code Example #3
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	int i;
	int ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!tx_ring->count)
		tx_ring->count = E1000_DEFAULT_TXD;

	tx_ring->buffer_info = kcalloc(tx_ring->count,
				       sizeof(struct e1000_buffer),
				       GFP_KERNEL);
	if (!(tx_ring->buffer_info)) {
		ret_val = 1;
		goto err_nomem;
	}

	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		ret_val = 2;
		goto err_nomem;
	}
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
	ew32(TDBAH, ((u64) tx_ring->dma >> 32));
	ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
	ew32(TDH, 0);
	ew32(TDT, 0);
	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
	     E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	for (i = 0; i < tx_ring->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct sk_buff *skb;
		unsigned int skb_size = 1024;

		skb = alloc_skb(skb_size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, skb_size);
		tx_ring->buffer_info[i].skb = skb;
		tx_ring->buffer_info[i].length = skb->len;
		tx_ring->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
			ret_val = 4;
			goto err_nomem;
		}
		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rx_ring->count)
		rx_ring->count = E1000_DEFAULT_RXD;

	rx_ring->buffer_info = kcalloc(rx_ring->count,
				       sizeof(struct e1000_buffer),
				       GFP_KERNEL);
	if (!(rx_ring->buffer_info)) {
		ret_val = 5;
		goto err_nomem;
	}

	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		ret_val = 6;
		goto err_nomem;
	}
	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
	ew32(RDBAH, ((u64) rx_ring->dma >> 32));
	ew32(RDLEN, rx_ring->size);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
		E1000_RCTL_SBP | E1000_RCTL_SECRC |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	ew32(RCTL, rctl);

	for (i = 0; i < rx_ring->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct sk_buff *skb;

		skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb) {
			ret_val = 7;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		memset(skb->data, 0x00, 2048); /* zero the whole buffer; skb->len is still 0 at this point */
		rx_ring->buffer_info[i].skb = skb;
		rx_ring->buffer_info[i].dma =
			pci_map_single(pdev, skb->data, 2048,
				       PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
			ret_val = 8;
			goto err_nomem;
		}
		rx_desc->buffer_addr =
			cpu_to_le64(rx_ring->buffer_info[i].dma);
	}

	return 0;

err_nomem:
	e1000_free_desc_rings(adapter);
	return ret_val;
}
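
Examples #2 and #3 build the same ethtool-test descriptor rings against two generations of the kernel DMA API: the legacy pci_alloc_consistent() in the e1000 driver and the generic dma_alloc_coherent() in e1000e. The sketch below contrasts the two calls; the helper name and its arguments are hypothetical stand-ins for the adapter's PCI device and the ring size computed during setup.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper contrasting the two coherent-memory APIs used above. */
static void *alloc_ring_both_ways(struct pci_dev *pdev, size_t size,
				  dma_addr_t *dma)
{
	void *desc;

	/* Legacy PCI DMA API (example #2): the gfp flags are implicit
	 * (GFP_ATOMIC) and the contents are not guaranteed zeroed, hence
	 * the explicit memset() after the call in example #2. Freed right
	 * away here so the sketch does not leak. */
	desc = pci_alloc_consistent(pdev, size, dma);
	if (desc)
		pci_free_consistent(pdev, size, desc, *dma);

	/* Generic DMA API (example #3): the caller picks the gfp flags, and
	 * on the kernels example #3 targets the memory comes back zeroed,
	 * which is why the memset() is gone there. */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}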