/** * e1000_close - Disables a network interface * * @v netdev network interface device structure * **/ static void e1000_close ( struct net_device *netdev ) { struct e1000_adapter *adapter = netdev_priv ( netdev ); struct e1000_hw *hw = &adapter->hw; uint32_t rctl; uint32_t icr; DBG ( "e1000_close\n" ); /* Acknowledge interrupts */ icr = E1000_READ_REG ( hw, ICR ); e1000_irq_disable ( adapter ); /* disable receives */ rctl = E1000_READ_REG ( hw, RCTL ); E1000_WRITE_REG ( hw, RCTL, rctl & ~E1000_RCTL_EN ); E1000_WRITE_FLUSH ( hw ); e1000_reset_hw ( hw ); e1000_free_tx_resources ( adapter ); e1000_free_rx_resources ( adapter ); }
/** * e1000_sw_init - Initialize general software structures (struct e1000_adapter) * @adapter: board private structure to initialize * * e1000_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int e1000_sw_init(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct pci_device *pdev = adapter->pdev; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &hw->subsystem_vendor_id); pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &hw->subsystem_device_id); pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; adapter->max_frame_size = MAXIMUM_ETHERNET_VLAN_SIZE + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; hw->fc.requested_mode = e1000_fc_none; /* Initialize the hardware-specific values */ if (e1000_setup_init_funcs(hw, false)) { DBG ("Hardware Initialization Failure\n"); return -EIO; } /* Explicitly disable IRQ since the NIC can be in any state. */ e1000_irq_disable ( adapter ); return 0; }
/**
 * e1000_irq - enable or disable interrupts
 *
 * @v netdev	network interface device structure
 * @v enable	non-zero to enable interrupts, zero to disable them
 **/
static void e1000_irq ( struct net_device *netdev, int enable )
{
	struct e1000_adapter *adapter = netdev_priv ( netdev );

	DBG ( "e1000_irq\n" );

	if ( ! enable ) {
		e1000_irq_disable ( adapter );
		return;
	}
	e1000_irq_enable ( adapter );
}
/**
 * wait_packet_function_ptr - PF_RING DNA wait-for-packet hook
 *
 * @data: opaque context pointer, actually the device's struct e1000_adapter
 * @mode: 1 = poll path (check whether the next RX descriptor is ready),
 *        any other value = teardown path (quiesce interrupts)
 *
 * Return: in poll mode, the masked DD status bits (non-zero when a packet
 * has been written back into the next descriptor, 0 otherwise); always 0
 * in teardown mode.
 */
int wait_packet_function_ptr(void *data, int mode)
{
  struct e1000_adapter *adapter = (struct e1000_adapter*)data;

  if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] called [mode=%d]\n", mode);

  if(mode == 1) {
    struct e1000_ring *rx_ring = adapter->rx_ring;
    union e1000_rx_desc_extended *rx_desc;
    /* RDT holds the index last consumed by userland (zero-copy scheme) */
    u16 i = E1000_READ_REG(&adapter->hw, E1000_RDT(0));

    /* Very important: update the value from the register set from userland.
     * Here i is the last I've read (zero-copy implementation) */
    if(++i == rx_ring->count) i = 0;
    /* Here i is the next I have to read */
    rx_ring->next_to_clean = i;

    rx_desc = E1000_RX_DESC_EXT(*rx_ring, rx_ring->next_to_clean);
    if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Check if a packet is arrived\n");
    prefetch(rx_desc);

    /* DD (descriptor done) set means the NIC has written this slot back */
    if(!(le32_to_cpu(rx_desc->wb.upper.status_error) & E1000_RXD_STAT_DD)) {
      adapter->dna.interrupt_received = 0;
#if 0
      /* Disabled: re-enable interrupts while waiting on an empty ring */
      if(!adapter->dna.interrupt_enabled) {
	e1000_irq_enable(adapter), adapter->dna.interrupt_enabled = 1;
	if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Packet not arrived yet: enabling interrupts\n");
      }
#endif
    } else
      adapter->dna.interrupt_received = 1;

    return(le32_to_cpu(rx_desc->wb.upper.status_error) & E1000_RXD_STAT_DD);
  } else {
    /* Teardown: make sure interrupts are off before the caller goes away */
    if(adapter->dna.interrupt_enabled) {
      e1000_irq_disable(adapter);
      adapter->dna.interrupt_enabled = 0;

      if(unlikely(enable_debug)) printk("[wait_packet_function_ptr] Disabled interrupts\n");
    }
    return(0);
  }
}
/**
 * e1000_irq - enable, disable, or force interrupts
 *
 * @v netdev	network interface device structure
 * @v enable	requested interrupt action:
 *		0 = disable, 1 = enable, 2 = force an interrupt
 **/
static void e1000_irq ( struct net_device *netdev, int enable )
{
	struct e1000_adapter *adapter = netdev_priv ( netdev );

	DBG ( "e1000_irq\n" );

	switch ( enable ) {
	case 0:
		e1000_irq_disable ( adapter );
		break;
	case 1:
		e1000_irq_enable ( adapter );
		break;
	case 2:
		e1000_irq_force ( adapter );
		break;
	default:
		/* Unknown action: ignore (as before), but make it visible
		 * in debug builds instead of silently falling through.
		 */
		DBG ( "e1000_irq: unknown action %d\n", enable );
		break;
	}
}
/** * e1000_sw_init - Initialize general software structures (struct e1000_adapter) * * @v adapter e1000 private structure * * e1000_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int e1000_sw_init ( struct e1000_adapter *adapter ) { struct e1000_hw *hw = &adapter->hw; struct pci_device *pdev = adapter->pdev; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; pci_read_config_word ( pdev, PCI_COMMAND, &hw->pci_cmd_word ); /* Disable Flow Control */ hw->fc = E1000_FC_NONE; adapter->eeprom_wol = 0; adapter->wol = adapter->eeprom_wol; adapter->en_mng_pt = 0; adapter->rx_int_delay = 0; adapter->rx_abs_int_delay = 0; adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; adapter->rx_ps_bsize0 = E1000_RXBUFFER_128; hw->max_frame_size = MAXIMUM_ETHERNET_VLAN_SIZE + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; /* identify the MAC */ if ( e1000_set_mac_type ( hw ) ) { DBG ( "Unknown MAC Type\n" ); return -EIO; } switch ( hw->mac_type ) { default: break; case e1000_82541: case e1000_82547: case e1000_82541_rev_2: case e1000_82547_rev_2: hw->phy_init_script = 1; break; } e1000_set_media_type ( hw ); hw->autoneg = TRUE; hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; hw->wait_autoneg_complete = TRUE; hw->tbi_compatibility_en = TRUE; hw->adaptive_ifs = TRUE; /* Copper options */ if ( hw->media_type == e1000_media_type_copper ) { hw->mdix = AUTO_ALL_MODES; hw->disable_polarity_correction = FALSE; hw->master_slave = E1000_MASTER_SLAVE; } e1000_irq_disable ( adapter ); return 0; }
/**
 * alloc_dna_memory - set up PF_RING DNA zero-copy memory for a device
 *
 * @adapter: the e1000e adapter to put into DNA mode
 *
 * Allocates contiguous RX and TX packet memory in page-order chunks,
 * carves it into cache-line-aligned slots, DMA-maps every slot, points
 * the hardware descriptors (plus a shadow copy one ring-length further
 * on, used by libzero) at them, and finally registers the whole mapping
 * with PF_RING via hook->ring_dna_device_handler().
 *
 * Runs once per device: if rx_packet_memory[0] is already set, it only
 * prints a warning.  On allocation failure it prints an error and
 * returns early, leaving any already-allocated chunks in place.
 */
void alloc_dna_memory(struct e1000_adapter *adapter)
{
  struct net_device *netdev = adapter->netdev;
  struct pci_dev *pdev = adapter->pdev;
  struct e1000_ring *rx_ring = adapter->rx_ring;
  struct e1000_ring *tx_ring = adapter->tx_ring;
  struct e1000_tx_desc *tx_desc, *shadow_tx_desc;
  struct pfring_hooks *hook = (struct pfring_hooks*)netdev->pfring_ptr;
  union e1000_rx_desc_extended *rx_desc, *shadow_rx_desc;
  struct e1000_buffer *buffer_info;
  u16 cache_line_size;
  struct sk_buff *skb;
  unsigned int i;
  int cleaned_count = rx_ring->count; /* Allocate all slots in one shot */
  unsigned int bufsz = adapter->rx_buffer_len;
  mem_ring_info rx_info = {0};
  mem_ring_info tx_info = {0};
  int num_slots_per_page;

  /* Buffers are allocated all in one shot so we'll pass here once */
#if 0
  printk("[DNA] e1000_alloc_rx_buffers(cleaned_count=%d)[%s][slot len=%u/%u]\n",
         cleaned_count, adapter->netdev->name, bufsz, adapter->max_hw_frame_size);
#endif

  /* Only act if PF_RING is actually loaded and hooked into this netdev */
  if(hook && (hook->magic == PF_RING)) {
    if(adapter->dna.rx_packet_memory[0] == 0) {

      /* Slot length is the RX buffer size rounded up to the cache line,
       * read from PCI config space (offset 0x0C, in 2-byte words). */
      pci_read_config_word(adapter->pdev,
                           0x0C /* Conf. Space Cache Line Size offset */,
                           &cache_line_size);
      cache_line_size *= 2; /* word (2-byte) to bytes */
      if(cache_line_size == 0) cache_line_size = 64; /* sane fallback */
      if (0) printk("[DNA] Cache line size is %u bytes\n", cache_line_size);

      adapter->dna.packet_slot_len = ALIGN(bufsz, cache_line_size);
      adapter->dna.packet_num_slots = cleaned_count;

      /* Each memory chunk is one maximal contiguous allocation */
      adapter->dna.tot_packet_memory = PAGE_SIZE << DNA_MAX_CHUNK_ORDER;
      num_slots_per_page = adapter->dna.tot_packet_memory / adapter->dna.packet_slot_len;
      /* Round up: enough chunks to cover every slot */
      adapter->dna.num_memory_pages =
        (adapter->dna.packet_num_slots + num_slots_per_page-1) / num_slots_per_page;

      if(0) printk("[DNA] Allocating memory [%u slots][%u memory pages][tot_packet_memory %u bytes]\n",
                   adapter->dna.packet_num_slots,
                   adapter->dna.num_memory_pages,
                   adapter->dna.tot_packet_memory);

      /* Allocate the RX packet memory chunks */
      for(i=0; i<adapter->dna.num_memory_pages; i++) {
        adapter->dna.rx_packet_memory[i] =
          alloc_contiguous_memory(&adapter->dna.tot_packet_memory,
                                  &adapter->dna.mem_order);

        if(adapter->dna.rx_packet_memory[i] != 0) {
          if(0) printk("[DNA] Successfully allocated %lu bytes at "
                       "0x%08lx [slot_len=%d]\n",
                       (unsigned long) adapter->dna.tot_packet_memory,
                       (unsigned long) adapter->dna.rx_packet_memory[i],
                       adapter->dna.packet_slot_len);
        } else {
          printk("[DNA] ERROR: not enough memory for DMA ring\n");
          return;
        }
      }

      /* DMA-map every RX slot and point its descriptor (and shadow) at it.
       * Note: buffer_info->skb actually holds a raw slot address here,
       * not a real sk_buff — DNA reuses the field as a cookie. */
      for(i=0; i<cleaned_count; i++) {
        u_int page_index, offset;

        page_index = i / num_slots_per_page;
        offset = (i % num_slots_per_page) * adapter->dna.packet_slot_len;
        skb = (struct sk_buff *)(adapter->dna.rx_packet_memory[page_index] + offset);

        if(0) printk("[DNA] Allocating slot %d of %d [addr=%p][page_index=%u][offset=%u]\n",
                     i, adapter->dna.packet_num_slots, skb, page_index, offset);

        buffer_info = &rx_ring->buffer_info[i];
        buffer_info->skb = skb;
        buffer_info->length = adapter->rx_buffer_len;
        buffer_info->dma = pci_map_single(pdev, skb,
                                          buffer_info->length,
                                          PCI_DMA_FROMDEVICE);

#if 0
        printk("[DNA] Mapping buffer %d [ptr=%p][len=%d]\n",
               i, (void*)buffer_info->dma, adapter->dna.packet_slot_len);
#endif

        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
        rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
        /* Shadow descriptor one ring-length further on (used by libzero) */
        shadow_rx_desc = E1000_RX_DESC_EXT(*rx_ring, i + rx_ring->count);
        memcpy(shadow_rx_desc, rx_desc, sizeof(union e1000_rx_desc_extended));
      }

      /* Ensure descriptor writes are visible before publishing the tail */
      wmb();

      /* The statement below syncs the value of tail (next to read) to
       * count-1 instead of 0 for zero-copy (one slot back) */
      E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rx_ring->count-1);

      e1000_irq_disable(adapter);
      //e1000_irq_enable(adapter);

      /* TX */
      if(adapter->dna.tx_packet_memory[0] == 0) {
        /* Same chunk layout as RX, reusing the RX-derived geometry */
        for(i=0; i<adapter->dna.num_memory_pages; i++) {
          adapter->dna.tx_packet_memory[i] =
            alloc_contiguous_memory(&adapter->dna.tot_packet_memory,
                                    &adapter->dna.mem_order);

          if(adapter->dna.tx_packet_memory[i] != 0) {
            /* NOTE(review): this debug printk reports rx_packet_memory[i]
             * for a TX allocation — looks like a copy/paste slip (it is
             * compiled out by if(0) anyway); confirm before enabling. */
            if(0) printk("[DNA] [TX] Successfully allocated %lu bytes at "
                         "0x%08lx [slot_len=%d]\n",
                         (unsigned long) adapter->dna.tot_packet_memory,
                         (unsigned long) adapter->dna.rx_packet_memory[i],
                         adapter->dna.packet_slot_len);
          } else {
            printk("[DNA] ERROR: not enough memory for DMA ring\n");
            return;
          }
        }

        /* DMA-map every TX slot and fill the descriptor (and shadow).
         * NOTE(review): length uses adapter->rx_buffer_len for TX too —
         * presumably intentional (uniform slot size); confirm. */
        for(i=0; i<cleaned_count; i++) {
          u_int page_index, offset;

          page_index = i / num_slots_per_page;
          offset = (i % num_slots_per_page) * adapter->dna.packet_slot_len;
          skb = (struct sk_buff *)(adapter->dna.tx_packet_memory[page_index] + offset);

          if(0) printk("[DNA] [TX] Allocating slot %d of %d [addr=%p][page_index=%u][offset=%u]\n",
                       i, adapter->dna.packet_num_slots, skb, page_index, offset);

          buffer_info = &tx_ring->buffer_info[i];
          buffer_info->skb = skb;
          buffer_info->length = adapter->rx_buffer_len;
          buffer_info->dma = pci_map_single(pdev, skb,
                                            buffer_info->length,
                                            PCI_DMA_TODEVICE);

#if 0
          printk("[DNA] Mapping buffer %d [ptr=%p][len=%d]\n",
                 i, (void*)buffer_info->dma, adapter->dna.packet_slot_len);
#endif

          tx_desc = E1000_TX_DESC(*tx_ring, i);
          tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
          /* Note that shadows are useless for e1000e with standard DNA,
             but used by libzero */
          shadow_tx_desc = E1000_TX_DESC(*tx_ring, i + tx_ring->count);
          memcpy(shadow_tx_desc, tx_desc, sizeof(struct e1000_tx_desc));
        }
      }

      /* Describe the memory layout for PF_RING registration.
       * descr_packet_memory_tot_len is doubled to cover the shadow
       * descriptors appended after the real ring. */
      rx_info.packet_memory_num_chunks    = adapter->dna.num_memory_pages;
      rx_info.packet_memory_chunk_len     = adapter->dna.tot_packet_memory;
      rx_info.packet_memory_num_slots     = adapter->dna.packet_num_slots;
      rx_info.packet_memory_slot_len      = adapter->dna.packet_slot_len;
      rx_info.descr_packet_memory_tot_len = 2 * rx_ring->size;

      tx_info.packet_memory_num_chunks    = adapter->dna.num_memory_pages;
      tx_info.packet_memory_chunk_len     = adapter->dna.tot_packet_memory;
      tx_info.packet_memory_num_slots     = adapter->dna.packet_num_slots;
      tx_info.packet_memory_slot_len      = adapter->dna.packet_slot_len;
      tx_info.descr_packet_memory_tot_len = 2 * tx_ring->size;

      /* Register with PF_RING */
      hook->ring_dna_device_handler(add_device_mapping,
                                    dna_v1,
                                    &rx_info,
                                    &tx_info,
                                    adapter->dna.rx_packet_memory,
                                    rx_ring->desc,
                                    adapter->dna.tx_packet_memory,
                                    tx_ring->desc, /* Packet descriptors */
                                    (void*)netdev->mem_start,
                                    netdev->mem_end-netdev->mem_start,
                                    0, /* Channel Id */
                                    netdev,
                                    &pdev->dev,
                                    intel_e1000e,
                                    adapter->netdev->dev_addr,
                                    &adapter->dna.packet_waitqueue,
                                    &adapter->dna.interrupt_received,
                                    (void*)adapter,
                                    wait_packet_function_ptr,
                                    notify_function_ptr);

      if(1) printk("[DNA] Enabled DNA on %s (rx len=%u, tx len=%u)\n",
                   adapter->netdev->name, rx_ring->size, tx_ring->size);
    } else {
      /* Already initialised: do nothing except warn */
      printk("WARNING e1000_alloc_rx_buffers(cleaned_count=%d)"
             "[%s][%lu] already allocated\n",
             cleaned_count, adapter->netdev->name,
             adapter->dna.rx_packet_memory[0]);
    }
  }
}