static ssize_t igb_txbpacks(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct e1000_hw *hw; struct igb_adapter *adapter = igb_get_adapter(kobj); if (adapter == NULL) return snprintf(buf, PAGE_SIZE, "error: no adapter\n"); hw = &adapter->hw; if (hw == NULL) return snprintf(buf, PAGE_SIZE, "error: no hw data\n"); return snprintf(buf, PAGE_SIZE, "%d\n", E1000_READ_REG(hw, E1000_BPTC)); }
/** * e1000_close - Disables a network interface * * @v netdev network interface device structure * **/ static void e1000e_close ( struct net_device *netdev ) { struct e1000_adapter *adapter = netdev_priv ( netdev ); struct e1000_hw *hw = &adapter->hw; uint32_t rctl; uint32_t icr; DBGP ( "e1000_close\n" ); /* Acknowledge interrupts */ icr = E1000_READ_REG ( hw, E1000_ICR ); e1000e_irq_disable ( adapter ); /* disable receives */ rctl = E1000_READ_REG ( hw, E1000_RCTL ); E1000_WRITE_REG ( hw, E1000_RCTL, rctl & ~E1000_RCTL_EN ); e1e_flush(); e1000e_reset ( adapter ); e1000e_free_tx_resources ( adapter ); e1000e_free_rx_resources ( adapter ); }
/**
 * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 sync_bits;

	DEBUGFUNC("e1000_release_swfw_sync_i210");

	/* Spin until the HW semaphore guarding SW_FW_SYNC is ours */
	while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
		; /* Empty */

	/* Drop our lock bit(s), leaving everyone else's untouched */
	sync_bits = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, sync_bits & ~mask);

	e1000_put_hw_semaphore_i210(hw);
}
/**
 * e1000_check_for_rst_pf - checks to see if the VF has reset
 * @hw: pointer to the HW structure
 * @vf_number: the VF index
 *
 * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
 **/
static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
{
	u32 vf_bit = 1 << vf_number;

	DEBUGFUNC("e1000_check_for_rst_pf");

	if (!(E1000_READ_REG(hw, E1000_VFLRE) & vf_bit))
		return -E1000_ERR_MBX;

	/* Ack the reset indication for this VF and count it */
	E1000_WRITE_REG(hw, E1000_VFLRE, vf_bit);
	hw->mbx.stats.rsts++;

	return E1000_SUCCESS;
}
/**
 * igb_txmpacks - legacy procfs read handler for transmitted multicast packets
 * @page:  output buffer supplied by procfs
 * @start: unused (output fits in one page)
 * @off:   unused
 * @count: size of @page available to us
 * @eof:   set to 1 to tell procfs the whole value was delivered in one read
 * @data:  the struct igb_adapter registered with create_proc_read_entry
 *
 * Reads the Multicast Packets Transmitted Count (MPTC) register.
 * Returns the number of bytes written into @page.
 */
static int igb_txmpacks(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw;

	/*
	 * BUG FIX: the whole value fits in a single read, so signal EOF;
	 * without this, userland read() loops could spin forever.
	 */
	*eof = 1;

	if (adapter == NULL)
		return snprintf(page, count, "error: no adapter\n");

	/* &adapter->hw is never NULL here; the old check was dead code */
	hw = &adapter->hw;

	/* MPTC is an unsigned 32-bit counter: print with %u */
	return snprintf(page, count, "%u\n", E1000_READ_REG(hw, E1000_MPTC));
}
/*
 * StandBy - Return the EEPROM to its standby state between commands.
 * @Adapter: board private structure
 *
 * Drops chip-select and clock, waits briefly, then re-asserts chip-select.
 */
STATIC VOID StandBy(PADAPTER_STRUCT Adapter)
{
    UINT32 Value;

    /* Lower both chip-select and serial clock */
    Value = E1000_READ_REG(Eecd);
    Value &= ~(E1000_EECS | E1000_EESK);
    E1000_WRITE_REG(Eecd, Value);

    DelayInMicroseconds(5);

    /* Re-assert chip-select only */
    Value |= E1000_EECS;
    E1000_WRITE_REG(Eecd, Value);
}
/*
 * IdLedOff - Turn off the identification LED.
 * @Adapter: board private structure
 *
 * Configures SW-definable pin 0 as an output and drives it low.
 */
VOID IdLedOff(PADAPTER_STRUCT Adapter)
{
    UINT32 DeviceControl;

    /* Nothing to do once the adapter has been stopped */
    if (Adapter->AdapterStopped) {
        return;
    }

    DeviceControl = E1000_READ_REG(Ctrl);
    DeviceControl |= E1000_CTRL_SWDPIO0;   /* pin 0 is an output */
    DeviceControl &= ~E1000_CTRL_SWDPIN0;  /* drive it low */
    E1000_WRITE_REG(Ctrl, DeviceControl);
}
/** * igb_ptp_tx_work * @work: pointer to work struct * * This work function polls the TSYNCTXCTL valid bit to determine when a * timestamp has been taken for the current stored skb. */ void igb_ptp_tx_work(struct work_struct *work) { struct igb_adapter *adapter = container_of(work, struct igb_adapter, ptp_tx_work); struct e1000_hw *hw = &adapter->hw; u32 tsynctxctl; if (!adapter->ptp_tx_skb) return; tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL); if (tsynctxctl & E1000_TSYNCTXCTL_VALID) igb_ptp_tx_hwtstamp(adapter); else /* reschedule to check later */ schedule_work(&adapter->ptp_tx_work); }
/**
 * e1000_obtain_mbx_lock_pf - obtain mailbox lock
 * @hw: pointer to the HW structure
 * @vf_number: the VF index
 *
 * return SUCCESS if we obtained the mailbox lock
 **/
static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{
	u32 mbx_reg;

	DEBUGFUNC("e1000_obtain_mbx_lock_pf");

	/* Take ownership of the buffer */
	E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);

	/* reserve mailbox for vf use: read back to see if PFU stuck */
	mbx_reg = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
	if (mbx_reg & E1000_P2VMAILBOX_PFU)
		return E1000_SUCCESS;

	return -E1000_ERR_MBX;
}
/**
 * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow Ram to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow Ram
 *
 * Writes data to Shadow Ram at offset using EEWR register.
 *
 * If e1000_update_nvm_checksum is not called after this function , the
 * Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
				u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_write_nvm_srwr");

	/*
	 * A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		DEBUGOUT("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
		       (data[i] << E1000_NVM_RW_REG_DATA) |
		       E1000_NVM_RW_REG_START;

		E1000_WRITE_REG(hw, E1000_SRWR, eewr);

		/*
		 * BUG FIX: assume failure before polling.  Previously
		 * ret_val retained E1000_SUCCESS from its initializer (or
		 * from the prior word), so a DONE-polling timeout was never
		 * detected and the loop kept writing subsequent words.
		 */
		ret_val = -E1000_ERR_NVM;
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    E1000_READ_REG(hw, E1000_SRWR)) {
				ret_val = E1000_SUCCESS;
				break;
			}
			usec_delay(5);
		}

		if (ret_val != E1000_SUCCESS) {
			DEBUGOUT("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
/*
 * wait_packet_function_ptr - PF_RING/DNA hook used to test for incoming
 * packets on an e1000 RX ring.
 * @data: opaque pointer; actually the struct e1000_adapter for this device
 * @mode: 1 = check whether a packet has arrived at the next slot;
 *        any other value = tear-down path (disable device interrupts)
 *
 * Returns the DD status bit (non-zero) when a packet is ready, 0 otherwise.
 */
int wait_packet_function_ptr(void *data, int mode)
{
  struct e1000_adapter *adapter = (struct e1000_adapter*)data;

  if(unlikely(enable_debug))
    printk("[wait_packet_function_ptr] called [mode=%d]\n", mode);

  if(mode == 1) {
    struct e1000_ring *rx_ring = adapter->rx_ring;
    union e1000_rx_desc_extended *rx_desc;
    u16 i = E1000_READ_REG(&adapter->hw, E1000_RDT(0));

    /* Very important: update the value from the register set from userland.
     * Here i is the last I've read (zero-copy implementation) */
    if(++i == rx_ring->count)
      i = 0;
    /* Here i is the next I have to read */
    rx_ring->next_to_clean = i;

    rx_desc = E1000_RX_DESC_EXT(*rx_ring, rx_ring->next_to_clean);
    if(unlikely(enable_debug))
      printk("[wait_packet_function_ptr] Check if a packet is arrived\n");
    prefetch(rx_desc);

    /* DD (descriptor done) set in the write-back area means the NIC has
     * filled this slot */
    if(!(le32_to_cpu(rx_desc->wb.upper.status_error) & E1000_RXD_STAT_DD)) {
      adapter->dna.interrupt_received = 0;
#if 0
      if(!adapter->dna.interrupt_enabled) {
	e1000_irq_enable(adapter), adapter->dna.interrupt_enabled = 1;
	if(unlikely(enable_debug))
	  printk("[wait_packet_function_ptr] Packet not arrived yet: enabling interrupts\n");
      }
#endif
    } else
      adapter->dna.interrupt_received = 1;

    return(le32_to_cpu(rx_desc->wb.upper.status_error) & E1000_RXD_STAT_DD);
  } else {
    /* Shutdown/cleanup path: make sure device interrupts are off */
    if(adapter->dna.interrupt_enabled) {
      e1000_irq_disable(adapter);
      adapter->dna.interrupt_enabled = 0;
      if(unlikely(enable_debug))
	printk("[wait_packet_function_ptr] Disabled interrupts\n");
    }
    return(0);
  }
}
/*
 * WaitEepromCommandDone - Poll the EEPROM data-out pin for command completion.
 * @Adapter: board private structure
 *
 * Returns TRUE once EEDO goes high, FALSE if it never does within the
 * 200 * 5us polling window.
 */
STATIC UINT16 WaitEepromCommandDone(PADAPTER_STRUCT Adapter)
{
    UINT Attempt;

    /* Put the EEPROM in standby so it will signal completion on EEDO */
    StandBy(Adapter);

    for (Attempt = 0; Attempt < 200; Attempt++) {
        if (E1000_READ_REG(Eecd) & E1000_EEDO)
            return (TRUE);
        DelayInMicroseconds(5);
    }

    return (FALSE);
}
/** * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. * @hw: pointer to the HW structure * **/ s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_NVM; u32 i, reg; DEBUGFUNC("e1000_pool_flash_update_done_i210"); for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { reg = E1000_READ_REG(hw, E1000_EECD); if (reg & E1000_EECD_FLUDONE_I210) { ret_val = E1000_SUCCESS; break; } usec_delay(5); } return ret_val; }
/*
 * ForceMacFlowControlSetting - Program the MAC's pause-frame behavior.
 * @Adapter: board private structure
 *
 * Sets/clears the TFCE (transmit pause) and RFCE (receive pause) bits in
 * the device control register according to Adapter->FlowControl.
 */
VOID ForceMacFlowControlSetting(PADAPTER_STRUCT Adapter)
{
    UINT32 DeviceControl;

    DEBUGFUNC("ForceMacFlowControlSetting")

    DeviceControl = E1000_READ_REG(Ctrl);

    switch (Adapter->FlowControl) {
    case FLOW_CONTROL_NONE:
        /* No pause in either direction */
        DeviceControl &= ~(E1000_CTRL_TFCE | E1000_CTRL_RFCE);
        break;
    case FLOW_CONTROL_RECEIVE_PAUSE:
        /* Honor pause frames, never send them */
        DeviceControl &= ~E1000_CTRL_TFCE;
        DeviceControl |= E1000_CTRL_RFCE;
        break;
    case FLOW_CONTROL_TRANSMIT_PAUSE:
        /* Send pause frames, ignore incoming ones */
        DeviceControl &= ~E1000_CTRL_RFCE;
        DeviceControl |= E1000_CTRL_TFCE;
        break;
    case FLOW_CONTROL_FULL:
        /* Pause in both directions */
        DeviceControl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
        break;
    default:
        DEBUGOUT("Flow control param set incorrectly\n");
        ASSERT(0);
        break;
    }

    /* TFCE is always forced off on Wiseman 2.0 silicon */
    if (Adapter->MacType == MAC_WISEMAN_2_0)
        DeviceControl &= ~E1000_CTRL_TFCE;

    E1000_WRITE_REG(Ctrl, DeviceControl);
}
/** * e1000_read_invm_version - Reads iNVM version and image type * @hw: pointer to the HW structure * @invm_ver: version structure for the version read * * Reads iNVM version and image type. **/ s32 e1000_read_invm_version(struct e1000_hw *hw, struct e1000_fw_version *invm_ver) { u32 *record = NULL; u32 *next_record = NULL; u32 i = 0; u32 invm_dword = 0; u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / E1000_INVM_RECORD_SIZE_IN_BYTES); u32 buffer[E1000_INVM_SIZE]; s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; u16 version = 0; DEBUGFUNC("e1000_read_invm_version"); /* Read iNVM memory */ for (i = 0; i < E1000_INVM_SIZE; i++) { invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); buffer[i] = invm_dword; } /* Read version number */ for (i = 1; i < invm_blocks; i++) { record = &buffer[invm_blocks - i]; next_record = &buffer[invm_blocks - i + 1]; /* Check if we have first version location used */ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { version = 0; status = E1000_SUCCESS; break; } /* Check if we have second version location used */ else if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; status = E1000_SUCCESS; break; } /* * Check if we have odd version location * used and it is the last one used */ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
/*
 * dna_igb_clean_rx_irq - DNA (zero-copy) variant of the RX interrupt cleanup.
 * @q_vector: interrupt vector owning @rx_ring
 * @rx_ring:  receive ring to inspect
 * @budget:   NAPI budget; returned unchanged -- no descriptors are consumed
 *            here, the userland application owns the ring
 *
 * Does not reap descriptors like the stock clean_rx_irq: it only checks
 * whether the slot after RDT has been filled (DD bit set) and, if so,
 * wakes any userland reader sleeping on the ring's wait queue.
 */
static int dna_igb_clean_rx_irq(struct igb_q_vector *q_vector,
				struct igb_ring *rx_ring,
				int budget)
{
  union e1000_adv_rx_desc *rx_desc;
  u32 staterr;
  u16 i;
  struct igb_adapter *adapter = q_vector->adapter;
  struct e1000_hw *hw = &adapter->hw;

  /* RDT is maintained from userland (zero-copy); the next slot to check
   * is RDT+1 modulo the ring size */
  i = E1000_READ_REG(hw, E1000_RDT(rx_ring->reg_idx));
  if(++i == rx_ring->count)
    i = 0;
  rx_ring->next_to_clean = i;

  rx_desc = IGB_RX_DESC(rx_ring, i);
  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

  if(rx_ring->dna.queue_in_use) {
    /* A userland application is using the queue so it's not time to mess
       up with indexes but just to wakeup apps (if waiting) */
    if(staterr & E1000_RXD_STAT_DD) {
      if(unlikely(enable_debug))
	printk(KERN_INFO "DNA: got a packet [index=%d]!\n", i);

      if(waitqueue_active(&rx_ring->dna.rx_tx.rx.packet_waitqueue)) {
	wake_up_interruptible(&rx_ring->dna.rx_tx.rx.packet_waitqueue);
	rx_ring->dna.rx_tx.rx.interrupt_received = 1;

	if(unlikely(enable_debug))
	  printk("%s(%s): woken up ring=%d, [slot=%d] XXX\n",
		 __FUNCTION__, rx_ring->netdev->name, rx_ring->reg_idx, i);
      }
    }
  }

  return(budget);
}
/** * e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings * @hw: pointer to the HW structure * * For the 82543 silicon, we need to set the MAC to match the settings * of the PHY, even if the PHY is auto-negotiating. **/ static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw) { u32 ctrl; s32 ret_val = E1000_SUCCESS; u16 phy_data; DEBUGFUNC("e1000_config_mac_to_phy_82543"); if (!(hw->phy.ops.read_reg)) goto out; /* Set the bits to force speed and duplex */ ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); /* * Set up duplex in the Device Control and Transmit Control * registers depending on negotiated values. */ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) goto out; ctrl &= ~E1000_CTRL_FD; if (phy_data & M88E1000_PSSR_DPLX) ctrl |= E1000_CTRL_FD; e1000_config_collision_dist_generic(hw); /* * Set up speed in the Device Control register depending on * negotiated values. */ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) ctrl |= E1000_CTRL_SPD_1000; else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) ctrl |= E1000_CTRL_SPD_100; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); out: return ret_val; }
/*
 * e1000_get_wol - ethtool callback: report Wake-on-LAN capability and state
 * @netdev: network interface being queried
 * @wol:    ethtool WoL info structure to fill in
 *
 * Devices in the first case group have no WoL support; dual-port fiber
 * parts support wake events only on port A (function 0).  For everything
 * else, the driver's cached WUFC filter bits are translated into the
 * corresponding ethtool WAKE_* flags.
 */
static void e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev->priv;
	struct e1000_hw *hw = &adapter->hw;

	switch(adapter->hw.device_id) {
	/* Devices with no Wake-on-LAN support at all */
	case E1000_DEV_ID_82542:
	case E1000_DEV_ID_82543GC_FIBER:
	case E1000_DEV_ID_82543GC_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82545EM_COPPER:
		wol->supported = 0;
		wol->wolopts = 0;
		return;

	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber */
		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
			wol->supported = 0;
			wol->wolopts = 0;
			return;
		}
		/* Fall Through */
	default:
		wol->supported = WAKE_UCAST | WAKE_MCAST |
				 WAKE_BCAST | WAKE_MAGIC;

		/* Translate cached WUFC filter bits into ethtool flags */
		wol->wolopts = 0;
		if(adapter->wol & E1000_WUFC_EX)
			wol->wolopts |= WAKE_UCAST;
		if(adapter->wol & E1000_WUFC_MC)
			wol->wolopts |= WAKE_MCAST;
		if(adapter->wol & E1000_WUFC_BC)
			wol->wolopts |= WAKE_BCAST;
		if(adapter->wol & E1000_WUFC_MAG)
			wol->wolopts |= WAKE_MAGIC;
		return;
	}
}
/** * e1000_led_off_82543 - Turn off SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED off. **/ static s32 e1000_led_off_82543(struct e1000_hw *hw) { u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGFUNC("e1000_led_off_82543"); if (hw->mac.type == e1000_82544 && hw->phy.media_type == e1000_media_type_copper) { /* Set SW-definable Pin 0 to turn off the LED */ ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; } else { ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; } E1000_WRITE_REG(hw, E1000_CTRL, ctrl); return E1000_SUCCESS; }
/**
 * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 sync_reg;
	u32 sw_bits = mask;
	u32 fw_bits = (u32)mask << 16;
	s32 ret_val = E1000_SUCCESS;
	s32 attempt;
	s32 max_attempts = 200; /* FIXME: find real value to use here */

	DEBUGFUNC("e1000_acquire_swfw_sync_i210");

	for (attempt = 0; attempt < max_attempts; attempt++) {
		if (e1000_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		sync_reg = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(sync_reg & (fw_bits | sw_bits)))
			break;

		/*
		 * Firmware currently using resource (fw_bits)
		 * or other software thread using resource (sw_bits)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
	}

	if (attempt == max_attempts) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim our bits and release the arbitration semaphore */
	sync_reg |= sw_bits;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, sync_reg);

	e1000_put_hw_semaphore_generic(hw);

out:
	return ret_val;
}
/*
 * GetSpeedAndDuplex - Report the current link speed and duplex mode.
 * @Adapter: board private structure
 * @Speed:   out -- SPEED_10/100/1000, or 0 if the adapter is stopped
 * @Duplex:  out -- FULL_DUPLEX or HALF_DUPLEX, or 0 if stopped
 */
VOID GetSpeedAndDuplex(PADAPTER_STRUCT Adapter, PUINT16 Speed, PUINT16 Duplex)
{
    UINT32 StatusReg;

    DEBUGFUNC("GetSpeedAndDuplex")

    if (Adapter->AdapterStopped) {
        *Speed = 0;
        *Duplex = 0;
        return;
    }

    if (Adapter->MacType < MAC_LIVENGOOD) {
        /* Wiseman-class parts only ever run 1000 Mb/s full duplex */
        DEBUGOUT("Wiseman MAC - 1000 Mbs, Full Duplex\r\n");
        *Speed = SPEED_1000;
        *Duplex = FULL_DUPLEX;
        return;
    }

    DEBUGOUT("Livengood MAC\n");
    StatusReg = E1000_READ_REG(Status);

    if (StatusReg & E1000_STATUS_SPEED_1000) {
        *Speed = SPEED_1000;
        DEBUGOUT("   1000 Mbs\n");
    } else if (StatusReg & E1000_STATUS_SPEED_100) {
        *Speed = SPEED_100;
        DEBUGOUT("   100 Mbs\n");
    } else {
        *Speed = SPEED_10;
        DEBUGOUT("   10 Mbs\n");
    }

    if (StatusReg & E1000_STATUS_FD) {
        *Duplex = FULL_DUPLEX;
        DEBUGOUT("   Full Duplex\r\n");
    } else {
        *Duplex = HALF_DUPLEX;
        DEBUGOUT("   Half Duplex\r\n");
    }
}
/**
 * e1000_shift_out_mdi_bits_82543 - Shift data bits our to the PHY
 * @hw: pointer to the HW structure
 * @data: data to send to the PHY
 * @count: number of bits to shift out
 *
 * We need to shift 'count' bits out to the PHY. So, the value in the
 * "data" parameter will be shifted out to the PHY one bit at a time.
 * In order to do this, "data" must be broken down into bits.
 **/
STATIC void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
					   u16 count)
{
	u32 dev_ctrl;
	u32 bit_mask;

	/* Start at the most significant of the 'count' bits to send */
	bit_mask = 1u << (count - 1);

	dev_ctrl = E1000_READ_REG(hw, E1000_CTRL);

	/* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
	dev_ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);

	for (; bit_mask; bit_mask >>= 1) {
		/*
		 * Present the current data bit on MDIO, then raise and
		 * lower the Management Data Clock to latch it into the PHY.
		 */
		if (data & bit_mask)
			dev_ctrl |= E1000_CTRL_MDIO;
		else
			dev_ctrl &= ~E1000_CTRL_MDIO;

		E1000_WRITE_REG(hw, E1000_CTRL, dev_ctrl);
		E1000_WRITE_FLUSH(hw);

		usec_delay(10);

		e1000_raise_mdi_clk_82543(hw, &dev_ctrl);
		e1000_lower_mdi_clk_82543(hw, &dev_ctrl);
	}
}
/** * e1000_led_on_82543 - Turn on SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED on. **/ static s32 e1000_led_on_82543(struct e1000_hw *hw) { u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGFUNC("e1000_led_on_82543"); if (hw->mac.type == e1000_82544 && hw->phy.media_type == e1000_media_type_copper) { /* Clear SW-definable Pin 0 to turn on the LED */ ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; } else { /* Fiber 82544 and all 82543 use this method */ ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; } E1000_WRITE_REG(hw, E1000_CTRL, ctrl); return E1000_SUCCESS; }
/*
 * e1000_set_wol - ethtool callback: configure Wake-on-LAN options
 * @netdev: network interface being configured
 * @wol:    requested WoL settings from userland
 *
 * Returns 0 on success, -EOPNOTSUPP if the device (or port) cannot wake
 * or if an unsupported wake type (PHY/ARP/magic-secure) was requested.
 * On success the selection is cached in adapter->wol as WUFC filter bits.
 */
static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev->priv;
	struct e1000_hw *hw = &adapter->hw;

	switch(adapter->hw.device_id) {
	/* Devices with no Wake-on-LAN support: only an all-clear is OK */
	case E1000_DEV_ID_82542:
	case E1000_DEV_ID_82543GC_FIBER:
	case E1000_DEV_ID_82543GC_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82545EM_COPPER:
		return wol->wolopts ? -EOPNOTSUPP : 0;

	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber */
		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
			return wol->wolopts ? -EOPNOTSUPP : 0;
		/* Fall Through */
	default:
		if(wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
			return -EOPNOTSUPP;

		/* Rebuild the WUFC filter bits from the requested flags */
		adapter->wol = 0;

		if(wol->wolopts & WAKE_UCAST)
			adapter->wol |= E1000_WUFC_EX;
		if(wol->wolopts & WAKE_MCAST)
			adapter->wol |= E1000_WUFC_MC;
		if(wol->wolopts & WAKE_BCAST)
			adapter->wol |= E1000_WUFC_BC;
		if(wol->wolopts & WAKE_MAGIC)
			adapter->wol |= E1000_WUFC_MAG;
	}

	return 0;
}
/*
 * dna_cleanup_tx_ring - reset a DNA-managed transmit ring to an empty state.
 * @tx_ring: ring to clean.  NOTE(review): parameter is declared as
 *           struct ixgbe_ring * in this igb/e1000 file -- looks like a
 *           copy/paste from the ixgbe DNA driver; confirm the type is
 *           intended before changing it.
 *
 * Clears the status word of every descriptor, restores each buffer address
 * from the shadow descriptors (stored at offset count+i in the same
 * descriptor array), and sets TDT equal to TDH.
 */
void dna_cleanup_tx_ring(struct ixgbe_ring *tx_ring)
{
  struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
  struct e1000_hw *hw = &adapter->hw;
  union e1000_adv_tx_desc *tx_desc, *shadow_tx_desc;
  u32 tail;
  u32 head = E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx));
  u32 i;

  /* resetting all */
  for (i=0; i<tx_ring->count; i++) {
    tx_desc = IGB_TX_DESC(tx_ring, i);
    /* The shadow copy of descriptor i lives at slot count + i */
    shadow_tx_desc = IGB_TX_DESC(tx_ring, i + tx_ring->count);
    tx_desc->read.olinfo_status = 0;
    tx_desc->read.buffer_addr = shadow_tx_desc->read.buffer_addr;
  }

  /* tail == head: no descriptors pending from the NIC's point of view */
  tail = head; //(head + 1) % tx_ring->count;
  E1000_WRITE_REG(hw, E1000_TDT(tx_ring->reg_idx), tail);
}
/** * e1000_setup_copper_link_82540 - Configure copper link settings * @hw: pointer to the HW structure * * Calls the appropriate function to configure the link for auto-neg or forced * speed and duplex. Then we check for link, once link is established calls * to configure collision distance and flow control are called. If link is * not established, we return -E1000_ERR_PHY (-2). **/ static s32 e1000_setup_copper_link_82540(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; u16 data; DEBUGFUNC("e1000_setup_copper_link_82540"); ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ret_val = e1000_set_phy_mode_82540(hw); if (ret_val) goto out; if (hw->mac.type == e1000_82545_rev_3 || hw->mac.type == e1000_82546_rev_3) { ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &data); if (ret_val) goto out; data |= 0x00000008; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, data); if (ret_val) goto out; } ret_val = e1000_copper_link_setup_m88(hw); if (ret_val) goto out; ret_val = e1000_setup_copper_link_generic(hw); out: return ret_val; }
/*
 * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
 * @hw: pointer to the HW structure
 * @buffer: pointer to the host interface
 * @length: size of the buffer
 *
 * Writes the DHCP information to the host interface.
 */
s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
				      u16 length)
{
	struct e1000_host_mng_command_header hdr;
	s32 ret_val;
	u32 hicr;

	DEBUGFUNC("e1000_mng_write_dhcp_info_generic");

	/* Build the command header; checksum is filled in during the write */
	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
	hdr.command_length = length;
	hdr.reserved1 = 0;
	hdr.reserved2 = 0;
	hdr.checksum = 0;

	/* Enable the host interface */
	ret_val = hw->mac.ops.mng_enable_host_if(hw);
	if (ret_val)
		return (ret_val);

	/* Populate the host interface with the contents of "buffer". */
	ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length,
						sizeof (hdr), &(hdr.checksum));
	if (ret_val)
		return (ret_val);

	/* Write the manageability command header */
	ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr);
	if (ret_val)
		return (ret_val);

	/* Tell the ARC a new command is pending. */
	hicr = E1000_READ_REG(hw, E1000_HICR);
	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);

	return (ret_val);
}
/**
 * e1000_poll - Poll for received packets
 *
 * @v netdev	Network device
 */
static void e1000e_poll ( struct net_device *netdev )
{
	struct e1000_adapter *adapter = netdev_priv ( netdev );
	uint32_t pending;

	DBGP ( "e1000_poll\n" );

	/* Reading ICR both fetches and acknowledges the interrupt causes */
	pending = E1000_READ_REG ( &adapter->hw, E1000_ICR );
	if ( ! pending )
		return;

	DBG ( "e1000_poll: intr_status = %#08x\n", pending );

	/* Reap completed transmits, hand up received frames, restock RX */
	e1000e_process_tx_packets ( netdev );
	e1000e_process_rx_packets ( netdev );
	e1000e_refill_rx_ring ( adapter );
}
/** * e1000e_open - Called when a network interface is made active * * @v netdev network interface device structure * @ret rc Return status code, 0 on success, negative value on failure * **/ static int e1000e_open ( struct net_device *netdev ) { struct e1000_adapter *adapter = netdev_priv(netdev); int err; DBGP ( "e1000e_open\n" ); /* allocate transmit descriptors */ err = e1000e_setup_tx_resources ( adapter ); if ( err ) { DBG ( "Error setting up TX resources!\n" ); goto err_setup_tx; } /* allocate receive descriptors */ err = e1000e_setup_rx_resources ( adapter ); if ( err ) { DBG ( "Error setting up RX resources!\n" ); goto err_setup_rx; } e1000e_configure_tx ( adapter ); e1000e_configure_rx ( adapter ); DBG ( "E1000_RXDCTL(0): %#08x\n", E1000_READ_REG ( &adapter->hw, E1000_RXDCTL(0) ) ); return 0; err_setup_rx: DBG ( "err_setup_rx\n" ); e1000e_free_tx_resources ( adapter ); err_setup_tx: DBG ( "err_setup_tx\n" ); e1000e_reset ( adapter ); return err; }
/**
 * igb_ptp_adjfreq_82580 - adjust the frequency of the 82580-family PHC
 * @ptp: ptp_clock_info embedded in the adapter
 * @ppb: requested frequency adjustment in parts per billion (may be negative)
 *
 * Converts |ppb| into a TIMINCA increment value (scaled by 2^26 / 1953125),
 * halving the effective adjustment for I354 parts running a 2.5G SKU,
 * and writes it with the sign bit if the adjustment was negative.
 */
static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
{
	struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
					       ptp_caps);
	struct e1000_hw *hw = &igb->hw;
	int negative = 0;
	u64 scaled;
	u32 incvalue;

	if (ppb < 0) {
		negative = 1;
		ppb = -ppb;
	}

	/* scaled = |ppb| * 2^26 / 1953125 */
	scaled = (u64)ppb << 26;
	scaled = div_u64(scaled, 1953125);

	/* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x
	 * as quickly. Account for this by dividing the adjustment by 2.5.
	 */
	if (hw->mac.type == e1000_i354) {
		u32 link_status = E1000_READ_REG(hw, E1000_STATUS);

		if ((link_status & E1000_STATUS_2P5_SKU) &&
		    !(link_status & E1000_STATUS_2P5_SKU_OVER)) {
			scaled <<= 1;
			scaled = div_u64(scaled, 5);
		}
	}

	incvalue = scaled & INCVALUE_MASK;
	if (negative)
		incvalue |= ISGN;
	E1000_WRITE_REG(hw, E1000_TIMINCA, incvalue);

	return 0;
}