/**
 * igb_clear_vfta - Clear VLAN filter table
 * @hw: pointer to the HW structure
 *
 * Clears the register array which contains the VLAN filter table by
 * setting all the values to 0.
 **/
void igb_clear_vfta(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		array_wr32(E1000_VFTA, offset, 0);
		wrfl();
	}
}
/**
 * igb_write_vfta_i350 - Write value to VLAN filter table
 * @hw: pointer to the HW structure
 * @offset: register offset in VLAN filter table
 * @value: register value written to VLAN filter table
 *
 * Writes value at the given offset in the register array which stores
 * the VLAN filter table.
 **/
static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{
	int i;

	for (i = 0; i < 10; i++)
		array_wr32(E1000_VFTA, offset, value);
	wrfl();
}
/**
 * igb_reset_hw_82575 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.  This is a
 * function pointer entry point called by the api module.
 **/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl, icr;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igb_set_pcie_completion_timeout(hw);
	if (ret_val) {
		hw_dbg("PCI-E Set completion timeout has failed.\n");
	}

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);

	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	ctrl = rd32(E1000_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}
/**
 * igb_clear_vfta_i350 - Clear VLAN filter table
 * @hw: pointer to the HW structure
 *
 * Clears the register array which contains the VLAN filter table by
 * setting all the values to 0.
 **/
void igb_clear_vfta_i350(struct e1000_hw *hw)
{
	u32 offset;
	int i;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		for (i = 0; i < 10; i++)
			array_wr32(E1000_VFTA, offset, 0);
		wrfl();
	}
}
/**
 * igb_ptp_reset - Re-enable the adapter for PTP following a reset.
 * @adapter: Board private structure.
 *
 * This function handles the reset work required to re-enable the PTP device.
 **/
void igb_ptp_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;

	/* reset the tstamp_config */
	igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);

	spin_lock_irqsave(&adapter->tmreg_lock, flags);

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* Dial the nominal frequency. */
		wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
		break;
	case e1000_82580:
	case e1000_i354:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		wr32(E1000_TSAUXC, 0x0);
		wr32(E1000_TSSDP, 0x0);
		wr32(E1000_TSIM,
		     TSYNC_INTERRUPTS |
		     (adapter->pps_sys_wrap_on ? TSINTR_SYS_WRAP : 0));
		wr32(E1000_IMS, E1000_IMS_TS);
		break;
	default:
		/* No work to do. */
		goto out;
	}

	/* Re-initialize the timer. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
		struct timespec64 ts = ktime_to_timespec64(ktime_get_real());

		igb_ptp_write_i210(adapter, &ts);
	} else {
		timecounter_init(&adapter->tc, &adapter->cc,
				 ktime_to_ns(ktime_get_real()));
	}
out:
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	wrfl();

	if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
		schedule_delayed_work(&adapter->ptp_overflow_work,
				      IGB_SYSTIM_OVERFLOW_PERIOD);
}
/**
 * igb_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of fiber serdes, shut down optics and PCS on driver unload
 * when management pass thru is not enabled.
 **/
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;
	u16 eeprom_data = 0;

	if (hw->phy.media_type != e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		return;

	if (hw->bus.func == E1000_FUNC_0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type == e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == E1000_FUNC_1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	/* If APM is not enabled in the EEPROM and management interface is
	 * not enabled, then power down.
	 */
	if (!(eeprom_data & E1000_NVM_APME_82575) &&
	    !igb_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = rd32(E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		wr32(E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = rd32(E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		wr32(E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		wrfl();
		msleep(1);
	}

	return;
}
/**
 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 * @hw: pointer to the HW structure
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	msleep(1);
}
/**
 * igb_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of fiber serdes, shut down optics and PCS on driver unload
 * when management pass thru is not enabled.
 **/
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
	    igb_sgmii_active_82575(hw))
		return;

	if (!igb_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = rd32(E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		wr32(E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = rd32(E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		wr32(E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		wrfl();
		msleep(1);
	}
}
/**
 * igb_write_vfta - Write value to VLAN filter table
 * @hw: pointer to the HW structure
 * @offset: register offset in VLAN filter table
 * @value: register value written to VLAN filter table
 *
 * Writes value at the given offset in the register array which stores
 * the VLAN filter table.
 **/
static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	array_wr32(E1000_VFTA, offset, value);
	wrfl();
}
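/* Context (editor's sketch, not driver code): the VLAN filter table written by
 * igb_write_vfta()/igb_write_vfta_i350() is a flat bitmap of
 * E1000_VLAN_FILTER_TBL_SIZE 32-bit registers, one bit per possible VLAN ID.
 * The helper below is an illustrative, hypothetical example of the usual
 * e1000/igb offset/bit split for a 12-bit VLAN ID; the shadow array and the
 * helper name are assumptions made for this example only.
 */
static void igb_vfta_set_bit_sketch(u32 *shadow_vfta, u16 vid)
{
	u32 offset = (vid >> 5) & 0x7F;	/* upper 7 bits select the register */
	u32 bit = vid & 0x1F;		/* lower 5 bits select the bit */

	shadow_vfta[offset] |= BIT(bit);
	/* A real caller would then push the updated word to hardware,
	 * e.g. via hw->mac.ops.write_vfta(hw, offset, shadow_vfta[offset]).
	 */
}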
void igb_ptp_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0)
	int i;
#endif

	switch (hw->mac.type) {
	case e1000_82576:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 999999881;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
		adapter->ptp_caps.settime = igb_ptp_settime_82576;
		adapter->ptp_caps.enable = igb_ptp_feature_enable;
		adapter->cc.read = igb_ptp_read_82576;
		adapter->cc.mask = CYCLECOUNTER_MASK(64);
		adapter->cc.mult = 1;
		adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
		/* Dial the nominal frequency. */
		wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
		break;
	case e1000_82580:
	case e1000_i354:
	case e1000_i350:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 62499999;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
		adapter->ptp_caps.settime = igb_ptp_settime_82576;
		adapter->ptp_caps.enable = igb_ptp_feature_enable;
		adapter->cc.read = igb_ptp_read_82580;
		adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
		adapter->cc.mult = 1;
		adapter->cc.shift = 0;
		/* Enable the timer functions by clearing bit 31. */
		wr32(E1000_TSAUXC, 0x0);
		break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0)
	case e1000_i210:
	case e1000_i211:
		for (i = 0; i < IGB_N_SDP; i++) {
			struct ptp_pin_desc *ppd = &adapter->sdp_config[i];

			snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
			ppd->index = i;
			ppd->func = PTP_PF_NONE;
		}
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 62499999;
		adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
		adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
		adapter->ptp_caps.n_pins = IGB_N_SDP;
		adapter->ptp_caps.pps = 1;
		adapter->ptp_caps.pin_config = adapter->sdp_config;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
		adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
		adapter->ptp_caps.settime = igb_ptp_settime_i210;
		adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
		adapter->ptp_caps.verify = igb_ptp_verify_pin;
		/* Enable the timer functions by clearing bit 31. */
		wr32(E1000_TSAUXC, 0x0);
		break;
#endif
	default:
		adapter->ptp_clock = NULL;
		return;
	}

	wrfl();

	spin_lock_init(&adapter->tmreg_lock);
	INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);

	/* Initialize the clock and overflow work for devices that need it. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0)
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
		struct timespec ts = ktime_to_timespec(ktime_get_real());

		igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
	} else
#endif
	{
		timecounter_init(&adapter->tc, &adapter->cc,
				 ktime_to_ns(ktime_get_real()));

		INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
				  igb_ptp_overflow_check);

		schedule_delayed_work(&adapter->ptp_overflow_work,
				      IGB_SYSTIM_OVERFLOW_PERIOD);
	}

	/* Initialize the time sync interrupts for devices that support it. */
	if (hw->mac.type >= e1000_82580) {
		wr32(E1000_TSIM, TSYNC_INTERRUPTS);
		wr32(E1000_IMS, E1000_IMS_TS);
	}

	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
						&adapter->pdev->dev);
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
	} else {
		dev_info(&adapter->pdev->dev, "added PHC on %s\n",
			 adapter->netdev->name);
		adapter->flags |= IGB_FLAG_PTP;
	}
}
/**
 * igb_get_phy_id_82575 - Retrieve PHY addr and id
 * @hw: pointer to the HW structure
 *
 * Retrieves the PHY address and ID for both PHYs which do and do not use
 * the SGMII interface.
 **/
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;

	/* For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works.  For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work.  The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			/* At the time of this writing, The M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}
/**
 * igb_ptp_hwtstamp_ioctl - control hardware time stamping
 * @netdev: pointer to the net device structure
 * @ifr: interface request containing the user hwtstamp_config
 * @cmd: ioctl command
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 **/
int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
			   struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/* 82576 cannot timestamp all packets, which it needs to do to
		 * support both V1 Sync and Delay_Req messages
		 */
		if (hw->mac.type != e1000_82576) {
			tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			break;
		}
		/* fall through */
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* Per-packet timestamping only works if all packets are
	 * timestamped, so enable timestamping in all packets as
	 * long as one Rx filter was configured.
	 */
	if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
		tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		is_l2 = true;
		is_l4 = true;

		if ((hw->mac.type == e1000_i210) ||
		    (hw->mac.type == e1000_i211)) {
			regval = rd32(E1000_RXPBS);
			regval |= E1000_RXPBS_CFG_TS_EN;
			wr32(E1000_RXPBS, regval);
		}
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 |          /* enable timestamping */
		      ETH_P_1588));              /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPL);
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPL);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
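/* Context (editor's sketch, not driver code): a minimal userspace example of
 * reaching igb_ptp_hwtstamp_ioctl() through the standard SIOCSHWTSTAMP ioctl.
 * The interface name and the reduced error handling are assumptions made for
 * the illustration only.
 */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int request_hw_timestamping(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.flags = 0,				/* reserved, must be zero */
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* The driver may rewrite cfg.rx_filter (e.g. to HWTSTAMP_FILTER_ALL on
	 * 82580 and later), so re-check cfg after a successful call.
	 */
	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return ret;
}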
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl, icr;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	hw->dev_spec._82575.global_device_reset = false;

	ctrl = rd32(E1000_CTRL);

	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	if (global_device_reset &&
	    igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
		global_device_reset = false;

	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	if (global_device_reset)
		msleep(5);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		hw_dbg("Auto Read Done did not complete\n");
	}

	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	ret_val = igb_check_alt_mac_addr(hw);

	if (global_device_reset)
		igb_release_swfw_sync_82575(hw, swmbsw_mask);

	return ret_val;
}
/**
 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo.  This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msleep(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	msleep(2);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}
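/* Context (editor's sketch, not driver code): the flush is meant to run right
 * after receives are (re)enabled, so packets that management pass-through let
 * in while RCTL.EN was being set are discarded before real traffic flows.
 * The wrapper and every helper other than igb_rx_fifo_flush_82575() below are
 * hypothetical stand-ins for the driver's actual RX bring-up path.
 */
static void igb_configure_rx_sketch(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	setup_rctl_sketch(adapter);	/* hypothetical: program RCTL, set EN */

	/* On 82575 with E1000_MANC_RCV_TCO_EN set, drop anything the
	 * management engine pushed into the RX FIFO during enable.
	 */
	igb_rx_fifo_flush_82575(hw);

	refill_rx_rings_sketch(adapter);	/* hypothetical: post RX buffers */
}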
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	if (igb_sgmii_uses_mdio_82575(hw)) {
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = rd32(E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
			mdic = rd32(E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
			break;
		}
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}
void igb_ptp_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	switch (hw->mac.type) {
	case e1000_82576:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 999999881;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
		adapter->ptp_caps.settime = igb_ptp_settime_82576;
		adapter->ptp_caps.enable = igb_ptp_enable;
		adapter->cc.read = igb_ptp_read_82576;
		adapter->cc.mask = CLOCKSOURCE_MASK(64);
		adapter->cc.mult = 1;
		adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
		/* Dial the nominal frequency. */
		wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
		break;
	case e1000_82580:
	case e1000_i354:
	case e1000_i350:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 62499999;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
		adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
		adapter->ptp_caps.settime = igb_ptp_settime_82576;
		adapter->ptp_caps.enable = igb_ptp_enable;
		adapter->cc.read = igb_ptp_read_82580;
		adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
		adapter->cc.mult = 1;
		adapter->cc.shift = 0;
		/* Enable the timer functions by clearing bit 31. */
		wr32(E1000_TSAUXC, 0x0);
		break;
	case e1000_i210:
	case e1000_i211:
		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
		adapter->ptp_caps.owner = THIS_MODULE;
		adapter->ptp_caps.max_adj = 62499999;
		adapter->ptp_caps.n_ext_ts = 0;
		adapter->ptp_caps.pps = 0;
		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
		adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
		adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
		adapter->ptp_caps.settime = igb_ptp_settime_i210;
		adapter->ptp_caps.enable = igb_ptp_enable;
		/* Enable the timer functions by clearing bit 31. */
		wr32(E1000_TSAUXC, 0x0);
		break;
	default:
		adapter->ptp_clock = NULL;
		return;
	}

	wrfl();

	spin_lock_init(&adapter->tmreg_lock);
	INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);

	/* Initialize the clock and overflow work for devices that need it. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
		struct timespec ts = ktime_to_timespec(ktime_get_real());

		igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
	} else {
		timecounter_init(&adapter->tc, &adapter->cc,
				 ktime_to_ns(ktime_get_real()));

		INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
				  igb_ptp_overflow_check);

		schedule_delayed_work(&adapter->ptp_overflow_work,
				      IGB_SYSTIM_OVERFLOW_PERIOD);
	}

	/* Initialize the time sync interrupts for devices that support it. */
	if (hw->mac.type >= e1000_82580) {
		wr32(E1000_TSIM, E1000_TSIM_TXTS);
		wr32(E1000_IMS, E1000_IMS_TS);
	}

	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps);
	if (IS_ERR(adapter->ptp_clock)) {
		adapter->ptp_clock = NULL;
		dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
	} else {
		dev_info(&adapter->pdev->dev, "added PHC on %s\n",
			 adapter->netdev->name);
		adapter->flags |= IGB_FLAG_PTP;
	}
}
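/* Context (editor's sketch, not kernel code): the cc.mult/cc.shift values set
 * up above feed the generic cyclecounter/timecounter machinery, which derives
 * nanoseconds as (cycles * mult) >> shift.  The standalone helper below only
 * illustrates that arithmetic for the 82576 configuration (mult == 1,
 * shift == IGB_82576_TSYNC_SHIFT).
 */
static inline u64 igb_cycles_to_ns_sketch(u64 cycles, u32 mult, u32 shift)
{
	/* With mult == 1 this reduces to a right shift that strips the
	 * sub-nanosecond fraction carried in the raw SYSTIM value.
	 */
	return (cycles * mult) >> shift;
}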
/**
 * igb_reset_hw_82580 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets function or entire device (all ports, etc.)
 * to a known state.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl, icr;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	hw->dev_spec._82575.global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	msleep(10);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset &&
	    igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
		global_device_reset = false;

	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);

	/* Add delay to insure DEV_RST has time to complete */
	if (global_device_reset)
		msleep(5);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	icr = rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		igb_release_swfw_sync_82575(hw, swmbsw_mask);

	return ret_val;
}