/**
 * ixgbe_ptp_setup_sdp - enable/disable the 1 Hz clock-out (PPS) on SDP0
 * @adapter: the adapter private structure
 *
 * This function enables or disables the clock out feature on SDP0 for
 * the X540 device. It will create a 1second periodic output that can
 * be used as the PPS (via an interrupt).
 *
 * It calculates when the systime will be on an exact second, and then
 * aligns the start of the PPS signal to that value. The shift is
 * necessary because it can change based on the link speed.
 *
 * NOTE(review): the tail of this function (the disable/else branch) is
 * not visible in this chunk; only the enable path is documented here.
 */
static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	/* cyclecounter shift; converts ns to shifted cycle units below */
	int shift = adapter->hw_cc.shift;
	u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem;
	u64 ns = 0, clock_edge = 0;

	/* PPS output is only wired up for the X540 MAC */
	if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) &&
	    (hw->mac.type == ixgbe_mac_X540)) {
		/* disable the pin first */
		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
		IXGBE_WRITE_FLUSH(hw);

		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);

		/*
		 * enable the SDP0 pin as output, and connected to the
		 * native function for Timesync (ClockOut)
		 */
		esdp |= (IXGBE_ESDP_SDP0_DIR | IXGBE_ESDP_SDP0_NATIVE);

		/*
		 * enable the Clock Out feature on SDP0, and allow
		 * interrupts to occur when the pin changes
		 */
		tsauxc = (IXGBE_TSAUXC_EN_CLK | IXGBE_TSAUXC_SYNCLK |
			  IXGBE_TSAUXC_SDP0_INT);

		/* clock period (or pulse length): one second, kept in the
		 * same shifted units the SYSTIM counter runs in
		 */
		clktiml = (u32)(NSECS_PER_SEC << shift);
		clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);

		/*
		 * Account for the cyclecounter wrap-around value by
		 * using the converted ns value of the current time to
		 * check for when the next aligned second would occur.
		 */
		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
		ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge);

		/* rem = ns past the last whole second; advance the raw
		 * counter value to the next whole-second boundary
		 */
		div_u64_rem(ns, NSECS_PER_SEC, &rem);
		clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift);

		/* specify the initial clock start time */
		trgttiml = (u32)clock_edge;
		trgttimh = (u32)(clock_edge >> 32);

		IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
		IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);

		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
		IXGBE_WRITE_FLUSH(hw);
	} else {
/** * ixgbe_ipsec_stop_data * @adapter: board private structure **/ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; bool link = adapter->link_up; u32 t_rdy, r_rdy; u32 limit; u32 reg; /* halt data paths */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); reg |= IXGBE_SECTXCTRL_TX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg); reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); reg |= IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); IXGBE_WRITE_FLUSH(hw); /* If the tx fifo doesn't have link, but still has data, * we can't clear the tx sec block. Set the MAC loopback * before block clear */ if (!link) { reg = IXGBE_READ_REG(hw, IXGBE_MACC); reg |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg); reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); reg |= IXGBE_HLREG0_LPBK; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); IXGBE_WRITE_FLUSH(hw); mdelay(3); } /* wait for the paths to empty */ limit = 20; do { mdelay(10); t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & IXGBE_SECTXSTAT_SECTX_RDY; r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & IXGBE_SECRXSTAT_SECRX_RDY; } while (!t_rdy && !r_rdy && limit--); /* undo loopback if we played with it earlier */ if (!link) { reg = IXGBE_READ_REG(hw, IXGBE_MACC); reg &= ~IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg); reg = IXGBE_READ_REG(hw, IXGBE_HLREG0); reg &= ~IXGBE_HLREG0_LPBK; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg); IXGBE_WRITE_FLUSH(hw); } }
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) { u32 gcr_ext, hlreg0; /* * If double reset is not requested then all transactions should * already be clear and as such there is no work to do */ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) return; /* * Set loopback enable to prevent any transmits from being sent * should the link come up. This assumes that the RXCTRL.RXEN bit * has already been cleared. */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); /* Flush all writes and allow 20usec for all transactions to clear */ IXGBE_WRITE_FLUSH(hw); udelay(20); /* restore previous register values */ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); }
/**
 * ixgbe_blink_led_start_82598 - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 *
 * Auto-blink requires link on the 82598EB MAC, so link-up is forced
 * (AUTOC.FLU) when no link is present before the LED is switched to
 * blink mode.
 **/
int32_t ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, uint32_t index)
{
	uint32_t autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	uint32_t ledctl = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ixgbe_link_speed speed = 0;
	int link_up = 0;

	/* force link up if it is currently down */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (!link_up) {
		autoc |= IXGBE_AUTOC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
		msec_delay(10);
	}

	/* clear this LED's mode bits, then select blink mode */
	ledctl &= ~IXGBE_LED_MODE_MASK(index);
	ledctl |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; u32 regval; /* Clear the rate limiters */ for (i = 0; i < hw->mac.max_tx_queues; i++) { IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); } IXGBE_WRITE_FLUSH(hw); /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); } for (i = 0; i < hw->mac.max_rx_queues; i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } return 0; }
/** * ixgbe_ipsec_start_engine * @adapter: board private structure * * NOTE: this increases power consumption whether being used or not **/ static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 reg; ixgbe_ipsec_stop_data(adapter); /* Set minimum IFG between packets to 3 */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); reg = (reg & 0xfffffff0) | 0x3; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); /* Set "tx security buffer almost full threshold" to 0x15 so that the * almost full indication is generated only after buffer contains at * least an entire jumbo packet. */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF); reg = (reg & 0xfffffc00) | 0x15; IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg); /* restart the data paths by clearing the DISABLE bits */ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0); IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD); /* enable Rx and Tx SA lookup */ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN); IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN); IXGBE_WRITE_FLUSH(hw); }
/** * ixgbe_ipsec_stop_engine * @adapter: board private structure **/ static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 reg; ixgbe_ipsec_stop_data(adapter); /* disable Rx and Tx SA lookup */ IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0); IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0); /* disable the Rx and Tx engines and full packet store-n-forward */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); reg |= IXGBE_SECTXCTRL_SECTX_DIS; reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD; IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg); reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); reg |= IXGBE_SECRXCTRL_SECRX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg); /* restore the "tx security buffer almost full threshold" to 0x250 */ IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250); /* Set minimum IFG between packets back to the default 0x1 */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); reg = (reg & 0xfffffff0) | 0x1; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); /* final set for normal (no ipsec offload) processing */ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS); IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS); IXGBE_WRITE_FLUSH(hw); }
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) { s32 ret_val; u32 ctrl_ext; /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); /* PHY ops initialization must be done in reset_hw() */ /* Clear the VLAN filter table */ hw->mac.ops.clear_vfta(hw); /* Clear statistics registers */ hw->mac.ops.clear_hw_cntrs(hw); /* Set No Snoop Disable */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); IXGBE_WRITE_FLUSH(hw); /* Setup flow control */ ret_val = ixgbe_setup_fc(hw); if (ret_val != 0) goto out; /* Clear adapter stopped flag */ hw->adapter_stopped = false; out: return ret_val; }
/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 *
 * Stages the key (in reversed word order, big-endian) and salt in the
 * input registers, then triggers a write into the Tx SA table at @idx.
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 idxreg;
	int w;

	/* stage the key words and the salt */
	for (w = 0; w < 4; w++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(w),
				cpu_to_be32(key[3 - w]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	/* commit the staged data into the table slot @idx */
	idxreg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	idxreg &= IXGBE_RXTXIDX_IPS_EN;
	idxreg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, idxreg);
	IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 *
 * Stages all four address words, then commits them into the Rx IP
 * table at @idx.
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int w;

	/* stage the four words of the address */
	for (w = 0; w < 4; w++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(w),
				cpu_to_le32(addr[w]));
	IXGBE_WRITE_FLUSH(hw);

	/* trigger the table write */
	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}
/**
 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
 * @hw: pointer to hardware structure
 * @index: led number to turn off
 *
 * Return: always 0.
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 ledctl = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* clear this LED's mode field, then select the OFF mode */
	ledctl &= ~IXGBE_LED_MODE_MASK(index);
	ledctl |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
/**
 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
 * @hw: pointer to hardware structure
 *
 * On the second LAN port, SDP1 is taken out of native mode and its
 * direction bit set; SDP0 is taken out of native mode with its
 * direction bit cleared on both ports.
 **/
static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
{
	u32 val = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/* second port only: reconfigure SDP1 */
	if (hw->bus.lan_id) {
		val &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
		val |= IXGBE_ESDP_SDP1_DIR;
	}

	/* both ports: SDP0 non-native, direction cleared */
	val &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, val);
	IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 *
 * Commits the SPI/IP-index pair into the Rx SPI table, then the key,
 * salt and mode into the Rx key table, both at slot @idx.
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int w;

	/* stage the SPI and its related IP-table index, then commit
	 * them into the SPI table
	 */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* stage the key (reversed word order), salt and decrypt mode,
	 * then commit them into the key table
	 */
	for (w = 0; w < 4; w++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(w),
				cpu_to_be32(key[3 - w]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}
/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 cmd;

	/* keep only the enable bit, then add table select, slot index
	 * and the WRITE trigger
	 */
	cmd = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	cmd &= IXGBE_RXTXIDX_IPS_EN;
	cmd |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, cmd);
	IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 *
 * Drops the forced link-up (AUTOC.FLU) that may have been set when the
 * blink was started, and clears the LED's mode and blink bits.
 **/
int32_t ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, uint32_t index)
{
	uint32_t autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	uint32_t ledctl = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* stop forcing link up */
	autoc &= ~IXGBE_AUTOC_FLU;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);

	/* clear both the mode field and the blink bit for this LED */
	ledctl &= ~IXGBE_LED_MODE_MASK(index);
	ledctl &= ~IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}
/** * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. **/ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) { u32 number_of_queues; u32 reg_val; u16 i; /* * Set the adapter_stopped flag so other driver functions stop touching * the hardware */ hw->adapter_stopped = true; /* Disable the receive unit */ reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); reg_val &= ~(IXGBE_RXCTRL_RXEN); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); IXGBE_WRITE_FLUSH(hw); msleep(2); /* Clear interrupt mask to stop from interrupts being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); /* Clear any pending interrupts */ IXGBE_READ_REG(hw, IXGBE_EICR); /* Disable the transmit unit. Each queue must be disabled. */ number_of_queues = hw->mac.max_tx_queues; for (i = 0; i < number_of_queues; i++) { reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); if (reg_val & IXGBE_TXDCTL_ENABLE) { reg_val &= ~IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val); } } /* * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests */ if (ixgbe_disable_pcie_master(hw) != 0) hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); return 0; }
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw){ u32 reg_val; u16 i; /* * Set the adapter_stopped flag so other driver functions stop touching * the hardware */ hw->adapter_stopped = true; /* Disable the receive unit */ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); /* Clear any pending interrupts, flush previous writes */ IXGBE_READ_REG(hw, IXGBE_EICR); /* Disable the transmit unit. Each queue must be disabled. */ for (i = 0; i < hw->mac.max_tx_queues; i++) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); /* Disable the receive unit by stopping each queue */ for (i = 0; i < hw->mac.max_rx_queues; i++) { reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); reg_val &= ~IXGBE_RXDCTL_ENABLE; reg_val |= IXGBE_RXDCTL_SWFLSH; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); } /* flush all queues disables */ IXGBE_WRITE_FLUSH(hw); msleep(2); /* * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests */ return ixgbe_disable_pcie_master(hw); }
/** * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, clears * all on chip counters, initializes receive address registers, multicast * table, VLAN filter table, calls routine to set up link and flow control * settings, and leaves transmit and receive units disabled and uninitialized **/ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) { u32 ctrl_ext; /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); /* Identify the PHY */ hw->phy.ops.identify(hw); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table */ hw->mac.ops.init_rx_addrs(hw); /* Clear the VLAN filter table */ hw->mac.ops.clear_vfta(hw); /* Set up link */ hw->mac.ops.setup_link(hw); /* Clear statistics registers */ hw->mac.ops.clear_hw_cntrs(hw); /* Set No Snoop Disable */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); IXGBE_WRITE_FLUSH(hw); /* Clear adapter stopped flag */ hw->adapter_stopped = false; return 0; }
/**
 * ixgbe_reset_hw_X540 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
	s32 status;
	u32 ctrl, i;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != 0)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

mac_reset_top:
	/* OR the reset request into the current CTRL contents */
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		/* reset bit never cleared: report failure but continue
		 * the remaining restore steps
		 */
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}
	msleep(100);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table. Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 * NOTE(review): IXGBE_X540_MAX_TX_QUEUES is presumably 128 per the
	 * comment above — confirm against the register definitions.
	 */
	hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}
/**
 * ixgbe_setup_mac_link_multispeed_fixed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 *
 * Tries each requested speed in software, highest first (10G then 1G),
 * because 10gb fiber doesn't support speed autonegotiation.  If no speed
 * links up and more than one was tried, recurses once with only the
 * highest requested speed.
 **/
static s32 ixgbe_setup_mac_link_multispeed_fixed_fiber(struct ixgbe_hw *hw,
				ixgbe_link_speed speed, bool autoneg,
				bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;		/* how many speeds we attempted */
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool link_up = false;
	bool negotiation;

	DEBUGFUNC("");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first. We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_10GB_FULL);

		/* Drive SDP5 high to select the 10G module rate */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, false);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		ixgbe_set_fiber_fixed_speed(hw, IXGBE_LINK_SPEED_1GB_FULL);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link. Configure back to the highest speed we tried,
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fixed_fiber(hw,
			highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
/**
 * ixgbe_reset_hw_X540 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, and perform a reset.
 *
 * The SW/FW semaphore is held around the CTRL.RST write so firmware and
 * software do not race on the reset.
 **/
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
	s32 status;
	u32 ctrl, i;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;

	DEBUGFUNC("ixgbe_reset_hw_X540");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

mac_reset_top:
	/* take the SW/FW semaphore before touching CTRL */
	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (status != IXGBE_SUCCESS) {
		ERROR_REPORT2(IXGBE_ERROR_CAUTION,
			      "semaphore failed with %d", status);
		return IXGBE_ERR_SWFW_SYNC;
	}
	/* OR the reset request into the current CTRL contents */
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		/* reset never self-cleared: report but keep going with
		 * the restore steps below
		 */
		status = IXGBE_ERR_RESET_FAILED;
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "Reset polling failed to complete.\n");
	}
	msec_delay(100);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}
/**
 * ixgbe_reset_hw_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 ctrl;
	u32 gheccr;
	u32 i;
	u32 autoc;
	u8  analog_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	ixgbe_stop_adapter(hw);

	/*
	 * Power up the Atlas TX lanes if they are currently powered down.
	 * Atlas TX lanes are powered down for MAC loopback tests, but
	 * they are not automatically restored on reset.
	 */
	ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
	if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
		/* Enable TX Atlas so packets can be transmitted again */
		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val);

		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val);

		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val);

		ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val);
		analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val);
	}

	/* Reset PHY */
	ixgbe_reset_phy(hw);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	if (ixgbe_disable_pcie_master(hw) != 0) {
		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
	}

	/*
	 * Issue global reset to the MAC. This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST))
			break;
	}
	if (ctrl & IXGBE_CTRL_RST) {
		/* reset never self-cleared: record the failure but keep
		 * going with the restore steps below
		 */
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/* clear error-correction bits; purpose of the individual bits is
	 * not documented here — see the 82598 datasheet GHECCR description
	 */
	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * AUTOC register which stores link settings gets cleared
	 * and reloaded from EEPROM after reset. We need to restore
	 * our stored value from init in case SW changed the attach
	 * type or speed. If this is the first time and link settings
	 * have not been stored, store default settings from AUTOC.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.link_settings_loaded) {
		autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE);
		autoc &= ~(IXGBE_AUTOC_LMS_MASK);
		autoc |= hw->mac.link_attach_type;
		autoc |= hw->mac.link_mode_select;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	} else {
		hw->mac.link_attach_type =
					 (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
		hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
		hw->mac.link_settings_loaded = true;
	}

	/* Store the permanent mac address */
	ixgbe_get_mac_addr(hw, hw->mac.perm_addr);

	return status;
}
/**
 * ixgbe_setup_mac_link_82598 - Configures MAC link settings
 * @hw: pointer to hardware structure
 *
 * Configures link settings based on values in the ixgbe_hw struct.
 * Restarts the link. Performs autonegotiation if needed.
 *
 * Return: IXGBE_SUCCESS, or IXGBE_ERR_AUTONEG_NOT_COMPLETE if polling
 * for autoneg completion was requested and timed out, or the status
 * from the SerDes setup in the 1Gb case.
 **/
int32_t ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed;
	int link_up;
	uint32_t autoc_reg;
	uint32_t links_reg;
	uint32_t i;
	int32_t status = IXGBE_SUCCESS;

	/* restore any previously stored attach type / link mode */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.link_settings_loaded) {
		autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE;
		autoc_reg &= ~IXGBE_AUTOC_LMS_MASK;
		autoc_reg |= hw->mac.link_attach_type;
		autoc_reg |= hw->mac.link_mode_select;

		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
		IXGBE_WRITE_FLUSH(hw);
		msec_delay(50);
	}

	/* Restart link */
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (hw->phy.autoneg_wait_to_complete) {
		if (hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN ||
		    hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autonegotiation did not complete.\n");
			}
		}
	}

	/*
	 * We want to save off the original Flow Control configuration just in
	 * case we get disconnected and then reconnected into a different hub
	 * or switch with different Flow Control capabilities.
	 */
	hw->fc.original_type = hw->fc.type;

	/*
	 * Set up the SerDes link if in 1Gb mode, otherwise just set up
	 * 10Gb flow control.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
	if (speed == IXGBE_LINK_SPEED_1GB_FULL)
		status = ixgbe_setup_fiber_serdes_link_82598(hw);
	else
		ixgbe_setup_fc_82598(hw, 0);

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

	return status;
}
/**
 * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
 * @adapter: the private ixgbe adapter structure
 * @config: the hwtstamp configuration requested
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 * Since hardware always timestamps Path delay packets when timestamping V2
 * packets, regardless of the type specified in the register, only use V2
 * Event mode. This more accurately tells the user what the hardware is going
 * to do anyways.
 *
 * Note: this may modify the hwtstamp configuration towards a more general
 * mode, if required to support the specifically requested mode.
 *
 * Returns 0 on success, -EINVAL when reserved config flags are set, or
 * -ERANGE when the requested tx/rx mode is not supported by the hardware.
 */
static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
					struct hwtstamp_config *config)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
	bool is_l2 = false;
	u32 regval;

	/* reserved for future extensions */
	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fallthrough - OFF just clears the enable bit, then shares
		 * the accept path with ON */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/* disable RX timestamping entirely */
		tsync_rx_ctl = 0;
		tsync_rx_mtrl = 0;
		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		/* all V2 modes are widened to V2 Event (see function header);
		 * report the actual mode back to the caller */
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
		is_l2 = true;
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
	default:
		/* register RXMTRL must be set in order to do V1 packets,
		 * therefore it is not possible to time stamp both V1 Sync and
		 * Delay_Req messages unless hardware supports timestamping all
		 * packets => return error */
		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		config->rx_filter = HWTSTAMP_FILTER_NONE;
		return -ERANGE;
	}

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598 has no timestamping hardware: clear the flags and
		 * reject any request that asked for timestamping */
		adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
				    IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -ERANGE;
		return 0;
	}

	/* define ethertype filter for timestamping L2 packets */
	if (is_l2)
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
				(IXGBE_ETQF_FILTER_EN | /* enable filter */
				 IXGBE_ETQF_1588 |     /* enable timestamping */
				 ETH_P_1588));         /* 1588 eth protocol type */
	else
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);

	/* enable/disable TX */
	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl);

	IXGBE_WRITE_FLUSH(hw);

	/* clear TX/RX time stamp registers, just to be sure */
	regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
	regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);

	return 0;
}
/**
 * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
 * @adapter: pointer to adapter struct
 * @ifr: ioctl data
 * @cmd: particular ioctl requested
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * level 2 or 4".
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL for
 * reserved config flags, or -ERANGE for unsupported modes.
 */
int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
			     struct ifreq *ifr, int cmd)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_mtrl = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fallthrough - OFF shares the accept path with ON */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		/* hardware timestamps only Sync messages here, so report the
		 * narrower HWTSTAMP_FILTER_SOME back to the caller */
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_ALL:
	default:
		/*
		 * register RXMTRL must be set, therefore it is not
		 * possible to time stamp both V1 Sync and Delay_Req messages
		 * and hardware does not support timestamping all packets
		 * => return error
		 */
		return -ERANGE;
	}

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/* 82598 has no timestamping hardware; only "off" succeeds */
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -ERANGE;
		return 0;
	}

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
				(IXGBE_ETQF_FILTER_EN | /* enable filter */
				 IXGBE_ETQF_1588 |     /* enable timestamping */
				 ETH_P_1588));         /* 1588 eth protocol type */
	else
		IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
			    | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
			    | IXGBE_FTQF_QUEUE_ENABLE);
		ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
			  & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
			  & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
			 << IXGBE_FTQF_5TUPLE_MASK_SHIFT);
		IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
				(3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
				 IXGBE_IMIR_SIZE_BP_82599));

		/* enable port check: both source and destination port are set
		 * to the PTP event port (319) in network byte order */
		IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
				(htons(PTP_PORT) |
				 htons(PTP_PORT) << 16));
		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);
		tsync_rx_mtrl |= PTP_PORT << 16;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
	}

	/* enable/disable TX */
	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	regval &= ~(IXGBE_TSYNCRXCTL_ENABLED | IXGBE_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	IXGBE_WRITE_REG(hw, IXGBE_RXMTRL, tsync_rx_mtrl);

	IXGBE_WRITE_FLUSH(hw);

	/* clear TX/RX time stamp registers, just to be sure */
	regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
	regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
/**
 * ixgbe_reset_hw_X540 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status = 0;
	u32 ctrl;
	u32 ctrl_ext;
	u32 reset_bit;
	u32 i;
	u32 autoc;
	u32 autoc2;
	bool link_up = false;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	ixgbe_disable_pcie_master(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC. Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it. If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	if (hw->force_full_reset) {
		reset_bit = IXGBE_CTRL_LNK_RST;
	} else {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (!link_up)
			reset_bit = IXGBE_CTRL_LNK_RST;
		else
			reset_bit = IXGBE_CTRL_RST;
	}

	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & reset_bit))
			break;
	}
	if (ctrl & reset_bit) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete. We use 1usec since that is
	 * what is needed for ixgbe_disable_pcie_master(). The second reset
	 * then clears out any effects of those events.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		/* flag is cleared before the goto, so at most one retry */
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		udelay(1);
		goto mac_reset_top;
	}

	/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	msleep(50);

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet. Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = true;
	} else {
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
					(hw->mac.orig_autoc |
					 IXGBE_AUTOC_AN_RESTART));
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table. Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	/* NOTE(review): the MAX_TX_QUEUES constant is reused here as the RAR
	 * entry count (both presumably 128 on X540) — confirm whether a
	 * dedicated MAX_RAR_ENTRIES constant should be used instead. */
	hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

	return status;
}
/**
 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
 * @adapter - pointer to the adapter structure
 *
 * This function initializes the timecounter and cyclecounter
 * structures for use in generating a ns counter from the arbitrary
 * fixed point cycles registers in the hardware.
 *
 * A change in link speed impacts the frequency of the DMA clock on
 * the device, which is used to generate the cycle counter
 * registers. Therefore this function is called whenever the link speed
 * changes.
 *
 * This function also turns on the SDP pin for clock out feature (X540
 * only), because this is where the shift is first calculated.
 */
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 incval = 0;
	u32 timinca = 0;
	u32 shift = 0;
	u32 cycle_speed;
	unsigned long flags;

	/*
	 * Determine what speed we need to set the cyclecounter for. It
	 * should be different for 100Mb, 1Gb, and 10Gb. Treat unknown
	 * speeds as 10Gb (hence why we can't just copy the link_speed).
	 */
	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
	case IXGBE_LINK_SPEED_1GB_FULL:
	case IXGBE_LINK_SPEED_10GB_FULL:
		cycle_speed = adapter->link_speed;
		break;
	default:
		/* cycle speed should be 10Gb when there is no link */
		cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	}

	/*
	 * grab the current TIMINCA value from the register so that it can be
	 * double checked. If the register value has been cleared, it must be
	 * reset to the correct value for generating a cyclecounter. If
	 * TIMINCA is zero, the SYSTIME registers do not increment at all.
	 */
	timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);

	/* Bail if the cycle speed didn't change and TIMINCA is non-zero */
	if (adapter->cycle_speed == cycle_speed && timinca)
		return;

	/* disable the SDP clock out */
	ixgbe_ptp_disable_sdp(hw);

	/*
	 * Scale the NIC cycle counter by a large factor so that
	 * relatively small corrections to the frequency can be added
	 * or subtracted. The drawbacks of a large factor include
	 * (a) the clock register overflows more quickly, (b) the cycle
	 * counter structure must be able to convert the systime value
	 * to nanoseconds using only a multiplier and a right-shift,
	 * and (c) the value must fit within the timinca register space
	 * => math based on internal DMA clock rate and available bits
	 */
	switch (cycle_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		incval = IXGBE_INCVAL_100;
		shift = IXGBE_INCVAL_SHIFT_100;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		incval = IXGBE_INCVAL_1GB;
		shift = IXGBE_INCVAL_SHIFT_1GB;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		incval = IXGBE_INCVAL_10GB;
		shift = IXGBE_INCVAL_SHIFT_10GB;
		break;
	}

	/*
	 * Modify the calculated values to fit within the correct
	 * number of bits specified by the hardware. The 82599 doesn't
	 * have the same space as the X540, so bitshift the calculated
	 * values to fit.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_X540:
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
		break;
	case ixgbe_mac_82599EB:
		incval >>= IXGBE_INCVAL_SHIFT_82599;
		shift -= IXGBE_INCVAL_SHIFT_82599;
		IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
				(1 << IXGBE_INCPER_SHIFT_82599) |
				incval);
		break;
	default:
		/* other devices aren't supported */
		return;
	}

	/* reset the system time registers */
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
	IXGBE_WRITE_FLUSH(hw);

	/* now that the shift has been calculated and the systime
	 * registers reset, (re-)enable the Clock out feature */
	ixgbe_ptp_enable_sdp(hw, shift);

	/* store the new cycle speed */
	adapter->cycle_speed = cycle_speed;

	ACCESS_ONCE(adapter->base_incval) = incval;
	smp_mb();

	/* grab the ptp lock */
	spin_lock_irqsave(&adapter->tmreg_lock, flags);

	memset(&adapter->cc, 0, sizeof(adapter->cc));
	adapter->cc.read = ixgbe_ptp_read;
	adapter->cc.mask = CLOCKSOURCE_MASK(64);
	adapter->cc.shift = shift;
	adapter->cc.mult = 1;

	/* reset the ns time counter */
	timecounter_init(&adapter->tc, &adapter->cc,
			 ktime_to_ns(ktime_get_real()));

	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
}
/**
 * ixgbe_reset_hw_rev_0_82598 - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
 * reset.
 **/
int32_t ixgbe_reset_hw_rev_0_82598(struct ixgbe_hw *hw)
{
	int32_t status = IXGBE_SUCCESS;
	uint32_t ctrl;
	uint32_t gheccr;
	uint32_t autoc;
	uint32_t i;
	uint32_t resets;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* Reset PHY */
	hw->phy.ops.reset(hw);

	/* NOTE(review): the full reset sequence is repeated 10 times
	 * unconditionally — presumably a rev-0 silicon workaround; confirm
	 * against the 82598 errata before changing. */
	for (resets = 0; resets < 10; resets++) {
		/*
		 * Prevent the PCI-E bus from hanging by disabling PCI-E
		 * master access and verify no pending requests before reset
		 */
		if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS) {
			status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
			DEBUGOUT("PCI-E Master disable polling has failed.\n");
		}

		/*
		 * Issue global reset to the MAC. This needs to be a SW reset.
		 * If link reset is used, it might reset the MAC when mng is
		 * using it.
		 */
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
		IXGBE_WRITE_FLUSH(hw);

		/*
		 * Poll for reset bit to self-clear indicating reset is
		 * complete
		 */
		for (i = 0; i < 10; i++) {
			usec_delay(1);
			ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
			if (!(ctrl & IXGBE_CTRL_RST))
				break;
		}
		if (ctrl & IXGBE_CTRL_RST) {
			status = IXGBE_ERR_RESET_FAILED;
			DEBUGOUT("Reset polling failed to complete.\n");
		}
	}

	msec_delay(50);

	/* Clear specific GHECCR bits after reset (hardware workaround) */
	gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
	gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
	IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);

	/*
	 * AUTOC register which stores link settings gets cleared
	 * and reloaded from EEPROM after reset. We need to restore
	 * our stored value from init in case SW changed the attach
	 * type or speed. If this is the first time and link settings
	 * have not been stored, store default settings from AUTOC.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	if (hw->mac.link_settings_loaded) {
		autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE);
		autoc &= ~(IXGBE_AUTOC_LMS_MASK);
		autoc |= hw->mac.link_attach_type;
		autoc |= hw->mac.link_mode_select;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	} else {
		hw->mac.link_attach_type =
			(autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
		hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
		hw->mac.link_settings_loaded = TRUE;
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	return status;
}