/**
 *	t3_mc5_init - initialize MC5 and the TCAM
 *	@mc5: the MC5 handle
 *	@nservers: desired number of TCP servers (listening ports)
 *	@nfilters: desired number of HW filters (classifiers)
 *	@nroutes: desired number of routes
 *
 *	Initialize MC5 and the TCAM and partition the TCAM for the requested
 *	number of servers, filters, and routes.  The number of routes is
 *	typically 0 except for specialized uses of the T3 adapters.
 */
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
		unsigned int nroutes)
{
	int err;
	unsigned int tcam_size = mc5->tcam_size;
	unsigned int mode72 = mc5->mode == MC5_MODE_72_BIT;
	adapter_t *adap = mc5->adapter;

	if (!tcam_size)
		return 0;

	if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
		return -EINVAL;

	if (nfilters)
		mc5->parity_enabled = 0;

	/* Reset the TCAM */
	t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_TMMODE | F_COMPEN,
			 V_COMPEN(mode72) | V_TMMODE(mode72) | F_TMRST);
	if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
		CH_ERR(adap, "TCAM reset timed out\n");
		return -1;
	}

	t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
	t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
		     tcam_size - nroutes - nfilters);
	t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
		     tcam_size - nroutes - nfilters - nservers);

	/* All the TCAM addresses we access have only the low 32 bits non 0 */
	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);

	mc5_dbgi_mode_enable(mc5);

	switch (mc5->part_type) {
	case IDT75P52100:
		err = init_idt52100(mc5);
		break;
	case IDT75N43102:
		err = init_idt43102(mc5);
		break;
	default:
		CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
		err = -EINVAL;
		break;
	}

	mc5_dbgi_mode_disable(mc5);

	return err;
}
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
		unsigned int nroutes)
{
	u32 cfg;
	int err;
	unsigned int tcam_size = mc5->tcam_size;
	struct adapter *adap = mc5->adapter;

	if (!tcam_size)
		return 0;

	if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
		return -EINVAL;

	/* Reset the TCAM */
	cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
	cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
	t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
	if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
		CH_ERR(adap, "TCAM reset timed out\n");
		return -1;
	}

	t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
	t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
		     tcam_size - nroutes - nfilters);
	t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
		     tcam_size - nroutes - nfilters - nservers);

	mc5->parity_enabled = 1;

	/* All the TCAM addresses we access have only the low 32 bits non 0 */
	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
	t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);

	mc5_dbgi_mode_enable(mc5);

	switch (mc5->part_type) {
	case IDT75P52100:
		err = init_idt52100(mc5);
		break;
	case IDT75N43102:
		err = init_idt43102(mc5);
		break;
	default:
		CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
		err = -EINVAL;
		break;
	}

	mc5_dbgi_mode_disable(mc5);

	return err;
}
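/*
 * A minimal standalone sketch (hypothetical sizes, not taken from any real
 * adapter) of the TCAM partitioning performed by the three index writes in
 * t3_mc5_init() above: routes occupy the top of the TCAM, filters sit
 * directly below the routes, and servers below the filters.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tcam_size = 1024 * 1024;	/* assumed number of TCAM entries */
	unsigned int nroutes = 0, nfilters = 8192, nservers = 16384;

	unsigned int routing_base = tcam_size - nroutes;
	unsigned int filter_base  = tcam_size - nroutes - nfilters;
	unsigned int server_base  = tcam_size - nroutes - nfilters - nservers;

	/* these are the values written to the three partition registers */
	printf("routing table base: %u\n", routing_base);
	printf("filter table base:  %u\n", filter_base);
	printf("server index base:  %u\n", server_base);
	return 0;
}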
static int t3b2_mac_reset(struct cmac *mac)
{
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;
	u32 val;

	if (!macidx(mac))
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
	else
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
	msleep(10);

	/* Check for xgm Rx fifo empty */
	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
			    0x80000000, 1, 5, 2)) {
		CH_ERR(adap, "MAC %d Rx fifo drain failed\n", macidx(mac));
		return -1;
	}

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	val = F_MAC_RESET_;
	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}

	t3_write_reg(adap, A_XGM_RX_CFG + oft,
		     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

	if (!macidx(mac))
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
	else
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

	return 0;
}
int t3_mac_reset(struct cmac *mac)
{
	static const struct addr_val_pair mac_reset_avp[] = {
		{A_XGM_TX_CTRL, 0},
		{A_XGM_RX_CTRL, 0},
		{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
		{A_XGM_RX_HASH_LOW, 0},
		{A_XGM_RX_HASH_HIGH, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
		{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
		{A_XGM_STAT_CTRL, F_CLRSTATS}
	};
	u32 val;
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset;

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
			 F_RXSTRFRWRD | F_DISERRFRAMES,
			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);

	if (uses_xaui(adap)) {
		if (adap->params.rev == 0) {
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_RXENABLE | F_TXENABLE);
			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
					    F_CMULOCK, 1, 5, 2)) {
				CH_ERR(adap,
				       "MAC %d XAUI SERDES CMU lock failed\n",
				       macidx(mac));
				return -1;
			}
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_SERDESRESET_);
		} else
			xaui_serdes_reset(mac);
	}

	t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
			 V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
			 V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);
	val = F_MAC_RESET_ | F_XGMAC_STOP_EN;

	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}
	memset(&mac->stats, 0, sizeof(mac->stats));
	return 0;
}
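/*
 * The t3_write_regs() call above applies a table of {address, value} pairs,
 * each shifted by the per-MAC register offset.  Below is a hypothetical
 * standalone sketch of that table-driven initialization pattern; reg_pair,
 * fake_write_reg and the numeric addresses are illustrative stand-ins, not
 * driver APIs or real register values.
 */
#include <stddef.h>
#include <stdio.h>

struct reg_pair {
	unsigned int reg_addr;
	unsigned int val;
};

static void fake_write_reg(unsigned int addr, unsigned int val)
{
	printf("write 0x%x -> reg 0x%x\n", val, addr);	/* stand-in for MMIO */
}

static void write_reg_table(const struct reg_pair *p, size_t n,
			    unsigned int offset)
{
	while (n--) {
		fake_write_reg(p->reg_addr + offset, p->val);
		p++;
	}
}

int main(void)
{
	static const struct reg_pair init_table[] = {
		{0x800, 0},		/* e.g. TX control cleared */
		{0x804, 0},		/* e.g. RX control cleared */
		{0x820, 0x7f},		/* e.g. RX config bits */
	};

	write_reg_table(init_table, sizeof(init_table) / sizeof(init_table[0]),
			0x400);		/* hypothetical per-MAC offset */
	return 0;
}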
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
	int hwm, lwm, divisor;
	int ipg;
	unsigned int thres, v, reg;
	struct adapter *adap = mac->adapter;

	/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
	 * packet size register includes header, but not FCS.
	 */
	mtu += 14;
	if (mtu > 1536)
		mtu += 4;

	if (mtu > MAX_FRAME_SIZE - 4)
		return -EINVAL;
	t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);

	if (adap->params.rev >= T3_REV_B2 &&
	    (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
		t3_mac_disable_exact_filters(mac);
		v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
		t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
				 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);

		reg = adap->params.rev == T3_REV_B2 ?
			A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;

		/* drain RX FIFO */
		if (t3_wait_op_done(adap, reg + mac->offset,
				    F_RXFIFO_EMPTY, 1, 20, 5)) {
			t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
			t3_mac_enable_exact_filters(mac);
			return -EIO;
		}
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));
		t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
		t3_mac_enable_exact_filters(mac);
	} else
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));

	/*
	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
	 * HWM only if flow-control is enabled.
	 */
	hwm = rx_fifo_hwm(mtu);
	lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
	v |= V_RXFIFOPAUSELWM(lwm / 8);
	if (G_RXFIFOPAUSEHWM(v))
		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
		    V_RXFIFOPAUSEHWM(hwm / 8);

	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

	/* Adjust the TX FIFO threshold based on the MTU */
	thres = (adap->params.vpd.cclk * 1000) / 15625;
	thres = (thres * mtu) / 1000;
	if (is_10G(adap))
		thres /= 10;
	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
	thres = max(thres, 8U);	/* need at least 8 */
	ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
			 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
			 V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));

	if (adap->params.rev > 0) {
		divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
		t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
			     (hwm - lwm) * 4 / divisor);
	}
	t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
		     MAC_RXFIFO_SIZE * 4 * 8 / 512);
	return 0;
}
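/*
 * A small standalone sketch (hypothetical FIFO size, not hardware-derived)
 * of the frame-size and PAUSE low-watermark arithmetic used in
 * t3_mac_set_mtu() above: the MTU gains 14 bytes of Ethernet header, 4 more
 * when the result exceeds 1536 (room for a VLAN tag), and the low watermark
 * is the smaller of 3 frames and a quarter of the RX FIFO.
 */
#include <stdio.h>

#define EXAMPLE_RXFIFO_SIZE 32768	/* assumed RX FIFO size in bytes */

static unsigned int hw_frame_size(unsigned int mtu)
{
	mtu += 14;			/* Ethernet header, no FCS */
	if (mtu > 1536)
		mtu += 4;		/* allow for a VLAN tag */
	return mtu;
}

int main(void)
{
	unsigned int mtu = 9000;	/* example jumbo MTU */
	unsigned int frame = hw_frame_size(mtu);
	unsigned int lwm = 3 * frame;

	if (lwm > EXAMPLE_RXFIFO_SIZE / 4)
		lwm = EXAMPLE_RXFIFO_SIZE / 4;

	printf("HW max packet size: %u, PAUSE LWM: %u bytes\n", frame, lwm);
	return 0;
}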
static int t3b2_mac_reset(struct cmac *mac)
{
	struct adapter *adap = mac->adapter;
	unsigned int oft = mac->offset, store;
	int idx = macidx(mac);
	u32 val;

	if (!macidx(mac))
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
	else
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

	/* Stop NIC traffic to reduce the number of TXTOGGLES */
	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);

	/* Ensure TX drains */
	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	/* Store A_TP_TX_DROP_CFG_CH0 */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	store = t3_read_reg(adap, A_TP_PIO_DATA);

	msleep(10);

	/* Change DROP_CFG to 0xc0000011 */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);

	/* Check for xgm Rx fifo empty */
	/* Increased loop count to 1000 from 5 to cover 1G and 100Mbps case */
	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
			    0x80000000, 1, 1000, 2)) {
		CH_ERR(adap, "MAC %d Rx fifo drain failed\n", macidx(mac));
		return -1;
	}

	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	val = F_MAC_RESET_;
	if (is_10G(adap))
		val |= F_PCS_RESET_;
	else if (uses_xaui(adap))
		val |= F_PCS_RESET_ | F_XG2G_RESET_;
	else
		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		msleep(1);
		t3b_pcs_reset(mac);
	}
	t3_write_reg(adap, A_XGM_RX_CFG + oft,
		     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

	/* Restore the DROP_CFG */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	t3_write_reg(adap, A_TP_PIO_DATA, store);

	if (!idx)
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
	else
		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

	/* re-enable NIC traffic */
	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);

	return 0;
}
/*
 * Issue a command to the TCAM and wait for its completion.  The address and
 * any data required by the command must have been set up by the caller.
 */
static int mc5_cmd_write(adapter_t *adapter, u32 cmd)
{
	t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
	return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
			       F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
/**
 *	t3_mac_set_mtu - set the MAC MTU
 *	@mac: the MAC to configure
 *	@mtu: the MTU
 *
 *	Sets the MAC MTU and adjusts the FIFO PAUSE watermarks accordingly.
 */
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
	int hwm, lwm, divisor;
	int ipg;
	unsigned int thres, v, reg;
	adapter_t *adap = mac->adapter;
	unsigned int port_type = adap->params.vpd.port_type[macidx(mac)];
	unsigned int orig_mtu = mtu;

	/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
	 * packet size register includes header, but not FCS.
	 */
	mtu += 14;
	if (mac->multiport)
		mtu += 8;	/* for preamble */
	if (mtu > MAX_FRAME_SIZE - 4)
		return -EINVAL;
	if (mac->multiport)
		return t3_vsc7323_set_mtu(adap, mtu - 4, mac->ext_port);

	/* Modify the TX and RX fifo depth only if the card has a vsc8211 phy */
	if (port_type == 2) {
		int err = t3_vsc8211_fifo_depth(adap, orig_mtu, macidx(mac));

		if (err)
			return err;
	}

	if (adap->params.rev >= T3_REV_B2 &&
	    (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
		t3_mac_disable_exact_filters(mac);
		v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
		t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
				 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);

		reg = adap->params.rev == T3_REV_B2 ?
			A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;

		/* drain RX FIFO */
		if (t3_wait_op_done(adap, reg + mac->offset,
				    F_RXFIFO_EMPTY, 1, 20, 5)) {
			t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
			t3_mac_enable_exact_filters(mac);
			return -EIO;
		}
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));
		t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
		t3_mac_enable_exact_filters(mac);
	} else
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));

	/*
	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
	 * HWM only if flow-control is enabled.
	 */
	hwm = rx_fifo_hwm(mtu);
	lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
	v |= V_RXFIFOPAUSELWM(lwm / 8);
	if (G_RXFIFOPAUSEHWM(v))
		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
		    V_RXFIFOPAUSEHWM(hwm / 8);

	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

	/* Adjust the TX FIFO threshold based on the MTU */
	thres = (adap->params.vpd.cclk * 1000) / 15625;
	thres = (thres * mtu) / 1000;
	if (is_10G(adap))
		thres /= 10;
	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
	thres = max(thres, 8U);	/* need at least 8 */
	ipg = (port_type == 9 || adap->params.rev != T3_REV_C) ? 1 : 0;
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
			 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
			 V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));

	/* Assuming a minimum drain rate of 2.5Gbps... */
	if (adap->params.rev > 0) {
		divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
		t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
			     (hwm - lwm) * 4 / divisor);
	}
	t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
		     MAC_RXFIFO_SIZE * 4 * 8 / 512);
	return 0;
}
static int t3_mac_reset(struct cmac *mac, int portspeed)
{
	u32 val, store_mps;
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;
	int idx = macidx(mac);
	unsigned int store;

	/* Stop egress traffic to xgm */
	store_mps = t3_read_reg(adap, A_MPS_CFG);
	if (!idx)
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
	else
		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

	/* This will reduce the number of TXTOGGLES */
	/* Clear: to stop the NIC traffic */
	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
	/* Ensure TX drains */
	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);

	/* PCS in reset */
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

	/* Store A_TP_TX_DROP_CFG_CH0 */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	store = t3_read_reg(adap, A_TP_PIO_DATA);

	msleep(10);

	/* Change DROP_CFG to 0xc0000011 */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);

	/* Check for xgm Rx fifo empty */
	/* Increased loop count to 1000 from 5 to cover 1G and 100Mbps case */
	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
			    0x80000000, 1, 1000, 2) && portspeed < 0) {
		CH_ERR(adap, "MAC %d Rx fifo drain failed\n", idx);
		return -1;
	}

	if (portspeed >= 0) {
		u32 intr = t3_read_reg(adap, A_XGM_INT_ENABLE + oft);

		/*
		 * safespeedchange: wipes out pretty much all XGMAC registers.
		 */
		t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
				 V_PORTSPEED(M_PORTSPEED) | F_SAFESPEEDCHANGE,
				 portspeed | F_SAFESPEEDCHANGE);
		(void) t3_read_reg(adap, A_XGM_PORT_CFG + oft);
		t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
				 F_SAFESPEEDCHANGE, 0);
		(void) t3_read_reg(adap, A_XGM_PORT_CFG + oft);
		t3_mac_init(mac);
		t3_write_reg(adap, A_XGM_INT_ENABLE + oft, intr);
	} else {
		t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0); /* MAC in reset */
		(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */

		val = xgm_reset_ctrl(mac);
		t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
		(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);	/* flush */
		if ((val & F_PCS_RESET_) && adap->params.rev) {
			msleep(1);
			t3b_pcs_reset(mac);
		}
		t3_write_reg(adap, A_XGM_RX_CFG + oft,
			     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
			     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
	}

	/* Restore the DROP_CFG */
	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
	t3_write_reg(adap, A_TP_PIO_DATA, store);

	/* Resume egress traffic to xgm */
	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
			 store_mps);

	/* Set: re-enable NIC traffic */
	t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, F_ENFORCEPKT);

	return 0;
}
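/*
 * xgm_reset_ctrl() above composes the XGM reset-control word.  The inline
 * if/else chains in the other reset routines in this section suggest the
 * selection logic sketched below; this is a standalone illustration with
 * stand-in bit values and names, not the driver's actual helper.
 */
#include <stdio.h>

#define BIT_MAC_RESET	(1u << 0)	/* stand-in for F_MAC_RESET_ */
#define BIT_PCS_RESET	(1u << 1)	/* stand-in for F_PCS_RESET_ */
#define BIT_XG2G_RESET	(1u << 2)	/* stand-in for F_XG2G_RESET_ */
#define BIT_RGMII_RESET	(1u << 3)	/* stand-in for F_RGMII_RESET_ */

static unsigned int reset_ctrl_word(int is_10g, int uses_xaui)
{
	unsigned int val = BIT_MAC_RESET;

	if (is_10g)
		val |= BIT_PCS_RESET;
	else if (uses_xaui)
		val |= BIT_PCS_RESET | BIT_XG2G_RESET;
	else
		val |= BIT_RGMII_RESET | BIT_XG2G_RESET;
	return val;
}

int main(void)
{
	printf("10G:   0x%x\n", reset_ctrl_word(1, 0));
	printf("XAUI:  0x%x\n", reset_ctrl_word(0, 1));
	printf("RGMII: 0x%x\n", reset_ctrl_word(0, 0));
	return 0;
}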