void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
	unsigned int max_frame_len;
	efx_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);

	/* Configure MAC - cut-thru mode is hard wired on */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	efx_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
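/* Aside: each EFX_POPULATE_OWORD_n() call above builds a 128-bit register
 * image in one step. Conceptually it zeroes the oword and then sets each
 * named field, roughly as sketched below (not the macro's literal
 * expansion; EFX_ZERO_OWORD() is assumed from the driver's bitfield
 * helpers, EFX_SET_OWORD_FIELD() appears elsewhere in this listing):
 */
static inline void xm_glb_cfg_sketch(efx_oword_t *reg)
{
	EFX_ZERO_OWORD(*reg);
	EFX_SET_OWORD_FIELD(*reg, FRF_AB_XM_RX_JUMBO_MODE, 1);
	EFX_SET_OWORD_FIELD(*reg, FRF_AB_XM_TX_STAT_EN, 1);
	EFX_SET_OWORD_FIELD(*reg, FRF_AB_XM_RX_STAT_EN, 1);
}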
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		   prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
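/* A minimal sketch of how the two MDIO accessors above are typically
 * registered with the kernel's MDIO layer. The field names come from
 * struct mdio_if_info in <linux/mdio.h>; the probe helper shown here is
 * hypothetical and not part of this listing:
 */
static void falcon_mdio_setup_sketch(struct efx_nic *efx)
{
	efx->mdio.mdio_read = falcon_mdio_read;	  /* clause-45 read op */
	efx->mdio.mdio_write = falcon_mdio_write; /* clause-45 write op */
	efx->mdio.dev = efx->net_dev;
	efx->mdio.mode_support = MDIO_SUPPORTS_C45;
}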
/* Write an MDIO register of a PHY connected to Falcon.
 * (An earlier variant of falcon_mdio_write() above, using the older
 * EFX_REGDUMP logging macro and the efx->mdio_lock field.)
 */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	mutex_lock(&efx->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&efx->mdio_lock);
	return rc;
}
static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
{
	efx_oword_t reg;

	if ((falcon_rev(efx) != FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is up */
	if (!EFX_WORKAROUND_5147(efx) || !efx->link_up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (!efx->mac_up)
		return;

	/* Flush the ISR */
	if (enable)
		falcon_read(efx, &reg, XM_MGT_INT_REG_B0);

	EFX_POPULATE_OWORD_2(reg,
			     XM_MSK_RMTFLT, !enable,
			     XM_MSK_LCLFLT, !enable);
	falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
}
static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
				    const u32 *rx_indir_table, const u8 *key)
{
	efx_oword_t temp;

	/* @user is accepted for interface compatibility but has no
	 * effect here: only the default RSS context is programmed. */

	/* Set hash key for IPv4 */
	if (key)
		memcpy(efx->rss_context.rx_hash_key, key, sizeof(temp));
	memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

	/* Enable IPv6 RSS */
	BUILD_BUG_ON(sizeof(efx->rss_context.rx_hash_key) <
		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
	memcpy(&temp, efx->rss_context.rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
	memcpy(&temp, efx->rss_context.rx_hash_key + sizeof(temp),
	       sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
	memcpy(&temp, efx->rss_context.rx_hash_key + 2 * sizeof(temp),
	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);

	memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
	       sizeof(efx->rss_context.rx_indir_table));
	efx_farch_rx_push_indir_table(efx);

	return 0;
}
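/* Hypothetical caller sketch: push a fresh random key and the current
 * indirection table at init time. netdev_rss_key_fill() is the standard
 * kernel helper for a boot-time random RSS key; the wrapper function name
 * is an assumption:
 */
static int siena_push_default_rss_sketch(struct efx_nic *efx)
{
	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));
	/* key == NULL: reuse the key just written into rss_context */
	return siena_rx_push_rss_config(efx, false,
					efx->rss_context.rx_indir_table,
					NULL);
}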
/* Read an MDIO register of a PHY connected to Falcon.
 * (An earlier variant of falcon_mdio_read() above, using the older
 * EFX_REGDUMP/EFX_LOG logging macros and the efx->mdio_lock field.)
 */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	mutex_lock(&efx->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
			    prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
			prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&efx->mdio_lock);
	return rc;
}
/* Enable or disable delivery of user-level (VFDI) events, routing them
 * to the VFDI channel's event queue. */
static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
{
	efx_oword_t reg;

	EFX_POPULATE_OWORD_2(reg,
			     FRF_CZ_USREV_DIS, enabled ? 0 : 1,
			     FRF_CZ_DFLT_EVQ, efx->vfdi_channel->channel);
	efx_writeo(efx, &reg, FR_CZ_USR_EV_CFG);
}
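/* Usage sketch (the call sites are assumptions, not shown in this
 * listing): user-level event delivery is switched on once the VFDI
 * channel is ready and off again at SR-IOV teardown:
 */
static void efx_sriov_usrev_example(struct efx_nic *efx, bool vfdi_active)
{
	/* Route user-level events to the VFDI channel while VFs are
	 * active; suppress them otherwise. */
	efx_sriov_usrev(efx, vfdi_active);
}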
/* A simpler variant of falcon_mask_status_intr() above, without the
 * workaround-5147 and mac_up guards. */
static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
{
	efx_oword_t reg;

	if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* Flush the ISR */
	if (enable)
		falcon_read(efx, &reg, XM_MGT_INT_REG_B0);

	EFX_POPULATE_OWORD_2(reg,
			     XM_MSK_RMTFLT, !enable,
			     XM_MSK_LCLFLT, !enable);
	falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
}
/* Zeroes out the SRAM contents. This routine must be called in
 * process context and is allowed to sleep.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");
			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no stats DMA buffer to complete into */

	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb();	/* ensure the done marker is clear before starting DMA */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
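/* A hedged sketch of the matching completion path: the DMA armed by
 * falcon_stats_request() above finishes by flipping the done marker,
 * which the driver then polls. FALCON_STATS_DONE and the decode step are
 * assumptions here, not verbatim from this listing:
 */
static void falcon_stats_complete_sketch(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb();	/* read the done marker before the stats themselves */
		/* ... decode the DMA'd MAC statistics here ... */
	}
}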
/* Resets NIC to known state. This routine must be called in process
 * context and is allowed to sleep.
 */
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to backup PCI state of primary "
				"function prior to hardware reset\n");
			goto fail1;
		}
		if (efx_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to backup PCI state of "
					"secondary function prior to "
					"hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	EFX_LOG(efx, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (efx_nic_is_dual_func(efx)) {
			rc = pci_restore_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to restore PCI config for "
					"the secondary function\n");
				goto fail3;
			}
		}
		rc = pci_restore_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to restore PCI config for the "
				"primary function\n");
			goto fail4;
		}
		EFX_LOG(efx, "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		EFX_ERR(efx, "timed out waiting for hardware reset\n");
		goto fail5;
	}
	EFX_LOG(efx, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
fail3:
	pci_restore_state(efx->pci_dev);
fail1:
fail4:
fail5:
	return rc;
}
static void falcon_reconfigure_gmac(struct efx_nic *efx)
{
	bool loopback, tx_fc, rx_fc, bytemode;
	int if_mode;
	unsigned int max_frame_len;
	efx_oword_t reg;

	/* Configuration register 1 */
	tx_fc = (efx->link_fc & EFX_FC_TX) || !efx->link_fd;
	rx_fc = !!(efx->link_fc & EFX_FC_RX);
	loopback = (efx->loopback_mode == LOOPBACK_GMAC);
	bytemode = (efx->link_speed == 1000);

	EFX_POPULATE_OWORD_5(reg,
			     GM_LOOP, loopback,
			     GM_TX_EN, 1,
			     GM_TX_FC_EN, tx_fc,
			     GM_RX_EN, 1,
			     GM_RX_FC_EN, rx_fc);
	falcon_write(efx, &reg, GM_CFG1_REG);
	udelay(10);

	/* Configuration register 2 */
	if_mode = (bytemode) ? 2 : 1;
	EFX_POPULATE_OWORD_5(reg,
			     GM_IF_MODE, if_mode,
			     GM_PAD_CRC_EN, 1,
			     GM_LEN_CHK, 1,
			     GM_FD, efx->link_fd,
			     GM_PAMBL_LEN, 0x7 /* datasheet recommended */);
	falcon_write(efx, &reg, GM_CFG2_REG);
	udelay(10);

	/* Max frame len register */
	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
	EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len);
	falcon_write(efx, &reg, GM_MAX_FLEN_REG);
	udelay(10);

	/* FIFO configuration register 0 */
	EFX_POPULATE_OWORD_5(reg,
			     GMF_FTFENREQ, 1,
			     GMF_STFENREQ, 1,
			     GMF_FRFENREQ, 1,
			     GMF_SRFENREQ, 1,
			     GMF_WTMENREQ, 1);
	falcon_write(efx, &reg, GMF_CFG0_REG);
	udelay(10);

	/* FIFO configuration register 1 */
	EFX_POPULATE_OWORD_2(reg,
			     GMF_CFGFRTH, 0x12,
			     GMF_CFGXOFFRTX, 0xffff);
	falcon_write(efx, &reg, GMF_CFG1_REG);
	udelay(10);

	/* FIFO configuration register 2 */
	EFX_POPULATE_OWORD_2(reg,
			     GMF_CFGHWM, 0x3f,
			     GMF_CFGLWM, 0xa);
	falcon_write(efx, &reg, GMF_CFG2_REG);
	udelay(10);

	/* FIFO configuration register 3 */
	EFX_POPULATE_OWORD_2(reg,
			     GMF_CFGHWMFT, 0x1c,
			     GMF_CFGFTTH, 0x08);
	falcon_write(efx, &reg, GMF_CFG3_REG);
	udelay(10);

	/* FIFO configuration register 4 */
	EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1);
	falcon_write(efx, &reg, GMF_CFG4_REG);
	udelay(10);

	/* FIFO configuration register 5 */
	falcon_read(efx, &reg, GMF_CFG5_REG);
	EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode);
	EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd);
	EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd);
	EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0);
	falcon_write(efx, &reg, GMF_CFG5_REG);
	udelay(10);

	/* MAC address */
	EFX_POPULATE_OWORD_4(reg,
			     GM_HWADDR_5, efx->net_dev->dev_addr[5],
			     GM_HWADDR_4, efx->net_dev->dev_addr[4],
			     GM_HWADDR_3, efx->net_dev->dev_addr[3],
			     GM_HWADDR_2, efx->net_dev->dev_addr[2]);
	falcon_write(efx, &reg, GM_ADR1_REG);
	udelay(10);
	EFX_POPULATE_OWORD_2(reg,
			     GM_HWADDR_1, efx->net_dev->dev_addr[1],
			     GM_HWADDR_0, efx->net_dev->dev_addr[0]);
	falcon_write(efx, &reg, GM_ADR2_REG);
	udelay(10);

	falcon_reconfigure_mac_wrapper(efx);
}
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
static int siena_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Recover from a failed assertion post-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	/* Squash TX of packets of 16 bytes or less */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	efx_reado(efx, &temp, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
	/* Enable hash insertion. This is broken for the 'Falcon' hash
	 * if IPv6 hashing is also enabled, so also select Toeplitz
	 * TCP/IPv4 and IPv4 hashes.
	 */
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
	efx_writeo(efx, &temp, FR_AZ_RX_CFG);

	/* Set hash key for IPv4 */
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

	/* Enable IPv6 RSS */
	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		return rc;

	/* Set destination of both TX and RX Flush events */
	EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
	efx_writeo(efx, &temp, FR_BZ_DP_CTRL);

	EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
	efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);

	efx_nic_init_common(efx);
	return 0;
}
static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (efx_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (efx_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}
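/* The double-underscore prefix marks __falcon_reset_hw() as an unlocked
 * helper; a sketch of a locking wrapper follows. The wrapper name and the
 * spi_lock field are assumptions here, not taken from this listing:
 */
static int falcon_reset_hw_sketch(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	/* Serialise the reset against other accesses to shared state */
	mutex_lock(&nic_data->spi_lock);
	rc = __falcon_reset_hw(efx, method);
	mutex_unlock(&nic_data->spi_lock);

	return rc;
}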