/* Probe all SPI devices on the NIC */ static void falcon_probe_spi_devices(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; int boot_dev; efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL); efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n", boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM"); } else { /* Disable VPD and set clock dividers to safe * values for initial programming. */ boot_dev = -1; netif_dbg(efx, probe, efx->net_dev, "Booted from internal ASIC settings;" " setting SPI config\n"); EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, /* 125 MHz / 7 ~= 20 MHz */ FRF_AB_EE_SF_CLOCK_DIV, 7, /* 125 MHz / 63 ~= 2 MHz */ FRF_AB_EE_EE_CLOCK_DIV, 63); efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); } mutex_init(&nic_data->spi_lock); if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) falcon_spi_device_init(efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH, default_flash_type); if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) falcon_spi_device_init(efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM, large_eeprom_type); }
irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) { struct efx_nic *efx = dev_id; efx_oword_t *int_ker = efx->irq_status.addr; int syserr; int queues; /* Check to see if this is our interrupt. If it isn't, we * exit without having touched the hardware. */ if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d not for me\n", irq, raw_smp_processor_id()); return IRQ_NONE; } efx->last_irq_cpu = raw_smp_processor_id(); netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); /* Check to see if we have a serious error condition */ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); if (unlikely(syserr)) return efx_nic_fatal_interrupt(efx); /* Determine interrupting queues, clear interrupt status * register and acknowledge the device interrupt. */ BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); EFX_ZERO_OWORD(*int_ker); wmb(); /* Ensure the vector is cleared before interrupt ack */ falcon_irq_ack_a1(efx); if (queues & 1) efx_schedule_channel_irq(efx_get_channel(efx, 0)); if (queues & 2) efx_schedule_channel_irq(efx_get_channel(efx, 1)); return IRQ_HANDLED; }
/* * To support clients which aren't provided with any PCI context infer * the hardware family by inspecting the hardware. Obviously the caller * must be damn sure they're really talking to a supported device. */ __checkReturn efx_rc_t efx_infer_family( __in efsys_bar_t *esbp, __out efx_family_t *efp) { efx_family_t family; efx_oword_t oword; unsigned int portnum; efx_rc_t rc; EFSYS_BAR_READO(esbp, FR_AZ_CS_DEBUG_REG_OFST, &oword, B_TRUE); portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM); if ((portnum == 1) || (portnum == 2)) { #if EFSYS_OPT_SIENA family = EFX_FAMILY_SIENA; goto out; #endif } else if (portnum == 0) { efx_dword_t dword; uint32_t hw_rev; EFSYS_BAR_READD(esbp, ER_DZ_BIU_HW_REV_ID_REG_OFST, &dword, B_TRUE); hw_rev = EFX_DWORD_FIELD(dword, ERF_DZ_HW_REV_ID); if (hw_rev == ER_DZ_BIU_HW_REV_ID_REG_RESET) { #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD /* * BIU_HW_REV_ID is the same for Huntington and Medford. * Assume Huntington, as Medford is very similar. */ family = EFX_FAMILY_HUNTINGTON; goto out; #endif } else { #if EFSYS_OPT_FALCON family = EFX_FAMILY_FALCON; goto out; #endif } } rc = ENOTSUP; goto fail1; out: if (efp != NULL) *efp = family; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); }
static __checkReturn efx_rc_t siena_intr_trigger( __in efx_nic_t *enp, __in unsigned int level) { efx_intr_t *eip = &(enp->en_intr); efx_oword_t oword; unsigned int count; uint32_t sel; efx_rc_t rc; /* bug16757: No event queues can be initialized */ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV)); if (level >= EFX_NINTR_SIENA) { rc = EINVAL; goto fail1; } if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL)) return (ENOTSUP); /* avoid EFSYS_PROBE() */ sel = level; /* Trigger a test interrupt */ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel); EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1); EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); /* * Wait up to 100ms for the interrupt to be raised before restoring * KER_INT_LEVE_SEL. Ignore a failure to raise (the caller will * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL */ count = 0; do { EFSYS_SPIN(100); /* 100us */ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword); } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000); EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level); EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword); return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); }
int falcon_reset_xaui(struct efx_nic *efx) { efx_oword_t reg; int count; /* Start reset sequence */ EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); falcon_write(efx, &reg, XX_PWR_RST_REG); /* Wait up to 10 ms for completion, then reinitialise */ for (count = 0; count < 1000; count++) { falcon_read(efx, &reg, XX_PWR_RST_REG); if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 && EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) { falcon_setup_xaui(efx); return 0; } udelay(10); } EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n"); return -ETIMEDOUT; }
static bool falcon_xgxs_link_ok(struct efx_nic *efx) { efx_oword_t reg; bool align_done, link_ok = false; int sync_status; /* Read link status */ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE); sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT); if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES)) link_ok = true; /* Clear link status ready for next read */ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES); EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES); EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); return link_ok; }
int falcon_reset_xaui(struct efx_nic *efx) { efx_oword_t reg; int count; EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); falcon_write(efx, &reg, XX_PWR_RST_REG); for (count = 0; count < 1000; count++) { falcon_read(efx, &reg, XX_PWR_RST_REG); if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 && EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) { falcon_setup_xaui(efx); return 0; } udelay(10); } EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n"); return -ETIMEDOUT; }
bool falcon_xaui_link_ok(struct efx_nic *efx) { efx_oword_t reg; bool align_done, link_ok = false; int sync_status; if (LOOPBACK_INTERNAL(efx)) return true; /* Read link status */ falcon_read(efx, &reg, XX_CORE_STAT_REG); align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE); sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT); if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) link_ok = true; /* Clear link status ready for next read */ EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); falcon_write(efx, &reg, XX_CORE_STAT_REG); /* If the link is up, then check the phy side of the xaui link * (error conditions from the wire side propagate back through * the phy to the xaui side). */ if (efx->link_up && link_ok) { if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS)) link_ok = mdio_clause45_phyxgxs_lane_sync(efx); } /* If the PHY and XAUI links are up, then check the mac's xgmii * fault state */ if (efx->link_up && link_ok) link_ok = falcon_xgmii_status(efx); return link_ok; }
static int falcon_mdio_read(struct net_device *net_dev, int prtad, int devad, u16 addr) { struct efx_nic *efx = netdev_priv(net_dev); struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t reg; int rc; mutex_lock(&nic_data->mdio_lock); rc = falcon_gmii_wait(efx); if (rc) goto out; EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR); EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, FRF_AB_MD_DEV_ADR, devad); efx_writeo(efx, &reg, FR_AB_MD_ID); EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0); efx_writeo(efx, &reg, FR_AB_MD_CS); rc = falcon_gmii_wait(efx); if (rc == 0) { efx_reado(efx, &reg, FR_AB_MD_RXD); rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); netif_vdbg(efx, hw, efx->net_dev, "read from MDIO %d register %d.%d, got %04x\n", prtad, devad, addr, rc); } else { EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RIC, 0, FRF_AB_MD_GC, 1); efx_writeo(efx, &reg, FR_AB_MD_CS); netif_dbg(efx, hw, efx->net_dev, "read from MDIO %d register %d.%d, got error %d\n", prtad, devad, addr, rc); } out: mutex_unlock(&nic_data->mdio_lock); return rc; }
/* Wait for GMII access to complete */ static int falcon_gmii_wait(struct efx_nic *efx) { efx_oword_t md_stat; int count; /* wait up to 50ms - taken max from datasheet */ for (count = 0; count < 5000; count++) { efx_reado(efx, &md_stat, FR_AB_MD_STAT); if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { EFX_ERR(efx, "error from GMII access " EFX_OWORD_FMT"\n", EFX_OWORD_VAL(md_stat)); return -EIO; } return 0; } udelay(10); } EFX_ERR(efx, "timed out waiting for GMII\n"); return -ETIMEDOUT; }
static int falcon_gmii_wait(struct efx_nic *efx) { efx_oword_t md_stat; int count; for (count = 0; count < 5000; count++) { efx_reado(efx, &md_stat, FR_AB_MD_STAT); if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { netif_err(efx, hw, efx->net_dev, "error from GMII access " EFX_OWORD_FMT"\n", EFX_OWORD_VAL(md_stat)); return -EIO; } return 0; } udelay(10); } netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n"); return -ETIMEDOUT; }
void falcon_drain_tx_fifo(struct efx_nic *efx) { efx_oword_t reg; if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) || (efx->loopback_mode != LOOPBACK_NONE)) return; efx_reado(efx, &reg, FR_AB_MAC_CTRL); if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN)) return; falcon_reset_macs(efx); }
/* Read an MDIO register of a PHY connected to Falcon. */ static int falcon_mdio_read(struct net_device *net_dev, int prtad, int devad, u16 addr) { struct efx_nic *efx = netdev_priv(net_dev); efx_oword_t reg; int rc; mutex_lock(&efx->mdio_lock); /* Check MDIO not currently being accessed */ rc = falcon_gmii_wait(efx); if (rc) goto out; EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR); EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, FRF_AB_MD_DEV_ADR, devad); efx_writeo(efx, &reg, FR_AB_MD_ID); /* Request data to be read */ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0); efx_writeo(efx, &reg, FR_AB_MD_CS); /* Wait for data to become available */ rc = falcon_gmii_wait(efx); if (rc == 0) { efx_reado(efx, &reg, FR_AB_MD_RXD); rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", prtad, devad, addr, rc); } else { /* Abort the read operation */ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RIC, 0, FRF_AB_MD_GC, 1); efx_writeo(efx, &reg, FR_AB_MD_CS); EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", prtad, devad, addr, rc); } out: mutex_unlock(&efx->mdio_lock); return rc; }
/************************************************************************** * * MAC operations * *************************************************************************/ static int falcon_reset_xmac(struct efx_nic *efx) { efx_oword_t reg; int count; EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1); falcon_write(efx, &reg, XM_GLB_CFG_REG); for (count = 0; count < 10000; count++) { /* wait up to 100ms */ falcon_read(efx, &reg, XM_GLB_CFG_REG); if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0) return 0; udelay(10); } EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); return -ETIMEDOUT; }
static void falcon_clock_mac(struct efx_nic *efx) { unsigned strap_val; efx_oword_t nic_stat; /* Configure the NIC generated MAC clock correctly */ efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); strap_val = EFX_IS10G(efx) ? 5 : 3; if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1); EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val); efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT); } else { /* Falcon A1 does not support 1G/10G speed switching * and must not be used with a PHY that does. */ BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) != strap_val); } }
int falcon_reset_xaui(struct efx_nic *efx) { efx_oword_t reg; int count; EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); falcon_write(efx, &reg, XX_PWR_RST_REG); /* Give some time for the link to establish */ for (count = 0; count < 1000; count++) { /* wait up to 10ms */ falcon_read(efx, &reg, XX_PWR_RST_REG); if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0) { falcon_setup_xaui(efx); return 0; } udelay(10); } EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n"); return -ETIMEDOUT; }
static int falcon_reset_sram(struct efx_nic *efx) { efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; int count; efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1); EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1); efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN, 1, FRF_AZ_SRM_NB_SZ, 0); efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); count = 0; do { netif_dbg(efx, hw, efx->net_dev, "waiting for SRAM reset (attempt %d)...\n", count); schedule_timeout_uninterruptible(HZ / 50); efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { netif_dbg(efx, hw, efx->net_dev, "SRAM reset complete\n"); return 0; } } while (++count < 20); netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); return -ETIMEDOUT; }
/* Zeroes out the SRAM contents. This routine must be called in * process context and is allowed to sleep. */ static int falcon_reset_sram(struct efx_nic *efx) { efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; int count; /* Set the SRAM wake/sleep GPIO appropriately. */ efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1); EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1); efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); /* Initiate SRAM reset */ EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN, 1, FRF_AZ_SRM_NB_SZ, 0); efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); /* Wait for SRAM reset to complete */ count = 0; do { netif_dbg(efx, hw, efx->net_dev, "waiting for SRAM reset (attempt %d)...\n", count); /* SRAM reset is slow; expect around 16ms */ schedule_timeout_uninterruptible(HZ / 50); /* Check for reset complete */ efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { netif_dbg(efx, hw, efx->net_dev, "SRAM reset complete\n"); return 0; } } while (++count < 20); /* wait up to 0.4 sec */ netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); return -ETIMEDOUT; }
/* * To support clients which aren't provided with any PCI context infer * the hardware family by inspecting the hardware. Obviously the caller * must be damn sure they're really talking to a supported device. */ __checkReturn int efx_infer_family( __in efsys_bar_t *esbp, __out efx_family_t *efp) { efx_family_t family; efx_oword_t oword; unsigned int portnum; int rc; EFSYS_BAR_READO(esbp, FR_AZ_CS_DEBUG_REG_OFST, &oword, B_TRUE); portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM); switch (portnum) { #if EFSYS_OPT_FALCON case 0: family = EFX_FAMILY_FALCON; break; #endif #if EFSYS_OPT_SIENA case 1: case 2: family = EFX_FAMILY_SIENA; break; #endif default: rc = ENOTSUP; goto fail1; } if (efp != NULL) *efp = family; return (0); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); }
static __checkReturn boolean_t efx_intr_check_fatal( __in efx_nic_t *enp) { efx_intr_t *eip = &(enp->en_intr); efsys_mem_t *esmp = eip->ei_esmp; efx_oword_t oword; /* Read the syndrome */ EFSYS_MEM_READO(esmp, 0, &oword); if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) { EFSYS_PROBE(fatal); /* Clear the fatal interrupt condition */ EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0); EFSYS_MEM_WRITEO(esmp, 0, &oword); return (B_TRUE); } return (B_FALSE); }
static int siena_probe_nic(struct efx_nic *efx) { struct siena_nic_data *nic_data; efx_oword_t reg; int rc; /* Allocate storage for hardware specific data */ nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; nic_data->efx = efx; efx->nic_data = nic_data; if (efx_farch_fpga_ver(efx) != 0) { netif_err(efx, probe, efx->net_dev, "Siena FPGA not supported\n"); rc = -ENODEV; goto fail1; } efx->max_channels = EFX_MAX_CHANNELS; efx->max_tx_channels = EFX_MAX_CHANNELS; efx_reado(efx, &reg, FR_AZ_CS_DEBUG); efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; rc = efx_mcdi_init(efx); if (rc) goto fail1; /* Now we can reset the NIC */ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; } siena_init_wol(efx); /* Allocate memory for INT_KER */ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t), GFP_KERNEL); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); netif_dbg(efx, probe, efx->net_dev, "INT_KER at %llx (virt %p phys %llx)\n", (unsigned long long)efx->irq_status.dma_addr, efx->irq_status.addr, (unsigned long long)virt_to_phys(efx->irq_status.addr)); /* Read in the non-volatile configuration */ rc = siena_probe_nvconfig(efx); if (rc == -EINVAL) { netif_err(efx, probe, efx->net_dev, "NVRAM is invalid therefore using defaults\n"); efx->phy_type = PHY_TYPE_NONE; efx->mdio.prtad = MDIO_PRTAD_NONE; } else if (rc) { goto fail5; } rc = efx_mcdi_mon_probe(efx); if (rc) goto fail5; #ifdef CONFIG_SFC_SRIOV efx_siena_sriov_probe(efx); #endif efx_ptp_defer_probe_with_channel(efx); return 0; fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: efx_mcdi_detach(efx); efx_mcdi_fini(efx); fail1: kfree(efx->nic_data); return rc; }
static int falcon_probe_nic(struct efx_nic *efx) { struct falcon_nic_data *nic_data; struct falcon_board *board; int rc; /* Allocate storage for hardware specific data */ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; efx->nic_data = nic_data; rc = -ENODEV; if (efx_nic_fpga_ver(efx) != 0) { EFX_ERR(efx, "Falcon FPGA not supported\n"); goto fail1; } if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { efx_oword_t nic_stat; struct pci_dev *dev; u8 pci_rev = efx->pci_dev->revision; if ((pci_rev == 0xff) || (pci_rev == 0)) { EFX_ERR(efx, "Falcon rev A0 not supported\n"); goto fail1; } efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { EFX_ERR(efx, "Falcon rev A1 1G not supported\n"); goto fail1; } if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); goto fail1; } dev = pci_dev_get(efx->pci_dev); while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, dev))) { if (dev->bus == efx->pci_dev->bus && dev->devfn == efx->pci_dev->devfn + 1) { nic_data->pci_dev2 = dev; break; } } if (!nic_data->pci_dev2) { EFX_ERR(efx, "failed to find secondary function\n"); rc = -ENODEV; goto fail2; } } /* Now we can reset the NIC */ rc = falcon_reset_hw(efx, RESET_TYPE_ALL); if (rc) { EFX_ERR(efx, "failed to reset NIC\n"); goto fail3; } /* Allocate memory for INT_KER */ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n", (u64)efx->irq_status.dma_addr, efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr)); falcon_probe_spi_devices(efx); /* Read in the non-volatile configuration */ rc = falcon_probe_nvconfig(efx); if (rc) goto fail5; /* Initialise I2C adapter */ board = falcon_board(efx); board->i2c_adap.owner = THIS_MODULE; board->i2c_data = falcon_i2c_bit_operations; board->i2c_data.data = efx; board->i2c_adap.algo_data = &board->i2c_data; board->i2c_adap.dev.parent = &efx->pci_dev->dev; strlcpy(board->i2c_adap.name, "SFC4000 GPIO", sizeof(board->i2c_adap.name)); rc = i2c_bit_add_bus(&board->i2c_adap); if (rc) goto fail5; rc = falcon_board(efx)->type->init(efx); if (rc) { EFX_ERR(efx, "failed to initialise board\n"); goto fail6; } nic_data->stats_disable_count = 1; setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func, (unsigned long)efx); return 0; fail6: BUG_ON(i2c_del_adapter(&board->i2c_adap)); memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); fail5: falcon_remove_spi_devices(efx); efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: if (nic_data->pci_dev2) { pci_dev_put(nic_data->pci_dev2); nic_data->pci_dev2 = NULL; } fail2: fail1: kfree(efx->nic_data); return rc; }
void efx_intr_fatal( __in efx_nic_t *enp) { #if EFSYS_OPT_DECODE_INTR_FATAL efx_oword_t fatal; efx_oword_t mem_per; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal); EFX_ZERO_OWORD(mem_per); if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 || EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0) EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per); if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR, EFX_OWORD_FIELD(mem_per, EFX_DWORD_0), EFX_OWORD_FIELD(mem_per, EFX_DWORD_1)); if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0); if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0) EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR, EFX_OWORD_FIELD(mem_per, EFX_DWORD_0), EFX_OWORD_FIELD(mem_per, EFX_DWORD_1)); #else EFSYS_ASSERT(0); #endif }
static int falcon_spi_poll(struct efx_nic *efx) { efx_oword_t reg; efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD); return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; }
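/* Illustrative sketch only (not part of the driver code above): falcon_spi_poll()
 * samples the SPI host-command-enable bit once, so a caller that needs to block
 * until the command completes would typically wrap it in a bounded polling loop.
 * The helper name and the ~100ms budget below are assumptions for illustration. */
static int falcon_spi_wait_example(struct efx_nic *efx)
{
	int count;

	for (count = 0; count < 10000; count++) {	/* ~100ms at 10us per step */
		if (falcon_spi_poll(efx) == 0)
			return 0;	/* SPI host command register is free */
		udelay(10);
	}
	return -ETIMEDOUT;
}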
/* Resets NIC to known state. This routine must be called in process * context and is allowed to sleep. */ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t glb_ctl_reg_ker; int rc; EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method)); /* Initiate device reset */ if (method == RESET_TYPE_WORLD) { rc = pci_save_state(efx->pci_dev); if (rc) { EFX_ERR(efx, "failed to backup PCI state of primary " "function prior to hardware reset\n"); goto fail1; } if (efx_nic_is_dual_func(efx)) { rc = pci_save_state(nic_data->pci_dev2); if (rc) { EFX_ERR(efx, "failed to backup PCI state of " "secondary function prior to " "hardware reset\n"); goto fail2; } } EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, FRF_AB_EXT_PHY_RST_DUR, FFE_AB_EXT_PHY_RST_DUR_10240US, FRF_AB_SWRST, 1); } else { EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, /* exclude PHY from "invisible" reset */ FRF_AB_EXT_PHY_RST_CTL, method == RESET_TYPE_INVISIBLE, /* exclude EEPROM/flash and PCIe */ FRF_AB_PCIE_CORE_RST_CTL, 1, FRF_AB_PCIE_NSTKY_RST_CTL, 1, FRF_AB_PCIE_SD_RST_CTL, 1, FRF_AB_EE_RST_CTL, 1, FRF_AB_EXT_PHY_RST_DUR, FFE_AB_EXT_PHY_RST_DUR_10240US, FRF_AB_SWRST, 1); } efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); EFX_LOG(efx, "waiting for hardware reset\n"); schedule_timeout_uninterruptible(HZ / 20); /* Restore PCI configuration if needed */ if (method == RESET_TYPE_WORLD) { if (efx_nic_is_dual_func(efx)) { rc = pci_restore_state(nic_data->pci_dev2); if (rc) { EFX_ERR(efx, "failed to restore PCI config for " "the secondary function\n"); goto fail3; } } rc = pci_restore_state(efx->pci_dev); if (rc) { EFX_ERR(efx, "failed to restore PCI config for the " "primary function\n"); goto fail4; } EFX_LOG(efx, "successfully restored PCI config\n"); } /* Assert that reset complete */ efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { rc = -ETIMEDOUT; EFX_ERR(efx, "timed out waiting for hardware reset\n"); goto fail5; } EFX_LOG(efx, "hardware reset complete\n"); return 0; /* pci_save_state() and pci_restore_state() MUST be called in pairs */ fail2: fail3: pci_restore_state(efx->pci_dev); fail1: fail4: fail5: return rc; }
static int falcon_probe_nic(struct efx_nic *efx) { struct falcon_nic_data *nic_data; struct falcon_board *board; int rc; nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; efx->nic_data = nic_data; rc = -ENODEV; if (efx_nic_fpga_ver(efx) != 0) { netif_err(efx, probe, efx->net_dev, "Falcon FPGA not supported\n"); goto fail1; } if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { efx_oword_t nic_stat; struct pci_dev *dev; u8 pci_rev = efx->pci_dev->revision; if ((pci_rev == 0xff) || (pci_rev == 0)) { netif_err(efx, probe, efx->net_dev, "Falcon rev A0 not supported\n"); goto fail1; } efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { netif_err(efx, probe, efx->net_dev, "Falcon rev A1 1G not supported\n"); goto fail1; } if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { netif_err(efx, probe, efx->net_dev, "Falcon rev A1 PCI-X not supported\n"); goto fail1; } dev = pci_dev_get(efx->pci_dev); while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, dev))) { if (dev->bus == efx->pci_dev->bus && dev->devfn == efx->pci_dev->devfn + 1) { nic_data->pci_dev2 = dev; break; } } if (!nic_data->pci_dev2) { netif_err(efx, probe, efx->net_dev, "failed to find secondary function\n"); rc = -ENODEV; goto fail2; } } rc = __falcon_reset_hw(efx, RESET_TYPE_ALL); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; } rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); netif_dbg(efx, probe, efx->net_dev, "INT_KER at %llx (virt %p phys %llx)\n", (u64)efx->irq_status.dma_addr, efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr)); falcon_probe_spi_devices(efx); rc = falcon_probe_nvconfig(efx); if (rc) { if (rc == -EINVAL) netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n"); goto fail5; } efx->timer_quantum_ns = 4968; board = falcon_board(efx); board->i2c_adap.owner = THIS_MODULE; board->i2c_data = falcon_i2c_bit_operations; board->i2c_data.data = efx; board->i2c_adap.algo_data = &board->i2c_data; board->i2c_adap.dev.parent = &efx->pci_dev->dev; strlcpy(board->i2c_adap.name, "SFC4000 GPIO", sizeof(board->i2c_adap.name)); rc = i2c_bit_add_bus(&board->i2c_adap); if (rc) goto fail5; rc = falcon_board(efx)->type->init(efx); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to initialise board\n"); goto fail6; } nic_data->stats_disable_count = 1; setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func, (unsigned long)efx); return 0; fail6: BUG_ON(i2c_del_adapter(&board->i2c_adap)); memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: if (nic_data->pci_dev2) { pci_dev_put(nic_data->pci_dev2); nic_data->pci_dev2 = NULL; } fail2: fail1: kfree(efx->nic_data); return rc; }
static int siena_probe_nic(struct efx_nic *efx) { struct siena_nic_data *nic_data; bool already_attached = 0; efx_oword_t reg; int rc; /* Allocate storage for hardware specific data */ nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; efx->nic_data = nic_data; if (efx_nic_fpga_ver(efx) != 0) { netif_err(efx, probe, efx->net_dev, "Siena FPGA not supported\n"); rc = -ENODEV; goto fail1; } efx_reado(efx, &reg, FR_AZ_CS_DEBUG); efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; /* Initialise MCDI */ nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + FR_CZ_MC_TREG_SMEM, FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); if (!nic_data->mcdi_smem) { netif_err(efx, probe, efx->net_dev, "could not map MCDI at %llx+%x\n", (unsigned long long)efx->membase_phys + FR_CZ_MC_TREG_SMEM, FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); rc = -ENOMEM; goto fail1; } efx_mcdi_init(efx); /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) goto fail2; /* Let the BMC know that the driver is now in charge of link and * filter settings. We must do this before we reset the NIC */ rc = efx_mcdi_drv_attach(efx, true, &already_attached); if (rc) { netif_err(efx, probe, efx->net_dev, "Unable to register driver with MCPU\n"); goto fail2; } if (already_attached) /* Not a fatal error */ netif_err(efx, probe, efx->net_dev, "Host already registered with MCPU\n"); /* Now we can reset the NIC */ rc = siena_reset_hw(efx, RESET_TYPE_ALL); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; } siena_init_wol(efx); /* Allocate memory for INT_KER */ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); netif_dbg(efx, probe, efx->net_dev, "INT_KER at %llx (virt %p phys %llx)\n", (unsigned long long)efx->irq_status.dma_addr, efx->irq_status.addr, (unsigned long long)virt_to_phys(efx->irq_status.addr)); /* Read in the non-volatile configuration */ rc = siena_probe_nvconfig(efx); if (rc == -EINVAL) { netif_err(efx, probe, efx->net_dev, "NVRAM is invalid therefore using defaults\n"); efx->phy_type = PHY_TYPE_NONE; efx->mdio.prtad = MDIO_PRTAD_NONE; } else if (rc) { goto fail5; } return 0; fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: efx_mcdi_drv_attach(efx, false, NULL); fail2: iounmap(nic_data->mcdi_smem); fail1: kfree(efx->nic_data); return rc; }
static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t glb_ctl_reg_ker; int rc; netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n", RESET_TYPE(method)); if (method == RESET_TYPE_WORLD) { rc = pci_save_state(efx->pci_dev); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to backup PCI state of primary " "function prior to hardware reset\n"); goto fail1; } if (efx_nic_is_dual_func(efx)) { rc = pci_save_state(nic_data->pci_dev2); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to backup PCI state of " "secondary function prior to " "hardware reset\n"); goto fail2; } } EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, FRF_AB_EXT_PHY_RST_DUR, FFE_AB_EXT_PHY_RST_DUR_10240US, FRF_AB_SWRST, 1); } else { EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, FRF_AB_EXT_PHY_RST_CTL, method == RESET_TYPE_INVISIBLE, FRF_AB_PCIE_CORE_RST_CTL, 1, FRF_AB_PCIE_NSTKY_RST_CTL, 1, FRF_AB_PCIE_SD_RST_CTL, 1, FRF_AB_EE_RST_CTL, 1, FRF_AB_EXT_PHY_RST_DUR, FFE_AB_EXT_PHY_RST_DUR_10240US, FRF_AB_SWRST, 1); } efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n"); schedule_timeout_uninterruptible(HZ / 20); if (method == RESET_TYPE_WORLD) { if (efx_nic_is_dual_func(efx)) pci_restore_state(nic_data->pci_dev2); pci_restore_state(efx->pci_dev); netif_dbg(efx, drv, efx->net_dev, "successfully restored PCI config\n"); } efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { rc = -ETIMEDOUT; netif_err(efx, hw, efx->net_dev, "timed out waiting for hardware reset\n"); goto fail3; } netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); return 0; fail2: pci_restore_state(efx->pci_dev); fail1: fail3: return rc; }
static void falcon_reset_macs(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t reg, mac_ctrl; int count; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { /* It's not safe to use GLB_CTL_REG to reset the * macs, so instead use the internal MAC resets */ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1); efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG); for (count = 0; count < 10000; count++) { efx_reado(efx, &reg, FR_AB_XM_GLB_CFG); if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == 0) return; udelay(10); } netif_err(efx, hw, efx->net_dev, "timed out waiting for XMAC core reset\n"); } /* Mac stats will fail whilst the TX fifo is draining */ WARN_ON(nic_data->stats_disable_count == 0); efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL); EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1); efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); efx_reado(efx, &reg, FR_AB_GLB_CTL); EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1); EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1); EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1); efx_writeo(efx, &reg, FR_AB_GLB_CTL); count = 0; while (1) { efx_reado(efx, &reg, FR_AB_GLB_CTL); if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { netif_dbg(efx, hw, efx->net_dev, "Completed MAC reset after %d loops\n", count); break; } if (count > 20) { netif_err(efx, hw, efx->net_dev, "MAC reset failed\n"); break; } count++; udelay(10); } /* Ensure the correct MAC is selected before statistics * are re-enabled by the caller */ efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); falcon_setup_xaui(efx); }
__checkReturn efx_rc_t efx_nic_biu_test( __in efx_nic_t *enp) { efx_oword_t oword; efx_rc_t rc; /* * Write magic values to scratch registers 0 and 1, then * verify that the values were written correctly. Interleave * the accesses to ensure that the BIU is not just reading * back the cached value that was last written. */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0); EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) { rc = EIO; goto fail1; } EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) { rc = EIO; goto fail2; } /* * Perform the same test, with the values swapped. This * ensures that subsequent tests don't start with the correct * values already written into the scratch registers. */ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0); EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE); if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) { rc = EIO; goto fail3; } EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE); if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) { rc = EIO; goto fail4; } return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); }