/**
 *	t3_mc5_init - initialize MC5 and the TCAM
 *	@mc5: the MC5 handle
 *	@nservers: desired number of TCP servers (listening ports)
 *	@nfilters: desired number of HW filters (classifiers)
 *	@nroutes: desired number of routes
 *
 *	Initialize MC5 and the TCAM and partition the TCAM for the requested
 *	number of servers, filters, and routes.  The number of routes is
 *	typically 0 except for specialized uses of the T3 adapters.
 */
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
                unsigned int nroutes)
{
        int err;
        unsigned int tcam_size = mc5->tcam_size;
        unsigned int mode72 = mc5->mode == MC5_MODE_72_BIT;
        adapter_t *adap = mc5->adapter;

        if (!tcam_size)
                return 0;

        if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
                return -EINVAL;

        if (nfilters)
                mc5->parity_enabled = 0;

        /* Reset the TCAM */
        t3_set_reg_field(adap, A_MC5_DB_CONFIG, F_TMMODE | F_COMPEN,
                         V_COMPEN(mode72) | V_TMMODE(mode72) | F_TMRST);
        if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
                CH_ERR(adap, "TCAM reset timed out\n");
                return -1;
        }

        t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
        t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
                     tcam_size - nroutes - nfilters);
        t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
                     tcam_size - nroutes - nfilters - nservers);

        /* All the TCAM addresses we access have only the low 32 bits nonzero */
        t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
        t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);

        mc5_dbgi_mode_enable(mc5);

        switch (mc5->part_type) {
        case IDT75P52100:
                err = init_idt52100(mc5);
                break;
        case IDT75N43102:
                err = init_idt43102(mc5);
                break;
        default:
                CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
                err = -EINVAL;
                break;
        }

        mc5_dbgi_mode_disable(mc5);
        return err;
}
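/*
 * For context, a typical caller partitions the TCAM once at attach time.
 * Below is a minimal sketch of such a call; the function name and the
 * server/filter counts are illustrative assumptions, not values from
 * this file.
 */
static int example_setup_tcam(struct mc5 *mc5)
{
        unsigned int nservers = 1024;   /* assumed listening-port budget */
        unsigned int nfilters = 512;    /* assumed HW classifier budget */

        /* No routes: the usual case outside specialized T3 deployments. */
        return t3_mc5_init(mc5, nservers, nfilters, 0);
}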
static int tricn_init(adapter_t *adapter)
{
        int i, sme = 1;

        if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) {
                CH_ERR("%s: ESPI clock not ready\n", adapter->name);
                return -1;
        }

        writel(F_ESPI_RX_CORE_RST, adapter->regs + A_ESPI_RX_RESET);

        if (sme) {
                tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
                tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
                tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
        }
        for (i = 1; i <= 8; i++)
                tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
        for (i = 1; i <= 2; i++)
                tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
        for (i = 1; i <= 3; i++)
                tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
        tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
        tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
        tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
        tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
        tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);

        writel(F_ESPI_RX_CORE_RST | F_ESPI_RX_LNK_RST,
               adapter->regs + A_ESPI_RX_RESET);

        return 0;
}
/*
 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
 * command cmd.  The data to be written must have been set up by the caller.
 * Returns -1 on failure, 0 on success.
 */
static int mc5_write(adapter_t *adapter, u32 addr_lo, u32 cmd)
{
        t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
        if (mc5_cmd_write(adapter, cmd) == 0)
                return 0;
        CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
               addr_lo);
        return -1;
}
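/*
 * mc5_cmd_write() is not shown in this section.  A sketch of its likely
 * shape, assuming it issues the DBGI command and polls the response-valid
 * flag in the same MC5 DBGI block used above; MAX_WRITE_ATTEMPTS and the
 * exact polling parameters are assumptions.
 */
static inline int mc5_cmd_write(adapter_t *adapter, u32 cmd)
{
        /* Issue the command, then wait for the DBGI response to be valid. */
        t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
        return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
                               F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}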
/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
{
        t1_write_reg_4(adapter, addr, val);
        val = t1_read_reg_4(adapter, addr);     /* flush */
        if (!(t1_read_reg_4(adapter, addr) & F_BUSY))
                return 0;
        CH_ERR("%s: write to MC3 register 0x%x timed out\n",
               adapter_name(adapter), addr);
        return -EIO;
}
static int t3b2_mac_reset(struct cmac *mac)
{
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;
        u32 val;

        if (!macidx(mac))
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
        else
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        msleep(10);

        /* Check for xgm Rx fifo empty */
        if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
                            0x80000000, 1, 5, 2)) {
                CH_ERR(adap, "MAC %d Rx fifo drain failed\n", macidx(mac));
                return -1;
        }

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        val = F_MAC_RESET_;
        if (is_10G(adap))
                val |= F_PCS_RESET_;
        else if (uses_xaui(adap))
                val |= F_PCS_RESET_ | F_XG2G_RESET_;
        else
                val |= F_RGMII_RESET_ | F_XG2G_RESET_;
        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        if ((val & F_PCS_RESET_) && adap->params.rev) {
                msleep(1);
                t3b_pcs_reset(mac);
        }
        t3_write_reg(adap, A_XGM_RX_CFG + oft,
                     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
                     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

        if (!macidx(mac))
                t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
        else
                t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

        return 0;
}
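/*
 * t3b_pcs_reset() is called from the reset paths above but is not shown
 * in this section.  A plausible minimal sketch, assuming it simply pulses
 * the PCS reset bit in the same XGM reset register; the 10us settle delay
 * is an assumption.
 */
static void t3b_pcs_reset(struct cmac *mac)
{
        /* Drop the PCS reset bit, let the block settle, then reassert it. */
        t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
                         F_PCS_RESET_, 0);
        udelay(10);
        t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
                         F_PCS_RESET_);
}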
/*
 * 1. Deassert rx_reset_core
 * 2. Program TRICN_CNFG registers
 * 3. Deassert rx_reset_link
 */
static int tricn_init(adapter_t *adapter)
{
        int i = 0;
        int sme = 1;
        int stat = 0;
        int timeout = 0;
        int is_ready = 0;
        int dynamic_deskew = 0;

        if (dynamic_deskew)
                sme = 0;

        /* 1 */
        timeout = 1000;
        do {
                stat = readl(adapter->regs + A_ESPI_RX_RESET);
                is_ready = (stat & 0x4);
                timeout--;
                udelay(5);
        } while (!is_ready && timeout);
        writel(0x2, adapter->regs + A_ESPI_RX_RESET);
        if (!is_ready) {
                CH_ERR("ESPI : ERROR : Timeout tricn_init()\n");
                t1_fatal_err(adapter);
        }

        /* 2 */
        if (sme) {
                tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
                tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
                tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
        }
        for (i = 1; i <= 8; i++)
                tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
        for (i = 1; i <= 2; i++)
                tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
        for (i = 1; i <= 3; i++)
                tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
        tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
        tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
        tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
        tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
        tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);

        /* 3 */
        writel(0x3, adapter->regs + A_ESPI_RX_RESET);

        return 0;
}
/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice but
 * just in case we give them a bit of grace period.  Note that the very first
 * read exists to flush the posted write to the device.
 */
static int wrreg_wait(adapter_t *adapter, unsigned int addr, u32 val)
{
        int attempts = 2;

        t1_write_reg_4(adapter, addr, val);
        val = t1_read_reg_4(adapter, addr);     /* flush */
        while (attempts--) {
                if (!(t1_read_reg_4(adapter, addr) & F_BUSY))
                        return 0;
                if (attempts)
                        DELAY_US(1);
        }
        CH_ERR("%s: write to MC4 register 0x%x timed out\n",
               adapter_name(adapter), addr);
        return -EIO;
}
static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
                       int ch_addr, int reg_offset, u32 wr_data)
{
        int busy, attempts = TRICN_CMD_ATTEMPTS;

        writel(V_WRITE_DATA(wr_data) |
               V_REGISTER_OFFSET(reg_offset) |
               V_CHANNEL_ADDR(ch_addr) |
               V_MODULE_ADDR(module_addr) |
               V_BUNDLE_ADDR(bundle_addr) |
               V_SPI4_COMMAND(TRICN_CMD_WRITE),
               adapter->regs + A_ESPI_CMD_ADDR);
        writel(0, adapter->regs + A_ESPI_GOSTAT);

        do {
                busy = readl(adapter->regs + A_ESPI_GOSTAT) &
                       F_ESPI_CMD_BUSY;
        } while (busy && --attempts);

        if (busy)
                CH_ERR("%s: TRICN write timed out\n", adapter->name);

        return busy;
}
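/*
 * The read side of this command interface is not shown here.  Below is a
 * purely hypothetical mirror of tricn_write(): TRICN_CMD_READ and
 * A_ESPI_RD_DATA are assumed names, and the busy-poll idiom simply
 * mirrors the write path above.
 */
static int tricn_read(adapter_t *adapter, int bundle_addr, int module_addr,
                      int ch_addr, int reg_offset, u32 *rd_data)
{
        int busy, attempts = TRICN_CMD_ATTEMPTS;

        /* Same command layout as the write path, minus the write data. */
        writel(V_REGISTER_OFFSET(reg_offset) |
               V_CHANNEL_ADDR(ch_addr) |
               V_MODULE_ADDR(module_addr) |
               V_BUNDLE_ADDR(bundle_addr) |
               V_SPI4_COMMAND(TRICN_CMD_READ),
               adapter->regs + A_ESPI_CMD_ADDR);
        writel(0, adapter->regs + A_ESPI_GOSTAT);

        do {
                busy = readl(adapter->regs + A_ESPI_GOSTAT) &
                       F_ESPI_CMD_BUSY;
        } while (busy && --attempts);

        if (busy) {
                CH_ERR("%s: TRICN read timed out\n", adapter->name);
                return busy;
        }

        *rd_data = readl(adapter->regs + A_ESPI_RD_DATA);
        return 0;
}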
/*
 * Read SEEPROM.  A zero is written to the flag register when the address is
 * written to the Control register.  The hardware device will set the flag to
 * one when 4 bytes have been transferred to the Data register.
 */
int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
        int i = EEPROM_MAX_POLL;
        u16 val;

        if (addr >= EEPROMSIZE || (addr & 3))
                return -EINVAL;

        pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
        do {
                udelay(50);
                pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
        } while (!(val & F_VPD_OP_FLAG) && --i);

        if (!(val & F_VPD_OP_FLAG)) {
                CH_ERR("%s: reading EEPROM address 0x%x failed\n",
                       adapter->name, addr);
                return -EIO;
        }
        pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
        *data = le32_to_cpu(*data);
        return 0;
}
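/*
 * Since the interface transfers one aligned 32-bit word per operation,
 * reading a larger region is just a loop over 4-byte offsets.  A minimal
 * illustrative helper (the function name and buffer layout are assumptions,
 * not part of this driver):
 */
static int example_seeprom_read_block(adapter_t *adapter, u32 addr,
                                      u32 len, u32 *buf)
{
        int err;

        /* addr and len are assumed 4-byte aligned, as the API requires. */
        for (; len >= 4; addr += 4, len -= 4, buf++) {
                err = t1_seeprom_read(adapter, addr, buf);
                if (err)
                        return err;
        }
        return 0;
}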
int t3_mac_reset(struct cmac *mac)
{
        static const struct addr_val_pair mac_reset_avp[] = {
                {A_XGM_TX_CTRL, 0},
                {A_XGM_RX_CTRL, 0},
                {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
                 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
                {A_XGM_RX_HASH_LOW, 0},
                {A_XGM_RX_HASH_HIGH, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
                {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
                {A_XGM_STAT_CTRL, F_CLRSTATS}
        };
        u32 val;
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
        t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
                         F_RXSTRFRWRD | F_DISERRFRAMES,
                         uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
        t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);

        if (uses_xaui(adap)) {
                if (adap->params.rev == 0) {
                        t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
                                         F_RXENABLE | F_TXENABLE);
                        if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
                                            F_CMULOCK, 1, 5, 2)) {
                                CH_ERR(adap,
                                       "MAC %d XAUI SERDES CMU lock failed\n",
                                       macidx(mac));
                                return -1;
                        }
                        t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
                                         F_SERDESRESET_);
                } else
                        xaui_serdes_reset(mac);
        }

        t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
                         V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
                         V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);

        val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
        if (is_10G(adap))
                val |= F_PCS_RESET_;
        else if (uses_xaui(adap))
                val |= F_PCS_RESET_ | F_XG2G_RESET_;
        else
                val |= F_RGMII_RESET_ | F_XG2G_RESET_;
        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        if ((val & F_PCS_RESET_) && adap->params.rev) {
                msleep(1);
                t3b_pcs_reset(mac);
        }

        memset(&mac->stats, 0, sizeof(mac->stats));
        return 0;
}
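/*
 * t3_write_regs() applies an address/value table such as mac_reset_avp
 * above.  Its likely shape is sketched below, assuming it does nothing
 * beyond iterating the table and applying the supplied per-MAC offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
                   int n, unsigned int offset)
{
        /* Write each (register, value) pair at the offset-adjusted address. */
        while (n--) {
                t3_write_reg(adapter, p->reg_addr + offset, p->val);
                p++;
        }
}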
static int t3b2_mac_reset(struct cmac *mac)
{
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset, store;
        int idx = macidx(mac);
        u32 val;

        if (!macidx(mac))
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
        else
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

        /* Stop NIC traffic to reduce the number of TXTOGGLES */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);

        /* Ensure TX drains */
        t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        /* Store A_TP_TX_DROP_CFG_CH0 */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        store = t3_read_reg(adap, A_TP_PIO_DATA);

        msleep(10);

        /* Change DROP_CFG to 0xc0000011 */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);

        /* Check for xgm Rx fifo empty */
        /* Increased loop count to 1000 from 5 to cover 1G and 100Mbps case */
        if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
                            0x80000000, 1, 1000, 2)) {
                CH_ERR(adap, "MAC %d Rx fifo drain failed\n", macidx(mac));
                return -1;
        }

        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        val = F_MAC_RESET_;
        if (is_10G(adap))
                val |= F_PCS_RESET_;
        else if (uses_xaui(adap))
                val |= F_PCS_RESET_ | F_XG2G_RESET_;
        else
                val |= F_RGMII_RESET_ | F_XG2G_RESET_;
        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
        t3_read_reg(adap, A_XGM_RESET_CTRL + oft);      /* flush */

        if ((val & F_PCS_RESET_) && adap->params.rev) {
                msleep(1);
                t3b_pcs_reset(mac);
        }
        t3_write_reg(adap, A_XGM_RX_CFG + oft,
                     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
                     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);

        /* Restore the DROP_CFG */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        t3_write_reg(adap, A_TP_PIO_DATA, store);

        if (!idx)
                t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
        else
                t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);

        /* Re-enable NIC traffic */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);

        return 0;
}
static int t3_mac_reset(struct cmac *mac, int portspeed)
{
        u32 val, store_mps;
        adapter_t *adap = mac->adapter;
        unsigned int oft = mac->offset;
        int idx = macidx(mac);
        unsigned int store;

        /* Stop egress traffic to xgm */
        store_mps = t3_read_reg(adap, A_MPS_CFG);
        if (!idx)
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
        else
                t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);

        /* This will reduce the number of TXTOGGLES */
        /* Clear: to stop the NIC traffic */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);

        /* Ensure TX drains */
        t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);

        /* PCS in reset */
        t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
        (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);       /* flush */

        /* Store A_TP_TX_DROP_CFG_CH0 */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        store = t3_read_reg(adap, A_TP_PIO_DATA);

        msleep(10);

        /* Change DROP_CFG to 0xc0000011 */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);

        /* Check for xgm Rx fifo empty */
        /* Increased loop count to 1000 from 5 to cover 1G and 100Mbps case */
        if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
                            0x80000000, 1, 1000, 2) && portspeed < 0) {
                CH_ERR(adap, "MAC %d Rx fifo drain failed\n", idx);
                return -1;
        }

        if (portspeed >= 0) {
                u32 intr = t3_read_reg(adap, A_XGM_INT_ENABLE + oft);

                /*
                 * safespeedchange: wipes out pretty much all XGMAC
                 * registers.
                 */
                t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
                                 V_PORTSPEED(M_PORTSPEED) | F_SAFESPEEDCHANGE,
                                 portspeed | F_SAFESPEEDCHANGE);
                (void) t3_read_reg(adap, A_XGM_PORT_CFG + oft);
                t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
                                 F_SAFESPEEDCHANGE, 0);
                (void) t3_read_reg(adap, A_XGM_PORT_CFG + oft);
                t3_mac_init(mac);
                t3_write_reg(adap, A_XGM_INT_ENABLE + oft, intr);
        } else {
                t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0); /* MAC in reset */
                (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */

                val = xgm_reset_ctrl(mac);
                t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
                (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */

                if ((val & F_PCS_RESET_) && adap->params.rev) {
                        msleep(1);
                        t3b_pcs_reset(mac);
                }
                t3_write_reg(adap, A_XGM_RX_CFG + oft,
                             F_DISPAUSEFRAMES | F_EN1536BFRAMES |
                             F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
        }

        /* Restore the DROP_CFG */
        t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
        t3_write_reg(adap, A_TP_PIO_DATA, store);

        /* Resume egress traffic to xgm */
        t3_set_reg_field(adap, A_MPS_CFG,
                         F_PORT1ACTIVE | F_PORT0ACTIVE, store_mps);

        /* Set: re-enable NIC traffic */
        t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, F_ENFORCEPKT);

        return 0;
}
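/*
 * xgm_reset_ctrl() is referenced above but not shown in this section.
 * Given the explicit reset-mask construction in the other reset functions
 * here, a plausible sketch is below; it is inferred from those paths, not
 * the verbatim helper.
 */
static u32 xgm_reset_ctrl(struct cmac *mac)
{
        adapter_t *adap = mac->adapter;
        u32 val = F_MAC_RESET_;

        /* Same per-interface reset mask built inline in t3b2_mac_reset(). */
        if (is_10G(adap))
                val |= F_PCS_RESET_;
        else if (uses_xaui(adap))
                val |= F_PCS_RESET_ | F_XG2G_RESET_;
        else
                val |= F_RGMII_RESET_ | F_XG2G_RESET_;
        return val;
}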
static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        static int version_printed;

        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct board_info *bi;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
                       DRV_VERSION);
                ++version_printed;
        }

        err = pci_enable_device(pdev);
        if (err)
                return err;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                CH_ERR("%s: cannot find PCI device memory base address\n",
                       pci_name(pdev));
                err = -ENODEV;
                goto out_disable_pdev;
        }

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;

                if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
                        CH_ERR("%s: unable to obtain 64-bit DMA for "
                               "consistent allocations\n", pci_name(pdev));
                        err = -ENODEV;
                        goto out_disable_pdev;
                }
        } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
                CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
                goto out_disable_pdev;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        bi = t1_get_board_info(ent->driver_data);

        for (i = 0; i < bi->port_number; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_MODULE_OWNER(netdev);
                SET_NETDEV_DEV(netdev, &pdev->dev);

                if (!adapter) {
                        adapter = netdev->priv;
                        adapter->pdev = pdev;
                        adapter->port[0].dev = netdev; /* so we don't leak it */

                        adapter->regs = ioremap(mmio_start, mmio_len);
                        if (!adapter->regs) {
                                CH_ERR("%s: cannot map device registers\n",
                                       pci_name(pdev));
                                err = -ENOMEM;
                                goto out_free_dev;
                        }

                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
                                err = -ENODEV; /* Can't handle this chip rev */
                                goto out_free_dev;
                        }

                        adapter->name = pci_name(pdev);
                        adapter->msg_enable = dflt_msg_enable;
                        adapter->mmio_len = mmio_len;

                        init_MUTEX(&adapter->mib_mutex);
                        spin_lock_init(&adapter->tpi_lock);
                        spin_lock_init(&adapter->work_lock);
                        spin_lock_init(&adapter->async_lock);

                        INIT_WORK(&adapter->ext_intr_handler_task,
                                  ext_intr_task, adapter);
                        INIT_WORK(&adapter->stats_update_task, mac_stats_task,
                                  adapter);
#ifdef work_struct
                        init_timer(&adapter->stats_update_timer);
                        adapter->stats_update_timer.function = mac_stats_timer;
                        adapter->stats_update_timer.data =
                                (unsigned long)adapter;
#endif

                        pci_set_drvdata(pdev, netdev);
                }

                pi = &adapter->port[i];
                pi->dev = netdev;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->if_port = i;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->priv = adapter;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
                netdev->features |= NETIF_F_LLTX;

                adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
                if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
                        adapter->flags |= VLAN_ACCEL_CAPABLE;
                        netdev->features |=
                                NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                        netdev->vlan_rx_register = vlan_rx_register;
                        netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
#endif
                        adapter->flags |= TSO_CAPABLE;
                        netdev->features |= NETIF_F_TSO;
                }

                netdev->open = cxgb_open;
                netdev->stop = cxgb_close;
                netdev->hard_start_xmit = t1_start_xmit;
                netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
                        sizeof(struct cpl_tx_pkt_lso) :
                        sizeof(struct cpl_tx_pkt);
                netdev->get_stats = t1_get_stats;
                netdev->set_multicast_list = t1_set_rxmode;
                netdev->do_ioctl = t1_ioctl;
                netdev->change_mtu = t1_change_mtu;
                netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = t1_netpoll;
#endif
                netdev->weight = 64;

                SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
        }

        if (t1_init_sw_modules(adapter, bi) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for (i = 0; i < bi->port_number; ++i) {
                err = register_netdev(adapter->port[i].dev);
                if (err)
                        CH_WARN("%s: cannot register net device %s, skipping\n",
                                pci_name(pdev), adapter->port[i].dev->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i].dev->name;
                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                CH_ERR("%s: could not register any net devices\n",
                       pci_name(pdev));
                goto out_release_adapter_res;
        }

        printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
               bi->desc, adapter->params.chip_revision,
               adapter->params.pci.is_pcix ? "PCIX" : "PCI",
               adapter->params.pci.speed, adapter->params.pci.width);
        return 0;

 out_release_adapter_res:
        t1_free_sw_modules(adapter);
 out_free_dev:
        if (adapter) {
                if (adapter->regs)
                        iounmap(adapter->regs);
                for (i = bi->port_number - 1; i >= 0; --i)
                        if (adapter->port[i].dev) {
                                cxgb_proc_cleanup(adapter, proc_root_driver);
                                kfree(adapter->port[i].dev);
                        }
        }
        pci_release_regions(pdev);
 out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
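/*
 * A probe routine like init_one() is normally wired up through a
 * struct pci_driver.  A minimal sketch of that glue for this era of
 * kernel API follows; t1_pci_tbl and remove_one are assumed names for
 * the ID table and teardown routine, which are not shown in this
 * section.
 */
static struct pci_driver driver = {
        .name     = DRV_NAME,
        .id_table = t1_pci_tbl,
        .probe    = init_one,
        .remove   = __devexit_p(remove_one),
};

static int __init t1_init_module(void)
{
        /* pci_module_init() was the idiom of this kernel generation;
         * newer kernels use pci_register_driver() directly. */
        return pci_module_init(&driver);
}

static void __exit t1_cleanup_module(void)
{
        pci_unregister_driver(&driver);
}

module_init(t1_init_module);
module_exit(t1_cleanup_module);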
/**
 *	t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
 *	@adapter: the adapter
 *
 *	Retrieves various core SGE parameters in the form of hardware SGE
 *	register values.  The caller is responsible for decoding these as
 *	needed.  The SGE parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
        struct sge_params *sp = &adapter->params.sge;
        u32 params[7], vals[7];
        u32 whoami;
        unsigned int pf, s_hps;
        int i, v;

        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
        params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
        params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
        params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
        params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
        params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
        params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
        v = t4vf_query_params(adapter, 7, params, vals);
        if (v != FW_SUCCESS)
                return v;
        sp->sge_control = vals[0];
        sp->counter_val[0] = G_THRESHOLD_0(vals[6]);
        sp->counter_val[1] = G_THRESHOLD_1(vals[6]);
        sp->counter_val[2] = G_THRESHOLD_2(vals[6]);
        sp->counter_val[3] = G_THRESHOLD_3(vals[6]);
        sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(vals[2]));
        sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(vals[2]));
        sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(vals[3]));
        sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(vals[3]));
        sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(vals[4]));
        sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(vals[4]));

        sp->fl_starve_threshold = G_EGRTHRESHOLD(vals[5]) * 2 + 1;
        if (is_t4(adapter))
                sp->fl_starve_threshold2 = sp->fl_starve_threshold;
        else
                sp->fl_starve_threshold2 =
                        G_EGRTHRESHOLDPACKING(vals[5]) * 2 + 1;

        /*
         * We need the Queues/Page and Host Page Size for our VF.
         * This is based on the PF from which we're instantiated.
         */
        whoami = t4_read_reg(adapter, VF_PL_REG(A_PL_VF_WHOAMI));
        pf = G_SOURCEPF(whoami);

        s_hps = (S_HOSTPAGESIZEPF0 +
                 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * pf);
        sp->page_shift = ((vals[1] >> s_hps) & M_HOSTPAGESIZEPF0) + 10;

        for (i = 0; i < SGE_FLBUF_SIZES; i++) {
                params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                             V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0 +
                                                   (4 * i)));
                v = t4vf_query_params(adapter, 1, params, vals);
                if (v != FW_SUCCESS)
                        return v;
                sp->sge_fl_buffer_size[i] = vals[0];
        }

        /*
         * T4 uses a single control field to specify both the PCIe Padding
         * and Packing Boundary.  T5 introduced the ability to specify these
         * separately, with the Padding Boundary in SGE_CONTROL and the
         * Packing Boundary in SGE_CONTROL2.  So for T5 and later we need to
         * grab SGE_CONTROL2 in order to determine how ingress packet data
         * will be laid out in Packed Buffer Mode.  Unfortunately, older
         * versions of the firmware won't let us retrieve SGE_CONTROL2, so
         * if we get a failure grabbing it we throw an error since we can't
         * figure out the right value.
         */
        sp->spg_len = sp->sge_control & F_EGRSTATUSPAGESIZE ? 128 : 64;
        sp->fl_pktshift = G_PKTSHIFT(sp->sge_control);
        sp->pad_boundary = 1 << (G_INGPADBOUNDARY(sp->sge_control) + 5);
        if (is_t4(adapter))
                sp->pack_boundary = sp->pad_boundary;
        else {
                params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                             V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
                v = t4vf_query_params(adapter, 1, params, vals);
                if (v != FW_SUCCESS) {
                        CH_ERR(adapter, "Unable to get SGE Control2; "
                               "probably old firmware.\n");
                        return v;
                }
                if (G_INGPACKBOUNDARY(vals[0]) == 0)
                        sp->pack_boundary = 16;
                else
                        sp->pack_boundary =
                                1 << (G_INGPACKBOUNDARY(vals[0]) + 5);
        }

        /*
         * For T5 and later we want to use the new BAR2 Doorbells.
         * Unfortunately, older firmware didn't allow these registers to be
         * read.
         */
        if (!is_t4(adapter)) {
                unsigned int s_qpp;

                params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                             V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
                params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                             V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
                v = t4vf_query_params(adapter, 2, params, vals);
                if (v != FW_SUCCESS) {
                        CH_WARN(adapter, "Unable to get VF SGE Queues/Page; "
                                "probably old firmware.\n");
                        return v;
                }
                s_qpp = (S_QUEUESPERPAGEPF0 +
                         (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * pf);
                sp->eq_s_qpp = ((vals[0] >> s_qpp) & M_QUEUESPERPAGEPF0);
                sp->iq_s_qpp = ((vals[1] >> s_qpp) & M_QUEUESPERPAGEPF0);
        }

        return 0;
}
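/*
 * The Queues/Page and Host Page Size values decoded above are what a
 * caller needs to locate a queue's doorbell within BAR2.  A minimal
 * illustrative sketch of that arithmetic follows; the function itself
 * is an assumption, not part of this file.
 */
static u64 example_eq_bar2_offset(const struct adapter *adapter,
                                  unsigned int eq_id)
{
        const struct sge_params *sp = &adapter->params.sge;

        /*
         * Each BAR2 page holds (1 << eq_s_qpp) egress queues and pages
         * are (1 << page_shift) bytes, so the queue's page number times
         * the page size gives its doorbell region's offset.
         */
        unsigned int page = eq_id >> sp->eq_s_qpp;

        return (u64)page << sp->page_shift;
}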
int t1_mc4_init(struct pemc4 *mc4, unsigned int mc4_clock)
{
        int attempts;
        u32 val;
        unsigned int width, ext_mode, slow_mode;
        adapter_t *adapter = mc4->adapter;

        /* Power up the FCRAMs. */
        val = t1_read_reg_4(adapter, A_MC4_CFG);
        t1_write_reg_4(adapter, A_MC4_CFG, val | F_POWER_UP);
        val = t1_read_reg_4(adapter, A_MC4_CFG);        /* flush */

        if (is_MC4A(adapter)) {
                slow_mode = val & F_MC4A_SLOW;
                width = G_MC4A_WIDTH(val);

                /* If we're not in slow mode, we are using the DLLs */
                if (!slow_mode) {
                        /* Clear Reset */
                        val = t1_read_reg_4(adapter, A_MC4_STROBE);
                        t1_write_reg_4(adapter, A_MC4_STROBE,
                                       val & ~F_SLAVE_DLL_RESET);

                        /* Wait for slave DLLs to lock */
                        DELAY_US(2 * 512 / (mc4_clock / 1000000) + 1);
                }
        } else {
                slow_mode = val & F_MC4_SLOW;
                width = !!(val & F_MC4_NARROW);

                /* Initializes the master DLL and slave delay lines. */
                if (t1_is_asic(adapter) && !slow_mode) {
                        val = t1_read_reg_4(adapter, A_MC4_STROBE);
                        t1_write_reg_4(adapter, A_MC4_STROBE,
                                       val & ~F_MASTER_DLL_RESET);

                        /* Wait for the master DLL to lock. */
                        attempts = 100;
                        do {
                                DELAY_US(1);
                                val = t1_read_reg_4(adapter, A_MC4_STROBE);
                        } while (!(val & MC4_DLL_DONE) && --attempts);
                        if (!(val & MC4_DLL_DONE)) {
                                CH_ERR("%s: MC4 DLL lock failed\n",
                                       adapter_name(adapter));
                                goto out_fail;
                        }
                }
        }

        mc4->nwords = 4 >> width;

        /* Set the FCRAM output drive strength and enable DLLs if needed */
        ext_mode = t1_is_asic(adapter) && !slow_mode ? 0 : 1;
        if (wrreg_wait(adapter, A_MC4_EXT_MODE, ext_mode))
                goto out_fail;

        /* Specify the FCRAM operating parameters */
        if (wrreg_wait(adapter, A_MC4_MODE, 0x32))
                goto out_fail;

        /* Initiate an immediate refresh and wait for the write to complete. */
        val = t1_read_reg_4(adapter, A_MC4_REFRESH);
        if (wrreg_wait(adapter, A_MC4_REFRESH, val & ~F_REFRESH_ENABLE))
                goto out_fail;

        /* 2nd immediate refresh as before */
        if (wrreg_wait(adapter, A_MC4_REFRESH, val & ~F_REFRESH_ENABLE))
                goto out_fail;

        /* Convert to KHz first to avoid 64-bit division. */
        mc4_clock /= 1000;                              /* Hz->KHz */
        mc4_clock = mc4_clock * 7812 + mc4_clock / 2;   /* ns */
        mc4_clock /= 1000000;                           /* KHz->MHz, ns->us */

        /* Enable periodic refresh. */
        t1_write_reg_4(adapter, A_MC4_REFRESH,
                       F_REFRESH_ENABLE | V_REFRESH_DIVISOR(mc4_clock));
        (void) t1_read_reg_4(adapter, A_MC4_REFRESH);   /* flush */

        t1_write_reg_4(adapter, A_MC4_ECC_CNTL,
                       F_ECC_GENERATION_ENABLE | F_ECC_CHECK_ENABLE);

        /* Use the BIST engine to clear all of the MC4 memory. */
        t1_write_reg_4(adapter, A_MC4_BIST_ADDR_BEG, 0);
        t1_write_reg_4(adapter, A_MC4_BIST_ADDR_END, (mc4->size << width) - 1);
        t1_write_reg_4(adapter, A_MC4_BIST_DATA, 0);
        t1_write_reg_4(adapter, A_MC4_BIST_OP, V_OP(1) | 0x1f0);
        (void) t1_read_reg_4(adapter, A_MC4_BIST_OP);   /* flush */

        attempts = 100;
        do {
                DELAY_MS(100);
                val = t1_read_reg_4(adapter, A_MC4_BIST_OP);
        } while ((val & F_BUSY) && --attempts);
        if (val & F_BUSY) {
                CH_ERR("%s: MC4 BIST timed out\n", adapter_name(adapter));
                goto out_fail;
        }

        /* Enable normal memory accesses. */
        val = t1_read_reg_4(adapter, A_MC4_CFG);
        t1_write_reg_4(adapter, A_MC4_CFG, val | F_READY);
        val = t1_read_reg_4(adapter, A_MC4_CFG);        /* flush */
        return 0;

 out_fail:
        return -1;
}
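/*
 * The three-step clock conversion above computes the number of memory
 * clocks per 7.8125 us refresh interval while staying within 32-bit
 * arithmetic: (x * 7812 + x / 2) is x * 7812.5.  A worked example for an
 * assumed 125 MHz MC4 clock, purely illustrative:
 *
 *   mc4_clock = 125000000 / 1000            =    125000  (Hz  -> KHz)
 *   mc4_clock = 125000 * 7812 + 125000 / 2  = 976562500
 *   mc4_clock = 976562500 / 1000000         =       976
 *
 * i.e. the refresh divisor is clock-in-MHz * 7.8125 (125 * 7.8125 =
 * 976.5625, truncated to 976), computed without 64-bit division.
 */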
int t1_mc3_init(struct pemc3 *mc3, unsigned int mc3_clock)
{
        u32 val;
        unsigned int width, fast_asic, attempts;
        adapter_t *adapter = mc3->adapter;

        /* Check to see if ASIC is running in slow mode. */
        val = t1_read_reg_4(adapter, A_MC3_CFG);
        width = is_MC3A(adapter) ? G_MC3_WIDTH(val) : 0;
        fast_asic = t1_is_asic(adapter) && !(val & F_MC3_SLOW);

        val &= ~(V_MC3_BANK_CYCLE(M_MC3_BANK_CYCLE) |
                 V_REFRESH_CYCLE(M_REFRESH_CYCLE) |
                 V_PRECHARGE_CYCLE(M_PRECHARGE_CYCLE) |
                 F_ACTIVE_TO_READ_WRITE_DELAY |
                 V_ACTIVE_TO_PRECHARGE_DELAY(M_ACTIVE_TO_PRECHARGE_DELAY) |
                 V_WRITE_RECOVERY_DELAY(M_WRITE_RECOVERY_DELAY));

        if (mc3_clock <= 100000000)
                val |= V_MC3_BANK_CYCLE(7) | V_REFRESH_CYCLE(4) |
                       V_PRECHARGE_CYCLE(2) | V_ACTIVE_TO_PRECHARGE_DELAY(5) |
                       V_WRITE_RECOVERY_DELAY(2);
        else if (mc3_clock <= 133000000)
                val |= V_MC3_BANK_CYCLE(9) | V_REFRESH_CYCLE(5) |
                       V_PRECHARGE_CYCLE(3) | F_ACTIVE_TO_READ_WRITE_DELAY |
                       V_ACTIVE_TO_PRECHARGE_DELAY(6) |
                       V_WRITE_RECOVERY_DELAY(2);
        else
                val |= V_MC3_BANK_CYCLE(0xA) | V_REFRESH_CYCLE(6) |
                       V_PRECHARGE_CYCLE(3) | F_ACTIVE_TO_READ_WRITE_DELAY |
                       V_ACTIVE_TO_PRECHARGE_DELAY(7) |
                       V_WRITE_RECOVERY_DELAY(3);
        t1_write_reg_4(adapter, A_MC3_CFG, val);

        val = t1_read_reg_4(adapter, A_MC3_CFG);
        t1_write_reg_4(adapter, A_MC3_CFG, val | F_CLK_ENABLE);
        val = t1_read_reg_4(adapter, A_MC3_CFG);        /* flush */

        if (fast_asic) {                                /* setup DLLs */
                val = t1_read_reg_4(adapter, A_MC3_STROBE);
                if (is_MC3A(adapter)) {
                        t1_write_reg_4(adapter, A_MC3_STROBE,
                                       val & ~F_SLAVE_DLL_RESET);

                        /* Wait for slave DLLs to lock */
                        DELAY_US(2 * 512 / (mc3_clock / 1000000) + 1);
                } else {
                        /* Initialize the master DLL and slave delay lines. */
                        t1_write_reg_4(adapter, A_MC3_STROBE,
                                       val & ~F_MASTER_DLL_RESET);

                        /* Wait for the master DLL to lock. */
                        attempts = 100;
                        do {
                                DELAY_US(1);
                                val = t1_read_reg_4(adapter, A_MC3_STROBE);
                        } while (!(val & MC3_DLL_DONE) && --attempts);
                        if (!(val & MC3_DLL_DONE)) {
                                CH_ERR("%s: MC3 DLL lock failed\n",
                                       adapter_name(adapter));
                                goto out_fail;
                        }
                }
        }

        /* Initiate a precharge and wait for the precharge to complete. */
        if (wrreg_wait(adapter, A_MC3_PRECHARG, 0))
                goto out_fail;

        /* Set the SDRAM output drive strength and enable DLLs if needed */
        if (wrreg_wait(adapter, A_MC3_EXT_MODE, fast_asic ? 0 : 1))
                goto out_fail;

        /* Specify the SDRAM operating parameters. */
        if (wrreg_wait(adapter, A_MC3_MODE, fast_asic ? 0x161 : 0x21))
                goto out_fail;

        /* Initiate a precharge and wait for the precharge to complete. */
        if (wrreg_wait(adapter, A_MC3_PRECHARG, 0))
                goto out_fail;

        /* Initiate an immediate refresh and wait for the write to complete. */
        val = t1_read_reg_4(adapter, A_MC3_REFRESH);
        if (wrreg_wait(adapter, A_MC3_REFRESH, val & ~F_REFRESH_ENABLE))
                goto out_fail;

        /* 2nd immediate refresh as before */
        if (wrreg_wait(adapter, A_MC3_REFRESH, val & ~F_REFRESH_ENABLE))
                goto out_fail;

        /* Specify the SDRAM operating parameters. */
        if (wrreg_wait(adapter, A_MC3_MODE, fast_asic ? 0x61 : 0x21))
                goto out_fail;

        /* Convert to KHz first to avoid 64-bit division. */
        mc3_clock /= 1000;                              /* Hz->KHz */
        mc3_clock = mc3_clock * 7812 + mc3_clock / 2;   /* ns */
        mc3_clock /= 1000000;                           /* KHz->MHz, ns->us */

        /* Enable periodic refresh. */
        t1_write_reg_4(adapter, A_MC3_REFRESH,
                       F_REFRESH_ENABLE | V_REFRESH_DIVISOR(mc3_clock));
        (void) t1_read_reg_4(adapter, A_MC3_REFRESH);   /* flush */

        t1_write_reg_4(adapter, A_MC3_ECC_CNTL,
                       F_ECC_GENERATION_ENABLE | F_ECC_CHECK_ENABLE);

        /* Use the BIST engine to clear MC3 memory and initialize ECC. */
        t1_write_reg_4(adapter, A_MC3_BIST_ADDR_BEG, 0);
        t1_write_reg_4(adapter, A_MC3_BIST_ADDR_END, (mc3->size << width) - 1);
        t1_write_reg_4(adapter, A_MC3_BIST_DATA, 0);
        t1_write_reg_4(adapter, A_MC3_BIST_OP, V_OP(1) | 0x1f0);
        (void) t1_read_reg_4(adapter, A_MC3_BIST_OP);   /* flush */

        attempts = 100;
        do {
                DELAY_MS(100);
                val = t1_read_reg_4(adapter, A_MC3_BIST_OP);
        } while ((val & F_BUSY) && --attempts);
        if (val & F_BUSY) {
                CH_ERR("%s: MC3 BIST timed out\n", adapter_name(adapter));
                goto out_fail;
        }

        /* Enable normal memory accesses. */
        val = t1_read_reg_4(adapter, A_MC3_CFG);
        t1_write_reg_4(adapter, A_MC3_CFG, val | F_READY);
        return 0;

 out_fail:
        return -1;
}