static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);
		u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
					       true, DMAE_COMP_GRC);
		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);

		memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
/* Older variant of the same function, from before the bnx2x_dmae_opcode()
 * helper existed: the DMAE opcode is assembled by hand from DMAE_CMD_* flags.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
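/*
 * Sketch of the matching completion wait.  Upstream has a helper of roughly
 * this shape (bnx2x_stats_comp()); the body below is a hedged reconstruction,
 * not a verbatim copy.  bnx2x_hw_stats_post() arms *stats_comp with
 * DMAE_COMP_VAL, clears it to 0 before posting, and the last DMAE command in
 * the executer chain writes DMAE_COMP_VAL back when the whole chain has run,
 * so the waiter only has to poll that one word.
 */
static int bnx2x_stats_comp_wait(struct bnx2x *bp)	/* hypothetical name */
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		usleep_range(1000, 2000);	/* DMAE chains finish in well under 10ms */
	}
	return 1;
}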
static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 600, interval = 100; /* wait for 60 seconds */

	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}
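/*
 * Sketch of how a caller typically drives bnx2x_send_msg2pf(): build a
 * request in the DMA-able VF->PF mailbox, zero the response status byte that
 * doubles as the "done" flag, then post and wait.  The mailbox field names
 * below (bp->vf2pf_mbox, resp.general_resp.hdr.status, PFVF_STATUS_SUCCESS)
 * follow the upstream vf2pf layout from memory; treat them as assumptions.
 */
static int bnx2x_vfpf_example(struct bnx2x *bp)	/* hypothetical helper */
{
	struct pfvf_general_resp_tlv *resp =
		&bp->vf2pf_mbox->resp.general_resp;	/* assumed layout */
	int rc;

	/* the PF writes a non-zero status here once it has answered */
	resp->hdr.status = 0;

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		return rc;	/* -EINVAL or -EAGAIN from the helper above */

	return (resp->hdr.status == PFVF_STATUS_SUCCESS) ? 0 : -EIO;
}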
int
bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	uint16_t j, idx;
	const struct rte_memzone *dma;
	struct bnx2x_rx_queue *rxq;
	uint32_t dma_size;
	struct rte_mbuf *mbuf;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
	struct eth_rx_cqe_next_page *nextpg;
	phys_addr_t *rx_bd;
	phys_addr_t busaddr;

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}
	rxq->sc = sc;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ?
				 0 : ETHER_CRC_LEN);

	rxq->nb_rx_pages = 1;
	while (USABLE_RX_BD(rxq) < nb_desc)
		rxq->nb_rx_pages <<= 1;

	rxq->nb_rx_desc  = TOTAL_RX_BD(rxq);
	sc->rx_ring_size = USABLE_RX_BD(rxq);
	rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);

	rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, rx_pages=%u, cq_pages=%u",
		     queue_idx, nb_desc, rxq->rx_free_thresh,
		     (unsigned long)USABLE_RX_BD(rxq),
		     (unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
		     rxq->nb_cq_pages);

	/* Allocate RX ring hardware descriptors */
	dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
	dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->rx_ring = (uint64_t *)dma->addr;
	memset((void *)rxq->rx_ring, 0, dma_size);

	/* Link the RX chain pages. */
	for (j = 1; j <= rxq->nb_rx_pages; j++) {
		rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
		busaddr = rxq->rx_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
		*rx_bd = busaddr;
	}

	/* Allocate software ring */
	dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (NULL == rxq->sw_ring) {
		PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Initialize software ring entries */
	rxq->rx_mbuf_alloc = 0;
	for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
		mbuf = bnx2x_rxmbuf_alloc(mp);
		if (NULL == mbuf) {
			PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
				   (unsigned)rxq->queue_id, idx);
			bnx2x_rx_queue_release(rxq);
			return -ENOMEM;
		}
		rxq->sw_ring[idx] = mbuf;
		rxq->rx_ring[idx] = mbuf->buf_physaddr;
		rxq->rx_mbuf_alloc++;
	}
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
	rxq->rx_bd_head = 0;
	rxq->rx_bd_tail = rxq->nb_rx_desc;

	/* Allocate CQ chain. */
	dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
	dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
	if (NULL == dma) {
		PMD_RX_LOG(ERR, "RCQ alloc failed");
		/* release everything allocated so far, not just rxq itself */
		bnx2x_rx_queue_release(rxq);
		return -ENOMEM;
	}
	fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
	rxq->cq_ring = (union eth_rx_cqe *)dma->addr;

	/* Link the CQ chain pages. */
	for (j = 1; j <= rxq->nb_cq_pages; j++) {
		nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
		busaddr = rxq->cq_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
		nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
	}
	rxq->rx_cq_head = 0;
	rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	if (!sc->rx_queues)
		sc->rx_queues = dev->data->rx_queues;

	return 0;
}
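/*
 * Illustration only: the (j % nb_pages) arithmetic used by both page-linking
 * loops above is what turns the per-page "next" pointers into a circle.
 * This standalone sketch (not part of the driver; all names are made up)
 * prints which page each link targets and shows the last page wrapping back
 * to page 0.
 */
#include <stdio.h>

#define NB_PAGES 4U	/* assumed page count for the demo */
#define PAGE_SZ  4096U	/* stand-in for BNX2X_PAGE_SIZE */

int main(void)
{
	unsigned long base = 0x100000UL; /* pretend ring base bus address */
	unsigned int j;

	for (j = 1; j <= NB_PAGES; j++) {
		unsigned long next = base + (unsigned long)PAGE_SZ * (j % NB_PAGES);

		/* page j-1's link points at page (j % NB_PAGES) */
		printf("page %u -> page %u (addr 0x%lx)\n",
		       j - 1, j % NB_PAGES, next);
	}
	return 0;	/* the j == NB_PAGES iteration wraps to page 0 */
}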
int
bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	uint16_t i;
	unsigned int tsize;
	const struct rte_memzone *tz;
	struct bnx2x_tx_queue *txq;
	struct eth_tx_next_bd *tx_n_bd;
	uint64_t busaddr;
	struct bnx2x_softc *sc = dev->data->dev_private;
	struct bnx2x_fastpath *fp = &sc->fp[queue_idx];

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL)
		return -ENOMEM;
	txq->sc = sc;

	txq->nb_tx_pages = 1;
	while (USABLE_TX_BD(txq) < nb_desc)
		txq->nb_tx_pages <<= 1;

	txq->nb_tx_desc  = TOTAL_TX_BD(txq);
	sc->tx_ring_size = TOTAL_TX_BD(txq);

	txq->tx_free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;

	PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
		     "total_bd=%lu, tx_pages=%u",
		     queue_idx, nb_desc, txq->tx_free_thresh,
		     (unsigned long)USABLE_TX_BD(txq),
		     (unsigned long)TOTAL_TX_BD(txq), txq->nb_tx_pages);

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
	tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
	if (tz == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}
	fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
	txq->tx_ring = (union eth_tx_bd_types *)tz->addr;
	memset(txq->tx_ring, 0, tsize);

	/* Allocate software ring */
	tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize, RTE_CACHE_LINE_SIZE);
	if (txq->sw_ring == NULL) {
		bnx2x_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
	   txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */

	/* Link TX pages */
	for (i = 1; i <= txq->nb_tx_pages; i++) {
		tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
		busaddr = txq->tx_ring_phys_addr +
			  BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
		tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
		tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
		/* PMD_DRV_LOG(DEBUG, "link tx page %lu",
		   (TOTAL_TX_BD_PER_PAGE * i - 1)); */
	}

	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->tx_pkt_tail = 0;
	txq->tx_pkt_head = 0;
	txq->tx_bd_tail = 0;
	txq->tx_bd_head = 0;
	txq->nb_tx_avail = txq->nb_tx_desc;
	dev->tx_pkt_burst = bnx2x_xmit_pkts;
	dev->data->tx_queues[queue_idx] = txq;
	if (!sc->tx_queues)
		sc->tx_queues = dev->data->tx_queues;

	return 0;
}
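/*
 * Illustration only: how the "while (USABLE_TX_BD(txq) < nb_desc)" loop in
 * both setup functions rounds the ring up to a power-of-two number of pages.
 * The per-page constants here are assumptions for the demo, not the driver's
 * actual values; the driver reserves BDs per page for the next-page link.
 */
#include <stdio.h>

#define BD_PER_PAGE     256U		       /* assumed BDs per ring page */
#define USABLE_PER_PAGE (BD_PER_PAGE - 1U)     /* assume one link BD per page */

int main(void)
{
	unsigned int nb_desc = 1024;	/* what the application asked for */
	unsigned int pages = 1;

	/* same shape as the driver's loop: double pages until usable >= request */
	while (USABLE_PER_PAGE * pages < nb_desc)
		pages <<= 1;

	/* for 1024 requested BDs this yields pages=8, usable_bd=2040 */
	printf("req=%u -> pages=%u, total_bd=%u, usable_bd=%u\n",
	       nb_desc, pages, BD_PER_PAGE * pages, USABLE_PER_PAGE * pages);
	return 0;
}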