/*
 * Program the HMC RX queue context for the Flow Director (FDIR) queue of
 * rxq's VSI and arm the queue's tail register.
 *
 * Returns I40E_SUCCESS, or the error code from clearing/setting the LAN RX
 * queue context in the HMC.
 */
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
	struct i40e_hmc_obj_rxq rx_ctx;
	int err = I40E_SUCCESS;

	/* Start from an all-zero context; only set the fields we need. */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* Init the RX queue in hardware */
	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = 0;	/* no header buffer — header split disabled below */
	/* Ring base is expressed in I40E_QUEUE_BASE_ADDR_UNIT units. */
	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	/* dsize=1 selects the 32-byte descriptor format (default build). */
	rx_ctx.dsize = 1;
#endif
	rx_ctx.dtype = i40e_header_split_none;
	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
	rx_ctx.rxmax = ETHER_MAX_LEN;
	/* NOTE(review): TPH (TLP processing hints) enables presumably mirror
	 * the regular LAN RX queue setup — confirm against the datasheet. */
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 0;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.prefena = 1;

	/* Clear any stale context before programming the new one. */
	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
		return err;
	}
	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
		return err;
	}
	rxq->qrx_tail = hw->hw_addr +
		I40E_QRX_TAIL(rxq->vsi->base_queue);

	/* Ensure context writes are visible before touching the tail. */
	rte_wmb();
	/* Init the RX tail register. */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return err;
}
/*
 * iflib queues-alloc callback: allocate the per-queue software state
 * (struct ixl_queue array plus TX buffer array) and wire each queue pair
 * to the hardware ring addresses iflib already DMA-allocated.
 *
 * vaddrs/paddrs hold nqs (== 2: one TX, one RX) entries per queue, laid
 * out as [q0 tx, q0 rx, q1 tx, q1 rx, ...].
 *
 * Returns 0 on success or ENOMEM; on failure nothing is left allocated.
 */
int
ixl_if_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs)
{
	struct ixl_vsi *vsi = iflib_get_softc(ctx);
	struct ixl_queue *que;
	struct ixl_tx_buf *bufs;
	if_shared_ctx_t sctx;
	int i;

	MPASS(vsi->num_queues > 0);
	MPASS(nqs == 2);	/* one TX and one RX ring per queue */

	/* Allocate queue structure memory */
	sctx = iflib_get_sctx(ctx);
	/* No cast of malloc(9); M_NOWAIT so the NULL check below is live. */
	vsi->queues = malloc(sizeof(struct ixl_queue) * vsi->num_queues,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (vsi->queues == NULL) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate queue memory\n");
		return (ENOMEM);
	}
	/*
	 * Use M_NOWAIT here too: the original mixed M_WAITOK with a NULL
	 * check, but M_WAITOK never returns NULL (malloc(9)), making that
	 * check dead code and the flags inconsistent with the first alloc.
	 */
	bufs = malloc(sizeof(*bufs) * sctx->isc_ntxd * vsi->num_queues,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (bufs == NULL) {
		device_printf(iflib_get_dev(ctx),
		    "failed to allocate sw bufs\n");
		free(vsi->queues, M_IXL);
		vsi->queues = NULL;	/* don't leave a dangling pointer */
		return (ENOMEM);
	}

	for (i = 0, que = vsi->queues; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		que->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(que->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * 2];
		txr->tx_paddr = paddrs[i * 2];
		/* Each queue gets its own isc_ntxd-sized slice of bufs. */
		txr->tx_buffers = bufs + i * sctx->isc_ntxd;
		rxr->tail = I40E_QRX_TAIL(que->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * 2 + 1];
		rxr->rx_paddr = paddrs[i * 2 + 1];
		txr->que = rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    vsi->num_queues);
	return (0);
}