Example #1
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	struct sge *s = &adap->sge;
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	unsigned int bmap_idx = 0;
	unsigned int per_chan;
	int i, err, msi_idx, que_idx = 0;

	per_chan = rxq_info->nrxq / adap->params.nports;

	/* With MSI-X each queue gets its own vector (msi_idx is refreshed
	 * from the ULD MSI-X bitmap inside the loop below); otherwise the
	 * common interrupt queue's abs_id is encoded as a negative index.
	 */
	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}
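
For context, a minimal sketch of how a caller might drive alloc_uld_rxqs(). The wrapper name setup_uld_rxqs_sketch is hypothetical (not taken from the driver); it only assumes that the sge_uld_rxq_info fields used above (nrxq, nciq, uldrxq, rspq_id and, under MSI-X, msix_tbl) were sized and allocated beforehand.

/* Hypothetical caller (not from the driver source): illustrates the
 * contract of alloc_uld_rxqs() as used above.
 */
static int setup_uld_rxqs_sketch(struct adapter *adap,
				 struct sge_uld_rxq_info *rxq_info, bool lro)
{
	int err;

	/* Assumed precondition: rxq_info->uldrxq, ->rspq_id and, under
	 * MSI-X, ->msix_tbl hold nrxq + nciq entries.
	 */
	err = alloc_uld_rxqs(adap, rxq_info, lro);
	if (err)
		return err;

	/* On success the absolute response-queue IDs reported by the
	 * hardware are available in rxq_info->rspq_id[].
	 */
	return 0;
}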
Example #2
static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx,	uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_rxconf *rx_conf,
				    struct rte_mempool *mp)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
	int err = 0;
	int msi_idx = 0;
	unsigned int temp_nb_desc;
	struct rte_eth_dev_info dev_info;
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	RTE_SET_USED(rx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adapter, "%s: max pkt len must be >= %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/*  Free up the existing queue  */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity checking
	 *
	 * nb_desc must be >= CXGBE_MIN_RING_DESC_SIZE (smaller values fall
	 * back to the default) and <= CXGBE_MAX_RING_DESC_SIZE.
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE);
		return -EINVAL;
	}

	rxq->rspq.size = temp_nb_desc;
	rxq->fl.size = temp_nb_desc;	/* &rxq->fl can never be NULL, so set it unconditionally */

	/* Set to jumbo mode if necessary */
	if (pkt_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, t4_ethrx_handler,
			       t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id);
	return err;
}
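
For comparison, a hedged sketch of the application-side path that reaches this handler: the generic rte_eth_rx_queue_setup() call is dispatched through eth_dev_ops.rx_queue_setup to cxgbe_dev_rx_queue_setup() on a cxgbe port. The port id, descriptor count and mempool below are illustrative values, not taken from the driver.

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative application-side usage (not driver code). */
static int setup_one_rxq_sketch(uint16_t port_id, struct rte_mempool *mb_pool)
{
	const uint16_t queue_idx = 0;
	const uint16_t nb_desc = 1024;	/* range-checked by the driver above */

	/* Passing NULL for rx_conf requests the driver defaults; the cxgbe
	 * handler ignores rx_conf anyway (RTE_SET_USED).
	 */
	return rte_eth_rx_queue_setup(port_id, queue_idx, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      NULL, mb_pool);
}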