Example #1
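/*
 * Tx queue setup callback for the cxgbe PMD: validates the requested
 * descriptor count, releases any queue previously bound to this index,
 * and allocates the hardware ring on the given NUMA socket.
 */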
static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx,	uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_txconf *tx_conf)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
	int err = 0;
	unsigned int temp_nb_desc;

	RTE_SET_USED(tx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
		  socket_id, pi->first_qset);

	/*  Free up the existing queue  */
	if (eth_dev->data->tx_queues[queue_idx]) {
		cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
		eth_dev->data->tx_queues[queue_idx] = NULL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)txq;

	/* Sanity Checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_TX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
		return -EINVAL;
	}

	txq->q.size = temp_nb_desc;

	err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
				   s->fw_evtq.cntxt_id, socket_id);

	dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
		  __func__, txq->q.cntxt_id, err);

	return err;
}
Example #2
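/*
 * Fill every descriptor in the receive ring with a freshly allocated
 * mbuf, then post the ring to the NIC by writing the posted index.
 */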
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned i;
	dma_addr_t dma_addr;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_rxmbuf_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned)rq->index);
			return -ENOMEM;
		}

		dma_addr = (dma_addr_t)(mb->buf_physaddr
			   + RTE_PKTMBUF_HEADROOM);

		rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
				 mb->buf_len - RTE_PKTMBUF_HEADROOM);
		rq->mbuf_ring[i] = mb;
	}

	/* make sure all prior writes are complete before doing the PIO write */
	rte_wmb();

	/* Post all but the last 2 cache lines' worth of descriptors */
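	/* (assuming 64-byte cache lines and 16-byte RQ descriptors, that is
	 * 2 * 64 / 16 = 8 descriptors held back)
	 */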
	rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
			/ sizeof(struct rq_enet_desc));
	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	rte_rmb();

	return 0;
}
Example #3
int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_stop(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}
Example #4
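/*
 * Release a Tx queue previously set up by cxgbe_dev_tx_queue_setup().
 */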
static void cxgbe_dev_tx_queue_release(void *q)
{
	struct sge_eth_txq *txq = (struct sge_eth_txq *)q;

	if (txq) {
		struct port_info *pi = (struct port_info *)
				       (txq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
			  __func__, pi->port_id, txq->q.cntxt_id);

		t4_sge_eth_txq_release(adap, txq);
	}
}
Example #5
static void cxgbe_dev_rx_queue_release(void *q)
{
	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;

	if (rxq) {
		struct sge_rspq *rq = &rxq->rspq;
		struct port_info *pi = (struct port_info *)
				       (rq->eth_dev->data->dev_private);
		struct adapter *adap = pi->adapter;

		dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
			  __func__, pi->port_id, rxq->rspq.cntxt_id);

		t4_sge_eth_rxq_release(adap, rxq);
	}
}
Example #6
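/*
 * Free every mbuf still held in the receive ring, e.g. when the queue
 * is torn down before all descriptors have been consumed.
 */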
static void
enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < enic->config.rq_desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}
Example #7
static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
				   uint16_t rx_queue_id)
{
	int ret;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;

	dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
		  __func__, pi->port_id, rx_queue_id);

	q = eth_dev->data->rx_queues[rx_queue_id];
	ret = t4_sge_eth_rxq_stop(adap, q);
	if (ret == 0)
		eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}
Example #8
/*
 * We find out if we support MSI during this call, and the register layout
 * depends on whether MSI is enabled (doh). Don't access the device-specific
 * bits in BAR 0 before calling it!
 */
int
virtio_register_ints(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
	int ret;
	int intr_types;

	/* Default offset until MSI-X is enabled, if ever. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSIX;

	/* Determine which types of interrupts are supported */
	ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
		goto out_inttype;
	}

	/* If we have msi, let's use them. */
	if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
		ret = virtio_register_msi(sc, config_handler,
		    vq_handlers, intr_types);
		if (!ret)
			return (0);
	}

	/* Fall back to old-fashioned interrupts. */
	if (intr_types & DDI_INTR_TYPE_FIXED) {
		dev_debug(sc->sc_dev, CE_WARN,
		    "Using legacy interrupts");

		return (virtio_register_intx(sc, config_handler, vq_handlers));
	}

	dev_err(sc->sc_dev, CE_WARN,
	    "MSI failed and fixed interrupts not supported. Giving up.");
	ret = DDI_FAILURE;

out_inttype:
	return (ret);
}
Example #9
/*
 * Negotiate features, save the result in sc->sc_features
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t host_features;
	uint32_t features;

	host_features = ddi_get32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

	dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
	    host_features, guest_features);

	features = host_features & guest_features;
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
	    features);

	sc->sc_features = features;

	return (host_features);
}
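A minimal sketch of how a leaf driver might drive this negotiation during
attach; the virtio-net feature bits used here are illustrative:

	uint32_t host_features;

	/* Offer the features we can handle; the device masks them
	 * (feature bits are illustrative, for a virtio-net device). */
	host_features = virtio_negotiate_features(sc,
	    VIRTIO_NET_F_STATUS | VIRTIO_NET_F_MAC);

	/* sc->sc_features now holds the negotiated subset. */
	if (!virtio_has_feature(sc, VIRTIO_NET_F_MAC)) {
		/* Fall back to a locally generated MAC address. */
	}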
Example #10
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* With Rx scatter support, two RQs are now used on VIC per RQ used
	 * by the application.
	 */
	if (queue_idx * 2 >= ENIC_RQ_MAX) {
		dev_err(enic,
			"Max number of RX queues exceeded.  Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
			ENIC_RQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_sop_rq(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
			enic->rq[queue_idx].rx_free_thresh);

	return enicpmd_dev_setup_intr(enic);
}
Example #11
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize = 0;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	vq_size = ddi_get16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue dest not exist, index %d for %s\n", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
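A hedged usage sketch: a network driver might allocate its receive queue as
queue index 0 during attach (virtio_free_vq() is assumed here to be the
matching teardown routine):

	struct virtqueue *rx_vq;

	/* size 0: use the vq size advertised by the device. */
	rx_vq = virtio_alloc_vq(sc, 0, 0, 0, "rx");
	if (rx_vq == NULL)
		return (DDI_FAILURE);

	/* ... on detach, or on a later failure path ... */
	virtio_free_vq(rx_vq);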
Example #12
void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_queue_offload_capa = 0UL;
	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

	device_info->tx_queue_offload_capa = 0UL;
	device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}

void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

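	/* Assuming the cxgb4-style prototype t4_set_rxmode(adap, mbox, viid,
	 * mtu, promisc, all_multi, bcast, vlanex, sleep_ok), where -1 leaves
	 * a field unchanged: this turns promiscuous mode on.
	 */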
	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      1, -1, 1, -1, false);
}

void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      0, -1, 1, -1, false);
}

void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 1, 1, -1, false);
}

void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 0, 1, -1, false);
}

int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct rte_eth_link new_link = { 0 };
	unsigned int i, work_done, budget = 32;
	u8 old_link = pi->link_cfg.link_ok;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
			break;

		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	new_link.link_status = force_linkup(adapter) ?
			       ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = pi->link_cfg.autoneg;
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = pi->link_cfg.speed;

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already up, nothing to do */
	if (pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, true);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 1);
	return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already down, nothing to do */
	if (!pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, false);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 0);
	return 0;
}

int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
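	/* e.g. a 1500-byte MTU yields a 1518-byte frame:
	 * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN)
	 */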

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}

/*
 * Stop device.
 */
void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	cxgbe_enable_rx_queues(pi);

	err = setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = link_start(pi);
	if (err)
		goto out;

out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	uint64_t configured_offloads;
	int err;

	CXGBE_FUNC_TRACE();
	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	/* The KEEP_CRC offload flag is not supported by the PMD.
	 * This block can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed.
	 */
	if (rte_eth_dev_must_keep_crc(configured_offloads)) {
		dev_info(adapter, "can't disable hw crc strip\n");
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_CRC_STRIP;
	}

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
		err = setup_sge_ctrl_txq(adapter);
		if (err)
			return err;
	}

	err = cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}
Example #13
static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
				    uint16_t queue_idx,	uint16_t nb_desc,
				    unsigned int socket_id,
				    const struct rte_eth_rxconf *rx_conf,
				    struct rte_mempool *mp)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
	int err = 0;
	int msi_idx = 0;
	unsigned int temp_nb_desc;
	struct rte_eth_dev_info dev_info;
	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	RTE_SET_USED(rx_conf);

	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
		  socket_id, mp);

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* max_rx_pkt_len must lie between min_rx_bufsize and max_rx_pktlen */
	if ((pkt_len < dev_info.min_rx_bufsize) ||
	    (pkt_len > dev_info.max_rx_pktlen)) {
		dev_err(adapter, "%s: max pkt len must be > %d and <= %d\n",
			__func__, dev_info.min_rx_bufsize,
			dev_info.max_rx_pktlen);
		return -EINVAL;
	}

	/*  Free up the existing queue  */
	if (eth_dev->data->rx_queues[queue_idx]) {
		cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
		eth_dev->data->rx_queues[queue_idx] = NULL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)rxq;

	/* Sanity Checking
	 *
	 * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
	 * <= CXGBE_MAX_RING_DESC_SIZE
	 */
	temp_nb_desc = nb_desc;
	if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
		dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
			 __func__, CXGBE_MIN_RING_DESC_SIZE,
			 CXGBE_DEFAULT_RX_DESC_SIZE);
		temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
	} else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
		dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
			__func__, CXGBE_MIN_RING_DESC_SIZE,
			CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
		return -EINVAL;
	}

	rxq->rspq.size = temp_nb_desc;
	rxq->fl.size = temp_nb_desc;

	/* Set to jumbo mode if necessary */
	if (pkt_len > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
			       &rxq->fl, t4_ethrx_handler,
			       t4_get_mps_bg_map(adapter, pi->tx_chan), mp,
			       queue_idx, socket_id);

	dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
		  __func__, err, pi->port_id, rxq->rspq.cntxt_id);
	return err;
}
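Taken together, callbacks like these are wired into DPDK through the port's
ops table. A minimal sketch, assuming the rte_ethdev API of the same era as
the snippets above (the table below only lists the callbacks shown here):

	static const struct eth_dev_ops cxgbe_eth_dev_ops = {
		.dev_configure		= cxgbe_dev_configure,
		.dev_start		= cxgbe_dev_start,
		.dev_stop		= cxgbe_dev_stop,
		.dev_infos_get		= cxgbe_dev_info_get,
		.tx_queue_setup		= cxgbe_dev_tx_queue_setup,
		.tx_queue_release	= cxgbe_dev_tx_queue_release,
		.rx_queue_setup		= cxgbe_dev_rx_queue_setup,
		.rx_queue_release	= cxgbe_dev_rx_queue_release,
		.link_update		= cxgbe_dev_link_update,
		.mtu_set		= cxgbe_dev_mtu_set,
	};

	/* Installed once per port during probe: */
	eth_dev->dev_ops = &cxgbe_eth_dev_ops;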