Example #1
/*
 * Stop device.
 */
static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int i, dev_down = 0;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);

	/*  See if all ports are down */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		/*
		 * Skip first port of the adapter since it will be closed
		 * by DPDK
		 */
		if (i == 0)
			continue;
		dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
	}

	/* If rest of the ports are stopped, then free up resources */
	if (dev_down == (adapter->params.nports - 1))
		cxgbe_close(adapter);
}
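
Example #1 is the driver's dev_close callback: it shuts down the port, clears its queues, and, once every other port on the adapter has also been stopped, frees the shared adapter resources through cxgbe_close(). From the application side this path is reached through the generic ethdev API; below is a minimal, hedged sketch (the return types of rte_eth_dev_stop()/rte_eth_dev_close() vary across DPDK releases, and app_port_teardown() is a hypothetical helper, not part of the driver):

#include <rte_ethdev.h>

/* Hypothetical application-side teardown that ends up invoking the driver's
 * dev_stop and dev_close callbacks shown in these examples.
 */
static void app_port_teardown(uint16_t port_id)
{
	rte_eth_dev_stop(port_id);	/* -> cxgbe_dev_stop() */
	rte_eth_dev_close(port_id);	/* -> cxgbe_dev_close() */
}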
Example #2
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}
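
Example #2 is the matching dev_stop callback: it disables the rx and tx paths so the port can be reconfigured, but keeps the adapter-wide state alive. In a DPDK PMD such handlers are exported through an eth_dev_ops table; the following is a trimmed sketch of how the cxgbe callbacks shown in these examples would typically be wired up (field names follow the DPDK 18.x struct eth_dev_ops layout; the real driver registers more callbacks than are listed here):

#include <rte_ethdev_driver.h>

/* Sketch of a PMD callback table; only handlers appearing in these examples. */
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
	.dev_configure		= cxgbe_dev_configure,
	.dev_start		= cxgbe_dev_start,
	.dev_stop		= cxgbe_dev_stop,
	.dev_close		= cxgbe_dev_close,
	.dev_infos_get		= cxgbe_dev_info_get,
	.link_update		= cxgbe_dev_link_update,
	.mtu_set		= cxgbe_dev_mtu_set,
	.promiscuous_enable	= cxgbe_dev_promiscuous_enable,
	.promiscuous_disable	= cxgbe_dev_promiscuous_disable,
	.allmulticast_enable	= cxgbe_dev_allmulticast_enable,
	.allmulticast_disable	= cxgbe_dev_allmulticast_disable,
};

/* Hooked up once during probe, e.g. eth_dev->dev_ops = &cxgbe_eth_dev_ops; */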
Example #3
void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_queue_offload_capa = 0UL;
	device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;

	device_info->tx_queue_offload_capa = 0UL;
	device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;

	device_info->reta_size = pi->rss_size;
	device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
	device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
	cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}

void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      1, -1, 1, -1, false);
}

void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      0, -1, 1, -1, false);
}

void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 1, 1, -1, false);
}

void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 0, 1, -1, false);
}

int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
			  int wait_to_complete)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct rte_eth_link new_link = { 0 };
	unsigned int i, work_done, budget = 32;
	u8 old_link = pi->link_cfg.link_ok;

	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

		/* Exit if link status changed or always forced up */
		if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
			break;

		if (!wait_to_complete)
			break;

		rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
	}

	new_link.link_status = force_linkup(adapter) ?
			       ETH_LINK_UP : pi->link_cfg.link_ok;
	new_link.link_autoneg = pi->link_cfg.autoneg;
	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	new_link.link_speed = pi->link_cfg.speed;

	return rte_eth_linkstatus_set(eth_dev, &new_link);
}

/**
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already up, nothing to do */
	if (pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, true);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 1);
	return 0;
}

/**
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	unsigned int work_done, budget = 32;
	struct sge *s = &adapter->sge;
	int ret;

	/* Flush all link events */
	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);

	/* If link already down, nothing to do */
	if (!pi->link_cfg.link_ok)
		return 0;

	ret = cxgbe_set_link_status(pi, false);
	if (ret)
		return ret;

	cxgbe_dev_link_update(dev, 0);
	return 0;
}

int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		eth_dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}

/*
 * Stop device.
 */
void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}

/* Start the device.
 * It returns 0 on success.
 */
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int err = 0, i;

	CXGBE_FUNC_TRACE();

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & FW_OK)) {
		err = -ENXIO;
		goto out;
	}

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgbe_up(adapter);
		if (err < 0)
			goto out;
	}

	cxgbe_enable_rx_queues(pi);

	err = setup_rss(pi);
	if (err)
		goto out;

	for (i = 0; i < pi->n_tx_qsets; i++) {
		err = cxgbe_dev_tx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	for (i = 0; i < pi->n_rx_qsets; i++) {
		err = cxgbe_dev_rx_queue_start(eth_dev, i);
		if (err)
			goto out;
	}

	err = link_start(pi);
	if (err)
		goto out;

out:
	return err;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);
}

int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	uint64_t configured_offloads;
	int err;

	CXGBE_FUNC_TRACE();
	configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	/* KEEP_CRC offload flag is not supported by PMD
	 * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
	 */
	if (rte_eth_dev_must_keep_crc(configured_offloads)) {
		dev_info(adapter, "can't disable hw crc strip\n");
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_CRC_STRIP;
	}

	if (!(adapter->flags & FW_QUEUE_BOUND)) {
		err = setup_sge_fwevtq(adapter);
		if (err)
			return err;
		adapter->flags |= FW_QUEUE_BOUND;
		err = setup_sge_ctrl_txq(adapter);
		if (err)
			return err;
	}

	err = cfg_queue_count(eth_dev);
	if (err)
		return err;

	return 0;
}

int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	int ret;
	struct sge_eth_txq *txq = (struct sge_eth_txq *)
				  (eth_dev->data->tx_queues[tx_queue_id]);

	dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);

	ret = t4_sge_eth_txq_start(txq);
	if (ret == 0)
		eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return ret;
}
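
Example #3 is a newer revision of the same file and covers the full configure/start/stop path plus dev_info, MTU, and link handling. From an application these callbacks are exercised through the generic ethdev calls; a minimal, hedged bring-up sketch follows (app_port_init() is hypothetical, and the queue and descriptor counts are illustrative values, not taken from the driver):

#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

/* Hypothetical single-queue bring-up for one port; error paths trimmed. */
static int app_port_init(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf = { 0 };	/* default configuration */
	int ret;

	/* -> cxgbe_dev_configure(): binds fw event/ctrl queues, sets queue counts */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* Queue setup must precede start; 512 descriptors is an arbitrary choice */
	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mb_pool);
	if (ret < 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
	if (ret < 0)
		return ret;

	/* -> cxgbe_dev_start(): RSS setup, per-queue start, link_start() */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	/* -> cxgbe_dev_mtu_set(): validated against max_rx_pktlen from dev_info */
	return rte_eth_dev_set_mtu(port_id, 1500);
}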
Example #4
static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
			       struct rte_eth_dev_info *device_info)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;

	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
		.nb_min = CXGBE_MIN_RING_DESC_SIZE,
		.nb_align = 1,
	};

	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
	device_info->max_rx_queues = max_queues;
	device_info->max_tx_queues = max_queues;
	device_info->max_mac_addrs = 1;
	/* XXX: For now we support one MAC/port */
	device_info->max_vfs = adapter->params.arch.vfcount;
	device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */

	device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
				       DEV_RX_OFFLOAD_IPV4_CKSUM |
				       DEV_RX_OFFLOAD_UDP_CKSUM |
				       DEV_RX_OFFLOAD_TCP_CKSUM;

	device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
				       DEV_TX_OFFLOAD_IPV4_CKSUM |
				       DEV_TX_OFFLOAD_UDP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_CKSUM |
				       DEV_TX_OFFLOAD_TCP_TSO;

	device_info->reta_size = pi->rss_size;

	device_info->rx_desc_lim = cxgbe_desc_lim;
	device_info->tx_desc_lim = cxgbe_desc_lim;
}

static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      1, -1, 1, -1, false);
}

static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      0, -1, 1, -1, false);
}

static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 1, 1, -1, false);
}

static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;

	/* TODO: address filters ?? */

	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
		      -1, 0, 1, -1, false);
}

static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
				 __rte_unused int wait_to_complete)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	struct rte_eth_link *old_link = &eth_dev->data->dev_link;
	unsigned int work_done, budget = 4;

	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
	if (old_link->link_status == pi->link_cfg.link_ok)
		return -1;  /* link not changed */

	eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;

	/* link has changed */
	return 0;
}

static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	struct rte_eth_dev_info dev_info;
	int err;
	uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	cxgbe_dev_info_get(eth_dev, &dev_info);

	/* Must accommodate at least ETHER_MIN_MTU */
	if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
		return -EINVAL;

	/* set to jumbo mode if needed */
	if (new_mtu > ETHER_MAX_LEN)
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;

	err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
			    -1, -1, true);
	if (!err)
		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;

	return err;
}

static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t tx_queue_id);
static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
				    uint16_t tx_queue_id);
static void cxgbe_dev_tx_queue_release(void *q);
static void cxgbe_dev_rx_queue_release(void *q);

/*
 * Stop device.
 */
static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	int i, dev_down = 0;

	CXGBE_FUNC_TRACE();

	if (!(adapter->flags & FULL_INIT_DONE))
		return;

	cxgbe_down(pi);

	/*
	 *  We clear queues only if both tx and rx path of the port
	 *  have been disabled
	 */
	t4_sge_eth_clear_queues(pi);

	/*  See if all ports are down */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		/*
		 * Skip first port of the adapter since it will be closed
		 * by DPDK
		 */
		if (i == 0)
			continue;
		dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
	}

	/* If rest of the ports are stopped, then free up resources */
	if (dev_down == (adapter->params.nports - 1))
		cxgbe_close(adapter);
}
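
Example #4 is an older revision of the same driver: the offload capabilities are listed inline rather than through CXGBE_RX_OFFLOADS/CXGBE_TX_OFFLOADS, the MTU path still toggles rxmode.jumbo_frame, and link_update does a single poll and returns -1 when the status has not changed instead of going through rte_eth_linkstatus_set(). Either variant is consumed the same way by applications; a small, hedged sketch of a link query follows (rte_eth_link_get_nowait() maps to link_update with wait_to_complete = 0, its return type differs across DPDK releases, and app_print_link() is hypothetical):

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical non-blocking link check driven through the ethdev API. */
static void app_print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_link_get_nowait(port_id, &link);	/* -> cxgbe_dev_link_update(dev, 0) */

	if (link.link_status)
		printf("port %u: up, %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
	else
		printf("port %u: down\n", port_id);
}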