/* Defined below; forward declaration needed because the unlocked helper
 * is static and used before its definition in this file. */
static int mlx5_link_update_unlocked(struct rte_eth_dev *dev,
				     int wait_to_complete);

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct priv *priv = mlx5_get_priv(dev);
	int ret;

	priv_lock(priv);
	ret = mlx5_link_update_unlocked(dev, wait_to_complete);
	priv_unlock(priv);
	return ret;
}
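/*
 * Usage sketch (illustrative, not part of the driver): applications never
 * call mlx5_link_update() directly; it is reached through the generic
 * ethdev API. "port_id" below is an assumed, valid configured port, and
 * <rte_ethdev.h> is assumed to be included.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);  // wait_to_complete == 0
 *	if (link.link_status)
 *		printf("port %u: link up, %u Mbps, %s duplex\n",
 *		       port_id, link.link_speed,
 *		       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
 *		       "full" : "half");
 */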
/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int max;
	char ifname[IF_NAMESIZE];

	priv_lock(priv);
	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the minimum number
	 * between the two values.
	 */
	max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
	       priv->device_attr.max_qp : priv->device_attr.max_cq);
	/* Clamp to 65535: max_rx_queues/max_tx_queues are uint16_t. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = RTE_DIM(priv->mac);
	info->rx_offload_capa =
		(priv->hw_csum ?
		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
		  DEV_RX_OFFLOAD_UDP_CKSUM |
		  DEV_RX_OFFLOAD_TCP_CKSUM) :
		 0);
	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	if (priv->hw_csum)
		info->tx_offload_capa |=
			(DEV_TX_OFFLOAD_IPV4_CKSUM |
			 DEV_TX_OFFLOAD_UDP_CKSUM |
			 DEV_TX_OFFLOAD_TCP_CKSUM);
	if (priv_get_ifname(priv, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	/* FIXME: RETA update/query API expects the callee to know the size of
	 * the indirection table, for this PMD the size varies depending on
	 * the number of RX queues, it becomes impossible to find the correct
	 * size if it is not fixed.
	 * The API should be updated to solve this problem. */
	info->reta_size = priv->ind_table_max_size;
	info->speed_capa =
		ETH_LINK_SPEED_1G |
		ETH_LINK_SPEED_10G |
		ETH_LINK_SPEED_20G |
		ETH_LINK_SPEED_25G |
		ETH_LINK_SPEED_40G |
		ETH_LINK_SPEED_50G |
		ETH_LINK_SPEED_56G |
		ETH_LINK_SPEED_100G;
	priv_unlock(priv);
}
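/*
 * Usage sketch (illustrative, not part of the driver): a typical consumer
 * clamps its queue request against the limits reported here and checks the
 * offload capabilities before enabling them. "port_id" and "nb_rxq_wanted"
 * are assumed names.
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t nb_rxq = nb_rxq_wanted;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (nb_rxq > dev_info.max_rx_queues)
 *		nb_rxq = dev_info.max_rx_queues;
 *	if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM))
 *		rxmode.hw_ip_checksum = 0;  // device cannot checksum on Rx
 */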
/**
 * DPDK callback to retrieve physical link information (unlocked version).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 */
static int
mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;

	(void)wait_to_complete;
	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	/* ifr_data is a char pointer; cast to avoid an incompatible
	 * pointer assignment. */
	ifr.ifr_data = (void *)&edata;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
		     strerror(errno));
		return -1;
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = 0;
	else
		dev_link.link_speed = link_speed;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	return -1;
}
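/*
 * For reference, priv_ifreq() is assumed to reduce to a plain socket ioctl
 * on the kernel netdevice backing the port (minimal sketch under that
 * assumption; the real helper also resolves the interface name and handles
 * errors):
 *
 *	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
 *	int ret = -1;
 *
 *	if (sock == -1)
 *		return ret;
 *	if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
 *		ret = ioctl(sock, req, ifr);
 *	close(sock);
 *	return ret;
 */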
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int i;

	priv_lock(priv);
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_special_flow_disable_all(priv);
	priv_mac_addrs_disable(priv);
	priv_destroy_hash_rxqs(priv);
	/* Remove flow director elements. */
	priv_fdir_disable(priv);
	priv_fdir_delete_filters_list(priv);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i) {
			struct rxq *rxq = (*priv->rxqs)[i];
			struct rxq_ctrl *rxq_ctrl;

			if (rxq == NULL)
				continue;
			rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
			(*priv->rxqs)[i] = NULL;
			rxq_cleanup(rxq_ctrl);
			rte_free(rxq_ctrl);
		}
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
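	/*
	 * Usage sketch (illustrative, not part of the driver): this callback
	 * is reached through the generic ethdev teardown path. The port is
	 * assumed to have been stopped first so no datapath thread is still
	 * inside mlx5_rx_burst()/mlx5_tx_burst():
	 *
	 *	rte_eth_dev_stop(port_id);	// quiesce Rx/Tx first
	 *	rte_eth_dev_close(port_id);	// dispatches to mlx5_dev_close()
	 */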