static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action) { struct ecore_dev *edev = &qdev->edev; struct qed_update_vport_params params = { .vport_id = 0, .accept_any_vlan = action, .update_accept_any_vlan_flg = 1, }; int rc; /* Proceed only if action actually needs to be performed */ if (qdev->accept_any_vlan == action) return; rc = qdev->ops->vport_update(edev, ¶ms); if (rc) { DP_ERR(edev, "Failed to %s accept-any-vlan\n", action ? "enable" : "disable"); } else { DP_INFO(edev, "%s accept-any-vlan\n", action ? "enabled" : "disabled"); qdev->accept_any_vlan = action; } } void qede_config_rx_mode(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; /* TODO: - QED_FILTER_TYPE_UCAST */ enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR; struct qed_filter_params rx_mode; int rc; /* Configure the struct for the Rx mode */ memset(&rx_mode, 0, sizeof(struct qed_filter_params)); rx_mode.type = QED_FILTER_TYPE_RX_MODE; rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE, eth_dev->data->mac_addrs[0].addr_bytes); if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) { accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; } else { rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD, eth_dev->data-> mac_addrs[0].addr_bytes); if (rc) { DP_ERR(edev, "Unable to add filter\n"); return; } } /* take care of VLAN mode */ if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) { qede_config_accept_any_vlan(qdev, true); } else if (!qdev->non_configured_vlans) { /* If we dont have non-configured VLANs and promisc * is not set, then check if we need to disable * accept_any_vlan mode. * Because in this case, accept_any_vlan mode is set * as part of IFF_RPOMISC flag handling. */ qede_config_accept_any_vlan(qdev, false); } rx_mode.filter.accept_flags = accept_flags; rc = qdev->ops->filter_config(edev, &rx_mode); if (rc) DP_ERR(edev, "Filter config failed rc=%d\n", rc); } static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping) { struct qed_update_vport_params vport_update_params; struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); int rc; memset(&vport_update_params, 0, sizeof(vport_update_params)); vport_update_params.vport_id = 0; vport_update_params.update_inner_vlan_removal_flg = 1; vport_update_params.inner_vlan_removal_flg = set_stripping; rc = qdev->ops->vport_update(edev, &vport_update_params); if (rc) { DP_ERR(edev, "Update V-PORT failed %d\n", rc); return rc; } return 0; } static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); if (mask & ETH_VLAN_STRIP_MASK) { if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) (void)qede_vlan_stripping(eth_dev, 1); else (void)qede_vlan_stripping(eth_dev, 0); } DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n", mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip); } static int qede_set_ucast_rx_vlan(struct qede_dev *qdev, enum qed_filter_xcast_params_type opcode, uint16_t vid) { struct qed_filter_params filter_cmd; struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); memset(&filter_cmd, 0, sizeof(filter_cmd)); filter_cmd.type = QED_FILTER_TYPE_UCAST; filter_cmd.filter.ucast.type = opcode; filter_cmd.filter.ucast.vlan_valid = 1; filter_cmd.filter.ucast.vlan = vid; return qdev->ops->filter_config(edev, &filter_cmd); } static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct qed_dev_eth_info *dev_info = &qdev->dev_info; int rc; if (vlan_id != 0 && qdev->configured_vlans == dev_info->num_vlan_filters) { DP_NOTICE(edev, false, "Reached max VLAN filter limit" " enabling accept_any_vlan\n"); qede_config_accept_any_vlan(qdev, true); return 0; } if (on) { rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD, vlan_id); if (rc) DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, rc); else if (vlan_id != 0) qdev->configured_vlans++; } else { rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL, vlan_id); if (rc) DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", vlan_id, rc); else if (vlan_id != 0) qdev->configured_vlans--; } DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n", vlan_id, on, rc, qdev->configured_vlans); return rc; } static int qede_dev_configure(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; PMD_INIT_FUNC_TRACE(edev); if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) { DP_NOTICE(edev, false, "Unequal number of rx/tx queues " "is not supported RX=%u TX=%u\n", eth_dev->data->nb_rx_queues, eth_dev->data->nb_tx_queues); return -EINVAL; } qdev->num_rss = eth_dev->data->nb_rx_queues; /* Initial state */ qdev->state = QEDE_CLOSE; /* Sanity checks and throw warnings */ if (rxmode->enable_scatter == 1) { DP_ERR(edev, "RX scatter packets is not supported\n"); return -EINVAL; } if (rxmode->enable_lro == 1) { DP_INFO(edev, "LRO is not supported\n"); return -EINVAL; } if (!rxmode->hw_strip_crc) DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n"); if (!rxmode->hw_ip_checksum) DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled " "in hw\n"); DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", QEDE_RSS_CNT(qdev), qdev->num_tc); DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u" " port %u first_on_engine %d\n", edev->hwfns[0].my_id, edev->hwfns[0].rel_pf_id, edev->hwfns[0].abs_pf_id, edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine); return 0; } /* Info about HW descriptor ring limitations */ static const struct rte_eth_desc_lim qede_rx_desc_lim = { .nb_max = NUM_RX_BDS_MAX, .nb_min = 128, .nb_align = 128 /* lowest common multiple */ }; static const struct rte_eth_desc_lim qede_tx_desc_lim = { .nb_max = NUM_TX_BDS_MAX, .nb_min = 256, .nb_align = 256 }; static void qede_dev_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; PMD_INIT_FUNC_TRACE(edev); dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU + QEDE_ETH_OVERHEAD); dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; dev_info->rx_desc_lim = qede_rx_desc_lim; dev_info->tx_desc_lim = qede_tx_desc_lim; dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev); dev_info->max_tx_queues = dev_info->max_rx_queues; dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs; if (IS_VF(edev)) dev_info->max_vfs = 0; else dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev); dev_info->driver_name = qdev->drv_ver; dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; dev_info->default_txconf = (struct rte_eth_txconf) { .txq_flags = QEDE_TXQ_FLAGS, }; dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM); dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM); dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G; } /* return 0 means link status changed, -1 means not changed */ static int qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; uint16_t link_duplex; struct qed_link_output link; struct rte_eth_link *curr = ð_dev->data->dev_link; memset(&link, 0, sizeof(struct qed_link_output)); qdev->ops->common->get_link(edev, &link); /* Link Speed */ curr->link_speed = link.speed; /* Link Mode */ switch (link.duplex) { case QEDE_DUPLEX_HALF: link_duplex = ETH_LINK_HALF_DUPLEX; break; case QEDE_DUPLEX_FULL: link_duplex = ETH_LINK_FULL_DUPLEX; break; case QEDE_DUPLEX_UNKNOWN: default: link_duplex = -1; } curr->link_duplex = link_duplex; /* Link Status */ curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN; /* AN */ curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? ETH_LINK_AUTONEG : ETH_LINK_FIXED; DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", curr->link_speed, curr->link_duplex, curr->link_autoneg, curr->link_status); /* return 0 means link status changed, -1 means not changed */ return ((curr->link_status == link.link_up) ? -1 : 0); } static void qede_rx_mode_setting(struct rte_eth_dev *eth_dev, enum qed_filter_rx_mode_type accept_flags) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; struct qed_filter_params rx_mode; DP_INFO(edev, "%s mode %u\n", __func__, accept_flags); memset(&rx_mode, 0, sizeof(struct qed_filter_params)); rx_mode.type = QED_FILTER_TYPE_RX_MODE; rx_mode.filter.accept_flags = accept_flags; qdev->ops->filter_config(edev, &rx_mode); } static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; PMD_INIT_FUNC_TRACE(edev); enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; qede_rx_mode_setting(eth_dev, type); } static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; PMD_INIT_FUNC_TRACE(edev); if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); else qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR); } static void qede_dev_close(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; PMD_INIT_FUNC_TRACE(edev); /* dev_stop() shall cleanup fp resources in hw but without releasing * dma memories and sw structures so that dev_start() can be called * by the app without reconfiguration. However, in dev_close() we * can release all the resources and device can be brought up newly */ if (qdev->state != QEDE_STOP) qede_dev_stop(eth_dev); else DP_INFO(edev, "Device is already stopped\n"); qede_free_mem_load(qdev); qede_free_fp_arrays(qdev); qede_dev_set_link_state(eth_dev, false); qdev->ops->common->slowpath_stop(edev); qdev->ops->common->remove(edev); rte_intr_disable(ð_dev->pci_dev->intr_handle); rte_intr_callback_unregister(ð_dev->pci_dev->intr_handle, qede_interrupt_handler, (void *)eth_dev); qdev->state = QEDE_CLOSE; } static void qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; struct ecore_eth_stats stats; qdev->ops->get_vport_stats(edev, &stats); /* RX Stats */ eth_stats->ipackets = stats.rx_ucast_pkts + stats.rx_mcast_pkts + stats.rx_bcast_pkts; eth_stats->ibytes = stats.rx_ucast_bytes + stats.rx_mcast_bytes + stats.rx_bcast_bytes; eth_stats->ierrors = stats.rx_crc_errors + stats.rx_align_errors + stats.rx_carrier_errors + stats.rx_oversize_packets + stats.rx_jabbers + stats.rx_undersize_packets; eth_stats->rx_nombuf = stats.no_buff_discards; eth_stats->imissed = stats.mftag_filter_discards + stats.mac_filter_discards + stats.no_buff_discards + stats.brb_truncates + stats.brb_discards; /* TX stats */ eth_stats->opackets = stats.tx_ucast_pkts + stats.tx_mcast_pkts + stats.tx_bcast_pkts; eth_stats->obytes = stats.tx_ucast_bytes + stats.tx_mcast_bytes + stats.tx_bcast_bytes; eth_stats->oerrors = stats.tx_err_drop_pkts; DP_INFO(edev, "no_buff_discards=%" PRIu64 "" " mac_filter_discards=%" PRIu64 "" " brb_truncates=%" PRIu64 "" " brb_discards=%" PRIu64 "\n", stats.no_buff_discards, stats.mac_filter_discards, stats.brb_truncates, stats.brb_discards); } int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct qed_link_params link_params; int rc; DP_INFO(edev, "setting link state %d\n", link_up); memset(&link_params, 0, sizeof(link_params)); link_params.link_up = link_up; rc = qdev->ops->common->set_link(edev, &link_params); if (rc != ECORE_SUCCESS) DP_ERR(edev, "Unable to set link state %d\n", link_up); return rc; } static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) { return qede_dev_set_link_state(eth_dev, true); } static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) { return qede_dev_set_link_state(eth_dev, false); } static void qede_reset_stats(struct rte_eth_dev *eth_dev) { struct qede_dev *qdev = eth_dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; ecore_reset_vport_stats(edev); } static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev) { enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) type |= QED_FILTER_RX_MODE_TYPE_PROMISC; qede_rx_mode_setting(eth_dev, type); } static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev) { if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC); else qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR); } static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, struct rte_eth_fc_conf *fc_conf) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct qed_link_output current_link; struct qed_link_params params; memset(¤t_link, 0, sizeof(current_link)); qdev->ops->common->get_link(edev, ¤t_link); memset(¶ms, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; if (fc_conf->autoneg) { if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { DP_ERR(edev, "Autoneg not supported\n"); return -EINVAL; } params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; } /* Pause is assumed to be supported (SUPPORTED_Pause) */ if (fc_conf->mode == RTE_FC_FULL) params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | QED_LINK_PAUSE_RX_ENABLE); if (fc_conf->mode == RTE_FC_TX_PAUSE) params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; if (fc_conf->mode == RTE_FC_RX_PAUSE) params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; params.link_up = true; (void)qdev->ops->common->set_link(edev, ¶ms); return 0; } static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, struct rte_eth_fc_conf *fc_conf) { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct qed_link_output current_link; memset(¤t_link, 0, sizeof(current_link)); qdev->ops->common->get_link(edev, ¤t_link); if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) fc_conf->autoneg = true; if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | QED_LINK_PAUSE_TX_ENABLE)) fc_conf->mode = RTE_FC_FULL; else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) fc_conf->mode = RTE_FC_RX_PAUSE; else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) fc_conf->mode = RTE_FC_TX_PAUSE; else fc_conf->mode = RTE_FC_NONE; return 0; } static const uint32_t * qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) { static const uint32_t ptypes[] = { RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV6, RTE_PTYPE_UNKNOWN }; if (eth_dev->rx_pkt_burst == qede_recv_pkts) return ptypes; return NULL; } static const struct eth_dev_ops qede_eth_dev_ops = { .dev_configure = qede_dev_configure, .dev_infos_get = qede_dev_info_get, .rx_queue_setup = qede_rx_queue_setup, .rx_queue_release = qede_rx_queue_release, .tx_queue_setup = qede_tx_queue_setup, .tx_queue_release = qede_tx_queue_release, .dev_start = qede_dev_start, .dev_set_link_up = qede_dev_set_link_up, .dev_set_link_down = qede_dev_set_link_down, .link_update = qede_link_update, .promiscuous_enable = qede_promiscuous_enable, .promiscuous_disable = qede_promiscuous_disable, .allmulticast_enable = qede_allmulticast_enable, .allmulticast_disable = qede_allmulticast_disable, .dev_stop = qede_dev_stop, .dev_close = qede_dev_close, .stats_get = qede_get_stats, .stats_reset = qede_reset_stats, .mac_addr_add = qede_mac_addr_add, .mac_addr_remove = qede_mac_addr_remove, .mac_addr_set = qede_mac_addr_set, .vlan_offload_set = qede_vlan_offload_set, .vlan_filter_set = qede_vlan_filter_set, .flow_ctrl_set = qede_flow_ctrl_set, .flow_ctrl_get = qede_flow_ctrl_get, .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, }; static const struct eth_dev_ops qede_eth_vf_dev_ops = { .dev_configure = qede_dev_configure, .dev_infos_get = qede_dev_info_get, .rx_queue_setup = qede_rx_queue_setup, .rx_queue_release = qede_rx_queue_release, .tx_queue_setup = qede_tx_queue_setup, .tx_queue_release = qede_tx_queue_release, .dev_start = qede_dev_start, .dev_set_link_up = qede_dev_set_link_up, .dev_set_link_down = qede_dev_set_link_down, .link_update = qede_link_update, .promiscuous_enable = qede_promiscuous_enable, .promiscuous_disable = qede_promiscuous_disable, .allmulticast_enable = qede_allmulticast_enable, .allmulticast_disable = qede_allmulticast_disable, .dev_stop = qede_dev_stop, .dev_close = qede_dev_close, .stats_get = qede_get_stats, .stats_reset = qede_reset_stats, .vlan_offload_set = qede_vlan_offload_set, .vlan_filter_set = qede_vlan_filter_set, .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, }; static void qede_update_pf_params(struct ecore_dev *edev) { struct ecore_pf_params pf_params; /* 32 rx + 32 tx */ memset(&pf_params, 0, sizeof(struct ecore_pf_params)); pf_params.eth_pf_params.num_cons = 64; qed_ops->common->update_pf_params(edev, &pf_params); } static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) { struct rte_pci_device *pci_dev; struct rte_pci_addr pci_addr; struct qede_dev *adapter; struct ecore_dev *edev; struct qed_dev_eth_info dev_info; struct qed_slowpath_params params; uint32_t qed_ver; static bool do_once = true; uint8_t bulletin_change; uint8_t vf_mac[ETHER_ADDR_LEN]; uint8_t is_mac_forced; bool is_mac_exist; /* Fix up ecore debug level */ uint32_t dp_module = ~0 & ~ECORE_MSG_HW; uint8_t dp_level = ECORE_LEVEL_VERBOSE; uint32_t max_mac_addrs; int rc; /* Extract key data structures */ adapter = eth_dev->data->dev_private; edev = &adapter->edev; pci_addr = eth_dev->pci_dev->addr; PMD_INIT_FUNC_TRACE(edev); snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", pci_addr.bus, pci_addr.devid, pci_addr.function, eth_dev->data->port_id); eth_dev->rx_pkt_burst = qede_recv_pkts; eth_dev->tx_pkt_burst = qede_xmit_pkts; if (rte_eal_process_type() != RTE_PROC_PRIMARY) { DP_NOTICE(edev, false, "Skipping device init from secondary process\n"); return 0; } pci_dev = eth_dev->pci_dev; rte_eth_copy_pci_info(eth_dev, pci_dev); qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH); qed_ops = qed_get_eth_ops(); if (!qed_ops) { DP_ERR(edev, "Failed to get qed_eth_ops_pass\n"); return -EINVAL; } DP_INFO(edev, "Starting qede probe\n"); rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH, dp_module, dp_level, is_vf); if (rc != 0) { DP_ERR(edev, "qede probe failed rc %d\n", rc); return -ENODEV; } qede_update_pf_params(edev); rte_intr_callback_register(ð_dev->pci_dev->intr_handle, qede_interrupt_handler, (void *)eth_dev); if (rte_intr_enable(ð_dev->pci_dev->intr_handle)) { DP_ERR(edev, "rte_intr_enable() failed\n"); return -ENODEV; } /* Start the Slowpath-process */ memset(¶ms, 0, sizeof(struct qed_slowpath_params)); params.int_mode = ECORE_INT_MODE_MSIX; params.drv_major = QEDE_MAJOR_VERSION; params.drv_minor = QEDE_MINOR_VERSION; params.drv_rev = QEDE_REVISION_VERSION; params.drv_eng = QEDE_ENGINEERING_VERSION; strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE); rc = qed_ops->common->slowpath_start(edev, ¶ms); if (rc) { DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); return -ENODEV; } rc = qed_ops->fill_dev_info(edev, &dev_info); if (rc) { DP_ERR(edev, "Cannot get device_info rc %d\n", rc); qed_ops->common->slowpath_stop(edev); qed_ops->common->remove(edev); return -ENODEV; } qede_alloc_etherdev(adapter, &dev_info); adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION); if (!is_vf) adapter->dev_info.num_mac_addrs = (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), ECORE_MAC); else ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), &adapter->dev_info.num_mac_addrs); /* Allocate memory for storing MAC addr */ eth_dev->data->mac_addrs = rte_zmalloc(edev->name, (ETHER_ADDR_LEN * adapter->dev_info.num_mac_addrs), RTE_CACHE_LINE_SIZE); if (eth_dev->data->mac_addrs == NULL) { DP_ERR(edev, "Failed to allocate MAC address\n"); qed_ops->common->slowpath_stop(edev); qed_ops->common->remove(edev); return -ENOMEM; } if (!is_vf) { ether_addr_copy((struct ether_addr *)edev->hwfns[0]. hw_info.hw_mac_addr, ð_dev->data->mac_addrs[0]); ether_addr_copy(ð_dev->data->mac_addrs[0], &adapter->primary_mac); } else { ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), &bulletin_change); if (bulletin_change) { is_mac_exist = ecore_vf_bulletin_get_forced_mac( ECORE_LEADING_HWFN(edev), vf_mac, &is_mac_forced); if (is_mac_exist && is_mac_forced) { DP_INFO(edev, "VF macaddr received from PF\n"); ether_addr_copy((struct ether_addr *)&vf_mac, ð_dev->data->mac_addrs[0]); ether_addr_copy(ð_dev->data->mac_addrs[0], &adapter->primary_mac); } else { DP_NOTICE(edev, false, "No VF macaddr assigned\n"); } } } eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops; if (do_once) { qede_print_adapter_info(adapter); do_once = false; } DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", adapter->primary_mac.addr_bytes[0], adapter->primary_mac.addr_bytes[1], adapter->primary_mac.addr_bytes[2], adapter->primary_mac.addr_bytes[3], adapter->primary_mac.addr_bytes[4], adapter->primary_mac.addr_bytes[5]); return rc; } static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) { return qede_common_dev_init(eth_dev, 1); } static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) { return qede_common_dev_init(eth_dev, 0); } static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) { /* only uninitialize in the primary process */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; /* safe to close dev here */ qede_dev_close(eth_dev); eth_dev->dev_ops = NULL; eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; if (eth_dev->data->mac_addrs) rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; return 0; } static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) { return qede_dev_common_uninit(eth_dev); } static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) { return qede_dev_common_uninit(eth_dev); } static struct rte_pci_id pci_id_qedevf_map[] = { #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) { QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF) }, { QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV) }, {.vendor_id = 0,} }; static struct rte_pci_id pci_id_qede_map[] = { #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E) }, { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S) }, { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40) }, { QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25) }, {.vendor_id = 0,} }; static struct eth_driver rte_qedevf_pmd = { .pci_drv = { .name = "rte_qedevf_pmd", .id_table = pci_id_qedevf_map, .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, }, .eth_dev_init = qedevf_eth_dev_init, .eth_dev_uninit = qedevf_eth_dev_uninit, .dev_private_size = sizeof(struct qede_dev), }; static struct eth_driver rte_qede_pmd = { .pci_drv = { .name = "rte_qede_pmd", .id_table = pci_id_qede_map, .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, }, .eth_dev_init = qede_eth_dev_init, .eth_dev_uninit = qede_eth_dev_uninit, .dev_private_size = sizeof(struct qede_dev), }; static int rte_qedevf_pmd_init(const char *name __rte_unused, const char *params __rte_unused) { rte_eth_driver_register(&rte_qedevf_pmd); return 0; } static int rte_qede_pmd_init(const char *name __rte_unused, const char *params __rte_unused) { rte_eth_driver_register(&rte_qede_pmd); return 0; } static struct rte_driver rte_qedevf_driver = { .type = PMD_PDEV, .init = rte_qede_pmd_init }; static struct rte_driver rte_qede_driver = { .type = PMD_PDEV, .init = rte_qedevf_pmd_init }; PMD_REGISTER_DRIVER(rte_qede_driver); PMD_REGISTER_DRIVER(rte_qedevf_driver);
void qed_int_sp_dpc(unsigned long hwfn_cookie) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie; struct qed_pi_info *pi_info = NULL; struct qed_sb_attn_info *sb_attn; struct qed_sb_info *sb_info; int arr_size; u16 rc = 0; if (!p_hwfn) { DP_ERR(p_hwfn->cdev, "DPC called - no hwfn!\n"); return; } if (!p_hwfn->p_sp_sb) { DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); return; } sb_info = &p_hwfn->p_sp_sb->sb_info; arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); if (!sb_info) { DP_ERR(p_hwfn->cdev, "Status block is NULL - cannot ack interrupts\n"); return; } if (!p_hwfn->p_sb_attn) { DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); return; } sb_attn = p_hwfn->p_sb_attn; DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", p_hwfn, p_hwfn->my_id); /* Disable ack for def status block. Required both for msix + * inta in non-mask mode, in inta does no harm. */ qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); /* Gather Interrupts/Attentions information */ if (!sb_info->sb_virt) { DP_ERR( p_hwfn->cdev, "Interrupt Status block is NULL - cannot check for new interrupts!\n"); } else { u32 tmp_index = sb_info->sb_ack; rc = qed_sb_update_sb_idx(sb_info); DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, "Interrupt indices: 0x%08x --> 0x%08x\n", tmp_index, sb_info->sb_ack); } if (!sb_attn || !sb_attn->sb_attn) { DP_ERR( p_hwfn->cdev, "Attentions Status block is NULL - cannot check for new attentions!\n"); } else { u16 tmp_index = sb_attn->index; rc |= qed_attn_update_idx(p_hwfn, sb_attn); DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, "Attention indices: 0x%08x --> 0x%08x\n", tmp_index, sb_attn->index); } /* Check if we expect interrupts at this time. if not just ack them */ if (!(rc & QED_SB_EVENT_MASK)) { qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); return; } /* Check the validity of the DPC ptt. If not ack interrupts and fail */ if (!p_hwfn->p_dpc_ptt) { DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); return; } if (rc & QED_SB_ATT_IDX) qed_int_attentions(p_hwfn); if (rc & QED_SB_IDX) { int pi; /* Look for a free index */ for (pi = 0; pi < arr_size; pi++) { pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; if (pi_info->comp_cb) pi_info->comp_cb(p_hwfn, pi_info->cookie); } } if (sb_attn && (rc & QED_SB_ATT_IDX)) /* This should be done before the interrupts are enabled, * since otherwise a new attention will be generated. */ qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); }
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, u32 *o_mcp_param) { u8 delay = CHIP_MCP_RESP_ITER_US; u32 seq, cnt = 1, actual_mb_seq; int rc = 0; /* Get actual driver mailbox sequence */ actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK; /* Use MCP history register to check if MCP reset occurred between * init time and now. */ if (p_hwfn->mcp_info->mcp_hist != qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n"); qed_load_mcp_offsets(p_hwfn, p_ptt); qed_mcp_cmd_port_init(p_hwfn, p_ptt); } seq = ++p_hwfn->mcp_info->drv_mb_seq; /* Set drv param */ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param); /* Set drv command along with the updated sequence */ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq)); DP_VERBOSE(p_hwfn, QED_MSG_SP, "wrote command (%x) to MFW MB param 0x%08x\n", (cmd | seq), param); do { /* Wait for MFW response */ udelay(delay); *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); /* Give the FW up to 5 second (500*10ms) */ } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < QED_DRV_MB_MAX_RETRIES)); DP_VERBOSE(p_hwfn, QED_MSG_SP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", cnt * delay, *o_mcp_resp, seq); /* Is this a reply to our command? */ if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) { *o_mcp_resp &= FW_MSG_CODE_MASK; /* Get the MCP param */ *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); } else { /* FW BUG! */ DP_ERR(p_hwfn, "MFW failed to respond!\n"); *o_mcp_resp = 0; rc = -EAGAIN; } return rc; }
static int qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params) { struct ecore_sp_vport_update_params sp_params; struct ecore_rss_params sp_rss_params; int rc, i; memset(&sp_params, 0, sizeof(sp_params)); memset(&sp_rss_params, 0, sizeof(sp_rss_params)); /* Translate protocol params into sp params */ sp_params.vport_id = params->vport_id; sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; sp_params.update_inner_vlan_removal_flg = params->update_inner_vlan_removal_flg; sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; sp_params.update_tx_switching_flg = params->update_tx_switching_flg; sp_params.tx_switching_flg = params->tx_switching_flg; sp_params.accept_any_vlan = params->accept_any_vlan; sp_params.update_accept_any_vlan_flg = params->update_accept_any_vlan_flg; /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. * We need to re-fix the rss values per engine for CMT. */ if (edev->num_hwfns > 1 && params->update_rss_flg) { struct qed_update_vport_rss_params *rss = ¶ms->rss_params; int k, max = 0; /* Find largest entry, since it's possible RSS needs to * be disabled [in case only 1 queue per-hwfn] */ for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++) max = (max > rss->rss_ind_table[k]) ? max : rss->rss_ind_table[k]; /* Either fix RSS values or disable RSS */ if (edev->num_hwfns < max + 1) { int divisor = (max + edev->num_hwfns - 1) / edev->num_hwfns; DP_VERBOSE(edev, ECORE_MSG_SPQ, "CMT - fixing RSS values (modulo %02x)\n", divisor); for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++) rss->rss_ind_table[k] = rss->rss_ind_table[k] % divisor; } else { DP_VERBOSE(edev, ECORE_MSG_SPQ, "CMT - 1 queue per-hwfn; Disabling RSS\n"); params->update_rss_flg = 0; } } /* Now, update the RSS configuration for actual configuration */ if (params->update_rss_flg) { sp_rss_params.update_rss_config = 1; sp_rss_params.rss_enable = 1; sp_rss_params.update_rss_capabilities = 1; sp_rss_params.update_rss_ind_table = 1; sp_rss_params.update_rss_key = 1; sp_rss_params.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ rte_memcpy(sp_rss_params.rss_ind_table, params->rss_params.rss_ind_table, ECORE_RSS_IND_TABLE_SIZE * sizeof(uint16_t)); rte_memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, ECORE_RSS_KEY_SIZE * sizeof(uint32_t)); } sp_params.rss_params = &sp_rss_params; for_each_hwfn(edev, i) { struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; rc = ecore_sp_vport_update(p_hwfn, &sp_params, ECORE_SPQ_MODE_EBLOCK, NULL); if (rc) { DP_ERR(edev, "Failed to update VPORT\n"); return rc; } DP_VERBOSE(edev, ECORE_MSG_SPQ, "Updated V-PORT %d: active_flag %d [update %d]\n", params->vport_id, params->vport_active_flg, params->update_vport_active_flg); } return 0; }