/*
 * Device-match callback: compare the PCI address of @dev against the
 * rte_pci_addr passed opaquely via @_pci_addr.  Returns 0 on a match
 * (rte_eal_compare_pci_addr() semantics), non-zero otherwise.
 */
static inline int bond_pci_addr_cmp(const struct rte_device *dev,
				    const void *_pci_addr)
{
	const struct rte_pci_addr *paddr = _pci_addr;
	/* The cast through (void *) sheds the const qualifier that
	 * RTE_DEV_TO_PCI() cannot accept, without a -Wcast-qual warning.
	 */
	struct rte_pci_device *pci_dev =
		RTE_DEV_TO_PCI(*(struct rte_device **)(void *)&dev);

	return rte_eal_compare_pci_addr(&pci_dev->addr, paddr);
}
/* * Initialize driver * It returns 0 on success. */ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = NULL; char name[RTE_ETH_NAME_MAX_LEN]; int err = 0; CXGBE_FUNC_TRACE(); eth_dev->dev_ops = &cxgbe_eth_dev_ops; eth_dev->rx_pkt_burst = &cxgbe_recv_pkts; eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts; /* for secondary processes, we don't initialise any further as primary * has already done this work. */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; pci_dev = RTE_DEV_TO_PCI(eth_dev->device); snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id); adapter = rte_zmalloc(name, sizeof(*adapter), 0); if (!adapter) return -1; adapter->use_unpacked_mode = 1; adapter->regs = (void *)pci_dev->mem_resource[0].addr; if (!adapter->regs) { dev_err(adapter, "%s: cannot map device registers\n", __func__); err = -ENOMEM; goto out_free_adapter; } adapter->pdev = pci_dev; adapter->eth_dev = eth_dev; pi->adapter = adapter; err = cxgbe_probe(adapter); if (err) { dev_err(adapter, "%s: cxgbe probe failed with err %d\n", __func__, err); goto out_free_adapter; } return 0; out_free_adapter: rte_free(adapter); return err; }
static int skeleton_eventdev_init(struct rte_eventdev *eventdev) { struct rte_pci_device *pci_dev; struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev); int ret = 0; PMD_DRV_FUNC_TRACE(); eventdev->dev_ops = &skeleton_eventdev_ops; eventdev->schedule = NULL; eventdev->enqueue = skeleton_eventdev_enqueue; eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst; eventdev->dequeue = skeleton_eventdev_dequeue; eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst; /* For secondary processes, the primary has done all the work */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; pci_dev = RTE_DEV_TO_PCI(eventdev->dev); skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr; if (!skel->reg_base) { PMD_DRV_ERR("Failed to map BAR0"); ret = -ENODEV; goto fail; } skel->device_id = pci_dev->id.device_id; skel->vendor_id = pci_dev->id.vendor_id; skel->subsystem_device_id = pci_dev->id.subsystem_device_id; skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u", pci_dev->id.vendor_id, pci_dev->id.device_id, pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function); PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)", eventdev->data->dev_id, eventdev->data->socket_id, skel->vendor_id, skel->device_id); fail: return ret; }
}, { RTE_PCI_DEVICE(0x8086, 0x19e3), }, {.device_id = 0}, }; static int crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_drv, struct rte_cryptodev *cryptodev) { struct qat_pmd_private *internals; PMD_INIT_FUNC_TRACE(); PMD_DRV_LOG(DEBUG, "Found crypto device at %02x:%02x.%x", RTE_DEV_TO_PCI(cryptodev->device)->addr.bus, RTE_DEV_TO_PCI(cryptodev->device)->addr.devid, RTE_DEV_TO_PCI(cryptodev->device)->addr.function); cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD; cryptodev->dev_ops = &crypto_qat_ops; cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst; cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst; cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; internals = cryptodev->data->dev_private;
static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *device_info) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; int max_queues = adapter->sge.max_ethqsets / adapter->params.nports; static const struct rte_eth_desc_lim cxgbe_desc_lim = { .nb_max = CXGBE_MAX_RING_DESC_SIZE, .nb_min = CXGBE_MIN_RING_DESC_SIZE, .nb_align = 1, }; device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device); device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE; device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN; device_info->max_rx_queues = max_queues; device_info->max_tx_queues = max_queues; device_info->max_mac_addrs = 1; /* XXX: For now we support one MAC/port */ device_info->max_vfs = adapter->params.arch.vfcount; device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */ device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM; device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO; device_info->reta_size = pi->rss_size; device_info->rx_desc_lim = cxgbe_desc_lim; device_info->tx_desc_lim = cxgbe_desc_lim; device_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G; } static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, 1, -1, 1, -1, false); } static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, 0, -1, 1, -1, false); } static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct 
port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; /* TODO: address filters ?? */ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, -1, 1, 1, -1, false); } static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; /* TODO: address filters ?? */ t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, -1, 0, 1, -1, false); } static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; struct rte_eth_link *old_link = ð_dev->data->dev_link; unsigned int work_done, budget = 4; cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); if (old_link->link_status == pi->link_cfg.link_ok) return -1; /* link not changed */ eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok; eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; eth_dev->data->dev_link.link_speed = pi->link_cfg.speed; /* link has changed */ return 0; } static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; struct rte_eth_dev_info dev_info; int err; uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; cxgbe_dev_info_get(eth_dev, &dev_info); /* Must accommodate at least ETHER_MIN_MTU */ if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen)) return -EINVAL; /* set to jumbo mode if needed */ if (new_mtu > ETHER_MAX_LEN) eth_dev->data->dev_conf.rxmode.jumbo_frame = 1; else eth_dev->data->dev_conf.rxmode.jumbo_frame = 0; err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1, -1, -1, true); if (!err) eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu; return err; } static int cxgbe_dev_tx_queue_start(struct 
rte_eth_dev *eth_dev, uint16_t tx_queue_id); static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id); static void cxgbe_dev_tx_queue_release(void *q); static void cxgbe_dev_rx_queue_release(void *q); /* * Stop device. */ static void cxgbe_dev_close(struct rte_eth_dev *eth_dev) { struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; int i, dev_down = 0; CXGBE_FUNC_TRACE(); if (!(adapter->flags & FULL_INIT_DONE)) return; cxgbe_down(pi); /* * We clear queues only if both tx and rx path of the port * have been disabled */ t4_sge_eth_clear_queues(pi); /* See if all ports are down */ for_each_port(adapter, i) { pi = adap2pinfo(adapter, i); /* * Skip first port of the adapter since it will be closed * by DPDK */ if (i == 0) continue; dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0; } /* If rest of the ports are stopped, then free up resources */ if (dev_down == (adapter->params.nports - 1)) cxgbe_close(adapter); }