/*
 * Register an ethdev event callback for the PIOT device identified by
 * api_handle.
 *
 * The callback is forwarded to rte_eth_dev_callback_register() only for
 * PCI device groups; for any other group type registration is a silent
 * no-op that still reports success.
 *
 * Returns 0 on success (or no-op), -1 if the handle does not resolve to
 * a device, or the error code from rte_eth_dev_callback_register().
 */
int
rw_piot_dev_callback_register(rw_piot_api_handle_t api_handle,
                              enum rte_eth_event_type event,
                              rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
  rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle);

  /* Fix: the NULL check must precede the validity ASSERT.  The original
   * ran ASSERT(RWPIOT_VALID_DEVICE(rw_piot_dev)) first, which may
   * dereference a NULL pointer, and ASSERT can compile away in release
   * builds, making the NULL branch the only real protection. */
  if (NULL == rw_piot_dev) {
    RW_PIOT_LOG(RTE_LOG_ERR, "PIOT Could not find device by handle\n");
    return -1;
  }
  ASSERT(RWPIOT_VALID_DEVICE(rw_piot_dev));

  /* Only PCI-backed devices have a real rte ethdev port to attach to. */
  if (rw_piot_dev->device_group->device_group_type == PIOT_PCI) {
    return (rte_eth_dev_callback_register(rw_piot_dev->rte_port_id,
                                          event, cb_fn, cb_arg));
  }
  return (0);
}
/*
 * __eth_bond_slave_add_lock_free - attach slave_port_id to the bonded
 * device bonded_port_id without taking any lock (caller is expected to
 * serialize access).
 *
 * Rejects the port if it is already a slave of any bonded device.  The
 * first slave added donates its MAC address (unless user-defined), link
 * properties, primary-port role and offload capabilities to the bond;
 * subsequent slaves must match the established link properties and the
 * bond's offload capabilities are reduced to the common subset.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct bond_dev_private *temp_internals;
	struct rte_eth_link link_props;
	struct rte_eth_dev_info dev_info;

	int i, j;

	if (valid_slave_port_id(slave_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	/* Verify that new slave device is not already a slave of another
	 * bonded device: scan every ethdev that is itself a bond and walk
	 * its slave table. */
	for (i = rte_eth_dev_count()-1; i >= 0; i--) {
		if (valid_bonded_ethdev(&rte_eth_devices[i]) == 0) {
			temp_internals = rte_eth_devices[i].data->dev_private;

			for (j = 0; j < temp_internals->slave_count; j++) {
				/* Device already a slave of a bonded device */
				if (temp_internals->slaves[j].port_id == slave_port_id) {
					RTE_BOND_LOG(ERR,
						"Slave port %d is already a slave",
						slave_port_id);
					return -1;
				}
			}
		}
	}

	slave_eth_dev = &rte_eth_devices[slave_port_id];

	/* Add slave details to bonded device.
	 * NOTE(review): slave_add() records the slave before the link-property
	 * validation below; the error returns that follow do not undo it, so a
	 * failed add may leave a stale entry in internals->slaves[] (though
	 * slave_count is only incremented on the success path) — confirm
	 * whether slave_add needs a rollback here. */
	slave_add(internals, slave_eth_dev);

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(slave_port_id, &dev_info);

	if (internals->slave_count < 1) {
		/* if MAC is not user defined then use MAC of first slave add to
		 * bonded device */
		if (!internals->user_defined_mac)
			mac_address_set(bonded_eth_dev,
					slave_eth_dev->data->mac_addrs);

		/* Inherit eth dev link properties from first slave */
		link_properties_set(bonded_eth_dev,
				&(slave_eth_dev->data->dev_link));

		/* Make primary slave */
		internals->primary_port = slave_port_id;

		/* Take the first dev's offload capabilities */
		internals->rx_offload_capa = dev_info.rx_offload_capa;
		internals->tx_offload_capa = dev_info.tx_offload_capa;

	} else {
		/* Check slave link properties are supported if props are set,
		 * all slaves must be the same */
		if (internals->link_props_set) {
			if (link_properties_valid(&(bonded_eth_dev->data->dev_link),
						  &(slave_eth_dev->data->dev_link))) {
				RTE_BOND_LOG(ERR,
					"Slave port %d link speed/duplex not supported",
					slave_port_id);
				return -1;
			}
		} else {
			link_properties_set(bonded_eth_dev,
					&(slave_eth_dev->data->dev_link));
		}
		/* Bond capabilities are the intersection of all slaves'. */
		internals->rx_offload_capa &= dev_info.rx_offload_capa;
		internals->tx_offload_capa &= dev_info.tx_offload_capa;
	}

	internals->slave_count++;

	/* Update all slave devices MACs*/
	mac_address_slaves_update(bonded_eth_dev);

	if (bonded_eth_dev->data->dev_started) {
		if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
			/* NOTE(review): no rollback of slave_count / slave_add
			 * on this failure path — verify callers tolerate the
			 * partially-added slave. */
			RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
					slave_port_id);
			return -1;
		}
	}

	/* Register link status change callback with bonded device pointer as
	 * argument*/
	rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
			bond_ethdev_lsc_event_callback,
			&bonded_eth_dev->data->port_id);

	/* If bonded device is started then we can add the slave to our active
	 * slave array */
	if (bonded_eth_dev->data->dev_started) {
		rte_eth_link_get_nowait(slave_port_id, &link_props);

		if (link_props.link_status == 1)
			activate_slave(bonded_eth_dev, slave_port_id);
	}
	return 0;
}
/*
 * __eth_bond_slave_add_lock_free - attach slave_port_id to the bonded
 * device bonded_port_id without taking any lock (caller is expected to
 * serialize access).
 *
 * Marks the slave with RTE_ETH_DEV_BONDED_SLAVE so other bonds reject it,
 * records its RETA size, and either inherits (first slave) or intersects
 * (subsequent slaves) link/queue/offload properties.  If the bond is
 * already started the slave is configured and, when its link is up,
 * activated; the slave also inherits the bond's VLAN filters.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
{
	struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
	struct bond_dev_private *internals;
	struct rte_eth_link link_props;
	struct rte_eth_dev_info dev_info;

	if (valid_slave_port_id(slave_port_id) != 0)
		return -1;

	bonded_eth_dev = &rte_eth_devices[bonded_port_id];
	internals = bonded_eth_dev->data->dev_private;

	slave_eth_dev = &rte_eth_devices[slave_port_id];
	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
		RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
		return -1;
	}

	/* Fix: validate the slave's capabilities BEFORE marking it bonded.
	 * The original set RTE_ETH_DEV_BONDED_SLAVE first and then returned
	 * on the max_rx_pktlen check without clearing it, which left the
	 * port permanently rejected by the "already a slave" guard above. */
	rte_eth_dev_info_get(slave_port_id, &dev_info);
	if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) {
		RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small",
			     slave_port_id);
		return -1;
	}

	/* Add slave details to bonded device */
	slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
	slave_add(internals, slave_eth_dev);

	/* We need to store slaves reta_size to be able to synchronize RETA for all
	 * slave devices even if its sizes are different. */
	internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;

	if (internals->slave_count < 1) {
		/* if MAC is not user defined then use MAC of first slave add to
		 * bonded device */
		if (!internals->user_defined_mac)
			mac_address_set(bonded_eth_dev,
					slave_eth_dev->data->mac_addrs);

		/* Inherit eth dev link properties from first slave */
		link_properties_set(bonded_eth_dev,
				    &(slave_eth_dev->data->dev_link));

		/* Make primary slave */
		internals->primary_port = slave_port_id;
		internals->current_primary_port = slave_port_id;

		/* Inherit queues settings from first slave */
		internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
		internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;

		internals->reta_size = dev_info.reta_size;

		/* Take the first dev's offload capabilities */
		internals->rx_offload_capa = dev_info.rx_offload_capa;
		internals->tx_offload_capa = dev_info.tx_offload_capa;
		internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;

		/* Inherit first slave's max rx packet size */
		internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
	} else {
		/* Bond capabilities are the intersection of all slaves'. */
		internals->rx_offload_capa &= dev_info.rx_offload_capa;
		internals->tx_offload_capa &= dev_info.tx_offload_capa;
		internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;

		/* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
		 * the power of 2, the lower one is GCD */
		if (internals->reta_size > dev_info.reta_size)
			internals->reta_size = dev_info.reta_size;

		if (!internals->max_rx_pktlen &&
		    dev_info.max_rx_pktlen < internals->candidate_max_rx_pktlen)
			internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen;
	}

	bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
			internals->flow_type_rss_offloads;

	internals->slave_count++;

	/* Update all slave devices MACs*/
	mac_address_slaves_update(bonded_eth_dev);

	if (bonded_eth_dev->data->dev_started) {
		if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
			/* Undo the bonded-slave marking so the port can be
			 * retried or used standalone. */
			slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
			RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
					slave_port_id);
			return -1;
		}
	}

	/* Register link status change callback with bonded device pointer as
	 * argument*/
	rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
			bond_ethdev_lsc_event_callback,
			&bonded_eth_dev->data->port_id);

	/* If bonded device is started then we can add the slave to our active
	 * slave array */
	if (bonded_eth_dev->data->dev_started) {
		rte_eth_link_get_nowait(slave_port_id, &link_props);

		if (link_props.link_status == ETH_LINK_UP) {
			/* First up slave becomes primary unless the user
			 * pinned one explicitly. */
			if (internals->active_slave_count == 0 &&
			    !internals->user_defined_primary_port)
				bond_ethdev_primary_set(internals,
							slave_port_id);

			/* Activate only if not already in the active array. */
			if (find_slave_by_id(internals->active_slaves,
					     internals->active_slave_count,
					     slave_port_id) ==
					     internals->active_slave_count)
				activate_slave(bonded_eth_dev, slave_port_id);
		}
	}

	/* Propagate the bond's VLAN filter configuration to the new slave. */
	slave_vlan_filter_set(bonded_port_id, slave_port_id);

	return 0;
}
static int fs_eth_dev_create(struct rte_vdev_device *vdev) { struct rte_eth_dev *dev; struct ether_addr *mac; struct fs_priv *priv; struct sub_device *sdev; const char *params; unsigned int socket_id; uint8_t i; int ret; dev = NULL; priv = NULL; socket_id = rte_socket_id(); INFO("Creating fail-safe device on NUMA socket %u", socket_id); params = rte_vdev_device_args(vdev); if (params == NULL) { ERROR("This PMD requires sub-devices, none provided"); return -1; } dev = rte_eth_vdev_allocate(vdev, sizeof(*priv)); if (dev == NULL) { ERROR("Unable to allocate rte_eth_dev"); return -1; } priv = PRIV(dev); priv->dev = dev; dev->dev_ops = &failsafe_ops; dev->data->mac_addrs = &PRIV(dev)->mac_addrs[0]; dev->data->dev_link = eth_link; PRIV(dev)->nb_mac_addr = 1; TAILQ_INIT(&PRIV(dev)->flow_list); dev->rx_pkt_burst = (eth_rx_burst_t)&failsafe_rx_burst; dev->tx_pkt_burst = (eth_tx_burst_t)&failsafe_tx_burst; ret = fs_sub_device_alloc(dev, params); if (ret) { ERROR("Could not allocate sub_devices"); goto free_dev; } ret = failsafe_args_parse(dev, params); if (ret) goto free_subs; ret = rte_eth_dev_owner_new(&priv->my_owner.id); if (ret) { ERROR("Failed to get unique owner identifier"); goto free_args; } snprintf(priv->my_owner.name, sizeof(priv->my_owner.name), FAILSAFE_OWNER_NAME); DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id, priv->my_owner.name, priv->my_owner.id); ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW, failsafe_eth_new_event_callback, dev); if (ret) { ERROR("Failed to register NEW callback"); goto free_args; } ret = failsafe_eal_init(dev); if (ret) goto unregister_new_callback; ret = fs_mutex_init(priv); if (ret) goto unregister_new_callback; ret = failsafe_hotplug_alarm_install(dev); if (ret) { ERROR("Could not set up plug-in event detection"); goto unregister_new_callback; } mac = &dev->data->mac_addrs[0]; if (failsafe_mac_from_arg) { /* * If MAC address was provided as a parameter, * apply to all probed slaves. 
*/ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) { ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac); if (ret) { ERROR("Failed to set default MAC address"); goto cancel_alarm; } } } else {
DpdkPcapResultCode_t deviceInit(int deviceId, char *errbuf) { struct rte_eth_conf portConf; struct rte_eth_rxconf rxConf; struct rte_eth_txconf txConf; int queueId = 0; memset(&portConf, 0, sizeof(portConf)); memset(&rxConf, 0, sizeof(rxConf)); memset(&txConf, 0, sizeof(txConf)); if (initFinished == 0) { snprintf (errbuf, PCAP_ERRBUF_SIZE, "Global DPDK init is not performed yet"); return DPDKPCAP_FAILURE; } if (portInitFinished[deviceId] == 1) { return DPDKPCAP_OK; } portConf.rxmode.split_hdr_size = 0; portConf.rxmode.header_split = 0; portConf.rxmode.hw_ip_checksum = 0; portConf.rxmode.hw_vlan_filter = 0; portConf.rxmode.jumbo_frame = 0; portConf.rxmode.hw_strip_crc = 0; portConf.txmode.mq_mode = ETH_MQ_TX_NONE; if (rte_eth_dev_configure(deviceId, DPDKPCAP_RX_QUEUE_NUMBER, DPDKPCAP_TX_QUEUE_NUMBER, &portConf) < 0) { snprintf (errbuf, PCAP_ERRBUF_SIZE, "Could not configure the device %d", deviceId); return DPDKPCAP_FAILURE; } rte_eth_dev_callback_register(deviceId, RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL); rxConf.rx_thresh.pthresh = DPDKPCAP_RX_PTHRESH; rxConf.rx_thresh.hthresh = DPDKPCAP_RX_HTHRESH; rxConf.rx_thresh.wthresh = DPDKPCAP_RX_WTHRESH; if (rte_eth_rx_queue_setup(deviceId, queueId, DPDKPCAP_RX_QUEUE_DESC_NUMBER, SOCKET_ID_ANY, &rxConf, rxPool) < 0) { snprintf (errbuf, PCAP_ERRBUF_SIZE, "Could not setup RX queue of the device %d", deviceId); return DPDKPCAP_FAILURE; } txConf.tx_thresh.pthresh = DPDKPCAP_TX_PTHRESH; txConf.tx_thresh.hthresh = DPDKPCAP_TX_HTHRESH; txConf.tx_thresh.wthresh = DPDKPCAP_TX_WTHRESH; if (rte_eth_tx_queue_setup(deviceId, queueId, DPDKPCAP_TX_QUEUE_DESC_NUMBER, SOCKET_ID_ANY, &txConf) < 0) { snprintf (errbuf, PCAP_ERRBUF_SIZE, "Could not setup TX queue of the device %d", deviceId); return DPDKPCAP_FAILURE; } portInitFinished[deviceId] = 1; return DPDKPCAP_OK; }