void counter_firewall_pkt(void *arg, struct rte_mbuf **buffer, int nb_rx)
{
    struct counter_t *counter = (struct counter_t *) arg;

    poll_counter(counter);
    if (nb_rx != 0) {
        uint64_t start_c = rte_get_tsc_cycles(), diff_c;
        struct indextable_entry *entry;
        struct rte_mbuf *ok_pkt;
        struct metadata_t *meta;

        counter->pkts_received_fw += nb_rx;
        /*
         * Look each packet up in the index table, record this firewall's
         * vote, and forward or drop the packet depending on the <drop_at>
         * threshold.
         */
        for (int i = 0; i < nb_rx; ++i) {
            struct ether_hdr *eth = rte_pktmbuf_mtod(buffer[i],
                                                     struct ether_hdr *);

            if (!is_same_ether_addr(&counter->fw_port_mac, &eth->d_addr)) {
                RTE_LOG(INFO, COUNTER, "Wrong d_MAC... "FORMAT_MAC"\n",
                        ARG_V_MAC(eth->d_addr));
                continue;
            }
            entry = indextable_get(counter->indextable, buffer[i]);
            if (entry != NULL) {
                ok_pkt = entry->packet;
                meta = &entry->meta;
                meta->decissions |= 1 << counter->chain_index;
                int decission_count = count_decissions(meta->decissions);

                if (decission_count >= counter->drop_at) {
                    fwd_to_wrapper(counter, ok_pkt, meta);
                } else {
                    rte_pktmbuf_free(ok_pkt);
                    counter->pkts_dropped++;
                }
                indextable_delete(counter->indextable, entry);
                counter->nb_mbuf--;
            } else {
                RTE_LOG(WARNING, COUNTER,
                        "Received unregistered packet.\n");
                /* print_packet_hex(buffer[i]); */
            }
        }
        diff_c = rte_get_tsc_cycles() - start_c;
        counter->cTime += diff_c; /* * 1000.0 / rte_get_tsc_hz(); */
    }
}
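/*
 * count_decissions() is called above but not defined in this snippet. A
 * minimal sketch, assuming the decision field is a plain bitmask holding
 * one vote bit per firewall in the chain; the real project may count its
 * votes differently.
 */
static inline int count_decissions(uint64_t decissions)
{
    /* popcount of the vote bitmask: one bit per chain member */
    return __builtin_popcountll(decissions);
}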
void counter_register_pkt(void *arg, struct rte_mbuf **buffer, int nb_rx)
{
    if (nb_rx == 0)
        return;

    struct counter_t *counter = (struct counter_t *) arg;
    uint64_t start_a = rte_get_tsc_cycles(), diff_a;

    if ((unsigned)nb_rx > rte_ring_free_count(counter->ring)) {
        RTE_LOG(ERR, COUNTER, "Not enough free entries in ring!\n");
    }

    /*
     * Clone the packets addressed to us and enqueue them in the ring;
     * this enqueue must be thread-safe.
     */
    struct rte_mbuf *bulk[nb_rx];
    unsigned nb_registered = 0;

    for (int i = 0; i < nb_rx; ++i) {
        struct ether_hdr *eth = rte_pktmbuf_mtod(buffer[i],
                                                 struct ether_hdr *);

        if (!is_same_ether_addr(&counter->rx_register->mac, &eth->d_addr)) {
            continue;
        }
        bulk[nb_registered] = rte_pktmbuf_clone(buffer[i],
                                                counter->clone_pool);
        if (bulk[nb_registered] == NULL) {
            RTE_LOG(ERR, COUNTER, "Could not clone mbuf!\n");
            continue;
        }
        nb_registered += 1;
    }

    int n = rte_ring_enqueue_burst(counter->ring, (void **)bulk,
                                   nb_registered);
    if (n < (int)nb_registered) {
        RTE_LOG(ERR, COUNTER,
                "Could not enqueue every new packet for registration! "
                "(%d/%u) free: %u\n",
                n, nb_registered, rte_ring_free_count(counter->ring));
    }
    diff_a = rte_get_tsc_cycles() - start_a;
    counter->aTime += diff_a; /* * 1000.0 / rte_get_tsc_hz(); */
    counter->nb_measurements_a += nb_rx;
}
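/*
 * A minimal sketch of how the two handlers above might be driven from a
 * polling loop. The port ids, queue 0, the burst size of 32 and the way
 * the original mbufs are released are assumptions made for illustration;
 * the real application wires these handlers up through its own rx path.
 */
static void counter_poll_loop(struct counter_t *counter, uint16_t reg_port,
                              uint16_t fw_port)
{
    struct rte_mbuf *pkts[32];
    int i, n;

    for (;;) {
        /* new packets: register clones, then release the originals
         * (freed here for brevity; the real app forwards them)
         */
        n = rte_eth_rx_burst(reg_port, 0, pkts, 32);
        counter_register_pkt(counter, pkts, n);
        for (i = 0; i < n; i++)
            rte_pktmbuf_free(pkts[i]);

        /* verdicts coming back from the firewall chain */
        n = rte_eth_rx_burst(fw_port, 0, pkts, 32);
        counter_firewall_pkt(counter, pkts, n);
        for (i = 0; i < n; i++)
            rte_pktmbuf_free(pkts[i]);
    }
}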
/*
 * This function learns the MAC address of the device and sets up the
 * initial L2 and L3 header info.
 */
int vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    int i, ret;
    struct ether_hdr *pkt_hdr;
    struct virtio_net *dev = vdev->dev;
    uint64_t portid = dev->device_fh;
    struct ipv4_hdr *ip;
    struct rte_eth_tunnel_filter_conf tunnel_filter_conf;

    if (unlikely(portid >= VXLAN_N_PORTS)) {
        RTE_LOG(INFO, VHOST_DATA,
            "(%"PRIu64") WARNING: Not configuring device, "
            "as already have %d ports for VXLAN.\n",
            dev->device_fh, VXLAN_N_PORTS);
        return -1;
    }

    /* Learn MAC address of guest device from packet */
    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
    if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) {
        RTE_LOG(INFO, VHOST_DATA,
            "(%"PRIu64") WARNING: This device is using an existing"
            " MAC address and has not been registered.\n",
            dev->device_fh);
        return -1;
    }

    for (i = 0; i < ETHER_ADDR_LEN; i++) {
        vdev->mac_address.addr_bytes[i] =
            vxdev.port[portid].vport_mac.addr_bytes[i] =
            pkt_hdr->s_addr.addr_bytes[i];
        vxdev.port[portid].peer_mac.addr_bytes[i] = peer_mac[i];
    }

    memset(&tunnel_filter_conf, 0,
           sizeof(struct rte_eth_tunnel_filter_conf));

    ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac);
    tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];

    /* inner MAC */
    ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac);

    tunnel_filter_conf.queue_id = vdev->rx_q;
    tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];

    if (tep_filter_type[filter_idx] == RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID)
        tunnel_filter_conf.inner_vlan = INNER_VLAN_ID;

    tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;

    ret = rte_eth_dev_filter_ctrl(ports[0], RTE_ETH_FILTER_TUNNEL,
                                  RTE_ETH_FILTER_ADD, &tunnel_filter_conf);
    if (ret) {
        RTE_LOG(ERR, VHOST_DATA,
            "%d Failed to add device MAC address to cloud filter\n",
            vdev->rx_q);
        return -1;
    }

    /* Print out inner MAC and VNI info. */
    RTE_LOG(INFO, VHOST_DATA,
        "(%d) MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VNI %d registered\n",
        vdev->rx_q,
        vdev->mac_address.addr_bytes[0],
        vdev->mac_address.addr_bytes[1],
        vdev->mac_address.addr_bytes[2],
        vdev->mac_address.addr_bytes[3],
        vdev->mac_address.addr_bytes[4],
        vdev->mac_address.addr_bytes[5],
        tenant_id_conf[vdev->rx_q]);

    vxdev.port[portid].vport_id = portid;

    for (i = 0; i < 4; i++) {
        /* Local VTEP IP */
        vxdev.port_ip |= vxlan_multicast_ips[portid][i] << (8 * i);
        /* Remote VTEP IP */
        vxdev.port[portid].peer_ip |=
            vxlan_overlay_ips[portid][i] << (8 * i);
    }

    vxdev.out_key = tenant_id_conf[vdev->rx_q];
    ether_addr_copy(&vxdev.port[portid].peer_mac,
                    &app_l2_hdr[portid].d_addr);
    ether_addr_copy(&ports_eth_addr[0], &app_l2_hdr[portid].s_addr);
    app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);

    ip = &app_ip_hdr[portid];
    ip->version_ihl = IP_VHL_DEF;
    ip->type_of_service = 0;
    ip->total_length = 0;
    ip->packet_id = 0;
    ip->fragment_offset = IP_DN_FRAGMENT_FLAG;
    ip->time_to_live = IP_DEFTTL;
    ip->next_proto_id = IPPROTO_UDP;
    ip->hdr_checksum = 0;
    ip->src_addr = vxdev.port_ip;
    ip->dst_addr = vxdev.port[portid].peer_ip;

    /* Set device as ready for RX. */
    vdev->ready = DEVICE_RX;
    return 0;
}
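/*
 * The VTEP IP assembly above ORs octet i into bits [8*i, 8*i+7]. On a
 * little-endian host that places the first octet in the lowest-addressed
 * byte, so the result is already in network byte order and can be stored
 * straight into ip->src_addr / ip->dst_addr without htonl(). A minimal
 * sketch of that equivalence (the helper name is ours):
 */
static uint32_t vtep_ip_assemble(const uint8_t octets[4])
{
    uint32_t ip = 0;
    int i;

    for (i = 0; i < 4; i++)
        ip |= (uint32_t)octets[i] << (8 * i);
    /* equals rte_cpu_to_be_32() of the dotted-quad value on LE hosts */
    return ip;
}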
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
    struct rte_eth_dev_info *device_info)
{
    struct enic *enic = pmd_priv(eth_dev);

    ENICPMD_FUNC_TRACE();
    /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
    device_info->max_rx_queues = enic->conf_rq_count / 2;
    device_info->max_tx_queues = enic->conf_wq_count;
    device_info->min_rx_bufsize = ENIC_MIN_MTU;
    /* "Max" mtu is not a typo. HW receives packet sizes up to the
     * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
     * a hint to the driver to size receive buffers accordingly so that
     * larger-than-vnic-mtu packets get truncated. For DPDK, we let
     * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
     * ignoring vNIC mtu.
     */
    device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
    device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
    device_info->rx_offload_capa = enic->rx_offload_capa;
    device_info->tx_offload_capa = enic->tx_offload_capa;
    device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
    device_info->default_rxconf = (struct rte_eth_rxconf) {
        .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
    };
    device_info->reta_size = enic->reta_size;
    device_info->hash_key_size = enic->hash_key_size;
    device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
    device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
        .nb_max = enic->config.rq_desc_count,
        .nb_min = ENIC_MIN_RQ_DESCS,
        .nb_align = ENIC_ALIGN_DESCS,
    };
    device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
        .nb_max = enic->config.wq_desc_count,
        .nb_min = ENIC_MIN_WQ_DESCS,
        .nb_align = ENIC_ALIGN_DESCS,
        .nb_seg_max = ENIC_TX_XMIT_MAX,
        .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
    };
    device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
        .burst_size = ENIC_DEFAULT_RX_BURST,
        .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
                             ENIC_DEFAULT_RX_RING_SIZE),
        .nb_queues = ENIC_DEFAULT_RX_RINGS,
    };
    device_info->default_txportconf = (struct rte_eth_dev_portconf) {
        .burst_size = ENIC_DEFAULT_TX_BURST,
        .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
                             ENIC_DEFAULT_TX_RING_SIZE),
        .nb_queues = ENIC_DEFAULT_TX_RINGS,
    };
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
    static const uint32_t ptypes[] = {
        RTE_PTYPE_L2_ETHER,
        RTE_PTYPE_L2_ETHER_VLAN,
        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
        RTE_PTYPE_L4_TCP,
        RTE_PTYPE_L4_UDP,
        RTE_PTYPE_L4_FRAG,
        RTE_PTYPE_L4_NONFRAG,
        RTE_PTYPE_UNKNOWN
    };
    static const uint32_t ptypes_overlay[] = {
        RTE_PTYPE_L2_ETHER,
        RTE_PTYPE_L2_ETHER_VLAN,
        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
        RTE_PTYPE_L4_TCP,
        RTE_PTYPE_L4_UDP,
        RTE_PTYPE_L4_FRAG,
        RTE_PTYPE_L4_NONFRAG,
        RTE_PTYPE_TUNNEL_GRENAT,
        RTE_PTYPE_INNER_L2_ETHER,
        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
        RTE_PTYPE_INNER_L4_TCP,
        RTE_PTYPE_INNER_L4_UDP,
        RTE_PTYPE_INNER_L4_FRAG,
        RTE_PTYPE_INNER_L4_NONFRAG,
        RTE_PTYPE_UNKNOWN
    };

    if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
        dev->rx_pkt_burst != NULL) {
        struct enic *enic = pmd_priv(dev);

        if (enic->overlay_offload)
            return ptypes_overlay;
        else
            return ptypes;
    }
    return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
    struct enic *enic = pmd_priv(eth_dev);

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return;

    ENICPMD_FUNC_TRACE();
    enic->promisc = 1;
    enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
    struct enic *enic = pmd_priv(eth_dev);

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return;
    ENICPMD_FUNC_TRACE();
    enic->promisc = 0;
    enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
    struct enic *enic = pmd_priv(eth_dev);

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return;

    ENICPMD_FUNC_TRACE();
    enic->allmulti = 1;
    enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
    struct enic *enic = pmd_priv(eth_dev);

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return;

    ENICPMD_FUNC_TRACE();
    enic->allmulti = 0;
    enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
    struct ether_addr *mac_addr,
    __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
    struct enic *enic = pmd_priv(eth_dev);

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return -E_RTE_SECONDARY;

    ENICPMD_FUNC_TRACE();
    return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
    struct enic *enic = pmd_priv(eth_dev);

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return;

    ENICPMD_FUNC_TRACE();
    if (enic_del_mac_address(enic, index))
        dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
                                struct ether_addr *addr)
{
    struct enic *enic = pmd_priv(eth_dev);
    int ret;

    if (rte_eal_process_type() != RTE_PROC_PRIMARY)
        return -E_RTE_SECONDARY;

    ENICPMD_FUNC_TRACE();
    ret = enic_del_mac_address(enic, 0);
    if (ret)
        return ret;
    return enic_set_mac_address(enic, addr->addr_bytes);
}

static void debug_log_add_del_addr(struct ether_addr *addr, bool add)
{
    char mac_str[ETHER_ADDR_FMT_SIZE];

    ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
    PMD_INIT_LOG(DEBUG, " %s address %s\n",
                 add ? "add" : "remove", mac_str);
}

static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
                                    struct ether_addr *mc_addr_set,
                                    uint32_t nb_mc_addr)
{
    struct enic *enic = pmd_priv(eth_dev);
    char mac_str[ETHER_ADDR_FMT_SIZE];
    struct ether_addr *addr;
    uint32_t i, j;
    int ret;

    ENICPMD_FUNC_TRACE();

    /* Validate the given addresses first */
    for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
        addr = &mc_addr_set[i];
        if (!is_multicast_ether_addr(addr) ||
            is_broadcast_ether_addr(addr)) {
            ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
            PMD_INIT_LOG(ERR, " invalid multicast address %s\n",
                         mac_str);
            return -EINVAL;
        }
    }

    /* Flush all if requested */
    if (nb_mc_addr == 0 || mc_addr_set == NULL) {
        PMD_INIT_LOG(DEBUG, " flush multicast addresses\n");
        for (i = 0; i < enic->mc_count; i++) {
            addr = &enic->mc_addrs[i];
            debug_log_add_del_addr(addr, false);
            ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
            if (ret)
                return ret;
        }
        enic->mc_count = 0;
        return 0;
    }

    if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
        PMD_INIT_LOG(ERR, " too many multicast addresses: max=%d\n",
                     ENIC_MULTICAST_PERFECT_FILTERS);
        return -ENOSPC;
    }

    /*
     * devcmd is slow, so apply the difference instead of flushing and
     * adding everything.
     * 1. Delete addresses on the NIC but not on the host
     */
    for (i = 0; i < enic->mc_count; i++) {
        addr = &enic->mc_addrs[i];
        for (j = 0; j < nb_mc_addr; j++) {
            if (is_same_ether_addr(addr, &mc_addr_set[j]))
                break;
        }
        if (j < nb_mc_addr)
            continue;
        debug_log_add_del_addr(addr, false);
        ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
        if (ret)
            return ret;
    }

    /* 2.
     * Add addresses on the host but not on the NIC
     */
    for (i = 0; i < nb_mc_addr; i++) {
        addr = &mc_addr_set[i];
        for (j = 0; j < enic->mc_count; j++) {
            if (is_same_ether_addr(addr, &enic->mc_addrs[j]))
                break;
        }
        if (j < enic->mc_count)
            continue;
        debug_log_add_del_addr(addr, true);
        ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
        if (ret)
            return ret;
    }

    /* Keep a copy so we can flush/apply later on. */
    memcpy(enic->mc_addrs, mc_addr_set,
           nb_mc_addr * sizeof(struct ether_addr));
    enic->mc_count = nb_mc_addr;

    return 0;
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
    struct enic *enic = pmd_priv(eth_dev);

    ENICPMD_FUNC_TRACE();
    return enic_set_mtu(enic, mtu);
}

static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_reta_entry64 *reta_conf,
                                      uint16_t reta_size)
{
    struct enic *enic = pmd_priv(dev);
    uint16_t i, idx, shift;

    ENICPMD_FUNC_TRACE();
    if (reta_size != ENIC_RSS_RETA_SIZE) {
        dev_err(enic, "reta_query: wrong reta_size. given=%u"
                " expected=%u\n",
                reta_size, ENIC_RSS_RETA_SIZE);
        return -EINVAL;
    }

    for (i = 0; i < reta_size; i++) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        if (reta_conf[idx].mask & (1ULL << shift))
            reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
                enic->rss_cpu.cpu[i / 4].b[i % 4]);
    }

    return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_reta_entry64 *reta_conf,
                                       uint16_t reta_size)
{
    struct enic *enic = pmd_priv(dev);
    union vnic_rss_cpu rss_cpu;
    uint16_t i, idx, shift;

    ENICPMD_FUNC_TRACE();
    if (reta_size != ENIC_RSS_RETA_SIZE) {
        dev_err(enic, "reta_update: wrong reta_size. given=%u"
                " expected=%u\n",
                reta_size, ENIC_RSS_RETA_SIZE);
        return -EINVAL;
    }

    /*
     * Start with the current reta and modify it per reta_conf, as we
     * need to push the entire reta even if we only modify one entry.
     */
    rss_cpu = enic->rss_cpu;
    for (i = 0; i < reta_size; i++) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        if (reta_conf[idx].mask & (1ULL << shift))
            rss_cpu.cpu[i / 4].b[i % 4] =
                enic_rte_rq_idx_to_sop_idx(reta_conf[idx].reta[shift]);
    }

    return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
                                       struct rte_eth_rss_conf *rss_conf)
{
    struct enic *enic = pmd_priv(dev);

    ENICPMD_FUNC_TRACE();
    return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                         struct rte_eth_rss_conf *rss_conf)
{
    struct enic *enic = pmd_priv(dev);

    ENICPMD_FUNC_TRACE();
    if (rss_conf == NULL)
        return -EINVAL;
    if (rss_conf->rss_key != NULL &&
        rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
        dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. "
given=%u" " expected=%u+\n", rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); return -EINVAL; } rss_conf->rss_hf = enic->rss_hf; if (rss_conf->rss_key != NULL) { int i; for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) { rss_conf->rss_key[i] = enic->rss_key.key[i / 10].b[i % 10]; } rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE; } return 0; } static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo) { struct enic *enic = pmd_priv(dev); struct vnic_rq *rq_sop; struct vnic_rq *rq_data; struct rte_eth_rxconf *conf; uint16_t sop_queue_idx; uint16_t data_queue_idx; ENICPMD_FUNC_TRACE(); sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id); data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id); rq_sop = &enic->rq[sop_queue_idx]; rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */ qinfo->mp = rq_sop->mp; qinfo->scattered_rx = rq_sop->data_queue_enable; qinfo->nb_desc = rq_sop->ring.desc_count; if (qinfo->scattered_rx) qinfo->nb_desc += rq_data->ring.desc_count; conf = &qinfo->conf; memset(conf, 0, sizeof(*conf)); conf->rx_free_thresh = rq_sop->rx_free_thresh; conf->rx_drop_en = 1; /* * Except VLAN stripping (port setting), all the checksum offloads * are always enabled. */ conf->offloads = enic->rx_offload_capa; if (!enic->ig_vlan_strip_en) conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; /* rx_thresh and other fields are not applicable for enic */ } static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id, struct rte_eth_txq_info *qinfo) { struct enic *enic = pmd_priv(dev); struct vnic_wq *wq = &enic->wq[tx_queue_id]; ENICPMD_FUNC_TRACE(); qinfo->nb_desc = wq->ring.desc_count; memset(&qinfo->conf, 0, sizeof(qinfo->conf)); qinfo->conf.offloads = wq->offloads; /* tx_thresh, and all the other fields are not applicable for enic */ } static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { struct enic *enic = pmd_priv(eth_dev); ENICPMD_FUNC_TRACE(); vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); return 0; } static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { struct enic *enic = pmd_priv(eth_dev); ENICPMD_FUNC_TRACE(); vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); return 0; } static int udp_tunnel_common_check(struct enic *enic, struct rte_eth_udp_tunnel *tnl) { if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) return -ENOTSUP; if (!enic->overlay_offload) { PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not " "supported\n"); return -ENOTSUP; } return 0; } static int update_vxlan_port(struct enic *enic, uint16_t port) { if (vnic_dev_overlay_offload_cfg(enic->vdev, OVERLAY_CFG_VXLAN_PORT_UPDATE, port)) { PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n"); return -EINVAL; } PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port); enic->vxlan_port = port; return 0; } static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev, struct rte_eth_udp_tunnel *tnl) { struct enic *enic = pmd_priv(eth_dev); int ret; ENICPMD_FUNC_TRACE(); ret = udp_tunnel_common_check(enic, tnl); if (ret) return ret; /* * The NIC has 1 configurable VXLAN port number. "Adding" a new port * number replaces it. 
     */
    if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
        PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
                     tnl->udp_port);
        return -EINVAL;
    }
    return update_vxlan_port(enic, tnl->udp_port);
}

static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
                                           struct rte_eth_udp_tunnel *tnl)
{
    struct enic *enic = pmd_priv(eth_dev);
    int ret;

    ENICPMD_FUNC_TRACE();
    ret = udp_tunnel_common_check(enic, tnl);
    if (ret)
        return ret;
    /*
     * Clear the previously set port number and restore the
     * hardware default port number. Some drivers disable VXLAN
     * offloads when there are no configured port numbers. But
     * enic does not do that as VXLAN is part of overlay offload,
     * which is tied to inner RSS and TSO.
     */
    if (tnl->udp_port != enic->vxlan_port) {
        PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
                     tnl->udp_port);
        return -EINVAL;
    }
    return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
}

static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
                                      char *fw_version, size_t fw_size)
{
    struct vnic_devcmd_fw_info *info;
    struct enic *enic;
    int ret;

    ENICPMD_FUNC_TRACE();
    if (fw_version == NULL || fw_size <= 0)
        return -EINVAL;
    enic = pmd_priv(eth_dev);
    ret = vnic_dev_fw_info(enic->vdev, &info);
    if (ret)
        return ret;
    snprintf(fw_version, fw_size, "%s %s",
             info->fw_version, info->fw_build);
    fw_version[fw_size - 1] = '\0';
    return 0;
}

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
    .dev_configure = enicpmd_dev_configure,
    .dev_start = enicpmd_dev_start,
    .dev_stop = enicpmd_dev_stop,
    .dev_set_link_up = NULL,
    .dev_set_link_down = NULL,
    .dev_close = enicpmd_dev_close,
    .promiscuous_enable = enicpmd_dev_promiscuous_enable,
    .promiscuous_disable = enicpmd_dev_promiscuous_disable,
    .allmulticast_enable = enicpmd_dev_allmulticast_enable,
    .allmulticast_disable = enicpmd_dev_allmulticast_disable,
    .link_update = enicpmd_dev_link_update,
    .stats_get = enicpmd_dev_stats_get,
    .stats_reset = enicpmd_dev_stats_reset,
    .queue_stats_mapping_set = NULL,
    .dev_infos_get = enicpmd_dev_info_get,
    .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
    .mtu_set = enicpmd_mtu_set,
    .vlan_filter_set = NULL,
    .vlan_tpid_set = NULL,
    .vlan_offload_set = enicpmd_vlan_offload_set,
    .vlan_strip_queue_set = NULL,
    .rx_queue_start = enicpmd_dev_rx_queue_start,
    .rx_queue_stop = enicpmd_dev_rx_queue_stop,
    .tx_queue_start = enicpmd_dev_tx_queue_start,
    .tx_queue_stop = enicpmd_dev_tx_queue_stop,
    .rx_queue_setup = enicpmd_dev_rx_queue_setup,
    .rx_queue_release = enicpmd_dev_rx_queue_release,
    .rx_queue_count = enicpmd_dev_rx_queue_count,
    .rx_descriptor_done = NULL,
    .tx_queue_setup = enicpmd_dev_tx_queue_setup,
    .tx_queue_release = enicpmd_dev_tx_queue_release,
    .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
    .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
    .rxq_info_get = enicpmd_dev_rxq_info_get,
    .txq_info_get = enicpmd_dev_txq_info_get,
    .dev_led_on = NULL,
    .dev_led_off = NULL,
    .flow_ctrl_get = NULL,
    .flow_ctrl_set = NULL,
    .priority_flow_ctrl_set = NULL,
    .mac_addr_add = enicpmd_add_mac_addr,
    .mac_addr_remove = enicpmd_remove_mac_addr,
    .mac_addr_set = enicpmd_set_mac_addr,
    .set_mc_addr_list = enicpmd_set_mc_addr_list,
    .filter_ctrl = enicpmd_dev_filter_ctrl,
    .reta_query = enicpmd_dev_rss_reta_query,
    .reta_update = enicpmd_dev_rss_reta_update,
    .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
    .rss_hash_update = enicpmd_dev_rss_hash_update,
    .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
    .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
    .fw_version_get =
        enicpmd_dev_fw_version_get,
};

static int enic_parse_zero_one(const char *key,
                               const char *value,
                               void *opaque)
{
    struct enic *enic;
    bool b;

    enic = (struct enic *)opaque;
    if (strcmp(value, "0") == 0) {
        b = false;
    } else if (strcmp(value, "1") == 0) {
        b = true;
    } else {
        dev_err(enic, "Invalid value for %s"
                ": expected=0|1 given=%s\n", key, value);
        return -EINVAL;
    }
    if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
        enic->disable_overlay = b;
    if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
        enic->enable_avx2_rx = b;
    return 0;
}

static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
                                      const char *value,
                                      void *opaque)
{
    struct enic *enic;

    enic = (struct enic *)opaque;
    if (strcmp(value, "trunk") == 0) {
        /* Trunk mode: always tag */
        enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
    } else if (strcmp(value, "untag") == 0) {
        /* Untag default VLAN mode: untag if VLAN = default VLAN */
        enic->ig_vlan_rewrite_mode =
            IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
    } else if (strcmp(value, "priority") == 0) {
        /*
         * Priority-tag default VLAN mode: priority tag (VLAN header
         * with ID=0) if VLAN = default
         */
        enic->ig_vlan_rewrite_mode =
            IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
    } else if (strcmp(value, "pass") == 0) {
        /* Pass through mode: do not touch tags */
        enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
    } else {
        dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
                ": expected=trunk|untag|priority|pass given=%s\n",
                value);
        return -EINVAL;
    }
    return 0;
}

static int enic_check_devargs(struct rte_eth_dev *dev)
{
    static const char *const valid_keys[] = {
        ENIC_DEVARG_DISABLE_OVERLAY,
        ENIC_DEVARG_ENABLE_AVX2_RX,
        ENIC_DEVARG_IG_VLAN_REWRITE,
        NULL};
    struct enic *enic = pmd_priv(dev);
    struct rte_kvargs *kvlist;

    ENICPMD_FUNC_TRACE();

    enic->disable_overlay = false;
    enic->enable_avx2_rx = false;
    enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
    if (!dev->device->devargs)
        return 0;
    kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
    if (!kvlist)
        return -EINVAL;
    if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
                           enic_parse_zero_one, enic) < 0 ||
        rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
                           enic_parse_zero_one, enic) < 0 ||
        rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
                           enic_parse_ig_vlan_rewrite, enic) < 0) {
        rte_kvargs_free(kvlist);
        return -EINVAL;
    }
    rte_kvargs_free(kvlist);
    return 0;
}

/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
    struct rte_pci_device *pdev;
    struct rte_pci_addr *addr;
    struct enic *enic = pmd_priv(eth_dev);
    int err;

    ENICPMD_FUNC_TRACE();

    enic->port_id = eth_dev->data->port_id;
    enic->rte_dev = eth_dev;
    eth_dev->dev_ops = &enicpmd_eth_dev_ops;
    eth_dev->rx_pkt_burst = &enic_recv_pkts;
    eth_dev->tx_pkt_burst = &enic_xmit_pkts;
    eth_dev->tx_pkt_prepare = &enic_prep_pkts;
    /* Let rte_eth_dev_close() release the port resources */
    eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

    pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
    rte_eth_copy_pci_info(eth_dev, pdev);
    enic->pdev = pdev;
    addr = &pdev->addr;

    snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
             addr->domain, addr->bus, addr->devid, addr->function);

    err = enic_check_devargs(eth_dev);
    if (err)
        return err;
    return enic_probe(enic);
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                              struct rte_pci_device *pci_dev)
{
    return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
                                         eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
    return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
    .id_table = pci_id_enic_map,
    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                 RTE_PCI_DRV_IOVA_AS_VA,
    .probe = eth_enic_pci_probe,
    .remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
    ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
    ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
    ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
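/*
 * The devargs registered above are supplied per device on the EAL command
 * line, e.g. "-w 05:00.0,disable-overlay=1,ig-vlan-rewrite=pass" (the PCI
 * address and key spellings here are illustrative; the authoritative names
 * are the ENIC_DEVARG_* macros). A minimal standalone sketch of the same
 * rte_kvargs flow used by enic_check_devargs():
 */
static int print_kv(const char *key, const char *value,
                    void *opaque __rte_unused)
{
    printf("devarg %s=%s\n", key, value);
    return 0; /* a negative return would abort rte_kvargs_process() */
}

static int parse_example_devargs(const char *args)
{
    /* hypothetical key list for the sketch */
    static const char *const keys[] = { "disable-overlay",
                                        "ig-vlan-rewrite", NULL };
    struct rte_kvargs *kvlist = rte_kvargs_parse(args, keys);

    if (kvlist == NULL)
        return -EINVAL; /* unknown key or malformed string */
    rte_kvargs_process(kvlist, "disable-overlay", print_kv, NULL);
    rte_kvargs_process(kvlist, "ig-vlan-rewrite", print_kv, NULL);
    rte_kvargs_free(kvlist);
    return 0;
}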
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. In addition to full and
 *   empty masks of destination address, individual/group mask is
 *   also supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
                   efx_filter_spec_t *efx_spec,
                   struct rte_flow_error *error)
{
    int rc;
    const struct rte_flow_item_eth *spec = NULL;
    const struct rte_flow_item_eth *mask = NULL;
    const struct rte_flow_item_eth supp_mask = {
        .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        .type = 0xffff,
    };
    const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
        0x01, 0x00, 0x00, 0x00, 0x00, 0x00
    };

    rc = sfc_flow_parse_init(item,
                             (const void **)&spec,
                             (const void **)&mask,
                             &supp_mask,
                             &rte_flow_item_eth_mask,
                             sizeof(struct rte_flow_item_eth),
                             error);
    if (rc != 0)
        return rc;

    /* If "spec" is not set, could be any Ethernet */
    if (spec == NULL)
        return 0;

    if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
        rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
                   EFX_MAC_ADDR_LEN);
    } else if (memcmp(mask->dst.addr_bytes, ig_mask,
                      EFX_MAC_ADDR_LEN) == 0) {
        if (is_unicast_ether_addr(&spec->dst))
            efx_spec->efs_match_flags |=
                EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
        else
            efx_spec->efs_match_flags |=
                EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
    } else if (!is_zero_ether_addr(&mask->dst)) {
        goto fail_bad_mask;
    }

    if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
        rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
                   EFX_MAC_ADDR_LEN);
    } else if (!is_zero_ether_addr(&mask->src)) {
        goto fail_bad_mask;
    }

    /*
     * Ether type is in big-endian byte order in item and
     * in little-endian in efx_spec, so byte swap is used
     */
    if (mask->type == supp_mask.type) {
        efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
        efx_spec->efs_ether_type = rte_bswap16(spec->type);
    } else if (mask->type != 0) {
        goto fail_bad_mask;
    }

    return 0;

fail_bad_mask:
    rte_flow_error_set(error, EINVAL,
                       RTE_FLOW_ERROR_TYPE_ITEM, item,
                       "Bad mask in the ETH pattern item");
    return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
                    efx_filter_spec_t *efx_spec,
                    struct rte_flow_error *error)
{
    int rc;
    uint16_t vid;
    const struct rte_flow_item_vlan *spec = NULL;
    const struct rte_flow_item_vlan *mask = NULL;
    const struct rte_flow_item_vlan supp_mask = {
        .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
    };

    rc = sfc_flow_parse_init(item,
                             (const void **)&spec,
                             (const void **)&mask,
                             &supp_mask,
                             NULL,
                             sizeof(struct rte_flow_item_vlan),
                             error);
    if (rc != 0)
        return rc;

    /*
     * VID is in big-endian byte order in item and
     * in little-endian in efx_spec, so byte swap is used.
     * If two VLAN items are included, the first matches
     * the outer tag and the next matches the inner tag.
     */
    if (mask->tci == supp_mask.tci) {
        /* Apply mask to keep VID only */
        vid = rte_bswap16(spec->tci & mask->tci);

        if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_OUTER_VID)) {
            efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
            efx_spec->efs_outer_vid = vid;
        } else if (!(efx_spec->efs_match_flags &
                     EFX_FILTER_MATCH_INNER_VID)) {
            efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
            efx_spec->efs_inner_vid = vid;
        } else {
            rte_flow_error_set(error, EINVAL,
                               RTE_FLOW_ERROR_TYPE_ITEM, item,
                               "More than two VLAN items");
            return -rte_errno;
        }
    } else {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                           "VLAN ID in TCI match is required");
        return -rte_errno;
    }

    return 0;
}
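/*
 * A hedged sketch of a flow rule that would exercise the two parsers
 * above: match a destination MAC plus an outer VLAN ID and steer to
 * queue 1. The port id, MAC, VID and queue index are illustrative and
 * error handling is minimal.
 */
static int install_eth_vlan_flow(uint16_t port_id)
{
    struct rte_flow_error err;
    const struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_item_eth eth_spec = {
        .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
    };
    struct rte_flow_item_eth eth_mask = {
        .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
    };
    /* TCI in big-endian, masked down to the 12 VID bits */
    struct rte_flow_item_vlan vlan_spec = {
        .tci = rte_cpu_to_be_16(100),
    };
    struct rte_flow_item_vlan vlan_mask = {
        .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
    };
    const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &eth_spec, .mask = &eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_VLAN,
          .spec = &vlan_spec, .mask = &vlan_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    const struct rte_flow_action_queue queue = { .index = 1 };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    /* rte_flow_create() returns NULL and sets rte_errno on failure */
    return rte_flow_create(port_id, &attr, pattern, actions, &err)
        ? 0 : -rte_errno;
}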
static void firewall_replay(const unsigned char *pkts[], int pkts_nb,
                            int *pkts_size)
{
    struct pg_brick *gen_west, *gen_east;
    struct pg_brick *fw;
    struct pg_brick *col_west, *col_east;
    struct pg_error *error = NULL;
    uint16_t i, packet_count;
    struct rte_mbuf *packet;
    struct ether_hdr *eth;
    uint64_t filtered_pkts_mask;
    struct rte_mbuf **filtered_pkts;
    struct ether_addr tmp_addr;
    int ret;

    /* have some collectors and generators on each side
     * [collector]--[generator>]--[firewall]--[<generator]--[collector]
     *               10.0.2.15                 173.194.40.111
     *               8:0:27:b6:5:16            52:54:0:12:35:2
     */
    gen_west = pg_packetsgen_new("gen_west", 1, 1, EAST_SIDE,
                                 &packet, 1, &error);
    g_assert(!error);
    gen_east = pg_packetsgen_new("gen_east", 1, 1, WEST_SIDE,
                                 &packet, 1, &error);
    g_assert(!error);
    fw = pg_firewall_new("fw", 1, 1, PG_NONE, &error);
    g_assert(!error);
    col_west = pg_collect_new("col_west", 1, 1, &error);
    g_assert(!error);
    col_east = pg_collect_new("col_east", 1, 1, &error);
    g_assert(!error);

    pg_brick_link(col_west, gen_west, &error);
    g_assert(!error);
    pg_brick_link(gen_west, fw, &error);
    g_assert(!error);
    pg_brick_link(fw, gen_east, &error);
    g_assert(!error);
    pg_brick_link(gen_east, col_east, &error);
    g_assert(!error);

    /* open all traffic of 10.0.2.15 from the west side of the firewall;
     * returning traffic should be allowed due to STATEFUL option
     */
    ret = pg_firewall_rule_add(fw, "src host 10.0.2.15", WEST_SIDE, 1,
                               &error);
    g_assert(!error);
    g_assert(ret == 0);
    ret = pg_firewall_reload(fw, &error);
    g_assert(!error);
    g_assert(ret == 0);

    /* replay traffic */
    for (i = 0; i < pkts_nb; i++) {
        struct ip *ip;

        packet = build_packet(pkts[i], pkts_size[i]);
        eth = rte_pktmbuf_mtod(packet, struct ether_hdr *);
        ip = (struct ip *)(eth + 1);
        if (ip->ip_src.s_addr == inet_addr("10.0.2.15")) {
            pg_brick_poll(gen_west, &packet_count, &error);
            g_assert(!error);
            g_assert(packet_count == 1);
            filtered_pkts = pg_brick_west_burst_get(col_east,
                                                    &filtered_pkts_mask,
                                                    &error);
            g_assert(!error);
            g_assert(pg_mask_count(filtered_pkts_mask) == 1);
            /* check eth source address */
            eth = rte_pktmbuf_mtod(filtered_pkts[0], struct ether_hdr *);
            pg_scan_ether_addr(&tmp_addr, "08:00:27:b6:05:16");
            g_assert(is_same_ether_addr(&eth->s_addr, &tmp_addr));
            /* check ip source address */
            ip = (struct ip *)(eth + 1);
            g_assert(ip->ip_src.s_addr == inet_addr("10.0.2.15"));
        } else if (ip->ip_src.s_addr == inet_addr("173.194.40.111")) {
            pg_brick_poll(gen_east, &packet_count, &error);
            g_assert(!error);
            g_assert(packet_count == 1);
            filtered_pkts = pg_brick_east_burst_get(col_west,
                                                    &filtered_pkts_mask,
                                                    &error);
            g_assert(!error);
            g_assert(pg_mask_count(filtered_pkts_mask) == 1);
            /* check eth source address */
            eth = rte_pktmbuf_mtod(filtered_pkts[0], struct ether_hdr *);
            pg_scan_ether_addr(&tmp_addr, "52:54:00:12:35:02");
            g_assert(is_same_ether_addr(&eth->s_addr, &tmp_addr));
            /* check ip source address */
            ip = (struct ip *)(eth + 1);
            g_assert(ip->ip_src.s_addr == inet_addr("173.194.40.111"));
        } else {
            g_assert(0);
        }
        rte_pktmbuf_free(packet);

        /* ensure that the connection is tracked even when reloading */
        ret = pg_firewall_rule_add(fw, "src host 6.6.6.6", WEST_SIDE,
                                   0, &error);
        g_assert(!error);
        g_assert(ret == 0);
        ret = pg_firewall_reload(fw, &error);
        g_assert(!error);
static void avf_dev_info_get(struct rte_eth_dev *dev,
                             struct rte_eth_dev_info *dev_info)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

    memset(dev_info, 0, sizeof(*dev_info));
    dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
    dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
    dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
    dev_info->max_rx_pktlen = AVF_FRAME_SIZE_MAX;
    dev_info->hash_key_size = vf->vf_res->rss_key_size;
    dev_info->reta_size = vf->vf_res->rss_lut_size;
    dev_info->flow_type_rss_offloads = AVF_RSS_OFFLOAD_ALL;
    dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
    dev_info->rx_offload_capa =
        DEV_RX_OFFLOAD_VLAN_STRIP |
        DEV_RX_OFFLOAD_IPV4_CKSUM |
        DEV_RX_OFFLOAD_UDP_CKSUM |
        DEV_RX_OFFLOAD_TCP_CKSUM;
    dev_info->tx_offload_capa =
        DEV_TX_OFFLOAD_VLAN_INSERT |
        DEV_TX_OFFLOAD_IPV4_CKSUM |
        DEV_TX_OFFLOAD_UDP_CKSUM |
        DEV_TX_OFFLOAD_TCP_CKSUM |
        DEV_TX_OFFLOAD_SCTP_CKSUM |
        DEV_TX_OFFLOAD_TCP_TSO;

    dev_info->default_rxconf = (struct rte_eth_rxconf) {
        .rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
        .rx_drop_en = 0,
    };

    dev_info->default_txconf = (struct rte_eth_txconf) {
        .tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
        .tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
        .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                     ETH_TXQ_FLAGS_NOOFFLOADS,
    };

    dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
        .nb_max = AVF_MAX_RING_DESC,
        .nb_min = AVF_MIN_RING_DESC,
        .nb_align = AVF_ALIGN_RING_DESC,
    };

    dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
        .nb_max = AVF_MAX_RING_DESC,
        .nb_min = AVF_MIN_RING_DESC,
        .nb_align = AVF_ALIGN_RING_DESC,
    };
}

static const uint32_t *
avf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
    static const uint32_t ptypes[] = {
        RTE_PTYPE_L2_ETHER,
        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
        RTE_PTYPE_L4_FRAG,
        RTE_PTYPE_L4_ICMP,
        RTE_PTYPE_L4_NONFRAG,
        RTE_PTYPE_L4_SCTP,
        RTE_PTYPE_L4_TCP,
        RTE_PTYPE_L4_UDP,
        RTE_PTYPE_UNKNOWN
    };
    return ptypes;
}

int
avf_dev_link_update(struct rte_eth_dev *dev,
                    __rte_unused int wait_to_complete)
{
    struct rte_eth_link new_link;
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

    /* Only read status info stored in the VF; the info is updated when
     * we receive a LINK_CHANGE event from the PF over virtchnl.
     */
    switch (vf->link_speed) {
    case VIRTCHNL_LINK_SPEED_100MB:
        new_link.link_speed = ETH_SPEED_NUM_100M;
        break;
    case VIRTCHNL_LINK_SPEED_1GB:
        new_link.link_speed = ETH_SPEED_NUM_1G;
        break;
    case VIRTCHNL_LINK_SPEED_10GB:
        new_link.link_speed = ETH_SPEED_NUM_10G;
        break;
    case VIRTCHNL_LINK_SPEED_20GB:
        new_link.link_speed = ETH_SPEED_NUM_20G;
        break;
    case VIRTCHNL_LINK_SPEED_25GB:
        new_link.link_speed = ETH_SPEED_NUM_25G;
        break;
    case VIRTCHNL_LINK_SPEED_40GB:
        new_link.link_speed = ETH_SPEED_NUM_40G;
        break;
    default:
        new_link.link_speed = ETH_SPEED_NUM_NONE;
        break;
    }

    new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
    new_link.link_status = vf->link_up ?
                           ETH_LINK_UP : ETH_LINK_DOWN;
    /* autoneg is on unless a fixed speed was requested */
    new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                              ETH_LINK_SPEED_FIXED);

    if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
                            *(uint64_t *)&dev->data->dev_link,
                            *(uint64_t *)&new_link) == 0)
        return -1;

    return 0;
}

static void
avf_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    int ret;

    if (vf->promisc_unicast_enabled)
        return;

    ret = avf_config_promisc(adapter, TRUE, vf->promisc_multicast_enabled);
    if (!ret)
        vf->promisc_unicast_enabled = TRUE;
}

static void
avf_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    int ret;

    if (!vf->promisc_unicast_enabled)
        return;

    ret = avf_config_promisc(adapter, FALSE,
                             vf->promisc_multicast_enabled);
    if (!ret)
        vf->promisc_unicast_enabled = FALSE;
}

static void
avf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    int ret;

    if (vf->promisc_multicast_enabled)
        return;

    ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, TRUE);
    if (!ret)
        vf->promisc_multicast_enabled = TRUE;
}

static void
avf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    int ret;

    if (!vf->promisc_multicast_enabled)
        return;

    ret = avf_config_promisc(adapter, vf->promisc_unicast_enabled, FALSE);
    if (!ret)
        vf->promisc_multicast_enabled = FALSE;
}

static int
avf_dev_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr,
                     __rte_unused uint32_t index,
                     __rte_unused uint32_t pool)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    int err;

    if (is_zero_ether_addr(addr)) {
        PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
        return -EINVAL;
    }

    err = avf_add_del_eth_addr(adapter, addr, TRUE);
    if (err) {
        PMD_DRV_LOG(ERR, "fail to add MAC address");
        return -EIO;
    }

    vf->mac_num++;

    return 0;
}

static void
avf_dev_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    struct ether_addr *addr;
    int err;

    addr = &dev->data->mac_addrs[index];

    err = avf_add_del_eth_addr(adapter, addr, FALSE);
    if (err)
        PMD_DRV_LOG(ERR, "fail to delete MAC address");

    vf->mac_num--;
}

static int
avf_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    int err;

    if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
        return -ENOTSUP;

    err = avf_add_del_vlan(adapter, vlan_id, on);
    if (err)
        return -EIO;
    return 0;
}

static int
avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
    int err;

    if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
        return -ENOTSUP;

    /* Vlan stripping setting */
    if (mask & ETH_VLAN_STRIP_MASK) {
        /* Enable or disable VLAN stripping */
        if
            (dev_conf->rxmode.hw_vlan_strip)
            err = avf_enable_vlan_strip(adapter);
        else
            err = avf_disable_vlan_strip(adapter);

        if (err)
            return -EIO;
    }
    return 0;
}

static int
avf_dev_rss_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    uint8_t *lut;
    uint16_t i, idx, shift;
    int ret;

    if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
        return -ENOTSUP;

    if (reta_size != vf->vf_res->rss_lut_size) {
        PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                    "(%d) doesn't match what the hardware can "
                    "support (%d)", reta_size,
                    vf->vf_res->rss_lut_size);
        return -EINVAL;
    }

    lut = rte_zmalloc("rss_lut", reta_size, 0);
    if (!lut) {
        PMD_DRV_LOG(ERR, "No memory can be allocated");
        return -ENOMEM;
    }
    /* keep the old lut table so it can be restored on failure */
    rte_memcpy(lut, vf->rss_lut, reta_size);
    for (i = 0; i < reta_size; i++) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        if (reta_conf[idx].mask & (1ULL << shift))
            vf->rss_lut[i] = reta_conf[idx].reta[shift];
    }

    /* send the virtchnl op to configure RSS */
    ret = avf_configure_rss_lut(adapter);
    if (ret) /* revert to the old lut table */
        rte_memcpy(vf->rss_lut, lut, reta_size);
    rte_free(lut);

    return ret;
}

static int
avf_dev_rss_reta_query(struct rte_eth_dev *dev,
                       struct rte_eth_rss_reta_entry64 *reta_conf,
                       uint16_t reta_size)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    uint16_t i, idx, shift;

    if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
        return -ENOTSUP;

    if (reta_size != vf->vf_res->rss_lut_size) {
        PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                    "(%d) doesn't match what the hardware can "
                    "support (%d)", reta_size,
                    vf->vf_res->rss_lut_size);
        return -EINVAL;
    }

    for (i = 0; i < reta_size; i++) {
        idx = i / RTE_RETA_GROUP_SIZE;
        shift = i % RTE_RETA_GROUP_SIZE;
        if (reta_conf[idx].mask & (1ULL << shift))
            reta_conf[idx].reta[shift] = vf->rss_lut[i];
    }

    return 0;
}

static int
avf_dev_rss_hash_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_conf *rss_conf)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

    if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
        return -ENOTSUP;

    /* HENA setting, it is enabled by default, no change */
    if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
        PMD_DRV_LOG(DEBUG, "No key to be configured");
        return 0;
    } else if (rss_conf->rss_key_len != vf->vf_res->rss_key_size) {
        PMD_DRV_LOG(ERR, "The size of hash key configured "
                    "(%d) doesn't match the size the hardware can "
                    "support (%d)", rss_conf->rss_key_len,
                    vf->vf_res->rss_key_size);
        return -EINVAL;
    }

    rte_memcpy(vf->rss_key, rss_conf->rss_key, rss_conf->rss_key_len);

    return avf_configure_rss_key(adapter);
}

static int
avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                          struct rte_eth_rss_conf *rss_conf)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);

    if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF))
        return -ENOTSUP;

    /* Just set it to default value now.
     */
    rss_conf->rss_hf = AVF_RSS_OFFLOAD_ALL;

    if (!rss_conf->rss_key)
        return 0;

    rss_conf->rss_key_len = vf->vf_res->rss_key_size;
    rte_memcpy(rss_conf->rss_key, vf->rss_key, rss_conf->rss_key_len);

    return 0;
}

static int
avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
    uint32_t frame_size = mtu + AVF_ETH_OVERHEAD;
    int ret = 0;

    if (mtu < ETHER_MIN_MTU || frame_size > AVF_FRAME_SIZE_MAX)
        return -EINVAL;

    /* MTU setting is forbidden while the port is started */
    if (dev->data->dev_started) {
        PMD_DRV_LOG(ERR, "port must be stopped before configuration");
        return -EBUSY;
    }

    if (frame_size > ETHER_MAX_LEN)
        dev->data->dev_conf.rxmode.offloads |=
            DEV_RX_OFFLOAD_JUMBO_FRAME;
    else
        dev->data->dev_conf.rxmode.offloads &=
            ~DEV_RX_OFFLOAD_JUMBO_FRAME;

    dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

    return ret;
}

static void
avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
                             struct ether_addr *mac_addr)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
    struct ether_addr *perm_addr, *old_addr;
    int ret;

    old_addr = (struct ether_addr *)hw->mac.addr;
    perm_addr = (struct ether_addr *)hw->mac.perm_addr;

    if (is_same_ether_addr(mac_addr, old_addr))
        return;

    /* If the MAC address is configured by host, skip the setting */
    if (is_valid_assigned_ether_addr(perm_addr))
        return;

    ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
    if (ret)
        PMD_DRV_LOG(ERR, "Fail to delete old MAC:"
                    " %02X:%02X:%02X:%02X:%02X:%02X",
                    old_addr->addr_bytes[0],
                    old_addr->addr_bytes[1],
                    old_addr->addr_bytes[2],
                    old_addr->addr_bytes[3],
                    old_addr->addr_bytes[4],
                    old_addr->addr_bytes[5]);

    ret = avf_add_del_eth_addr(adapter, mac_addr, TRUE);
    if (ret)
        PMD_DRV_LOG(ERR, "Fail to add new MAC:"
                    " %02X:%02X:%02X:%02X:%02X:%02X",
                    mac_addr->addr_bytes[0],
                    mac_addr->addr_bytes[1],
                    mac_addr->addr_bytes[2],
                    mac_addr->addr_bytes[3],
                    mac_addr->addr_bytes[4],
                    mac_addr->addr_bytes[5]);

    ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
}

static int
avf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct virtchnl_eth_stats *pstats = NULL;
    int ret;

    ret = avf_query_stats(adapter, &pstats);
    if (ret == 0) {
        stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
                          pstats->rx_broadcast;
        stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
                          pstats->tx_unicast;
        stats->imissed = pstats->rx_discards;
        stats->oerrors = pstats->tx_errors + pstats->tx_discards;
        stats->ibytes = pstats->rx_bytes;
        stats->obytes = pstats->tx_bytes;
        return 0;
    }
    /* only report failure when the query itself failed */
    PMD_DRV_LOG(ERR, "Get statistics failed");
    return -EIO;
}

static int
avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
    uint16_t msix_intr;

    msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
    if (msix_intr == AVF_MISC_VEC_ID) {
        PMD_DRV_LOG(INFO, "MISC is also enabled for control");
        AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
                      AVFINT_DYN_CTL01_INTENA_MASK |
                      AVFINT_DYN_CTL01_ITR_INDX_MASK);
    } else {
        AVF_WRITE_REG(hw,
                      AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START),
                      AVFINT_DYN_CTLN1_INTENA_MASK |
                      AVFINT_DYN_CTLN1_ITR_INDX_MASK);
    }

    AVF_WRITE_FLUSH(hw);

    rte_intr_enable(&pci_dev->intr_handle);

    return 0;
}

static int
avf_dev_rx_queue_intr_disable(struct
                              rte_eth_dev *dev, uint16_t queue_id)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
    uint16_t msix_intr;

    msix_intr = pci_dev->intr_handle.intr_vec[queue_id];
    if (msix_intr == AVF_MISC_VEC_ID) {
        PMD_DRV_LOG(ERR, "MISC is used for control, cannot disable it");
        return -EIO;
    }

    AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(msix_intr - AVF_RX_VEC_START), 0);

    AVF_WRITE_FLUSH(hw);
    return 0;
}

static int
avf_check_vf_reset_done(struct avf_hw *hw)
{
    int i, reset;

    for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
        reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
                AVFGEN_RSTAT_VFR_STATE_MASK;
        reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
        if (reset == VIRTCHNL_VFR_VFACTIVE ||
            reset == VIRTCHNL_VFR_COMPLETED)
            break;
        rte_delay_ms(20);
    }

    if (i >= AVF_RESET_WAIT_CNT)
        return -1;

    return 0;
}

static int
avf_init_vf(struct rte_eth_dev *dev)
{
    int i, err, bufsz;
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);

    err = avf_set_mac_type(hw);
    if (err) {
        PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
        goto err;
    }

    err = avf_check_vf_reset_done(hw);
    if (err) {
        PMD_INIT_LOG(ERR, "VF is still resetting");
        goto err;
    }

    avf_init_adminq_parameter(hw);
    err = avf_init_adminq(hw);
    if (err) {
        PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
        goto err;
    }

    vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
    if (!vf->aq_resp) {
        PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
        goto err_aq;
    }

    if (avf_check_api_version(adapter) != 0) {
        PMD_INIT_LOG(ERR, "check_api version failed");
        goto err_api;
    }

    bufsz = sizeof(struct virtchnl_vf_resource) +
            (AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
    vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
    if (!vf->vf_res) {
        PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
        goto err_api;
    }
    if (avf_get_vf_resource(adapter) != 0) {
        PMD_INIT_LOG(ERR, "avf_get_vf_resource failed");
        goto err_alloc;
    }

    /* Allocate memory for RSS info */
    if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
        vf->rss_key = rte_zmalloc("rss_key",
                                  vf->vf_res->rss_key_size, 0);
        if (!vf->rss_key) {
            PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
            goto err_rss;
        }
        vf->rss_lut = rte_zmalloc("rss_lut",
                                  vf->vf_res->rss_lut_size, 0);
        if (!vf->rss_lut) {
            PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
            goto err_rss;
        }
    }
    return 0;

err_rss:
    rte_free(vf->rss_key);
    rte_free(vf->rss_lut);
err_alloc:
    rte_free(vf->vf_res);
    vf->vsi_res = NULL;
err_api:
    rte_free(vf->aq_resp);
err_aq:
    avf_shutdown_adminq(hw);
err:
    return -1;
}
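/*
 * Both the enic and avf reta handlers above use the same idx/shift
 * convention: entry i lives in group i / RTE_RETA_GROUP_SIZE, selected by
 * bit i % RTE_RETA_GROUP_SIZE of that group's mask. A hedged caller-side
 * sketch that queries and prints the full table; the port id is
 * illustrative and reta_size is assumed to come from rte_eth_dev_info_get().
 */
static void dump_rss_reta(uint16_t port_id, uint16_t reta_size)
{
    struct rte_eth_rss_reta_entry64 reta_conf[(reta_size +
        RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE];
    uint16_t i;

    memset(reta_conf, 0, sizeof(reta_conf));
    for (i = 0; i < reta_size; i++) /* select every entry */
        reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
            1ULL << (i % RTE_RETA_GROUP_SIZE);

    if (rte_eth_dev_rss_reta_query(port_id, reta_conf, reta_size) != 0)
        return;

    for (i = 0; i < reta_size; i++)
        printf("reta[%u] -> queue %u\n", i,
               reta_conf[i / RTE_RETA_GROUP_SIZE]
                   .reta[i % RTE_RETA_GROUP_SIZE]);
}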