static int
timed_deletes(unsigned with_hash, unsigned with_data, unsigned table_index)
{
    unsigned i;
    const uint64_t start_tsc = rte_rdtsc();
    int32_t ret;

    for (i = 0; i < KEYS_TO_ADD; i++) {
        /* There are no delete functions with data, so just call two functions */
        if (with_hash)
            ret = rte_hash_del_key_with_hash(h[table_index],
                            (const void *) keys[i],
                            signatures[i]);
        else
            ret = rte_hash_del_key(h[table_index],
                            (const void *) keys[i]);
        if (ret >= 0)
            positions[i] = ret;
        else {
            printf("Failed to delete key number %u\n", i);
            return -1;
        }
    }

    const uint64_t end_tsc = rte_rdtsc();
    const uint64_t time_taken = end_tsc - start_tsc;

    cycles[table_index][DELETE][with_hash][with_data] = time_taken / KEYS_TO_ADD;

    return 0;
}
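For context, here is a minimal self-contained sketch of the two delete paths the benchmark times (the table name, sizes, and helper are hypothetical, and the EAL is assumed to be initialized): rte_hash_del_key() hashes the key internally, while rte_hash_del_key_with_hash() reuses a signature precomputed with rte_hash_hash(). Both return the position the key occupied on success, or a negative errno on failure.

#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_lcore.h>

static int example_delete_paths(void)
{
    struct rte_hash_parameters params = {
        .name = "example_tbl",            /* hypothetical table name */
        .entries = 1024,
        .key_len = sizeof(uint32_t),
        .hash_func = rte_jhash,
        .socket_id = (int)rte_socket_id(),
    };
    struct rte_hash *tbl = rte_hash_create(&params);
    uint32_t key = 42;
    int32_t ret = -1;

    if (tbl == NULL)
        return -1;

    if (rte_hash_add_key(tbl, &key) >= 0)
        /* Path 1: the table hashes the key itself. */
        ret = rte_hash_del_key(tbl, &key);

    if (ret >= 0 && rte_hash_add_key(tbl, &key) >= 0) {
        /* Path 2: precompute the signature once and reuse it; this is
         * what the with_hash branch above measures. */
        hash_sig_t sig = rte_hash_hash(tbl, &key);
        ret = rte_hash_del_key_with_hash(tbl, &key, sig);
    }

    rte_hash_free(tbl);
    return ret < 0 ? -1 : 0;
}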
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
    int32_t pos;
    struct enic_fdir_node *key;

    /* See if the key is in the table */
    pos = rte_hash_del_key(enic->fdir.hash, params);
    switch (pos) {
    case -EINVAL:
    case -ENOENT:
        enic->fdir.stats.f_remove++;
        return -EINVAL;
    default:
        /* The entry is present in the table */
        key = enic->fdir.nodes[pos];

        /* Delete the filter */
        vnic_dev_classifier(enic->vdev, CLSF_DEL, &key->fltr_id, NULL);
        rte_free(key);
        enic->fdir.nodes[pos] = NULL;
        enic->fdir.stats.free++;
        enic->fdir.stats.remove++;
        break;
    }
    return 0;
}
int sflow_socket_delete(sflow_key_t *key, bool is_locked)
{
    sflow_key_dump("delete socket for key", key);

    if (likely(!is_locked))
        rte_rwlock_write_lock(&sflow_hash_lock);

    int32_t socket_id = rte_hash_del_key(sflow_hash, key);
    sflow_socket_t *socket = socket_id < 0 ? NULL : sflow_sockets[socket_id];
    if (socket)
        /* Clear the slot while the write lock is still held, so a
         * concurrent add cannot reuse this position and then have its
         * pointer overwritten with NULL. */
        sflow_sockets[socket_id] = NULL;

    if (likely(!is_locked))
        rte_rwlock_write_unlock(&sflow_hash_lock);

    if (!socket)
        return -1;

    je_free(socket);
    return 0;
}
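The enic and sflow examples share one idiom worth calling out: the position returned by rte_hash_add_key() indexes a parallel application array of per-entry state, and rte_hash_del_key() returns that same position so the state can be recovered and released. A minimal sketch of the pattern (the shadow array and helper names are hypothetical):

#include <errno.h>
#include <rte_hash.h>
#include <rte_malloc.h>

#define MAX_ENTRIES 1024
static void *shadow[MAX_ENTRIES];   /* per-position application state */

static int shadow_delete(struct rte_hash *tbl, const void *key)
{
    /* On success the return value is the position the key occupied,
     * which is also the index of its state in the shadow array. */
    int32_t pos = rte_hash_del_key(tbl, key);

    if (pos < 0)
        return pos;                 /* -EINVAL or -ENOENT */
    rte_free(shadow[pos]);
    shadow[pos] = NULL;
    return 0;
}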
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
    struct enic_fdir_node *key;
    struct filter_v2 fltr;
    int32_t pos;
    u8 do_free = 0;
    u16 old_fltr_id = 0;
    u32 flowtype_supported;
    u16 flex_bytes;
    u16 queue;
    struct filter_action_v2 action;

    memset(&fltr, 0, sizeof(fltr));
    memset(&action, 0, sizeof(action));
    flowtype_supported = enic->fdir.types_mask
                         & (1 << params->input.flow_type);

    flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
        (params->input.flow_ext.flexbytes[0] & 0xFF));

    if (!enic->fdir.hash ||
        (params->input.flow_ext.vlan_tci & 0xFFF) ||
        !flowtype_supported || flex_bytes ||
        params->action.behavior /* drop */) {
        enic->fdir.stats.f_add++;
        return -ENOTSUP;
    }

    /* Get the enicpmd RQ from the DPDK Rx queue */
    queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);
    if (!enic->rq[queue].in_use)
        return -EINVAL;

    /* See if the key is already there in the table */
    pos = rte_hash_del_key(enic->fdir.hash, params);
    switch (pos) {
    case -EINVAL:
        enic->fdir.stats.f_add++;
        return -EINVAL;
    case -ENOENT:
        /* Add a new classifier entry */
        if (!enic->fdir.stats.free) {
            enic->fdir.stats.f_add++;
            return -ENOSPC;
        }
        key = rte_zmalloc("enic_fdir_node",
                  sizeof(struct enic_fdir_node), 0);
        if (!key) {
            enic->fdir.stats.f_add++;
            return -ENOMEM;
        }
        break;
    default:
        /* The entry is already present in the table.
         * Check if there is a change in queue
         */
        key = enic->fdir.nodes[pos];
        enic->fdir.nodes[pos] = NULL;
        if (unlikely(key->rq_index == queue)) {
            /* Nothing to be done */
            enic->fdir.stats.f_add++;
            pos = rte_hash_add_key(enic->fdir.hash, params);
            if (pos < 0) {
                dev_err(enic, "Add hash key failed\n");
                return pos;
            }
            enic->fdir.nodes[pos] = key;
            dev_warning(enic, "FDIR rule is already present\n");
            return 0;
        }
        if (likely(enic->fdir.stats.free)) {
            /* Add the filter and then delete the old one.
             * This is to avoid packets from going into the
             * default queue during the window between
             * delete and add
             */
            do_free = 1;
            old_fltr_id = key->fltr_id;
        } else {
            /* No free slots in the classifier.
             * Delete the filter and add the modified one later
             */
            vnic_dev_classifier(enic->vdev, CLSF_DEL,
                        &key->fltr_id, NULL, NULL);
            enic->fdir.stats.free++;
        }
        break;
    }

    key->filter = *params;
    key->rq_index = queue;

    enic->fdir.copy_fltr_fn(&fltr, &params->input,
                &enic->rte_dev->data->dev_conf.fdir_conf.mask);
    action.type = FILTER_ACTION_RQ_STEERING;
    action.rq_idx = queue;

    if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
                 &action)) {
        key->fltr_id = queue;
    } else {
        dev_err(enic, "Add classifier entry failed\n");
        enic->fdir.stats.f_add++;
        rte_free(key);
        return -1;
    }

    if (do_free)
        vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id,
                    NULL, NULL);
    else {
        enic->fdir.stats.free--;
        enic->fdir.stats.add++;
    }

    pos = rte_hash_add_key(enic->fdir.hash, params);
    if (pos < 0) {
        enic->fdir.stats.f_add++;
        dev_err(enic, "Add hash key failed\n");
        return pos;
    }
    enic->fdir.nodes[pos] = key;
    return 0;
}
int32_t
onvm_ft_remove_key(struct onvm_ft *table, struct onvm_ft_ipv4_5tuple *key)
{
    return rte_hash_del_key(table->hash, (const void *)key);
}
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
    struct enic_fdir_node *key;
    struct filter fltr = {0};
    int32_t pos;
    u8 do_free = 0;
    u16 old_fltr_id = 0;
    u32 flowtype_supported;
    u16 flex_bytes;
    u16 queue;

    flowtype_supported = (
        (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
        (RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));

    flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
        (params->input.flow_ext.flexbytes[0] & 0xFF));

    if (!enic->fdir.hash ||
        (params->input.flow_ext.vlan_tci & 0xFFF) ||
        !flowtype_supported || flex_bytes ||
        params->action.behavior /* drop */) {
        enic->fdir.stats.f_add++;
        return -ENOTSUP;
    }

    queue = params->action.rx_queue;

    /* See if the key is already there in the table */
    pos = rte_hash_del_key(enic->fdir.hash, params);
    switch (pos) {
    case -EINVAL:
        enic->fdir.stats.f_add++;
        return -EINVAL;
    case -ENOENT:
        /* Add a new classifier entry */
        if (!enic->fdir.stats.free) {
            enic->fdir.stats.f_add++;
            return -ENOSPC;
        }
        key = rte_zmalloc("enic_fdir_node",
                  sizeof(struct enic_fdir_node), 0);
        if (!key) {
            enic->fdir.stats.f_add++;
            return -ENOMEM;
        }
        break;
    default:
        /* The entry is already present in the table.
         * Check if there is a change in queue
         */
        key = enic->fdir.nodes[pos];
        enic->fdir.nodes[pos] = NULL;
        if (unlikely(key->rq_index == queue)) {
            /* Nothing to be done */
            enic->fdir.stats.f_add++;
            pos = rte_hash_add_key(enic->fdir.hash, params);
            if (pos < 0) {
                dev_err(enic, "Add hash key failed\n");
                return pos;
            }
            enic->fdir.nodes[pos] = key;
            dev_warning(enic, "FDIR rule is already present\n");
            return 0;
        }
        if (likely(enic->fdir.stats.free)) {
            /* Add the filter and then delete the old one.
             * This is to avoid packets from going into the
             * default queue during the window between
             * delete and add
             */
            do_free = 1;
            old_fltr_id = key->fltr_id;
        } else {
            /* No free slots in the classifier.
             * Delete the filter and add the modified one later
             */
            vnic_dev_classifier(enic->vdev, CLSF_DEL,
                        &key->fltr_id, NULL);
            enic->fdir.stats.free++;
        }
        break;
    }

    key->filter = *params;
    key->rq_index = queue;

    fltr.type = FILTER_IPV4_5TUPLE;
    fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
        params->input.flow.ip4_flow.src_ip);
    fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
        params->input.flow.ip4_flow.dst_ip);
    fltr.u.ipv4.src_port = rte_be_to_cpu_16(
        params->input.flow.udp4_flow.src_port);
    fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
        params->input.flow.udp4_flow.dst_port);

    if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
        fltr.u.ipv4.protocol = PROTO_TCP;
    else
        fltr.u.ipv4.protocol = PROTO_UDP;

    fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

    if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
        key->fltr_id = queue;
    } else {
        dev_err(enic, "Add classifier entry failed\n");
        enic->fdir.stats.f_add++;
        rte_free(key);
        return -1;
    }

    if (do_free)
        vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
    else {
        enic->fdir.stats.free--;
        enic->fdir.stats.add++;
    }

    pos = rte_hash_add_key(enic->fdir.hash, params);
    if (pos < 0) {
        dev_err(enic, "Add hash key failed\n");
        return pos;
    }
    enic->fdir.nodes[pos] = key;
    return 0;
}
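Both versions of enic_fdir_add_fltr() use rte_hash_del_key() as a combined existence probe and removal: -ENOENT means a brand-new entry, while a valid position identifies the node being updated. Stripped of the classifier bookkeeping, the upsert skeleton looks roughly like this (a sketch only; the helper and shadow array are hypothetical, same idiom as above):

#include <errno.h>
#include <rte_hash.h>
#include <rte_malloc.h>

#define MAX_ENTRIES 1024
static void *shadow[MAX_ENTRIES];   /* per-position application state */

static int upsert(struct rte_hash *tbl, const void *key, void *new_state)
{
    /* Probe by deleting: an existing key is removed and its position
     * returned; a missing key yields -ENOENT. */
    int32_t pos = rte_hash_del_key(tbl, key);

    if (pos >= 0) {
        rte_free(shadow[pos]);      /* drop the superseded state */
        shadow[pos] = NULL;
    } else if (pos != -ENOENT) {
        return pos;                 /* bad key or table */
    }

    pos = rte_hash_add_key(tbl, key);   /* (re)insert the key */
    if (pos < 0)
        return pos;                 /* e.g. -ENOSPC when full */
    shadow[pos] = new_state;
    return 0;
}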