/*
 * ovs_flow_tbl_next - resumable iteration over every flow in the table.
 * @bucket/@last form a cursor: the current bucket index and the count of
 * entries already consumed within that bucket.  Returns the next flow, or
 * NULL once the table is exhausted.  Caller must hold RCU read lock
 * (traversal uses hlist_for_each_entry_rcu on the node_ver chain).
 */
struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;
	int ver = table->node_ver;
	int pos;

	/* Advance to the next bucket (resetting the in-bucket cursor) every
	 * time the inner walk finishes without yielding a flow.
	 */
	for (; *bucket < table->n_buckets; (*bucket)++, *last = 0) {
		pos = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
			/* Skip the entries handed out on previous calls. */
			if (pos++ < *last)
				continue;
			*last = pos;
			return flow;
		}
	}

	return NULL;
}
/**
 * Return how many subpackets are actually defined for register @reg.
 * This differs from nr_subpackets, which is the maximum number of
 * subpackets the descriptor could define.  Returns 0 when the register
 * has no descriptor entry.
 */
static int rmi_register_subpackets(struct rmi_reg_descriptor *desc, int reg)
{
	struct rmi_register_desc *rdesc = flex_array_get(desc->structure, reg);

	if (!rdesc)
		return 0;

	/* Count the set bits in the subpacket presence bitmap. */
	return bitmap_weight(rdesc->subpackets, rdesc->nr_subpackets);
}
/**
 * Test whether register @reg defines subpacket @sp.
 * Returns false when the register has no descriptor entry or @sp is
 * beyond the descriptor's subpacket range.
 */
static bool rmi_register_has_subpacket(struct rmi_reg_descriptor *desc, int reg, int sp)
{
	struct rmi_register_desc *rdesc = flex_array_get(desc->structure, reg);

	if (!rdesc || sp >= rdesc->nr_subpackets)
		return false;

	return test_bit(sp, rdesc->subpackets);
}
/**
 * Return the offset of register @reg within the register block, or 0
 * when the register has no descriptor entry.
 */
static int rmi_register_offset(struct rmi_reg_descriptor *desc, int reg)
{
	struct rmi_register_desc *rdesc = flex_array_get(desc->structure, reg);

	if (!rdesc)
		return 0;

	return rdesc->offset;
}
/**
 * flex_array_get_ptr - pull a ptr back out of the array
 * @fa: the flex array from which to extract data
 * @element_nr: index of the element to fetch from the array
 *
 * Returns the pointer stored at @element_nr by flex_array_put_ptr(), or
 * NULL when that slot's part has not been allocated.  Must not be called
 * for elements that were not stored via the _put_ptr() helper.
 */
void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
{
	void **slot = flex_array_get(fa, element_nr);

	return slot ? *slot : NULL;
}
/*
 * Zero @count consecutive elements of @fa starting at @index.
 * Assumes every touched element's part is already allocated
 * (flex_array_get must not return NULL here) — TODO confirm against callers.
 */
static void fa_zero(struct flex_array *fa, size_t index, size_t count)
{
	for (; count > 0; count--, index++) {
		void *slot = flex_array_get(fa, index);

		memset(slot, 0, fa->element_size);
	}
}
/*
 * Copy @count elements, starting at @index, from @from into @fa at the
 * same indices.  The flex_array_put() return value is ignored, matching
 * the caller's expectation that the destination is preallocated —
 * NOTE(review): confirm destination parts are preallocated.
 */
static void fa_copy(struct flex_array *fa, struct flex_array *from,
		    size_t index, size_t count)
{
	for (; count > 0; count--, index++)
		flex_array_put(fa, index, flex_array_get(from, index), 0);
}
/*
 * cgroup_taskset_next - advance the taskset cursor and return the next task.
 * Returns NULL when the set has no backing array or the cursor has reached
 * the end.  As a side effect, records the task's cgroup in tset->cur_cgrp.
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct task_and_cgroup *tc;

	if (!tset->tc_array)
		return NULL;
	if (tset->idx >= tset->tc_array_len)
		return NULL;

	tc = flex_array_get(tset->tc_array, tset->idx++);
	tset->cur_cgrp = tc->cgrp;
	return tc->task;
}
/*
 * ovs_flow_tbl_destroy - unlink and free every flow in the table.
 * Walks each bucket's hlist with the _safe iterator (entries are freed
 * while walking), removing each flow from its chain before flow_free().
 *
 * NOTE(review): this snippet is truncated — the function's closing brace
 * and the tail that presumably frees the bucket array/table itself are
 * not visible here; do not treat this as the complete function.
 */
void ovs_flow_tbl_destroy(struct flow_table *table) { int i; if (!table) return; for (i = 0; i < table->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head = flex_array_get(table->buckets, i); struct hlist_node *node, *n; hlist_for_each_entry_safe(flow, node, n, head, hash_node) { hlist_del_init_rcu(&flow->hash_node); flow_free(flow); } }
/*
 * __table_instance_destroy - free all flows held by a table instance.
 * Skips the per-flow teardown entirely when keep_flows is set (flows are
 * owned elsewhere, e.g. after a rehash).  Uses the node_ver-indexed hash
 * node so only this instance's chains are unlinked.
 *
 * NOTE(review): truncated snippet — the skip_flows: label targeted by the
 * goto and the function's closing brace lie beyond what is visible here.
 */
static void __table_instance_destroy(struct table_instance *ti) { int i; if (ti->keep_flows) goto skip_flows; for (i = 0; i < ti->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head = flex_array_get(ti->buckets, i); struct hlist_node *n; int ver = ti->node_ver; hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { hlist_del(&flow->hash_node[ver]); ovs_flow_free(flow, false); } }
/*
 * ovs_flow_tbl_destroy (versioned-node variant) - unlink and free every
 * flow unless keep_flows is set.  RCU-safe unlink (hlist_del_rcu) on the
 * node_ver-indexed chain, then ovs_flow_free().
 *
 * NOTE(review): truncated snippet — the skip_flows: label and the
 * function's closing brace (and any table/bucket freeing) are not
 * visible here.
 */
void ovs_flow_tbl_destroy(struct flow_table *table) { int i; if (!table) return; if (table->keep_flows) goto skip_flows; for (i = 0; i < table->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head = flex_array_get(table->buckets, i); struct hlist_node *node, *n; int ver = table->node_ver; hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) { hlist_del_rcu(&flow->hash_node[ver]); ovs_flow_free(flow); } }
/*
 * table_instance_destroy - free every flow in a table instance, honoring
 * keep_flows, and propagate @deferred so each flow can be freed via RCU
 * callback rather than immediately.
 *
 * NOTE(review): truncated snippet — the skip_flows: label and the
 * function's closing brace are beyond the visible text.
 */
static void table_instance_destroy(struct table_instance *ti, bool deferred) { int i; if (!ti) return; if (ti->keep_flows) goto skip_flows; for (i = 0; i < ti->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head = flex_array_get(ti->buckets, i); struct hlist_node *n; int ver = ti->node_ver; hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { hlist_del_rcu(&flow->hash_node[ver]); vxbox_flow_free(flow, deferred); } }
static struct flex_array *alloc_buckets(unsigned int n_buckets) { struct flex_array *buckets; int i, err; buckets = flex_array_alloc(sizeof(struct hlist_head *), n_buckets, GFP_KERNEL); if (!buckets) return NULL; err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL); if (err) { flex_array_free(buckets); return NULL; } for (i = 0; i < n_buckets; i++) INIT_HLIST_HEAD((struct hlist_head *) flex_array_get(buckets, i)); return buckets; }
/*
 * Map a flow hash to its bucket head.  The hash is re-mixed with the
 * table's per-table seed before masking, so n_buckets must be a power
 * of two for the mask to select a valid index.
 */
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	u32 mixed = jhash_1word(hash, table->hash_seed);
	u32 index = mixed & (table->n_buckets - 1);

	return flex_array_get(table->buckets, index);
}
/*
 * Map a flow hash directly to its bucket head (no per-table seed mixing
 * in this variant).  Relies on n_buckets being a power of two.
 */
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	u32 index = hash & (table->n_buckets - 1);

	return flex_array_get(table->buckets, index);
}
/*
 * Map a flow hash to its bucket head within a table instance, re-mixing
 * with the instance's seed first.  n_buckets must be a power of two.
 */
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	u32 mixed = jhash_1word(hash, ti->hash_seed);
	u32 index = mixed & (ti->n_buckets - 1);

	return flex_array_get(ti->buckets, index);
}