/*
 * bridge_table_lock - acquire the per-hash-slot spin lock that protects
 * bridge table modifications for 'mac' on interface 'vif'.
 *
 * The lock slot is picked by hashing the MAC address across vr_num_cpus
 * slots.  Acquisition is bounded in time: if the lock cannot be taken
 * within ~50us (measured with the monotonic clock), the attempt is
 * abandoned.
 *
 * Returns the slot index (>= 0) on success so the caller can unlock the
 * same slot later, or -EINVAL if no lock array exists or the wait timed
 * out.
 */
static int
bridge_table_lock(struct vr_interface *vif, uint8_t *mac)
{
    uint8_t lock = 1;
    uint32_t hash;
    unsigned long t1s, t1ns, t2s, t2ns, diff;

    if (!vif->vif_bridge_table_lock)
        return -EINVAL;

    hash = vr_hash(mac, VR_ETHER_ALEN, 0);
    hash %= vr_num_cpus;

    vr_get_mono_time(&t1s, &t1ns);
    while (lock) {
        lock = __sync_lock_test_and_set(&vif->vif_bridge_table_lock[hash],
                lock);
        if (lock) {
            vr_get_mono_time(&t2s, &t2ns);
            if (t2ns >= t1ns) {
                diff = t2ns - t1ns;
            } else {
                /*
                 * The nanosecond counter wrapped past a second boundary.
                 * A full second is 1000000000ns; the previous code used
                 * 999999999, under-counting the elapsed time by 1ns.
                 * (Seconds fields t1s/t2s are still ignored; a wait of
                 * more than one full second would be mis-measured, but
                 * the 50us bound makes that irrelevant in practice.)
                 */
                diff = 1000000000UL - t1ns + t2ns;
            }

            /* Bound the spin: give up after ~50us. */
            if (diff >= 50000) {
                return -EINVAL;
            }
        }
    }

    return hash;
}
/*
 * vr_htable_find_hentry - look up the entry whose key matches 'key'.
 *
 * If key_len is 0, the table's configured key size is used.  The bucket
 * the key hashes to is scanned first; if no slot matches, the overflow
 * chain hanging off the last slot of the bucket is walked.  Entries that
 * are not VR_HENTRY_FLAG_VALID (e.g. under deletion) are skipped.
 *
 * Returns the matching entry, or NULL if none is found.
 */
vr_hentry_t *
vr_htable_find_hentry(vr_htable_t htable, void *key, unsigned int key_len)
{
    unsigned int hash, tmp_hash, ind, i, ent_key_len;
    vr_hentry_t *ent, *o_ent;
    vr_hentry_key ent_key;
    struct vr_htable *table = (struct vr_htable *)htable;

    if (!table || !key)
        return NULL;

    if (!key_len) {
        key_len = table->ht_key_size;
        if (!key_len)
            return NULL;
    }

    ent = NULL;
    hash = vr_hash(key, key_len, 0);

    /* Scan the bucket the key hashes to. */
    tmp_hash = hash % table->ht_hentries;
    tmp_hash &= ~(table->ht_bucket_size - 1);
    for (i = 0; i < table->ht_bucket_size; i++) {
        ind = tmp_hash + i;
        ent = vr_btable_get(table->ht_htable, ind);
        /* Fix: tolerate a failed slot fetch instead of dereferencing NULL. */
        if (!ent)
            continue;

        if (!(ent->hentry_flags & VR_HENTRY_FLAG_VALID))
            continue;

        ent_key = table->ht_get_key(htable, ent, &ent_key_len);
        if (!ent_key || (key_len != ent_key_len))
            continue;

        if (memcmp(ent_key, key, key_len) == 0)
            return ent;
    }

    /*
     * Fix: the previous version dereferenced 'ent' unconditionally here.
     * 'ent' is NULL when the bucket size is zero or the last slot fetch
     * failed, so guard before walking the overflow chain.
     */
    if (!ent)
        return NULL;

    /* The overflow chain hangs off the last slot of the bucket. */
    for (o_ent = ent->hentry_next; o_ent; o_ent = o_ent->hentry_next) {
        /* Though in the list, can be under the deletion */
        if (!(o_ent->hentry_flags & VR_HENTRY_FLAG_VALID))
            continue;

        ent_key = table->ht_get_key(htable, o_ent, &ent_key_len);
        if (!ent_key || (key_len != ent_key_len))
            continue;

        if (memcmp(ent_key, key, key_len) == 0)
            return o_ent;
    }

    /* Entry not found */
    return NULL;
}
uint32_t __vr_fragment_get_hash(unsigned int vrf, uint32_t sip, uint32_t dip, struct vr_packet *pkt) { struct vr_fragment_key vfk; struct vr_ip *ip; ip = (struct vr_ip *)pkt_network_header(pkt); __fragment_key(&vfk, vrf, sip, dip, ip->ip_id); return vr_hash(&vfk, sizeof(vfk), 0); }
static struct vr_flow_entry * vr_find_free_entry(struct vrouter *router, struct vr_flow_key *key, unsigned int *fe_index) { unsigned int i, index, hash; struct vr_flow_entry *tmp_fe, *fe = NULL; *fe_index = 0; hash = vr_hash(key, sizeof(*key), 0); index = (hash % vr_flow_entries) & ~(VR_FLOW_ENTRIES_PER_BUCKET - 1); for (i = 0; i < VR_FLOW_ENTRIES_PER_BUCKET; i++) { tmp_fe = vr_flow_table_entry_get(router, index); if (tmp_fe && !(tmp_fe->fe_flags & VR_FLOW_FLAG_ACTIVE)) { if (vr_set_flow_active(tmp_fe)) { vr_init_flow_entry(tmp_fe); fe = tmp_fe; break; } } index++; } if (!fe) { index = hash % vr_oflow_entries; for (i = 0; i < vr_oflow_entries; i++) { tmp_fe = vr_oflow_table_entry_get(router, index); if (tmp_fe && !(tmp_fe->fe_flags & VR_FLOW_FLAG_ACTIVE)) { if (vr_set_flow_active(tmp_fe)) { vr_init_flow_entry(tmp_fe); fe = tmp_fe; break; } } index = (index + 1) % vr_oflow_entries; } if (fe) *fe_index += vr_flow_entries; } if (fe) { *fe_index += index; memcpy(&fe->fe_key, key, sizeof(*key)); } return fe; }
int vr_fragment_add(struct vrouter *router, unsigned short vrf, struct vr_ip *iph, unsigned short sport, unsigned short dport) { unsigned int hash, index, i; struct vr_fragment_key key; struct vr_fragment *fe; fragment_key(&key, vrf, iph); hash = vr_hash(&key, sizeof(key), 0); index = (hash % FRAG_TABLE_ENTRIES) * FRAG_TABLE_BUCKETS; for (i = 0; i < FRAG_TABLE_BUCKETS; i++) { fe = fragment_entry_get(router, index + i); if (fe && !fe->f_dip && fragment_entry_alloc(fe)) { fragment_entry_set(fe, vrf, iph, sport, dport); break; } else { fe = NULL; continue; } } if (!fe) { index = (hash % FRAG_OTABLE_ENTRIES); for (i = 0; i < FRAG_OTABLE_ENTRIES; i++) { fe = fragment_oentry_get(router, (index + i) % FRAG_OTABLE_ENTRIES); if (fe && !fe->f_dip && fragment_entry_alloc(fe)) { fragment_entry_set(fe, vrf, iph, sport, dport); break; } else { fe = NULL; continue; } } } if (!fe) return -ENOMEM; fe->f_received += (ntohs(iph->ip_len) - (iph->ip_hl * 4)); return 0; }
struct vr_flow_entry * vr_find_flow(struct vrouter *router, struct vr_flow *key, uint8_t type, unsigned int *fe_index) { unsigned int hash; struct vr_flow_entry *flow_e; hash = vr_hash(key, key->flow_key_len, 0); /* first look in the regular flow table */ flow_e = vr_flow_table_lookup(key, type, router->vr_flow_table, vr_flow_entries, VR_FLOW_ENTRIES_PER_BUCKET, hash, fe_index); /* if not in the regular flow table, lookup in the overflow flow table */ if (!flow_e) { flow_e = vr_flow_table_lookup(key, type, router->vr_oflow_table, vr_oflow_entries, 0, hash, fe_index); *fe_index += vr_flow_entries; } return flow_e; }
struct vr_fragment * vr_fragment_get(struct vrouter *router, unsigned short vrf, struct vr_ip *iph) { unsigned int hash, index, i; struct vr_fragment_key key; struct vr_fragment *fe; unsigned long sec, nsec; fragment_key(&key, vrf, iph); hash = vr_hash(&key, sizeof(key), 0); index = (hash % FRAG_TABLE_ENTRIES) * FRAG_TABLE_BUCKETS; for (i = 0; i < FRAG_TABLE_BUCKETS; i++) { fe = fragment_entry_get(router, index + i); if (fe && !memcmp((const void *)&key, (const void *)&(fe->f_key), sizeof(key))) break; } if (i == FRAG_TABLE_BUCKETS) { index = (hash % FRAG_OTABLE_ENTRIES); for (i = 0; i < FRAG_OTABLE_ENTRIES; i++) { fe = fragment_oentry_get(router, (index + i) % FRAG_OTABLE_ENTRIES); if (fe && !memcmp((const void *)&key, (const void *)&(fe->f_key), sizeof(key))) break; } if (i == FRAG_OTABLE_ENTRIES) fe = NULL; } if (fe) { vr_get_mono_time(&sec, &nsec); fe->f_time = sec; } return fe; }
/*
 * vr_find_free_entry - allocate an inactive flow entry for 'key'.
 *
 * The bucket the key hashes to in the main flow table is probed first;
 * if every slot is active, the whole overflow table is linearly probed
 * from the hash position.  A slot is claimed atomically with
 * vr_set_flow_active() (other CPUs may race for the same slot) and then
 * re-initialised.
 *
 * If need_hold is true, a hold queue is also allocated for packets that
 * arrive before the flow is resolved; on allocation failure the just-
 * claimed entry is released again and NULL is returned.
 *
 * On success, *fe_index receives the entry's global index (overflow
 * entries are offset by vr_flow_entries), fe_type is set and the key is
 * copied in.  Returns NULL when no free entry (or no hold-queue memory)
 * is available.
 */
static struct vr_flow_entry *
vr_find_free_entry(struct vrouter *router, struct vr_flow *key, uint8_t type,
        bool need_hold, unsigned int *fe_index)
{
    unsigned int i, index, hash;
    struct vr_flow_entry *tmp_fe, *fe = NULL;

    *fe_index = 0;

    hash = vr_hash(key, key->flow_key_len, 0);

    /* Probe the fixed-size bucket in the main flow table. */
    index = (hash % vr_flow_entries) & ~(VR_FLOW_ENTRIES_PER_BUCKET - 1);
    for (i = 0; i < VR_FLOW_ENTRIES_PER_BUCKET; i++) {
        tmp_fe = vr_flow_table_entry_get(router, index);
        if (tmp_fe && !(tmp_fe->fe_flags & VR_FLOW_FLAG_ACTIVE)) {
            /* Atomic claim; losing the race just moves to the next slot. */
            if (vr_set_flow_active(tmp_fe)) {
                vr_init_flow_entry(tmp_fe);
                fe = tmp_fe;
                break;
            }
        }
        index++;
    }

    if (!fe) {
        /* Main bucket full: linear probe of the whole overflow table. */
        index = hash % vr_oflow_entries;
        for (i = 0; i < vr_oflow_entries; i++) {
            tmp_fe = vr_oflow_table_entry_get(router, index);
            if (tmp_fe && !(tmp_fe->fe_flags & VR_FLOW_FLAG_ACTIVE)) {
                if (vr_set_flow_active(tmp_fe)) {
                    vr_init_flow_entry(tmp_fe);
                    fe = tmp_fe;
                    break;
                }
            }
            index = (index + 1) % vr_oflow_entries;
        }

        /* Overflow indices are reported offset by the main table size. */
        if (fe)
            *fe_index += vr_flow_entries;
    }

    if (fe) {
        *fe_index += index;
        if (need_hold) {
            fe->fe_hold_list = vr_zalloc(sizeof(struct vr_flow_queue));
            if (!fe->fe_hold_list) {
                /* No memory for the hold queue: release the entry again. */
                vr_reset_flow_entry(router, fe, *fe_index);
                fe = NULL;
            } else {
                fe->fe_hold_list->vfq_index = *fe_index;
            }
        }

        if (fe) {
            fe->fe_type = type;
            /*
             * flow_key_len is set first, then the memcpy of flow_key_len
             * bytes (which includes the length field itself) rewrites it
             * with the same value.
             */
            fe->fe_key.flow_key_len = key->flow_key_len;
            memcpy(&fe->fe_key, key, key->flow_key_len);
        }
    }

    return fe;
}
/*
 * vr_htable_find_duplicate_hentry_index - search the table for an entry
 * (other than 'hentry' itself) whose key equals hentry's key.
 *
 * The bucket hentry's key hashes to is scanned first, then the complete
 * overflow table starting from the hash offset.  Entries with an invalid
 * index are skipped.
 *
 * Returns the global index of the duplicate entry, or -1 if none exists.
 *
 * Fixes over the previous version:
 *  - candidate keys were compared against the 'hentry' struct pointer
 *    itself (memcmp(hkey, hentry, ...)) because the variable holding
 *    hentry's key had been overwritten inside the loop; keys are now
 *    kept in separate variables and compared key-to-key;
 *  - the overflow-loop comparison used table->ht_key_size while the
 *    length filter used key_len, so the two could disagree;
 *  - the overflow scan is skipped entirely when the table has no
 *    overflow entries (avoids a modulo by zero);
 *  - vr_btable_get() results are NULL-checked.
 */
int
vr_htable_find_duplicate_hentry_index(vr_htable_t htable, vr_hentry_t *hentry)
{
    unsigned int hash, tmp_hash, ind, i, key_len, ent_key_len;
    vr_hentry_t *ent;
    vr_hentry_key hkey, ent_key;
    struct vr_htable *table = (struct vr_htable *)htable;

    if (!table || !hentry)
        return -1;

    /* Key of the entry whose duplicate we are looking for. */
    hkey = table->ht_get_key(htable, hentry, &key_len);
    if (!hkey)
        return -1;

    if (!key_len) {
        key_len = table->ht_key_size;
        if (!key_len)
            return -1;
    }

    hash = vr_hash(hkey, key_len, 0);

    /* Look into the hash table bucket selected by the hash. */
    tmp_hash = hash % table->ht_hentries;
    tmp_hash &= ~(table->ht_bucket_size - 1);
    for (i = 0; i < table->ht_bucket_size; i++) {
        ind = tmp_hash + i;
        ent = vr_btable_get(table->ht_htable, ind);
        if (!ent || (ent->hentry_index == VR_INVALID_HENTRY_INDEX))
            continue;
        if (ent == hentry)
            continue;

        ent_key = table->ht_get_key(htable, ent, &ent_key_len);
        if (!ent_key || (ent_key_len != key_len))
            continue;
        if (memcmp(ent_key, hkey, key_len) != 0)
            continue;

        return ind;
    }

    /* Scan the complete overflow table starting from the hash offset. */
    if (table->ht_oentries) {
        tmp_hash = hash % table->ht_oentries;
        for (i = 0; i < table->ht_oentries; i++) {
            ind = table->ht_hentries + ((tmp_hash + i) % table->ht_oentries);
            ent = vr_btable_get(table->ht_otable,
                    ((tmp_hash + i) % table->ht_oentries));
            if (!ent || (ent->hentry_index == VR_INVALID_HENTRY_INDEX))
                continue;
            if (ent == hentry)
                continue;

            ent_key = table->ht_get_key(htable, ent, &ent_key_len);
            if (!ent_key || (ent_key_len != key_len))
                continue;
            if (memcmp(ent_key, hkey, key_len) != 0)
                continue;

            return ind;
        }
    }

    /* No duplicate entry is found */
    return -1;
}
/*
 * vr_htable_find_free_hentry - claim a free entry for 'key'.
 *
 * The bucket the key hashes to is scanned for an invalid (free) slot,
 * which is claimed lock-free with a compare-and-swap on hentry_flags.
 * If the bucket is full, a free overflow entry is taken and linked
 * (again via CAS) at the head of the overflow chain hanging off the
 * bucket's last slot.
 *
 * Returns the claimed entry (flags set VALID, used-entry count bumped),
 * or NULL when neither a bucket slot nor an overflow entry is available.
 */
vr_hentry_t *
vr_htable_find_free_hentry(vr_htable_t htable, void *key, unsigned int key_size)
{
    unsigned int hash, tmp_hash, i;
    struct vr_htable *table = (struct vr_htable *)htable;
    vr_hentry_t *ent, *o_ent;
    int ind, bucket_index;

    if (!table || !key)
        return NULL;

    if (!key_size) {
        key_size = table->ht_key_size;
        if (!key_size)
            return NULL;
    }

    hash = vr_hash(key, key_size, 0);

    /* Index of the first slot of the bucket this key hashes to. */
    tmp_hash = hash % table->ht_hentries;
    tmp_hash &= ~(table->ht_bucket_size - 1);
    ind = 0;
    ent = NULL;
    for (i = 0; i < table->ht_bucket_size; i++) {
        ind = tmp_hash + i;
        ent = vr_btable_get(table->ht_htable, ind);
        if (!(ent->hentry_flags & VR_HENTRY_FLAG_VALID)) {
            /*
             * Atomically flip the slot from not-VALID to VALID; losing
             * the race to another CPU just moves on to the next slot.
             */
            if (__sync_bool_compare_and_swap(&ent->hentry_flags,
                        (ent->hentry_flags & ~VR_HENTRY_FLAG_VALID),
                        VR_HENTRY_FLAG_VALID)) {
                ent->hentry_bucket_index = VR_INVALID_HENTRY_INDEX;
                (void)__sync_add_and_fetch(&table->ht_used_entries, 1);
                return ent;
            }
        }
    }

    /*
     * Bucket is full: 'ent' is the bucket's last slot and serves as the
     * head of its overflow chain.
     * NOTE(review): if ht_bucket_size were 0, 'ent' would still be NULL
     * here and the dereferences below would crash — presumably the
     * bucket size is always non-zero; confirm at table creation.
     */
    bucket_index = ind;

    if (table->ht_oentries) {
        o_ent = vr_htable_get_free_oentry(table);
        if (!o_ent) {
            return NULL;
        }

        o_ent->hentry_bucket_index = bucket_index;
        o_ent->hentry_next_index = VR_INVALID_HENTRY_INDEX;
        o_ent->hentry_flags = VR_HENTRY_FLAG_VALID;

        /* Link the overflow entry at the start */
        do {
            o_ent->hentry_next = ent->hentry_next;
            /* Update the next entry's index in o_ent */
            if (o_ent->hentry_next)
                o_ent->hentry_next_index = o_ent->hentry_next->hentry_index;
            /* Retry until the chain head swap succeeds unraced. */
            if (__sync_bool_compare_and_swap(&ent->hentry_next,
                        o_ent->hentry_next, o_ent)) {
                /*
                 * ent->hentry_next need not be o_ent for the below
                 * statement, if some new entry is inserted after 'ent'.
                 * So updating hentry_next_index by taking hentry_next
                 * pointer should still do the right thing
                 */
                ent->hentry_next_index = ent->hentry_next->hentry_index;
                (void)__sync_add_and_fetch(&table->ht_used_entries, 1);
                return o_ent;
            }
        } while (1);
    }

    return NULL;
}