/*
 * Unlink the overflow entry 'ent' from its bucket's chain and hand it to
 * vr_htable_oentry_invalidate() for the actual invalidation. Entries that
 * already sit on the free list, or that belong to the main table
 * (index < ht_hentries), are left untouched.
 */
static void
__vr_htable_oentry_invalidate(struct vr_htable *table, vr_hentry_t *ent)
{
    vr_hentry_t *cur, *bucket_head;

    if (!table || !ent)
        return;

    if (ent->hentry_flags & VR_HENTRY_FLAG_IN_FREE_LIST)
        return;

    /* Only overflow entries live on bucket chains */
    if (ent->hentry_index < table->ht_hentries)
        return;

    bucket_head = __vr_htable_get_hentry_by_index((vr_htable_t)table,
            ent->hentry_bucket_index);

    /* Walk the chain hanging off the bucket head and unlink 'ent' */
    for (cur = bucket_head; cur; cur = cur->hentry_next) {
        if (cur->hentry_next != ent)
            continue;

        cur->hentry_next = ent->hentry_next;
        cur->hentry_next_index = cur->hentry_next ?
            cur->hentry_next->hentry_index : VR_INVALID_HENTRY_INDEX;
        break;
    }

    vr_htable_oentry_invalidate(table, ent);

    return;
}
/*
 * Returns the hash entry at the given index, but only when that entry is
 * currently marked valid; NULL otherwise.
 */
vr_hentry_t *
vr_htable_get_hentry_by_index(vr_htable_t htable, unsigned int index)
{
    vr_hentry_t *entry = __vr_htable_get_hentry_by_index(htable, index);

    if (!entry)
        return NULL;

    return (entry->hentry_flags & VR_HENTRY_FLAG_VALID) ? entry : NULL;
}
/*
 * Releases a hash entry back to the table.
 *
 * Clears the VALID flag and decrements the table's in-use counter. Main
 * table entries (index < ht_hentries) need no further work. Overflow
 * entries are delete-marked, counted in the per-bucket delete_data, and a
 * deferred chain cleanup (vr_htable_hentry_scheduled_delete) is scheduled
 * for the bucket unless one is already pending.
 *
 * NOTE(review): delete_data from vr_btable_get() is used without a NULL
 * check -- presumably ht_dtable is sized to cover every bucket; confirm
 * against table creation.
 */
void
vr_htable_release_hentry(vr_htable_t htable, vr_hentry_t *ent)
{
    unsigned int cpu_num, delete_index;
    struct vr_hentry_delete_data *delete_data;
    vr_hentry_t *head_ent;
    struct vr_htable *table = (struct vr_htable *)htable;

    /* Nothing to do if the entry is not currently valid */
    if (!(ent->hentry_flags & VR_HENTRY_FLAG_VALID))
        return;

    (void)__sync_sub_and_fetch(&table->ht_used_entries, 1);

    /* Mark it as Invalid */
    ent->hentry_flags &= ~VR_HENTRY_FLAG_VALID;

    /* Main-table entries stay in place; only overflow entries are freed */
    if (ent->hentry_index < table->ht_hentries)
        return;

    /* Module not ready: no deferred work can be scheduled */
    if (vr_not_ready)
        return;

    ent->hentry_flags |= VR_HENTRY_FLAG_DELETE_MARKED;

    head_ent = __vr_htable_get_hentry_by_index(htable, ent->hentry_bucket_index);

    /* One delete_data slot per bucket */
    delete_index = head_ent->hentry_index / table->ht_bucket_size;
    delete_data = vr_btable_get(table->ht_dtable, delete_index);

    /* Count this entry as pending deletion for the bucket */
    (void)__sync_add_and_fetch(&delete_data->hd_count, 1);

    /* Schedule the deletion only if it is not already scheduled */
    if (__sync_bool_compare_and_swap(&delete_data->hd_scheduled, 0, 1)) {

        delete_data->hd_table = (struct vr_htable *)htable;
        delete_data->hd_index = head_ent->hentry_index;

        /* Schedule the deletion on a cpu based on bucket index */
        cpu_num = head_ent->hentry_index % vr_num_cpus;

        if (vr_schedule_work(cpu_num, vr_htable_hentry_scheduled_delete,
                    (void *)delete_data)) {
            /*
             * We can only write back the status as not scheduled. There
             * might be some entries that get marked as Deleted, but
             * would not be pushed to free list as work queue is not
             * scheduled. These marked entries would be deleted only if
             * this hash bucket is revisited
             */
            (void)__sync_bool_compare_and_swap(&delete_data->hd_scheduled, 1, 0);
        }
    }

    return;
}
static void vr_htable_hentry_defer_delete(struct vrouter *router, void *arg) { vr_hentry_t *ent; struct vr_hentry_delete_data *defer_data; struct vr_htable *table; defer_data = (struct vr_hentry_delete_data *)arg; table = (struct vr_htable *)(defer_data->hd_table); ent = __vr_htable_get_hentry_by_index((vr_htable_t)table, defer_data->hd_index); vr_htable_oentry_invalidate(table, ent); return; }
/*
 * Resets the complete hash table: invokes 'cb' on every slot (main table
 * and overflow pool), clears VALID flags while adjusting the used-entries
 * counter, and tears down every bucket's overflow chain, pushing the
 * chained entries through vr_htable_oentry_invalidate().
 *
 * NOTE(review): cb is called on every slot, valid or not; the callback is
 * expected to cope with invalid entries.
 */
void
vr_htable_reset(vr_htable_t htable, htable_trav_cb cb, void *data)
{
    unsigned int i;
    vr_hentry_t *ent, *next;
    struct vr_htable *table = (struct vr_htable *)htable;

    if (!table || !cb)
        return;

    /* Walk every slot: indices < ht_hentries are the main table, the
     * remainder is the overflow pool */
    for (i = 0; i < table->ht_hentries + table->ht_oentries; i++) {
        ent = __vr_htable_get_hentry_by_index(htable, i);
        cb(htable, ent, i, data);
        if (ent->hentry_flags & VR_HENTRY_FLAG_VALID) {
            ent->hentry_flags &= ~VR_HENTRY_FLAG_VALID;
            (void)__sync_sub_and_fetch(&table->ht_used_entries, 1);
        }

        /* For main-table heads, detach and dismantle the whole chain */
        if ((i < table->ht_hentries) && ent->hentry_next) {
            next = ent->hentry_next;
            ent->hentry_next = NULL;
            ent->hentry_next_index = VR_INVALID_HENTRY_INDEX;
            ent = next;
            while (ent) {
                /* Save the link before the entry is invalidated */
                next = ent->hentry_next;
                if (ent->hentry_flags & VR_HENTRY_FLAG_VALID) {
                    ent->hentry_flags &= ~VR_HENTRY_FLAG_VALID;
                    (void)__sync_sub_and_fetch(&table->ht_used_entries, 1);
                }
                vr_htable_oentry_invalidate(table, ent);
                ent = next;
            }
        }
    }

    return;
}
/*
 * Releases a hash entry back to the table.
 *
 * Clears the VALID flag; main table entries need no further work.
 * Overflow entries are either unlinked and invalidated inline (when the
 * module is not ready, so no deferred work can run) or delete-marked with
 * a bucket-chain cleanup scheduled on a cpu derived from the bucket
 * index. hd_scheduled doubles as a pending counter here: only the 0 -> 1
 * transition schedules the work item.
 *
 * NOTE(review): unlike the other release variant in this file, this one
 * does not decrement ht_used_entries -- presumably the counter is
 * maintained elsewhere in this revision; confirm.
 */
void
vr_htable_release_hentry(vr_htable_t htable, vr_hentry_t *ent)
{
    unsigned int cpu_num, delete_index;
    struct vr_hentry_delete_data *delete_data;
    vr_hentry_t *head_ent;
    struct vr_htable *table = (struct vr_htable *)htable;

    /* Nothing to do if the entry is not currently valid */
    if (!(ent->hentry_flags & VR_HENTRY_FLAG_VALID))
        return;

    /* Mark it as Invalid */
    ent->hentry_flags &= ~VR_HENTRY_FLAG_VALID;

    /* Main-table entries stay in place; only overflow entries are freed */
    if (ent->hentry_index < table->ht_hentries)
        return;

    if (vr_not_ready) {
        /* No work queues available: unlink and invalidate inline */
        __vr_htable_oentry_invalidate(table, ent);
        return;
    }

    ent->hentry_flags |= VR_HENTRY_FLAG_DELETE_MARKED;

    head_ent = __vr_htable_get_hentry_by_index(htable, ent->hentry_bucket_index);

    /* One delete_data slot per bucket */
    delete_index = head_ent->hentry_index / table->ht_bucket_size;
    delete_data = vr_btable_get(table->ht_dtable, delete_index);

    /* Schedule the deletion only if it is not already scheduled */
    if (__sync_add_and_fetch(&delete_data->hd_scheduled, 1) == 1) {

        delete_data->hd_table = (struct vr_htable *)htable;
        delete_data->hd_index = head_ent->hentry_index;

        /* Schedule the deletion on a cpu based on bucket index */
        cpu_num = head_ent->hentry_index % vr_num_cpus;
        vr_schedule_work(cpu_num, vr_htable_hentry_scheduled_delete,
                (void *)delete_data);
    }

    return;
}
/*
 * Work-queue handler that walks one bucket's overflow chain and unlinks
 * every entry that vr_htable_release_hentry() delete-marked.
 *
 * Runs concurrently with insertions, which happen only at the head entry:
 * the head unlink therefore uses a CAS and restarts the traversal from
 * the head on failure. Unlinked entries are flagged DELETE_PROCESSED and
 * handed to the defer machinery, which later resets them via
 * vr_htable_hentry_defer_delete().
 */
static void
vr_htable_hentry_scheduled_delete(void *arg)
{
    unsigned int count;
    struct vr_hentry_delete_data *delete_data, *defer_data;
    vr_hentry_t *head_ent, *ent, *prev, *next;
    struct vr_htable *table;

    delete_data = (struct vr_hentry_delete_data *)arg;
    table = delete_data->hd_table;
    head_ent = __vr_htable_get_hentry_by_index((vr_htable_t)(table),
            delete_data->hd_index);
    if (!head_ent)
        return;

    /* Allow a new work item to be scheduled for this bucket from now on */
    (void)__sync_bool_compare_and_swap(&delete_data->hd_scheduled, 1, 0);

    /*
     * We attempt to delete only those many entries that have been
     * delete marked. If some new entries are delete marked while
     * processing these, they will get scheduled in new work item
     */
    count = delete_data->hd_count;
    (void)__sync_sub_and_fetch(&delete_data->hd_count, count);

    prev = head_ent;
    ent = head_ent->hentry_next;
    while (count && ent) {
        /*
         * Process only if delete marked. If already processed,
         * delete marking is changed to delete processed
         */
        if (ent->hentry_flags & VR_HENTRY_FLAG_DELETE_MARKED) {
            /*
             * As the insertion happens only at head entry, it has
             * to be verified if something is inserted while delete
             * attemped. If inserted, traversal needs to restart, to
             * get hold of the new previous
             */
            if (prev == head_ent) {
                if (!__sync_bool_compare_and_swap(&prev->hentry_next,
                            ent, ent->hentry_next)) {
                    /* Lost the race with an insert: restart from head */
                    prev = head_ent;
                    ent = head_ent->hentry_next;
                    continue;
                }
            } else {
                prev->hentry_next = ent->hentry_next;
            }

            count--;

            /* update next index for the previous */
            if (ent->hentry_next)
                prev->hentry_next_index = ent->hentry_next->hentry_index;
            else
                prev->hentry_next_index = VR_INVALID_HENTRY_INDEX;

            ent->hentry_flags &= ~VR_HENTRY_FLAG_DELETE_MARKED;
            ent->hentry_flags |= VR_HENTRY_FLAG_DELETE_PROCESSED;
        }

        /* Save the link: 'ent' is unlinked once processed */
        next = ent->hentry_next;

        /*
         * A separate check for VR_HENTRY_FLAG_DELETE_PROCESSED flag to
         * defer the entry if we ever failed to allocate memory while
         * deferring it
         */
        if (ent->hentry_flags & VR_HENTRY_FLAG_DELETE_PROCESSED) {
            /*
             * Defer the entry to reset the values. If alloc of
             * defer data fails, this entry will be in delete state
             * for ever
             */
            if (!vr_not_ready) {
                defer_data = vr_get_defer_data(sizeof(*defer_data));
                if (defer_data) {
                    defer_data->hd_table = delete_data->hd_table;
                    defer_data->hd_index = ent->hentry_index;
                    vr_defer(delete_data->hd_table->ht_router,
                            vr_htable_hentry_defer_delete,
                            (void *)defer_data);
                }
            } else {
                /* Module not ready: invalidate inline, no defer needed */
                vr_htable_oentry_invalidate(table, ent);
                ent = next;
                continue;
            }
        }

        /* Previous should not be under deletion */
        if (!(ent->hentry_flags & VR_HENTRY_FLAG_UNDER_DELETION))
            prev = ent;

        ent = next;
    }

    return;
}