static void _hash_enter(struct hash_header *ht, int key, void *data)
{
  /* precondition: there is no entry for <key> yet */
  struct hash_link *temp;
  int i;

  if (DEBUG > 1234)
    vlog(LOG_DEBUG, "_hash_enter");

  temp = (struct hash_link *)malloc(sizeof(struct hash_link));
  temp->key = key;
  temp->next = ht->buckets[HASH_KEY(ht, key)];
  temp->data = data;
  ht->buckets[HASH_KEY(ht, key)] = temp;

  if (ht->klistlen >= ht->klistsize)
    ht->keylist = (void *)realloc(ht->keylist,
                                  sizeof(key_type) * (ht->klistsize *= 2));

  /* Insert <key> into the sorted keylist.  The `i == 0` test stops the
     scan before it reads keylist[-1] when <key> is smaller than every
     existing key; it also covers the empty-list case, so no separate
     klistlen == 0 branch is needed. */
  for (i = ht->klistlen; i >= 0; i--)
  {
    if (i == 0 || ht->keylist[i - 1] < key)
    {
      ht->keylist[i] = key;
      break;
    }
    ht->keylist[i] = ht->keylist[i - 1];
  }
  ht->klistlen++;
}
void _hash_enter(struct hash_header *ht, int key, void *data)
{
  /* precondition: there is no entry for <key> yet */
  struct hash_link *temp;
  int i;

  temp = (struct hash_link *)malloc(sizeof(struct hash_link));
  temp->key = key;
  temp->next = ht->buckets[HASH_KEY(ht, key)];
  temp->data = data;
  ht->buckets[HASH_KEY(ht, key)] = temp;

  if (ht->klistlen >= ht->klistsize)
    ht->keylist = (void *)realloc(ht->keylist,
                                  sizeof(*ht->keylist) * (ht->klistsize *= 2));

  /* Sorted insert; `i == 0` guards against reading keylist[-1]. */
  for (i = ht->klistlen; i >= 0; i--)
  {
    if (i == 0 || ht->keylist[i - 1] < key)
    {
      ht->keylist[i] = key;
      break;
    }
    ht->keylist[i] = ht->keylist[i - 1];
  }
  ht->klistlen++;
}
void _hash_enter( HASH_HEADER *ht, int key, void *data )
{
    /* precondition: there is no entry for <key> yet */
    HASH_LINK *temp;
    int i;

    temp = (HASH_LINK *) malloc( sizeof( HASH_LINK ) );
    temp->key = key;
    temp->next = ht->buckets[HASH_KEY( ht, key )];
    temp->data = data;
    ht->buckets[HASH_KEY( ht, key )] = temp;

    if ( ht->klistlen >= ht->klistsize )
    {
        ht->keylist = (void *) realloc( ht->keylist,
            sizeof( *ht->keylist ) * ( ht->klistsize *= 2 ) );
    }

    for ( i = ht->klistlen; i >= 0; i-- )
    {
        if ( i == 0 || ht->keylist[i - 1] < key )
        {
            ht->keylist[i] = key;
            break;
        }
        ht->keylist[i] = ht->keylist[i - 1];
    }
    ht->klistlen++;
    return;
}
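/*
 * A minimal, self-contained sketch of the sorted-insert loop shared by the
 * three _hash_enter variants above.  The array, lengths, and test keys are
 * hypothetical scaffolding, not part of the original hash code; the point
 * is to show why the `i == 0` guard matters when the new key is smaller
 * than every existing key (the first two variants originally read
 * keylist[-1] in that case).
 */
#include <stdio.h>

int main(void)
{
    int keylist[8] = { 10, 20, 30 };
    int klistlen = 3;
    int key = 5;                /* smaller than every existing key */
    int i;

    for (i = klistlen; i >= 0; i--)
    {
        /* Without `i == 0`, the last pass would read keylist[-1]. */
        if (i == 0 || keylist[i - 1] < key)
        {
            keylist[i] = key;
            break;
        }
        keylist[i] = keylist[i - 1];
    }
    klistlen++;

    for (i = 0; i < klistlen; i++)
        printf("%d ", keylist[i]);   /* prints: 5 10 20 30 */
    printf("\n");
    return 0;
}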
size_t hash_set(hash_t *const hash, char const *const key)
{
	size_t const x = hashfunc(hash, key);
	if(HASH_NOTFOUND == x) return x;
	size_t i = x;
	for(;;) {
		char const *const k = HASH_KEY(hash, i);
		if(0 == nulcmp(k, hash->keylen)) break;         /* empty slot: claim it */
		if(0 == memcmp(k, key, hash->keylen)) return i; /* key already present */
		i = (i + 1) % hash->count;
		if(x == i) return HASH_NOTFOUND;                /* wrapped: table full */
	}
	memcpy(HASH_KEY(hash, i), key, hash->keylen);
	return i;
}
static void
evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);
  ptrdiff_t i;

  for (i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XINT (HASH_VALUE (log, i)) <= median)
      {
        Lisp_Object key = HASH_KEY (log, i);
        { /* FIXME: we could make this more efficient.  */
          Lisp_Object tmp;
          XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
          Fremhash (key, tmp);
        }
        eassert (EQ (log->next_free, make_number (i)));
        {
          int j;
          eassert (VECTORP (key));
          for (j = 0; j < ASIZE (key); j++)
            ASET (key, j, Qnil);
        }
        set_hash_key_slot (log, i, key);
      }
}
size_t hash_del_internal(hash_t *const hash, size_t const x)
{
	if(x >= hash->count) return 0;
	size_t i = x;
	for(;;) {
		size_t const next = (i + 1) % hash->count;
		if(x == next) break;                            /* wrapped all the way around */
		char const *const k = HASH_KEY(hash, next);
		if(0 == nulcmp(k, hash->keylen)) break;         /* empty slot ends the probe run */
		size_t const alt = hashfunc(hash, k);
		if(next == alt) break;                          /* entry is in its home slot; don't move it */
		memcpy(HASH_KEY(hash, i), k, hash->keylen);     /* shift displaced entry back into the hole */
		i = next;
	}
	memset(HASH_KEY(hash, i), 0, hash->keylen);
	return (hash->count + i - x) % hash->count;
}
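/*
 * A toy model of the backward-shift deletion performed by hash_del_internal
 * above.  The table, the hash function (key % N), and the 0 sentinel for
 * "empty" are all hypothetical; this only illustrates how displaced entries
 * slide back into the hole so later linear-probe lookups still find them.
 */
#include <stdio.h>

#define N     8
#define EMPTY 0

int main(void)
{
    /* 17, 25, 33 all hash to slot 1 (key % 8); they occupy slots 1, 2, 3. */
    int table[N] = { EMPTY, 17, 25, 33, EMPTY, EMPTY, EMPTY, EMPTY };
    int hole = 1;                       /* delete the entry in slot 1 */

    for (;;)
    {
        int next = (hole + 1) % N;
        if (next == 1 || table[next] == EMPTY)
            break;                      /* wrapped or end of the probe run */
        if (table[next] % N == next)
            break;                      /* entry already in its home slot */
        table[hole] = table[next];      /* shift displaced entry back */
        hole = next;
    }
    table[hole] = EMPTY;

    for (int i = 0; i < N; i++)
        printf("%d ", table[i]);        /* prints: 0 25 33 0 0 0 0 0 */
    printf("\n");
    return 0;
}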
void *hash_remove(struct hash_header *ht, int key)
{
  struct hash_link **scan;

  scan = ht->buckets + HASH_KEY(ht, key);
  while (*scan && (*scan)->key != key)
    scan = &(*scan)->next;

  if (*scan)
  {
    int i;
    void *temp;
    struct hash_link *aux;

    temp = (*scan)->data;
    aux = *scan;
    *scan = aux->next;
    free(aux);

    for (i = 0; i < ht->klistlen; i++)
      if (ht->keylist[i] == key)
        break;

    if (i < ht->klistlen)
    {
      /* Exactly klistlen - i - 1 entries follow index i; copying
         klistlen - i of them would read one element past the end. */
      memmove(ht->keylist + i, ht->keylist + i + 1,
              (ht->klistlen - i - 1) * sizeof(*ht->keylist));
      ht->klistlen--;
    }
    return temp;
  }
  return NULL;
}
void* hash_table_lookup_uint_key(hash_table htable, uint32_t key)
{
    hash_table_node** table = htable.table;
    uint32_t size = htable.size;
    uint32_t size_mask = htable.size_mask;
    uint32_t hash = HASH_KEY(key);
    uint32_t slot = hash & size_mask;
    uint32_t i = 0;
    void* item_ptr = NULL;

    /* Linear probing; the scan does not stop at NULL slots because
       removal leaves holes in the probe chain. */
    while (i < size) {
        hash_table_node* node = table[slot];
        if (node != NULL && node->uint_key == key) {
            item_ptr = node->item;
            break;
        }
        ++i;
        slot = (slot + 1) & size_mask;
    }
    return item_ptr;
}
static void
record_backtrace (log_t *log, EMACS_INT count)
{
  Lisp_Object backtrace;
  ptrdiff_t index;

  if (!INTEGERP (log->next_free))
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  index = XINT (log->next_free);

  /* Get a "working memory" vector.  */
  backtrace = HASH_KEY (log, index);
  get_backtrace (backtrace);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal handler,
       and we optimize the code to try and avoid computing the hash+lookup
       twice.  See fns.c:Fputhash for reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
        EMACS_INT old_val = XINT (HASH_VALUE (log, j));
        EMACS_INT new_val = saturated_add (old_val, count);
        set_hash_value_slot (log, j, make_number (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
           But currently it only does that if log->next_free is nil.  */
        int j;
        eassert (!NILP (log->next_free));
        j = hash_put (log, backtrace, make_number (count), hash);
        /* Let's make sure we've put `backtrace' right where it
           already was to start with.  */
        eassert (index == j);

        /* FIXME: If the hash-table is almost full, we should set
           some global flag so that some Elisp code can offload its
           data elsewhere, so as to avoid the eviction code.
           There are 2 ways to do that, AFAICT:
           - Set a flag checked in QUIT, such that QUIT can then call
             Fprofiler_cpu_log and stash the full log for later use.
           - Set a flag check in post-gc-hook, so that Elisp code can call
             profiler-cpu-log.  That gives us more flexibility since that
             Elisp code can then do all kinds of fun stuff like write
             the log to disk.  Or turn it right away into a call tree.
           Of course, using Elisp is generally preferable, but it may
           take longer until we get a chance to run the Elisp code, so
           there's more risk that the table will get full before we
           get there.  */
      }
  }
}
void *hash_find( HASH_HEADER *ht, int key )
{
    HASH_LINK *scan;

    scan = ht->buckets[HASH_KEY( ht, key )];
    while ( scan && scan->key != key )
        scan = scan->next;

    return scan ? scan->data : NULL;
}
void *hash_find(struct hash_header *ht, int key)
{
  struct hash_link *scan;

  scan = ht->buckets[HASH_KEY(ht, key)];
  while (scan && scan->key != key)
    scan = scan->next;

  return scan ? scan->data : NULL;
}
VOID
CmpRemoveKeyHash(
    IN PCM_KEY_HASH KeyHash
    )
/*++

Routine Description:

    Removes a key hash structure from the hash table.

Arguments:

    KeyHash - Supplies the key hash structure to be deleted.

Return Value:

    None

--*/
{
    HASH_VALUE Hash;
    ULONG Index;
    PCM_KEY_HASH *Prev;
    PCM_KEY_HASH Current;

    ASSERT_KEY_HASH(KeyHash);
    Hash = HASH_KEY(KeyHash->ConvKey);
    Index = Hash % CmpHashTableSize;

    //
    // Find this entry.
    //
    Prev = &CmpCacheTable[Index];
    while (TRUE) {
        Current = *Prev;
        ASSERT(Current != NULL);
        ASSERT_KEY_HASH(Current);
        if (Current == KeyHash) {
            *Prev = Current->NextHash;
#if DBG
            if (*Prev) {
                ASSERT_KEY_HASH(*Prev);
            }
#endif
            break;
        }
        Prev = &Current->NextHash;
    }
}
bool hash_table_insert_uint_key(hash_table htable, void* item, uint32_t key)
{
    hash_table_node** table = htable.table;
    uint32_t size = htable.size;
    uint32_t size_mask = htable.size_mask;
    uint32_t hash = HASH_KEY(key);
    uint32_t slot = hash & size_mask;
    uint32_t i;
    hash_table_node* node_ptr = NULL;
    bool update_current_node = false;
    bool is_insertion_successful = false;

    /* Probe for an empty slot or an existing node with the same key. */
    for (i = 0; i < size; ++i) {
        node_ptr = table[slot];
        if (node_ptr == NULL) {
            break;
        } else if (node_ptr->uint_key == key) {
            update_current_node = true;
            break;
        }
        slot = (slot + 1) & size_mask;
    }

    if (i < size) {
        if (update_current_node == true) {
            node_ptr->item = item;
        } else {
            /* Positional initializer; the order must match the
               hash_table_node field layout. */
            node_ptr = malloc(HASH_TABLE_NODE_SIZE);
            hash_table_node node = { item, uint_key_type, key, 0, hash };
            *node_ptr = node;
            table[slot] = node_ptr;
        }
        is_insertion_successful = true;
    }
    return is_insertion_successful;
}
hash_t *obj_init_typemap( ualloc_t *ua, err_t *out_err)
{
  err_t x = ERROR_NONE;
  hash_t *typemap = NULL;
  hash_t *namemap = NULL;
  hash_t *isamap  = NULL;

  x = HASH_CREATE_SIMPLE(ua, HASH_F_INTKEY, &typemap);
  if (x) goto DONE;
  x = HASH_CREATE_SIMPLE(ua, HASH_F_COPYVAL, &namemap);
  if (x) goto DONE;
  x = HASH_CREATE_SIMPLE(ua, HASH_F_INTKEY, &isamap);
  if (x) goto DONE;

  x = hash_add(typemap, HASH_KEY(_INTERNAL_MN_KEY), HASH_VAL(namemap));
  if (x) goto DONE;
  x = hash_add(typemap, HASH_KEY(_INTERNAL_ISA_KEY), HASH_VAL(isamap));

DONE:
  if (x) {
    if (isamap)  (void) hash_destroy(isamap);
    if (namemap) (void) hash_destroy(namemap);
    if (typemap) (void) hash_destroy(typemap);
    typemap = NULL;
  }
  if (out_err) *out_err = x;
  return typemap;
}
// old public method
static inline void *qt_hash_get(qt_hash h, const qt_key_t key)
{
    size_t   bucket;
    uint64_t lkey = (uint64_t)(uintptr_t)(h->op_hash(key));

    HASH_KEY(lkey);
    bucket = lkey % h->size;

    if (h->B[bucket] == UNINITIALIZED) {
        // You'd think returning NULL at this point would be a good idea; but
        // if we do that, we risk losing key/value pairs (incorrectly reporting
        // them as absent) when the hash table resizes
        initialize_bucket(h, bucket);
    }
    return qt_lf_list_find(&(h->B[bucket]), so_regularkey(lkey), key,
                           NULL, NULL, NULL, h->op_equals);
}
// old public method
static inline int qt_hash_remove(qt_hash h, const qt_key_t key)
{
    size_t   bucket;
    uint64_t lkey = (uint64_t)(uintptr_t)(h->op_hash(key));

    HASH_KEY(lkey);
    bucket = lkey % h->size;

    if (h->B[bucket] == UNINITIALIZED) {
        initialize_bucket(h, bucket);
    }
    if (!qt_lf_list_delete(&(h->B[bucket]), so_regularkey(lkey), key,
                           h->op_equals, h->op_cleanup)) {
        return 0;
    }
    qthread_incr(&h->count, -1);
    return 1;
}
void *qt_dictionary_get(qt_dictionary *h, const qt_key_t key)
{
    uint64_t lkey = (uint64_t)(uintptr_t)(h->op_hash(key));

    HASH_KEY(lkey);
    assert(h);

    unsigned bucket = BASE_SPINE_BUCKET(lkey);
    // spine_element_t *child_id = &(h->base[bucket]);
    spine_element_t child_val = h->base[bucket];
    // spine_element_t *cur_id = NULL;
    spine_t *cur_spine = NULL;
    unsigned depth = 0;

    do {
        if (child_val.e == NULL) {
            // not found
            return NULL;
        } else if (SPINE_PTR_TEST(child_val)) {
            // cur_id = child_id;
            cur_spine = SPINE_PTR(h, child_val);
            depth++;
            assert(depth <= 11); // otherwise, something has gone horribly wrong
            bucket = SPINE_BUCKET(lkey, depth);
            // child_id = &cur_spine->elements[bucket];
            child_val = cur_spine->elements[bucket];
        } else {
            // check that key == element->hashed_key
            if (child_val.e->hashed_key == lkey) {
                hash_entry *e = child_val.e;
                while (e) {
                    if (h->op_equals(e->key, key)) {
                        return e->value;
                    }
                    e = e->next;
                }
                return NULL;
            } else {
                return NULL;
            }
        }
    } while (1);
}
bool hash_table_remove_uint_key(hash_table htable, uint32_t key)
{
    hash_table_node** table = htable.table;
    uint32_t size = htable.size;
    uint32_t size_mask = htable.size_mask;
    uint32_t hash = HASH_KEY(key);
    uint32_t slot = hash & size_mask;
    uint32_t i;
    bool is_removal_successful = false;

    /* Probe the same way insertion does; checking only the home slot
       would miss entries displaced by collisions. */
    for (i = 0; i < size; ++i) {
        hash_table_node* found_node = table[slot];
        if (found_node != NULL && found_node->uint_key == key) {
            free(found_node);
            table[slot] = NULL;
            is_removal_successful = true;
            break;
        }
        slot = (slot + 1) & size_mask;
    }
    return is_removal_successful;
}
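/*
 * A hedged usage sketch for the uint-key table above, showing the
 * insert/lookup/remove round trip.  hash_table_create() and
 * hash_table_destroy() are hypothetical names assumed for illustration --
 * the snippets here only define the insert/lookup/remove trio -- and the
 * table is assumed not to own the item memory.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

void uint_key_example(void)
{
    hash_table htable = hash_table_create(64);   /* assumed constructor */
    int payload = 42;

    hash_table_insert_uint_key(htable, &payload, 7u);
    assert(hash_table_lookup_uint_key(htable, 7u) == &payload);

    hash_table_remove_uint_key(htable, 7u);
    assert(hash_table_lookup_uint_key(htable, 7u) == NULL);

    hash_table_destroy(htable);                  /* assumed destructor */
}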
// old public method; added last param to distinguish between put and put if absent
static inline hash_entry *qt_hash_put(qt_hash h,
                                      qt_key_t key,
                                      void *value,
                                      int put_choice)
{
    hash_entry *node = qpool_alloc(hash_entry_pool);
    hash_entry *ret = node;
    size_t bucket;
    uint64_t lkey = (uint64_t)(uintptr_t)(h->op_hash(key));

    HASH_KEY(lkey);
    bucket = lkey % h->size;

    assert(node);
    assert((lkey & MSB) == 0);
    node->hashed_key = so_regularkey(lkey);
    node->key = key; // Also store original key!
    node->value = value;
    node->next = (hash_entry *)UNINITIALIZED;

    if (h->B[bucket] == UNINITIALIZED) {
        initialize_bucket(h, bucket);
    }
    if (put_choice == PUT_IF_ABSENT) {
        if (!qt_lf_list_insert(&(h->B[bucket]), node, NULL, &ret, h->op_equals)) {
            qpool_free(hash_entry_pool, node);
            return ret->value;
        }
    } else {
        qt_lf_force_list_insert(&(h->B[bucket]), node, h->op_equals);
    }

    size_t csize = h->size;
    if (qthread_incr(&h->count, 1) / csize > MAX_LOAD) {
        if (2 * csize <= hard_max_buckets) {
            // this caps the size of the hash
            qthread_cas(&h->size, csize, 2 * csize);
        }
    }
    return ret->value;
}
static Lisp_Object
hash_get_category_set (Lisp_Object table, Lisp_Object category_set)
{
  Lisp_Object val;
  struct Lisp_Hash_Table *h;
  int i;
  unsigned hash;

  if (NILP (XCHAR_TABLE (table)->extras[1]))
    XCHAR_TABLE (table)->extras[1]
      = make_hash_table (Qequal, make_number (DEFAULT_HASH_SIZE),
                         make_float (DEFAULT_REHASH_SIZE),
                         make_float (DEFAULT_REHASH_THRESHOLD),
                         Qnil, Qnil, Qnil);
  h = XHASH_TABLE (XCHAR_TABLE (table)->extras[1]);
  i = hash_lookup (h, category_set, &hash);
  if (i >= 0)
    return HASH_KEY (h, i);
  hash_put (h, category_set, Qnil, hash);
  return category_set;
}
static Lisp_Object
hash_get_category_set (Lisp_Object table, Lisp_Object category_set)
{
  struct Lisp_Hash_Table *h;
  ptrdiff_t i;
  EMACS_UINT hash;

  if (NILP (XCHAR_TABLE (table)->extras[1]))
    set_char_table_extras
      (table, 1,
       make_hash_table (hashtest_equal, make_number (DEFAULT_HASH_SIZE),
                        make_float (DEFAULT_REHASH_SIZE),
                        make_float (DEFAULT_REHASH_THRESHOLD),
                        Qnil));
  h = XHASH_TABLE (XCHAR_TABLE (table)->extras[1]);
  i = hash_lookup (h, category_set, &hash);
  if (i >= 0)
    return HASH_KEY (h, i);
  hash_put (h, category_set, Qnil, hash);
  return category_set;
}
void *hash_remove( HASH_HEADER *ht, int key )
{
    HASH_LINK **scan;

    scan = ht->buckets + HASH_KEY( ht, key );
    while ( *scan && ( *scan )->key != key )
        scan = &( *scan )->next;

    if ( *scan )
    {
        void *temp;
        HASH_LINK *aux;
        int i;

        temp = ( *scan )->data;
        aux = *scan;
        *scan = aux->next;
        free( aux );

        for ( i = 0; i < ht->klistlen; i++ )
            if ( ht->keylist[i] == key )
                break;

        if ( i < ht->klistlen )
        {
            /* Exactly klistlen - i - 1 entries follow index i; copying
               klistlen - i of them would read one element past the end. */
            memmove( ht->keylist + i, ht->keylist + i + 1,
                     ( ht->klistlen - i - 1 ) * sizeof( *ht->keylist ) );
            ht->klistlen--;
        }
        return temp;
    }
    return NULL;
}
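/*
 * A standalone check of the keylist compaction used by both hash_remove
 * variants above.  The array and key values are hypothetical; it shows
 * that removing index i must move klistlen - i - 1 trailing entries --
 * one fewer than the element count from i to the end of the array.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    int keylist[] = { 3, 7, 11, 19, 23 };
    int klistlen = 5;
    int i = 1;                               /* remove the key 7 */

    memmove(keylist + i, keylist + i + 1,
            (klistlen - i - 1) * sizeof(*keylist));
    klistlen--;

    for (int j = 0; j < klistlen; j++)
        printf("%d ", keylist[j]);           /* prints: 3 11 19 23 */
    printf("\n");
    return 0;
}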
char *obj_register_class_in( hash_t *typemap,
                             void *class,
                             void *parent,
                             char *name,
                             err_t *out_err)
{
  err_t x = ERROR_NONE;
  hash_val_t val;
  hash_t *namemap = NULL;
  hash_t *isamap  = NULL;
  char *result = NULL;

  x = hash_lookup(typemap, HASH_KEY(_INTERNAL_NM_KEY), &val);
  if (x) goto DONE;
  namemap = (hash_t *)val;
  if (!namemap) {
    x = ERROR_NULL_VAL;
    goto DONE;
  }

  x = hash_lookup(typemap, HASH_KEY(_INTERNAL_ISA_KEY), &val);
  if (x) goto DONE;
  isamap = (hash_t *)val;
  if (!isamap) {
    x = ERROR_NULL_VAL;
    goto DONE;
  }
static json_t *
lisp_to_json_toplevel_1 (Lisp_Object lisp)
{
  json_t *json;
  ptrdiff_t count;

  if (VECTORP (lisp))
    {
      ptrdiff_t size = ASIZE (lisp);
      json = json_check (json_array ());
      count = SPECPDL_INDEX ();
      record_unwind_protect_ptr (json_release_object, json);
      for (ptrdiff_t i = 0; i < size; ++i)
        {
          int status
            = json_array_append_new (json, lisp_to_json (AREF (lisp, i)));
          if (status == -1)
            json_out_of_memory ();
        }
      eassert (json_array_size (json) == size);
    }
  else if (HASH_TABLE_P (lisp))
    {
      struct Lisp_Hash_Table *h = XHASH_TABLE (lisp);
      json = json_check (json_object ());
      count = SPECPDL_INDEX ();
      record_unwind_protect_ptr (json_release_object, json);
      for (ptrdiff_t i = 0; i < HASH_TABLE_SIZE (h); ++i)
        if (!NILP (HASH_HASH (h, i)))
          {
            Lisp_Object key = json_encode (HASH_KEY (h, i));
            /* We can't specify the length, so the string must be
               null-terminated.  */
            check_string_without_embedded_nulls (key);
            const char *key_str = SSDATA (key);
            /* Reject duplicate keys.  These are possible if the hash
               table test is not `equal'.  */
            if (json_object_get (json, key_str) != NULL)
              wrong_type_argument (Qjson_value_p, lisp);
            int status
              = json_object_set_new (json, key_str,
                                     lisp_to_json (HASH_VALUE (h, i)));
            if (status == -1)
              {
                /* A failure can be caused either by an invalid key or
                   by low memory.  */
                json_check_utf8 (key);
                json_out_of_memory ();
              }
          }
    }
  else if (NILP (lisp))
    return json_check (json_object ());
  else if (CONSP (lisp))
    {
      Lisp_Object tail = lisp;
      json = json_check (json_object ());
      count = SPECPDL_INDEX ();
      record_unwind_protect_ptr (json_release_object, json);
      bool is_plist = !CONSP (XCAR (tail));
      FOR_EACH_TAIL (tail)
        {
          const char *key_str;
          Lisp_Object value;
          Lisp_Object key_symbol;
          if (is_plist)
            {
              key_symbol = XCAR (tail);
              tail = XCDR (tail);
              CHECK_CONS (tail);
              value = XCAR (tail);
              if (EQ (tail, li.tortoise))
                circular_list (lisp);
            }
          else
            {
              Lisp_Object pair = XCAR (tail);
              CHECK_CONS (pair);
              key_symbol = XCAR (pair);
              value = XCDR (pair);
            }
          CHECK_SYMBOL (key_symbol);
          Lisp_Object key = SYMBOL_NAME (key_symbol);
          /* We can't specify the length, so the string must be
             null-terminated.  */
          check_string_without_embedded_nulls (key);
          key_str = SSDATA (key);
          /* In plists, ensure leading ":" in keys is stripped.  It
             will be reconstructed later in `json_to_lisp'.  */
          if (is_plist && ':' == key_str[0] && key_str[1])
            key_str = &key_str[1];
          /* Only add element if key is not already present.  */
          if (json_object_get (json, key_str) == NULL)
            {
              int status
                = json_object_set_new (json, key_str, lisp_to_json (value));
              if (status == -1)
                json_out_of_memory ();
            }
        }
      CHECK_LIST_END (tail, lisp);
    }
  else
void *qt_hash_put_helper(qt_dictionary *h,
                         qt_key_t key,
                         void *value,
                         int put_choice)
{
    uint64_t lkey = (uint64_t)(uintptr_t)(h->op_hash(key));

    HASH_KEY(lkey);
    assert(h);

    unsigned bucket = BASE_SPINE_BUCKET(lkey);
    hash_entry *e = qt_malloc(sizeof(hash_entry)); // XXX: should be from a memory pool
    spine_element_t *child_id = &(h->base[bucket]);
    spine_element_t child_val = h->base[bucket];
    spine_element_t *cur_id = NULL;
    spine_t *cur_spine = NULL;
    unsigned depth = 0;

    assert(e != NULL);
    e->hashed_key = lkey;
    e->key = key;
    e->value = value;
    e->next = NULL;

    hash_entry *crt;
    do {
        if (child_val.e == NULL) {
            // place the entry in the hash
            if ((child_val.e = CAS(&(child_id->e), NULL, e)) == NULL) {
                // put success: no potential colliding element was present
                return value;
            }
        } else if (SPINE_PTR_TEST(child_val)) {
            INCREMENT_COUNT(child_id, child_val);
            if (cur_id) {
                DECREMENT_COUNT(cur_id);
            }
            cur_id = child_id;
            cur_spine = SPINE_PTR(h, child_val);
            depth++;
            assert(depth <= 11); // otherwise, something has gone horribly wrong
            bucket = SPINE_BUCKET(lkey, depth);
            child_id = &cur_spine->elements[bucket];
            child_val = cur_spine->elements[bucket];
        } else if (child_val.e->hashed_key != lkey) {
            // upgrade to a spine
            spine_element_t newspine, cur;
            spine_t *realspine;
            unsigned bucket1 = SPINE_BUCKET(child_val.e->hashed_key, depth + 1);
            unsigned bucket2 = SPINE_BUCKET(lkey, depth + 1);

            newspine.s.id = allocate_spine(h, &realspine);
            realspine->parent = cur_id;
            realspine->elements[bucket1] = child_val;

            if (bucket1 != bucket2) {
                // both elements will be in the new spine
                newspine.s.ctr = SPINE_COUNT(2); // contains 2 elements
                realspine->elements[bucket2].e = e;
                if ((cur.e = CAS(&(child_id->e), child_val.e, newspine.e)) == child_val.e) {
                    // success!
                    if (cur_id) {
                        DECREMENT_COUNT(cur_id);
                    }
                    return value;
                } else {
                    child_val = cur;
                    deallocate_spine(h, newspine.s.id);
                }
            } else {
                // collision in the new spine (unusual; will use unnecessary CAS)
                newspine.s.ctr = SPINE_COUNT(1); // contains 1 element (oldval)
                if ((cur.e = CAS(&(child_id->e), child_val.e, newspine.e)) == child_val.e) {
                    // success
                    continue;
                } else {
                    child_val = cur;
                    deallocate_spine(h, newspine.s.id);
                }
            }
        } else {
            // use the real user-equals operation to differentiate subcases;
            // the element may or may not already be in the list
            hash_entry *head;
            do {
                head = child_id->e;
                e->next = head;
                crt = head;
                // find the entry, if it is in the list
                while (crt) {
                    if (h->op_equals(crt->key, key)) {
                        // already exists
                        if (put_choice != PUT_IF_ABSENT) {
                            void **crt_val_adr = &(crt->value);
                            void *crt_val = crt->value;
                            while ((qthread_cas_ptr(crt_val_adr, crt_val, value)) != crt_val) {
                                crt_val = crt->value;
                            }
                        }
                        if (cur_id) {
                            DECREMENT_COUNT(cur_id);
                        }
                        return crt->value;
                    }
                    crt = crt->next;
                }
                // and try to insert it at the head of the list;
                // if the list changed, redo the work
            } while (qthread_cas_ptr(&(child_id->e), head, e) != head);
            // printf("IN put: (%s-%s)\n", child_id->e->key, child_id->e->value);
            // if (e->next != NULL)
            //     printf("next key is %s and value %s\n", e->next->key, e->next->value);
            return e->value;
        }
    } while (1);
}
int qt_dictionary_remove(qt_dictionary *h, const qt_key_t key)
{
    uint64_t lkey = (uint64_t)(uintptr_t)(h->op_hash(key));

    /* Mix the key exactly once, matching qt_dictionary_get and
       qt_hash_put_helper; applying HASH_KEY twice would compute a
       different bucket than the one the entry was stored in. */
    HASH_KEY(lkey);
    assert(h);

    unsigned bucket = BASE_SPINE_BUCKET(lkey);
    spine_element_t *child_id = &(h->base[bucket]);
    spine_element_t child_val = h->base[bucket];
    spine_element_t *cur_id = NULL;
    spine_t *cur_spine = NULL;
    unsigned depth = 0;

    // First, find and remove the item itself
    do {
        if (child_val.e == NULL) {
            // not found
            return 0;
        } else if (SPINE_PTR_TEST(child_val)) {
            cur_id = child_id;
            cur_spine = SPINE_PTR(h, child_val);
            depth++;
            assert(depth <= 11); // otherwise, something has gone horribly wrong
            bucket = SPINE_BUCKET(lkey, depth);
            child_id = &cur_spine->elements[bucket];
            child_val = cur_spine->elements[bucket];
        } else if (child_val.e->hashed_key == lkey) {
            hash_entry *e = child_id->e;
            hash_entry **prev = &(child_id->e);
            do {
                if (h->op_equals(e->key, key)) {
                    spine_element_t cur;
                    if ((cur.e = CAS(prev, e, e->next)) == e) {
                        if (h->op_cleanup != NULL) {
                            h->op_cleanup(child_val.e->key, NULL);
                        }
                        qt_free(child_val.e); // XXX should be into a mempool
                        // Second, walk back up the parent pointers, removing
                        // empty spines (if any); cur_id is the current spine
                        // pointer's location (if it's NULL, we're in the base spine)
                        while (cur_id) {
                            spine_element_t oldval = *cur_id;
                            spine_element_t newcount;
                            spine_element_t tmp = oldval;
                            uint32_t count;
                            do {
                                oldval = tmp;
                                count = SPINE_PTR_COUNT(oldval) - 1;
                                if (count == 0) {
                                    newcount.e = NULL;
                                } else {
                                    newcount.s.id = oldval.s.id;
                                    newcount.s.ctr = SPINE_COUNT(count);
                                }
                                tmp.u = CAS(&(cur_id->u), oldval.u, newcount.u);
                            } while (tmp.u != oldval.u);
                            if (count == 0) {
                                spine_t *s = SPINE_PTR(h, oldval);
                                assert(s);
                                cur_id = s->parent;
                                deallocate_spine(h, oldval.s.id);
                            } else {
                                break;
                            }
                        }
                        INCR(&(h->count), -1);
                        return 1;
                    }
                }
                prev = &(e->next);
                e = e->next;
            } while (e != NULL);
            return 0;
        } else {
            return 0;
        }
    } while (1);
}