/*
  DESCRIPTION
    deletes an element with the given key from the hash
    (if a hash is not unique and there're many elements with this key -
    the "first" matching element is deleted)
  RETURN
    0 - deleted
    1 - didn't (not found)
   -1 - out of memory
  NOTE
    see ldelete() for pin usage notes
*/
int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
{
  LF_SLIST * volatile *el;
  uint bucket, hashnr= calc_hash(hash, (uchar *)key, keylen);

  bucket= hashnr % hash->size;
  lf_rwlock_by_pins(pins);
  el= _lf_dynarray_lvalue(&hash->array, bucket);
  if (unlikely(!el))
  {
    /* BUG FIX: the OOM path must release the pinbox rwlock too */
    lf_rwunlock_by_pins(pins);
    return -1;
  }
  /*
    note that we still need to initialize_bucket here,
    we cannot return "node not found", because an old bucket of that
    node may've been split and the node was assigned to a new bucket
    that was never accessed before and thus is not initialized.
  */
  if (*el == NULL && unlikely(initialize_bucket(hash, el, bucket, pins)))
  {
    /* BUG FIX: likewise, don't leave the rwlock held on failure */
    lf_rwunlock_by_pins(pins);
    return -1;
  }
  if (ldelete(el, hash->charset, my_reverse_bits(hashnr) | 1,
              (uchar *)key, keylen, pins))
  {
    lf_rwunlock_by_pins(pins);
    return 1;
  }
  my_atomic_add32(&hash->count, -1);
  lf_rwunlock_by_pins(pins);
  return 0;
}
/*
  Allocate and return a new object.

  DESCRIPTION
    Pop an unused object from the allocator's free stack, or malloc one
    if the stack is empty. pin[0] is used while popping; it's removed
    on return.

  RETURN
    pointer to the object, or NULL on out-of-memory.
*/
void *_lf_alloc_new(LF_PINS *pins)
{
  LF_ALLOCATOR *allocator= (LF_ALLOCATOR *)(pins->pinbox->free_func_arg);
  uchar *node;
  for (;;)
  {
    /* pin the current top so it can't be freed under us, then re-check */
    do
    {
      node= allocator->top;
      _lf_pin(pins, 0, node);
    } while (node != allocator->top && LF_BACKOFF);
    if (!node)
    {
      node= (void *)my_malloc(allocator->element_size, MYF(MY_WME));
      /*
        BUG FIX: only run the constructor (and debug accounting) when
        malloc actually succeeded - the original called
        allocator->constructor(node) even when node was NULL.
      */
      if (likely(node != 0))
      {
        if (allocator->constructor)
          allocator->constructor(node);
#ifdef MY_LF_EXTRA_DEBUG
        my_atomic_add32(&allocator->mallocs, 1);
#endif
      }
      break;
    }
    if (my_atomic_casptr((void **)(char *)&allocator->top,
                         (void *)&node, anext_node(node)))
      break;
  }
  _lf_unpin(pins, 0);
  return node;
}
/*
  DESCRIPTION
    inserts a new element to a hash. it will have a _copy_ of
    data, not a pointer to it.
  RETURN
    0 - inserted
    1 - didn't (unique key conflict)
   -1 - out of memory
  NOTE
    see linsert() for pin usage notes
*/
int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
{
  int csize, bucket, hashnr;
  LF_SLIST *node, * volatile *el;

  lf_rwlock_by_pins(pins);
  node= (LF_SLIST *)_lf_alloc_new(pins);
  if (unlikely(!node))
  {
    /* BUG FIX: release the rwlock on the OOM path */
    lf_rwunlock_by_pins(pins);
    return -1;
  }
  memcpy(node+1, data, hash->element_size);
  node->key= hash_key(hash, (uchar *)(node+1), &node->keylen);
  hashnr= calc_hash(hash, node->key, node->keylen);
  bucket= hashnr % hash->size;
  el= _lf_dynarray_lvalue(&hash->array, bucket);
  if (unlikely(!el))
  {
    /* BUG FIX: return the node to the allocator and unlock, don't leak */
    _lf_alloc_free(pins, node);
    lf_rwunlock_by_pins(pins);
    return -1;
  }
  if (*el == NULL && unlikely(initialize_bucket(hash, el, bucket, pins)))
  {
    /* BUG FIX: same cleanup on bucket-initialization failure */
    _lf_alloc_free(pins, node);
    lf_rwunlock_by_pins(pins);
    return -1;
  }
  node->hashnr= my_reverse_bits(hashnr) | 1; /* normal node */
  if (linsert(el, hash->charset, node, pins, hash->flags))
  {
    _lf_alloc_free(pins, node);
    lf_rwunlock_by_pins(pins);
    return 1;
  }
  csize= hash->size;
  /* grow the bucket array when the load factor exceeds MAX_LOAD */
  if ((my_atomic_add32(&hash->count, 1)+1.0) / csize > MAX_LOAD)
    my_atomic_cas32(&hash->size, &csize, csize*2);
  lf_rwunlock_by_pins(pins);
  return 0;
}
/* add and sub a random number in a loop. Must get 0 at the end */ pthread_handler_t test_atomic_add(void *arg) { int m= (*(int *)arg)/2; GCC_BUG_WORKAROUND int32 x; for (x= ((int)(intptr)(&m)); m ; m--) { x= (x*m+0x87654321) & INT_MAX32; my_atomic_rwlock_wrlock(&rwl); my_atomic_add32(&bad, x); my_atomic_rwlock_wrunlock(&rwl); my_atomic_rwlock_wrlock(&rwl); my_atomic_add32(&bad, -x); my_atomic_rwlock_wrunlock(&rwl); } pthread_mutex_lock(&mutex); if (!--running_threads) pthread_cond_signal(&cond); pthread_mutex_unlock(&mutex); return 0; }
/*
  Get pins from a pinbox. Usually called via lf_alloc_get_pins() or
  lf_hash_get_pins().

  SYNOPSYS
    pinbox -

  DESCRIPTION
    get a new LF_PINS structure from a stack of unused pins,
    or allocate a new one out of dynarray.

  RETURN
    pointer to the LF_PINS element, or 0 (NULL) when the pin array is
    exhausted or the dynarray cannot grow (out of memory).

  NOTE
    It is assumed that pins belong to a thread and are not transferable
    between threads.
*/
LF_PINS *_lf_pinbox_get_pins(LF_PINBOX *pinbox)
{
  struct st_my_thread_var *var;
  uint32 pins, next, top_ver;
  LF_PINS *el;
  /*
    We have an array of max. 64k elements.
    The highest index currently allocated is pinbox->pins_in_array.
    Freed elements are in a lifo stack, pinstack_top_ver.
    pinstack_top_ver is 32 bits; 16 low bits are the index in the
    array, to the first element of the list. 16 high bits are a version
    (every time the 16 low bits are updated, the 16 high bits are
    incremented). Versioniong prevents the ABA problem.
  */
  top_ver= pinbox->pinstack_top_ver;
  do
  {
    /* low 16 bits of top_ver == index of the stack's top element */
    if (!(pins= top_ver % LF_PINBOX_MAX_PINS))
    {
      /* the stack of free elements is empty */
      pins= my_atomic_add32((int32 volatile*) &pinbox->pins_in_array, 1)+1;
      if (unlikely(pins >= LF_PINBOX_MAX_PINS))
        return 0;
      /*
        note that the first allocated element has index 1 (pins==1).
        index 0 is reserved to mean "NULL pointer"
      */
      el= (LF_PINS *)_lf_dynarray_lvalue(&pinbox->pinarray, pins);
      if (unlikely(!el))
        return 0;
      /* freshly allocated slot - no CAS needed, leave the loop */
      break;
    }
    el= (LF_PINS *)_lf_dynarray_value(&pinbox->pinarray, pins);
    next= el->link;
    /*
      CAS pops 'pins' off the stack: the new value keeps the bumped
      version in the high 16 bits (+LF_PINBOX_MAX_PINS) and replaces the
      index with el->link (the next free element).
    */
  } while (!my_atomic_cas32((int32 volatile*) &pinbox->pinstack_top_ver,
                            (int32*) &top_ver,
                            top_ver-pins+next+LF_PINBOX_MAX_PINS));
  /*
    set el->link to the index of el in the dynarray
    (el->link has two usages:
    - if element is allocated, it's its own index
    - if element is free, it's its next element in the free stack
  */
  el->link= pins;
  el->purgatory_count= 0;
  el->pinbox= pinbox;
  var= my_thread_var;
  /*
    Threads that do not call my_thread_init() should still be
    able to use the LF_HASH.
  */
  el->stack_ends_here= (var ? & var->stack_ends_here : NULL);
  return el;
}
/* 1. generate thread number 0..N-1 from b32 2. add it to bad 3. swap thread numbers in c32 4. (optionally) one more swap to avoid 0 as a result 5. subtract result from bad must get 0 in bad at the end */ pthread_handler_t test_atomic_fas(void *arg) { int m= *(int *)arg; int32 x; my_atomic_rwlock_wrlock(&rwl); x= my_atomic_add32(&b32, 1); my_atomic_rwlock_wrunlock(&rwl); my_atomic_rwlock_wrlock(&rwl); my_atomic_add32(&bad, x); my_atomic_rwlock_wrunlock(&rwl); for (; m ; m--) { my_atomic_rwlock_wrlock(&rwl); x= my_atomic_fas32(&c32, x); my_atomic_rwlock_wrunlock(&rwl); } if (!x) { my_atomic_rwlock_wrlock(&rwl); x= my_atomic_fas32(&c32, x); my_atomic_rwlock_wrunlock(&rwl); } my_atomic_rwlock_wrlock(&rwl); my_atomic_add32(&bad, -x); my_atomic_rwlock_wrunlock(&rwl); pthread_mutex_lock(&mutex); if (!--running_threads) pthread_cond_signal(&cond); pthread_mutex_unlock(&mutex); return 0; }
/*
  DESCRIPTION
    deletes an element with the given key from the hash
    (if a hash is not unique and there're many elements with this key -
    the "first" matching element is deleted)
  RETURN
    0 - deleted
    1 - didn't (not found)
  NOTE
    see l_delete() for pin usage notes
*/
int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
{
  LF_SLIST * volatile *el;
  uint bucket, hashnr;

  hashnr= hash->hash_function(hash->charset, (uchar *)key, keylen) & INT_MAX32;
  bucket= hashnr % hash->size;

  /* hide OOM errors - if we cannot initalize a bucket, try the previous one */
  for (;;)
  {
    el= lf_dynarray_lvalue(&hash->array, bucket);
    if (el && (*el || initialize_bucket(hash, el, bucket, pins) == 0))
      break;
    if (unlikely(bucket == 0))
      return 1;             /* if there's no bucket==0, the hash is empty */
    bucket= my_clear_highest_bit(bucket);
  }

  if (l_delete(el, hash->charset, my_reverse_bits(hashnr) | 1,
               (uchar *)key, keylen, pins))
    return 1;

  my_atomic_add32(&hash->count, -1);
  return 0;
}