/*
  same as test_atomic_add, but my_atomic_add32 is emulated with
  my_atomic_cas32 - notice that the slowdown is proportional to the
  number of CPUs
*/
pthread_handler_t test_atomic_cas(void *arg)
{
  int m= (*(int *)arg)/2, ok= 0;
  GCC_BUG_WORKAROUND int32 x, y;
  for (x= ((int)(intptr)(&m)); m ; m--)
  {
    my_atomic_rwlock_wrlock(&rwl);
    y= my_atomic_load32(&bad);
    my_atomic_rwlock_wrunlock(&rwl);

    x= (x*m+0x87654321) & INT_MAX32;
    do {
      my_atomic_rwlock_wrlock(&rwl);
      ok= my_atomic_cas32(&bad, &y, (uint32)y+x);
      my_atomic_rwlock_wrunlock(&rwl);
    } while (!ok);

    do {
      my_atomic_rwlock_wrlock(&rwl);
      ok= my_atomic_cas32(&bad, &y, y-x);
      my_atomic_rwlock_wrunlock(&rwl);
    } while (!ok);
  }
  pthread_mutex_lock(&mutex);
  if (!--running_threads) pthread_cond_signal(&cond);
  pthread_mutex_unlock(&mutex);
  return 0;
}
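/*
  Illustrative sketch (not part of the original test): the retry loop
  above is the generic way to build an atomic add out of compare-and-swap.
  A stand-alone version of the same pattern, written against C11
  <stdatomic.h> rather than the my_atomic API, could look like this;
  the name emulated_add32 is hypothetical.
*/
#include <stdatomic.h>
#include <stdint.h>

static void emulated_add32(_Atomic int32_t *a, int32_t v)
{
  int32_t old= atomic_load(a);
  /*
    retry until no other thread modified *a between the load and the swap;
    on failure, atomic_compare_exchange_weak reloads 'old' for us.
    The unsigned cast mirrors the (uint32)y+x above and sidesteps
    signed-overflow UB.
  */
  while (!atomic_compare_exchange_weak(a, &old,
                                       (int32_t)((uint32_t)old + (uint32_t)v)))
  {}
}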
/* add and sub a random number in a loop. Must get 0 at the end */
pthread_handler_t test_atomic_add(void *arg)
{
  int m= (*(int *)arg)/2;
  GCC_BUG_WORKAROUND int32 x;
  for (x= ((int)(intptr)(&m)); m ; m--)
  {
    x= (x*m+0x87654321) & INT_MAX32;
    my_atomic_rwlock_wrlock(&rwl);
    my_atomic_add32(&bad, x);
    my_atomic_rwlock_wrunlock(&rwl);

    my_atomic_rwlock_wrlock(&rwl);
    my_atomic_add32(&bad, -x);
    my_atomic_rwlock_wrunlock(&rwl);
  }
  pthread_mutex_lock(&mutex);
  if (!--running_threads) pthread_cond_signal(&cond);
  pthread_mutex_unlock(&mutex);
  return 0;
}
/*
  1. generate thread number 0..N-1 from b32
  2. add it to bad
  3. swap thread numbers in c32
  4. (optionally) one more swap to avoid 0 as a result
  5. subtract result from bad

  must get 0 in bad at the end (swaps only permute the thread numbers
  between the threads and c32, and step 4 ensures the 0 ends up back in
  c32, so the values subtracted in step 5 add up to exactly the values
  added in step 2)
*/
pthread_handler_t test_atomic_fas(void *arg)
{
  int m= *(int *)arg;
  int32 x;

  my_atomic_rwlock_wrlock(&rwl);
  x= my_atomic_add32(&b32, 1);
  my_atomic_rwlock_wrunlock(&rwl);

  my_atomic_rwlock_wrlock(&rwl);
  my_atomic_add32(&bad, x);
  my_atomic_rwlock_wrunlock(&rwl);

  for (; m ; m--)
  {
    my_atomic_rwlock_wrlock(&rwl);
    x= my_atomic_fas32(&c32, x);
    my_atomic_rwlock_wrunlock(&rwl);
  }

  if (!x)
  {
    my_atomic_rwlock_wrlock(&rwl);
    x= my_atomic_fas32(&c32, x);
    my_atomic_rwlock_wrunlock(&rwl);
  }

  my_atomic_rwlock_wrlock(&rwl);
  my_atomic_add32(&bad, -x);
  my_atomic_rwlock_wrunlock(&rwl);

  pthread_mutex_lock(&mutex);
  if (!--running_threads) pthread_cond_signal(&cond);
  pthread_mutex_unlock(&mutex);
  return 0;
}
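/*
  Side note (a sketch, assuming a C11 compiler; not part of the original
  test): my_atomic_fas32 is a fetch-and-store, i.e. it stores the new
  value and returns the old one. In C11 <stdatomic.h> terms it
  corresponds to atomic_exchange; the name fas32_sketch is hypothetical.
*/
#include <stdatomic.h>
#include <stdint.h>

static int32_t fas32_sketch(_Atomic int32_t *a, int32_t v)
{
  return atomic_exchange(a, v);  /* store v, return the previous value */
}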
/* add and sub a random number in a loop. Must get 0 at the end */
pthread_handler_t test_atomic_add64(void *arg)
{
  int m= (*(int *)arg)/2;
  int64 x;
  for (x= ((int64)(intptr)(&m)); m ; m--)
  {
    x= (x*m+0xfdecba987654321LL) & INT_MAX64;
    my_atomic_rwlock_wrlock(&rwl);
    my_atomic_add64(&a64, x);
    my_atomic_rwlock_wrunlock(&rwl);

    my_atomic_rwlock_wrlock(&rwl);
    my_atomic_add64(&a64, -x);
    my_atomic_rwlock_wrunlock(&rwl);
  }
  pthread_mutex_lock(&mutex);
  if (!--running_threads)
  {
    bad= (a64 != 0);
    pthread_cond_signal(&cond);
  }
  pthread_mutex_unlock(&mutex);
  return 0;
}
/*
  Put pins back to a pinbox. Usually called via lf_alloc_put_pins()
  or lf_hash_put_pins().

  DESCRIPTION
    empty the purgatory (XXX deadlock warning below!),
    push LF_PINS structure to a stack
*/
void _lf_pinbox_put_pins(LF_PINS *pins)
{
  LF_PINBOX *pinbox= pins->pinbox;
  uint32 top_ver, nr;
  nr= pins->link;

#ifndef DBUG_OFF
  {
    /* This thread should not hold any pin. */
    int i;
    for (i= 0; i < LF_PINBOX_PINS; i++)
      DBUG_ASSERT(pins->pin[i] == 0);
  }
#endif /* DBUG_OFF */

  /*
    XXX this will deadlock if other threads wait for the caller to do
    something after _lf_pinbox_put_pins() while holding pinned addresses
    that the caller wants to free. Thus: only free pins when all work
    is done and nobody can wait for you!!!
  */
  while (pins->purgatory_count)
  {
    _lf_pinbox_real_free(pins);
    if (pins->purgatory_count)
    {
      my_atomic_rwlock_wrunlock(&pins->pinbox->pinarray.lock);
      pthread_yield();
      my_atomic_rwlock_wrlock(&pins->pinbox->pinarray.lock);
    }
  }
  top_ver= pinbox->pinstack_top_ver;
  do
  {
    pins->link= top_ver % LF_PINBOX_MAX_PINS;
  } while (!my_atomic_cas32((int32 volatile*) &pinbox->pinstack_top_ver,
                            (int32*) &top_ver,
                            top_ver-pins->link+nr+LF_PINBOX_MAX_PINS));
  return;
}
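/*
  Usage sketch (illustrative, not from this file), assuming the
  lf_hash_get_pins()/lf_hash_put_pins() wrappers from lf.h: pins are
  returned strictly after all lock-free work is finished, so no other
  thread can be waiting on the caller while the purgatory is drained -
  otherwise the loop above can deadlock, as the XXX note warns.
  The name hash_worker is hypothetical.
*/
#include <lf.h>

static void hash_worker(LF_HASH *hash)
{
  LF_PINS *pins= lf_hash_get_pins(hash);  /* attach pins to this thread */
  /* ... all searches/inserts/deletes that pin or free elements ... */
  lf_hash_put_pins(pins);                 /* last step: nothing may wait
                                             on this thread past here */
}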