/*
  Release all storage owned by a lock-free dynamic array.

  Hands each per-level pointer to recursive_free() together with its
  level number, then destroys the array's rwlock.  The LF_DYNARRAY
  struct itself is not freed here; that is the caller's responsibility.
*/
void lf_dynarray_destroy(LF_DYNARRAY *array)
{
  int level= 0;
  while (level < LF_DYNARRAY_LEVELS)
  {
    recursive_free(array->level[level], level);
    level++;
  }
  my_atomic_rwlock_destroy(&array->lock);
}
/*
  Driver for the 32-bit atomic-operation stress tests.

  plan(4): one ok() for my_atomic_initialize() plus the three
  concurrent tests below — presumably test_concurrently() reports
  exactly one ok() each; verify against its definition.

  Uses file-scope globals: bad (init result), rwl (the rwlock the
  workers use), and the shared counters b32/c32 which are reset to
  zero before each concurrent test so results do not leak between
  them.
*/
void do_tests()
{
  plan(4);
  bad= my_atomic_initialize();
  ok(!bad, "my_atomic_initialize() returned %d", bad);
  my_atomic_rwlock_init(&rwl);
  /* reset shared counters before each concurrent test */
  b32= c32= 0;
  test_concurrently("my_atomic_add32", test_atomic_add, THREADS, CYCLES);
  b32= c32= 0;
  test_concurrently("my_atomic_fas32", test_atomic_fas, THREADS, CYCLES);
  b32= c32= 0;
  test_concurrently("my_atomic_cas32", test_atomic_cas, THREADS, CYCLES);
  my_atomic_rwlock_destroy(&rwl);
}
/*
  Driver for the atomic-operation stress tests, 32-bit and 64-bit.

  plan(6): one ok() for my_atomic_initialize(), the three 32-bit
  concurrent tests, the single-threaded add64 sanity check, and the
  64-bit concurrent test — presumably test_concurrently() reports
  exactly one ok() each; verify against its definition.

  Uses file-scope globals: bad (init result), rwl (the rwlock the
  workers use), shared counters b32/c32, and the 64-bit accumulator
  a64; each is reset before the test that uses it.
*/
void do_tests()
{
  plan(6);
  bad= my_atomic_initialize();
  ok(!bad, "my_atomic_initialize() returned %d", bad);
  my_atomic_rwlock_init(&rwl);
  /* reset shared counters before each concurrent test */
  b32= c32= 0;
  test_concurrently("my_atomic_add32", test_atomic_add, THREADS, CYCLES);
  b32= c32= 0;
  test_concurrently("my_atomic_fas32", test_atomic_fas, THREADS, CYCLES);
  b32= c32= 0;
  test_concurrently("my_atomic_cas32", test_atomic_cas, THREADS, CYCLES);
  {
    /*
      If b is not volatile, the wrong assembly code is generated on OSX Lion
      as the variable is optimized away as a constant.
      See Bug#62533 / Bug#13030056.
      Another workaround is to specify architecture explicitly using e.g.
      CFLAGS/CXXFLAGS= "-m64".
    */
    volatile int64 b=0x1000200030004000LL;
    a64=0;
    my_atomic_add64(&a64, b);
    /* single-threaded check: a64 must equal exactly the amount added */
    ok(a64==b, "add64");
  }
  a64=0;
  test_concurrently("my_atomic_add64", test_atomic_add64, THREADS, CYCLES);
  my_atomic_rwlock_destroy(&rwl);
  /*
    workaround until we know why it crashes randomly on some machine
    (BUG#22320).
  */
  sleep(2);
}