void abstraction_test_cas ( void) { unsigned int loop, cpu_count; thread_state_t *thread_handles; struct abstraction_test_cas_state *atcs; LFDS611_ALIGN (LFDS611_ALIGN_SINGLE_POINTER) volatile lfds611_atom_t shared_counter; lfds611_atom_t local_total = 0; // TRD : number_logical_processors can be any value in its range /* * TRD : here we test lfds611_abstraction_cas * * we run one thread per CPU * we use lfds611_abstraction_cas() to increment a shared counter * every time a thread successfully increments the counter, * it increments a thread local counter * the threads run for ten seconds * after the threads finish, we total the local counters * they should equal the shared counter */ internal_display_test_name ("Atomic CAS"); cpu_count = abstraction_cpu_count (); shared_counter = 0; LFDS611_BARRIER_STORE; atcs = malloc (sizeof (struct abstraction_test_cas_state) * cpu_count); for (loop = 0; loop < cpu_count; loop++) { (atcs + loop)->shared_counter = &shared_counter; (atcs + loop)->local_counter = 0; } thread_handles = malloc (sizeof (thread_state_t) * cpu_count); for (loop = 0; loop < cpu_count; loop++) abstraction_thread_start (&thread_handles[loop], loop, abstraction_test_internal_thread_cas, atcs + loop); for (loop = 0; loop < cpu_count; loop++) abstraction_thread_wait (thread_handles[loop]); free (thread_handles); // TRD : results for (loop = 0; loop < cpu_count; loop++) local_total += (atcs + loop)->local_counter; if (local_total == shared_counter) puts ("passed"); if (local_total != shared_counter) puts ("failed"); // TRD : cleanup free (atcs); return; }
void test_slist_delete_all_elements( void ) { struct lfds611_slist_state *ss; struct lfds611_slist_element *se = NULL; size_t element_count = 0; unsigned int loop; enum lfds611_data_structure_validity dvs = LFDS611_VALIDITY_VALID; /* TRD : this test creates a list of 100,000 elements then simply calls delete_all_elements() we then count the number of elements remaining should be zero :-) */ internal_display_test_name( "Delete all elements" ); lfds611_slist_new( &ss, NULL, NULL ); for( loop = 0 ; loop < 1000000 ; loop++ ) lfds611_slist_new_head( ss, NULL ); lfds611_slist_single_threaded_physically_delete_all_elements( ss ); while( NULL != lfds611_slist_get_head_and_then_next(ss, &se) ) element_count++; if( element_count != 0 ) dvs = LFDS611_VALIDITY_INVALID_TEST_DATA; lfds611_slist_delete( ss ); internal_display_test_result( 1, "slist", dvs ); return; }
void test_lfds700_queue_enqueuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID;
  lfds700_pal_uint_t *per_thread_counters, loop, number_elements, number_logical_processors;
  struct lfds700_list_asu_element *lasue;
  struct lfds700_misc_prng_state ps;
  struct lfds700_queue_element dummy_qe, *qe;
  struct lfds700_queue_state qs;
  struct lfds700_misc_validation_info vi;
  struct test_pal_logical_processor *lp;
  struct util_thread_starter_state *tts;
  struct test_element *te;
  struct test_state *ts;
  test_pal_thread_state_t *thread_handles;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : create an empty queue
           then run one thread per CPU
           where each thread enqueues number_elements elements
           each element's void pointer of user data is a struct containing
           thread number and element number, where element_number is a
           thread-local counter starting at 0
           when we're done, we check that all the elements are present
           and that element numbers increment on a per-thread basis
  */

  internal_display_test_name( "Enqueuing" );

  // TRD : one thread will be started per logical processor in the supplied list
  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_misc_prng_init( &ps );

  // TRD : size the per-thread workload so all test elements fit the memory budget
  number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );

  lfds700_queue_init_valid_on_current_logical_core( &qs, &dummy_qe, &ps, NULL );

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
  {
    (ts+loop)->qs = &qs;
    (ts+loop)->thread_number = loop;
    (ts+loop)->number_elements = number_elements;
  }

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : publish the stores above before the threads begin
  LFDS700_MISC_BARRIER_STORE;

  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_simple_enqueuer, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  LFDS700_MISC_BARRIER_LOAD;

  /* TRD : first, validate the queue
           then dequeue
           we expect to find element numbers increment on a per thread basis
  */

  vi.min_elements = vi.max_elements = number_elements * number_logical_processors;

  lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );

  per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    *(per_thread_counters+loop) = 0;

  // TRD : drain the queue; each thread's counters must appear in exact increments of one
  while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_queue_dequeue(&qs, &qe, &ps) )
  {
    te = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );

    if( te->thread_number >= number_logical_processors )
    {
      dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    if( te->counter > per_thread_counters[te->thread_number] )
      dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;

    if( te->counter < per_thread_counters[te->thread_number] )
      dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( te->counter == per_thread_counters[te->thread_number] )
      per_thread_counters[te->thread_number]++;
  }

  free( per_thread_counters );

  // NOTE(review): te_array is presumably allocated inside thread_simple_enqueuer — confirm against the thread function
  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    util_aligned_free( (ts+loop)->te_array );

  free( ts );

  lfds700_queue_cleanup( &qs, NULL );

  internal_display_test_result( 1, "queue", dvs );

  return;
}
void abstraction_test_dcas( void ) { unsigned int loop, cpu_count; thread_state_t *thread_handles; struct abstraction_test_dcas_state *atds; LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) volatile lfds601_atom_t shared_counter[2] = { 0, 0 }; lfds601_atom_t local_total = 0; /* TRD : here we test lfds601_abstraction_dcas we run one thread per CPU we use lfds601_abstraction_dcas() to increment a shared counter every time a thread successfully increments the counter, it increments a thread local counter the threads run for ten seconds after the threads finish, we total the local counters they should equal the shared counter */ internal_display_test_name( "Atomic DCAS" ); cpu_count = abstraction_cpu_count(); atds = malloc( sizeof(struct abstraction_test_dcas_state) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) { (atds+loop)->shared_counter = shared_counter; (atds+loop)->local_counter = 0; } thread_handles = malloc( sizeof(thread_state_t) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_dcas, atds+loop ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_wait( thread_handles[loop] ); free( thread_handles ); // TRD : results for( loop = 0 ; loop < cpu_count ; loop++ ) local_total += (atds+loop)->local_counter; if( local_total == shared_counter[0] ) puts( "passed" ); if( local_total != shared_counter[0] ) puts( "failed" ); // TRD : cleanup free( atds ); return; }
void queue_test_rapid_enqueuing_and_dequeuing( void )
{
  unsigned int loop, cpu_count;
  thread_state_t *thread_handles;
  struct lfds611_queue_state *qs;
  struct queue_test_rapid_enqueuing_and_dequeuing_state *qtreds;
  struct lfds611_validation_info vi = { 50000, 50000 };
  lfds611_atom_t user_data, thread, count, *per_thread_counters;
  enum lfds611_data_structure_validity dvs[2];

  /* TRD : half-fill a 100,000 element queue with 50,000 elements,
           then run one thread per CPU for ten seconds, each rapidly
           dequeuing and re-enqueuing

           user data packs the thread number into the top byte and a
           per-thread incrementing counter into the remaining bytes

           on completion, validate the queue and check each thread's
           counters only ever increase
  */

  internal_display_test_name( "Rapid enqueuing and dequeuing (10 seconds)" );

  cpu_count = abstraction_cpu_count();

  lfds611_queue_new( &qs, 100000 );

  for( loop = 0 ; loop < 50000 ; loop++ )
    lfds611_queue_enqueue( qs, NULL );

  qtreds = malloc( sizeof(struct queue_test_rapid_enqueuing_and_dequeuing_state) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (qtreds+loop)->qs = qs;
    // TRD : thread number in the top byte, counter in the low bytes
    (qtreds+loop)->counter = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_rapid_enqueuer_and_dequeuer, qtreds+loop );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  // TRD : dvs[0] is the queue, dvs[1] is the queue's internal freelist
  lfds611_queue_query( qs, LFDS611_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );

  // TRD : now check results

  per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    *(per_thread_counters+loop) = 0;

  while( dvs[0] == LFDS611_VALIDITY_VALID and dvs[1] == LFDS611_VALIDITY_VALID and lfds611_queue_dequeue(qs, (void *) &user_data) )
  {
    // TRD : unpack thread number (top byte) and counter (remaining bytes)
    thread = user_data >> (sizeof(lfds611_atom_t)*8-8);
    count = (user_data << 8) >> 8;

    if( thread >= cpu_count )
    {
      dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    // TRD : first sighting of a thread establishes its baseline counter
    if( per_thread_counters[thread] == 0 )
      per_thread_counters[thread] = count;

    // TRD : a counter below the expected value means an element came around twice
    if( count < per_thread_counters[thread] )
      dvs[0] = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( count >= per_thread_counters[thread] )
      per_thread_counters[thread] = count+1;
  }

  free( per_thread_counters );

  free( qtreds );

  lfds611_queue_delete( qs, NULL, NULL );

  internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );

  return;
}
void freelist_test_internal_pushing( void ) { unsigned int loop, cpu_count; thread_state_t *thread_handles; enum data_structure_validity dvs; struct freelist_test_pushing_state *ftps; struct freelist_element *fe; struct freelist_state *fs, *cleanup_fs; struct freelist_test_counter_and_thread_number *cnt, *counter_and_number_trackers; struct validation_info vi = { 1000000, 1000000 }; /* TRD : we create an empty freelist, which we will push to we then create one freelist per CPU, where this freelist contains 1,000,000/cpu_count number of elements and each element is an incrementing counter and unique ID (from 0 to number of CPUs) we then start one thread per CPU, where each thread is given one of the populated freelists and pops from that to push to the empty freelist the reason for this is to achieve memory pre-allocation which allows the pushing threads to run at maximum speed the threads end when their freelists are empty we then fully pop the now populated main freelist (onto a second freelist, so we can cleanly free all memory), checking that the counts increment on a per unique ID basis and that the number of elements we pop equals 1,000,000 (since each element has an incrementing counter which is unique on a per unique ID basis, we can know we didn't lose any elements) */ internal_display_test_name( "Pushing" ); cpu_count = abstraction_cpu_count(); ftps = malloc( sizeof(struct freelist_test_pushing_state) * cpu_count ); freelist_new( &fs, 0, NULL, NULL ); for( loop = 0 ; loop < cpu_count ; loop++ ) { (ftps+loop)->thread_number = (atom_t) loop; freelist_new( &(ftps+loop)->source_fs, 1000000 / cpu_count, freelist_test_internal_pushing_init, (void *) (atom_t) loop ); (ftps+loop)->fs = fs; } thread_handles = malloc( sizeof(thread_state_t) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_pushing, ftps+loop ); for( loop = 0 ; loop < cpu_count ; loop++ ) 
abstraction_thread_wait( thread_handles[loop] ); free( thread_handles ); // TRD : now fully pop and verify the main freelist freelist_new( &cleanup_fs, 0, NULL, NULL ); counter_and_number_trackers = malloc( sizeof(struct freelist_test_counter_and_thread_number) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) { (counter_and_number_trackers+loop)->counter = (1000000 / cpu_count) * loop; (counter_and_number_trackers+loop)->thread_number = (atom_t) loop; } freelist_query( fs, FREELIST_QUERY_VALIDATE, &vi, (void *) &dvs ); while( dvs == VALIDITY_VALID and freelist_pop(fs, &fe) ) { static int count = 0; freelist_get_user_data_from_element( fe, (void **) &cnt ); if( cnt->counter != (counter_and_number_trackers+cnt->thread_number)->counter++ ) dvs = VALIDITY_INVALID_MISSING_ELEMENTS; freelist_push( cleanup_fs, fe ); count++; } // TRD : clean up free( counter_and_number_trackers ); for( loop = 0 ; loop < cpu_count ; loop++ ) freelist_delete( (ftps+loop)->source_fs, NULL, NULL ); free( ftps ); freelist_delete( cleanup_fs, freelist_test_internal_pushing_delete, NULL ); freelist_delete( fs, NULL, NULL ); // TRD : print the test result internal_display_test_result( 1, "freelist", dvs ); return; }
void test_lfds700_stack_popping( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID;
  lfds700_pal_uint_t loop, number_elements, number_logical_processors;
  struct lfds700_misc_prng_state ps;
  struct lfds700_list_asu_element *lasue;
  struct lfds700_stack_state ss;
  struct lfds700_misc_validation_info vi = { 0, 0 };
  struct test_pal_logical_processor *lp;
  struct util_thread_starter_state *tts;
  struct test_element *te_array;
  struct test_state *ts;
  test_pal_thread_state_t *thread_handles;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : we create a stack

           we then populate the stack with as many elements as fit the
           memory budget; each element's void pointer of data points to
           the containing test element

           we then run one thread per CPU, where each thread loops,
           popping as quickly as possible; upon popping, a flag is set
           in the containing test element

           the threads run till the source stack is empty

           we then check the popped flag; all should be raised

           then tidy up

           no CAS+GC code, as we only pop
  */

  internal_display_test_name( "Popping" );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_misc_prng_init( &ps );

  lfds700_stack_init_valid_on_current_logical_core( &ss, NULL );

  number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct test_element);

  te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  for( loop = 0 ; loop < number_elements ; loop++ )
  {
    (te_array+loop)->popped_flag = LOWERED;
    LFDS700_STACK_SET_VALUE_IN_ELEMENT( (te_array+loop)->se, te_array+loop );
    lfds700_stack_push( &ss, &(te_array+loop)->se, &ps );
  }

  ts = util_aligned_malloc( sizeof(struct test_state) * number_logical_processors, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    (ts+loop)->ss = &ss;

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : publish the populated stack before any popping thread starts
  LFDS700_MISC_BARRIER_STORE;

  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_popping, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  LFDS700_MISC_BARRIER_LOAD;

  // TRD : vi is { 0, 0 } - every element should have been popped, so the stack must be empty
  lfds700_stack_query( &ss, LFDS700_STACK_QUERY_SINGLETHREADED_VALIDATE, &vi, (void *) &dvs );

  // TRD : now we check each element has popped_flag set to RAISED
  for( loop = 0 ; loop < number_elements ; loop++ )
    if( (te_array+loop)->popped_flag == LOWERED )
      dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;

  // TRD : cleanup
  lfds700_stack_cleanup( &ss, NULL );
  util_aligned_free( te_array );
  util_aligned_free( ts );

  // TRD : print the test result
  internal_display_test_result( 1, "stack", dvs );

  return;
}
void test_lfds700_btree_au_random_adds_overwrite_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID;
  lfds700_pal_uint_t actual_sum_insert_existing_count, expected_sum_insert_existing_count, index = 0, *key_count_array, loop, number_elements, number_logical_processors, random_value, subloop;
  struct lfds700_list_asu_element *lasue;
  struct lfds700_btree_au_element *baue = NULL;
  struct lfds700_btree_au_state baus;
  struct lfds700_misc_prng_state ps;
  struct lfds700_misc_validation_info vi;
  struct test_pal_logical_processor *lp;
  struct util_thread_starter_state *tts;
  struct test_state *ts;
  test_pal_thread_state_t *thread_handles;
  void *key;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : we create a single btree_au

           we generate number_elements elements per thread (one thread per
           logical processor) in an array; we set a random key in each
           element, drawn from 0 to number_elements/2, so we must have some
           duplicates (we don't use value, so we always pass in NULL for that
           when we insert)

           each thread loops, adds those elements into the btree, and counts
           the total number of insert fails (we don't count on a per-value
           basis because of the performance hit - we'd be TLBing all the time)

           this test has the btree_au set to overwrite on add, so duplicates
           should be eliminated

           we then merge the per-thread arrays; we should find in the tree one
           of every value, and the sum of the counts of each value (beyond the
           first, which was inserted) in the merged arrays should equal the sum
           of the existing_baues returned from each thread when they inserted
           and found an existing element

           we check the count of unique values in the merged array and use
           that when calling the btree_au validation function

           we in-order walk and check that what we have in the tree matches
           what we have in the merged array, and then check the fail counts
  */

  internal_display_test_name( "Random adds and walking (overwrite on existing key)" );

  lfds700_misc_prng_init( &ps );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS700_BTREE_AU_EXISTING_KEY_OVERWRITE, NULL );

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
  {
    (ts+loop)->baus = &baus;
    (ts+loop)->element_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
    (ts+loop)->number_elements = number_elements;
    (ts+loop)->insert_existing_count = 0;

    for( subloop = 0 ; subloop < number_elements ; subloop++ )
    {
      random_value = LFDS700_MISC_PRNG_GENERATE( &ps );
      // TRD : scale the raw PRNG value into [0, number_elements/2] so key collisions are guaranteed
      ((ts+loop)->element_array+subloop)->key = (lfds700_pal_uint_t) floor( (number_elements/2) * ((double) random_value / (double) LFDS700_MISC_PRNG_MAX) );
    }
  }

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : publish all the stores above before the adding threads begin
  LFDS700_MISC_BARRIER_STORE;

  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_adding, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  LFDS700_MISC_BARRIER_LOAD;

  /* TRD : now for validation
           make an array equal to number_elements, set all to 0
           iterate over every per-thread array, counting the number of each
           value into this array, so we can know how many elements ought to
           have failed to be inserted, as well as being able to work out the
           actual number of elements which should be present in the btree,
           for the btree validation call
  */

  key_count_array = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_elements );

  for( loop = 0 ; loop < number_elements ; loop++ )
    *(key_count_array+loop) = 0;

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    for( subloop = 0 ; subloop < number_elements ; subloop++ )
      ( *(key_count_array+( (ts+loop)->element_array+subloop)->key) )++;

  // TRD : first, btree validation function

  // TRD : expected tree population = number of distinct keys actually generated
  vi.min_elements = number_elements;

  for( loop = 0 ; loop < number_elements ; loop++ )
    if( *(key_count_array+loop) == 0 )
      vi.min_elements--;

  vi.max_elements = vi.min_elements;

  lfds700_btree_au_query( &baus, LFDS700_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) &dvs );

  /* TRD : now check the sum of per-thread insert failures is what it
           should be, which is the sum of key_count_array, but with every
           count minus one (for the single successful insert) and where
           elements of 0 are ignored (i.e. do not have -1 applied)
  */

  expected_sum_insert_existing_count = 0;

  for( loop = 0 ; loop < number_elements ; loop++ )
    if( *(key_count_array+loop) != 0 )
      expected_sum_insert_existing_count += *(key_count_array+loop) - 1;

  actual_sum_insert_existing_count = 0;

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    actual_sum_insert_existing_count += (ts+loop)->insert_existing_count;

  if( expected_sum_insert_existing_count != actual_sum_insert_existing_count )
    dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;

  /* TRD : now compare the combined array and an in-order walk of the tree
           ignoring array elements with the value 0, we should find an exact match
  */

  if( dvs == LFDS700_MISC_VALIDITY_VALID )
  {
    // TRD : in-order walk over btree_au and check key_count_array matches
    while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position(&baus, &baue, LFDS700_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
    {
      key = LFDS700_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );

      // TRD : skip keys which were never generated; the next nonzero count must match the tree's next key
      while( *(key_count_array+index) == 0 )
        index++;

      if( index++ != (lfds700_pal_uint_t) key )
        dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
    }
  }

  // TRD : cleanup
  free( key_count_array );

  lfds700_btree_au_cleanup( &baus, NULL );

  // TRD : cleanup
  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    util_aligned_free( (ts+loop)->element_array );

  free( ts );

  // TRD : print the test result
  internal_display_test_result( 1, "btree_au", dvs );

  return;
}
void test_lfds700_queue_rapid_enqueuing_and_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID;
  lfds700_pal_uint_t loop, number_elements_with_dummy_element, number_elements_without_dummy_element, number_logical_processors, *per_thread_counters;
  struct lfds700_list_asu_element *lasue;
  struct lfds700_misc_prng_state ps;
  struct lfds700_queue_element *qe;
  struct lfds700_misc_validation_info vi;
  struct lfds700_queue_state qs;
  struct test_pal_logical_processor *lp;
  struct util_thread_starter_state *tts;
  struct test_element *te_array, *te;
  struct test_state *ts;
  test_pal_thread_state_t *thread_handles;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : we create a single queue, pre-filled with a bounded number of
           elements; we don't want too many elements, so we ensure plenty
           of element re-use

           each thread simply loops dequeuing and enqueuing, where the user
           data indicates thread number and an incrementing counter

           verification is that the counter increments on a per-thread basis
  */

  internal_display_test_name( "Rapid enqueuing and dequeuing (%d seconds)", TEST_DURATION_IN_SECONDS );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_misc_prng_init( &ps );

  number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct test_element);

  // TRD : cap at 10,000 elements per thread, plus one dummy element
  if( number_elements_with_dummy_element > (10000 * number_logical_processors) + 1 )
    number_elements_with_dummy_element = (10000 * number_logical_processors) + 1;

  number_elements_without_dummy_element = number_elements_with_dummy_element - 1;

  vi.min_elements = number_elements_without_dummy_element;
  vi.max_elements = number_elements_without_dummy_element;

  te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  // TRD : the final array element serves as the queue's dummy element
  lfds700_queue_init_valid_on_current_logical_core( &qs, &(te_array+number_elements_without_dummy_element)->qe, &ps, NULL );

  // TRD : we assume the test will iterate at least once (or we'll have a false negative)
  for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
  {
    (te_array+loop)->thread_number = loop;
    (te_array+loop)->counter = 0;
    LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( (te_array+loop)->qe, te_array+loop );
    lfds700_queue_enqueue( &qs, &(te_array+loop)->qe, &ps );
  }

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
  {
    (ts+loop)->qs = &qs;
    (ts+loop)->thread_number = loop;
    (ts+loop)->counter = 0;
  }

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : publish the populated queue before the worker threads begin
  LFDS700_MISC_BARRIER_STORE;

  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_rapid_enqueuer_and_dequeuer, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  LFDS700_MISC_BARRIER_LOAD;

  lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );

  // TRD : now check results

  per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    *(per_thread_counters+loop) = 0;

  while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_queue_dequeue(&qs, &qe, &ps) )
  {
    te = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );

    if( te->thread_number >= number_logical_processors )
    {
      dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    // TRD : first sighting of a thread establishes its baseline counter
    if( per_thread_counters[te->thread_number] == 0 )
      per_thread_counters[te->thread_number] = te->counter;

    if( te->counter > per_thread_counters[te->thread_number] )
      dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;

    if( te->counter < per_thread_counters[te->thread_number] )
      dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( te->counter == per_thread_counters[te->thread_number] )
      per_thread_counters[te->thread_number]++;
  }

  free( per_thread_counters );

  lfds700_queue_cleanup( &qs, NULL );

  util_aligned_free( te_array );

  free( ts );

  internal_display_test_result( 1, "queue", dvs );

  return;
}
void test_lfds700_ringbuffer_writing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity dvs[2] = { LFDS700_MISC_VALIDITY_VALID, LFDS700_MISC_VALIDITY_VALID };
  lfds700_pal_uint_t loop, number_elements_with_dummy_element, number_elements_without_dummy_element, number_logical_processors, *per_thread_counters;
  test_pal_thread_state_t *thread_handles;
  struct lfds700_list_asu_element *lasue;
  struct lfds700_misc_prng_state ps;
  struct lfds700_ringbuffer_element *re_array;
  struct lfds700_ringbuffer_state rs;
  struct lfds700_misc_validation_info vi;
  struct test_pal_logical_processor *lp;
  struct util_thread_starter_state *tts;
  struct test_element *te, *te_array;
  struct test_state *ts;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : we create a single ringbuffer with n elements

           we create n test elements, which are thread_number/counter pairs,
           init them to safe values and fully populate the ringbuffer

           we create one thread per CPU, where each thread busy-works
           writing for ten seconds; each thread has one extra element which
           it uses for the first write and after that it uses the element it
           picks up from overwriting

           the user data in each written element is a combination of the
           thread number and the counter

           after the threads are complete, we validate by checking the user
           data counters increment on a per-thread basis
  */

  internal_display_test_name( "Writing (%d seconds)", TEST_DURATION_IN_SECONDS );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_misc_prng_init( &ps );

  // TRD : each logical element costs one test_element plus one ringbuffer_element
  number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) + sizeof(struct lfds700_ringbuffer_element) );
  number_elements_without_dummy_element = number_elements_with_dummy_element - 1;

  vi.min_elements = number_elements_without_dummy_element;
  vi.max_elements = number_elements_without_dummy_element;

  re_array = util_aligned_malloc( sizeof(struct lfds700_ringbuffer_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  lfds700_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, number_elements_with_dummy_element, &ps, NULL );

  // TRD : bug fix - this allocation previously used sizeof(struct lfds700_ringbuffer_element);
  //       te_array holds test_elements, so it must be sized by sizeof(struct test_element),
  //       otherwise the init loop below can write past the end of the allocation
  te_array = util_malloc_wrapper( sizeof(struct test_element) * number_elements_without_dummy_element );

  // TRD : init the test elements and write them into the ringbuffer
  for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
  {
    te_array[loop].thread_number = 0;
    te_array[loop].datum = 0;
    lfds700_ringbuffer_write( &rs, NULL, &te_array[loop], NULL, NULL, NULL, &ps );
  }

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
  {
    (ts+loop)->rs = &rs;
    (ts+loop)->thread_number = loop;
    (ts+loop)->write_count = 0;
  }

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : publish the populated ringbuffer before the writer threads begin
  LFDS700_MISC_BARRIER_STORE;

  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_simple_writer, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  LFDS700_MISC_BARRIER_LOAD;

  // TRD : now check results

  per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    *(per_thread_counters+loop) = 0;

  lfds700_ringbuffer_query( &rs, LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );

  while( dvs[0] == LFDS700_MISC_VALIDITY_VALID and dvs[1] == LFDS700_MISC_VALIDITY_VALID and lfds700_ringbuffer_read(&rs, NULL, (void **) &te, &ps) )
  {
    if( te->thread_number >= number_logical_processors )
    {
      dvs[0] = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    // TRD : first sighting of a thread establishes its baseline counter
    if( per_thread_counters[te->thread_number] == 0 )
      per_thread_counters[te->thread_number] = te->datum;

    if( te->datum < per_thread_counters[te->thread_number] )
      dvs[0] = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( te->datum >= per_thread_counters[te->thread_number] )
      per_thread_counters[te->thread_number] = te->datum+1;
  }

  free( per_thread_counters );

  lfds700_ringbuffer_cleanup( &rs, NULL );

  free( ts );

  util_aligned_free( re_array );

  free( te_array );

  // TRD : the ringbuffer is built on a queue and a freelist, hence two results
  internal_display_test_result( 2, "queue", dvs[0], "freelist", dvs[1] );

  return;
}
/* TRD : dequeue stress test

         a single thread enqueues as many elements as memory_in_megabytes
         allows (one is consumed as the queue's dummy element), each element
         carrying an incrementing counter as its value

         one thread per logical processor then busy-works dequeuing; after
         the threads finish, this (single-threaded) function validates the
         queue and re-dequeues nothing further - validation of ordering is
         done in the tail of the function which precedes this one in the file

         note there is no CAS+GC vs DWCAS variation here; the test only
         dequeues, so the queue itself - not CAS - is what is stressed
*/

void test_lfds700_queue_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID;

  lfds700_pal_uint_t loop, number_elements_with_dummy_element, number_elements_without_dummy_element, number_logical_processors;

  struct lfds700_list_asu_element *lasue;

  struct lfds700_misc_prng_state ps;

  struct lfds700_queue_state qs;

  // TRD : { 0, 0 } - after the dequeuer threads drain it, the queue must be empty
  struct lfds700_misc_validation_info vi = { 0, 0 };

  struct test_pal_logical_processor *lp;

  struct util_thread_starter_state *tts;

  struct test_element *te_array;

  struct test_state *ts;

  test_pal_thread_state_t *thread_handles;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : create a queue, fill it from a single thread
           each element's user data is an incrementing counter
           then run one thread per CPU where each busy-works dequeuing
           when an element is dequeued, we check (on a per-thread basis)
           the value dequeued is greater than the element previously dequeued

           note we have no variation in the test for CAS+GC vs DWCAS
           this is because all we do is dequeue
           what we actually want to stress test is the queue
           not CAS
           so it's better to let the dequeue run as fast as possible
  */

  internal_display_test_name( "Dequeuing" );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_misc_prng_init( &ps );

  // TRD : element count is bounded by the memory budget; one element becomes the dummy
  number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct test_element);
  number_elements_without_dummy_element = number_elements_with_dummy_element - 1;

  te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  // TRD : the last array element is donated to the queue as its dummy element
  lfds700_queue_init_valid_on_current_logical_core( &qs, &(te_array + number_elements_without_dummy_element)->qe, &ps, NULL );

  // TRD : single-threaded fill; value is the element's index, so dequeue order can be checked
  for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
  {
    LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( (te_array+loop)->qe, loop );
    lfds700_queue_enqueue( &qs, &(te_array+loop)->qe, &ps );
  }

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
  {
    (ts+loop)->qs = &qs;
    (ts+loop)->error_flag = LOWERED;
  }

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : make the queue and test state visible to the about-to-start threads
  LFDS700_MISC_BARRIER_STORE;
  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  // TRD : start one dequeuer thread per logical processor
  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_simple_dequeuer, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  // TRD : make the threads' stores visible to this validating thread
  LFDS700_MISC_BARRIER_LOAD;

  // TRD : check queue is empty
  lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );

  // TRD : check for raised error flags (set by a dequeuer which saw out-of-order values)
  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    if( (ts+loop)->error_flag == RAISED )
      dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;

  free( ts );

  util_aligned_free( te_array );

  lfds700_queue_cleanup( &qs, NULL );

  internal_display_test_result( 1, "queue", dvs );

  return;
}
void freelist_test_internal_rapid_popping_and_pushing( void ) { unsigned int loop, cpu_count; thread_state_t *thread_handles; struct freelist_state *fs; struct validation_info vi; enum data_structure_validity dvs; /* TRD : in these tests there is a fundamental antagonism between how much checking/memory clean up that we do and the likelyhood of collisions between threads in their lock-free operations the lock-free operations are very quick; if we do anything much at all between operations, we greatly reduce the chance of threads colliding so we have some tests which do enough checking/clean up that they can tell the freelist is valid and don't leak memory and here, this test now is one of those which does minimal checking - in fact, the nature of the test is that you can't do any real checking - but goes very quickly what we do is create a small freelist and then run one thread per CPU, where each thread simply pops and then immediately pushes the test runs for ten seconds after the test is done, the only check we do is to traverse the freelist, checking for loops and ensuring the number of elements is correct */ internal_display_test_name( "Rapid popping and pushing (10 seconds)" ); cpu_count = abstraction_cpu_count(); freelist_new( &fs, cpu_count, NULL, NULL ); thread_handles = malloc( sizeof(thread_state_t) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_rapid_popping_and_pushing, fs ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_wait( thread_handles[loop] ); free( thread_handles ); vi.min_elements = cpu_count; vi.max_elements = cpu_count; freelist_query( fs, FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs ); freelist_delete( fs, NULL, NULL ); // TRD : print the test result internal_display_test_result( 1, "freelist", dvs ); return; }
/* TRD : mixed pop/push stress test, two threads per CPU

         thread pair member one pushes 100000 elements then pops 100000
         thread pair member two pops 100000 elements then pushes 100000
         all operations target the single main freelist; on timeout each
         thread pushes whatever it holds back, after which the main
         freelist is validated for exactly 100000 * cpu_count * 2 elements
         (100000 * cpu_count created here plus 100000 per pushing thread)
*/

void freelist_test_internal_popping_and_pushing( void )
{
  unsigned int loop, cpu_count;

  thread_state_t *thread_handles;

  enum data_structure_validity dvs;

  struct freelist_state *fs;

  struct freelist_test_popping_and_pushing_state *pps;

  struct validation_info vi;

  /* TRD : we have two threads per CPU
           the threads loop for ten seconds
           the first thread pushes 100000 elements then pops 100000 elements
           the second thread pops 100000 elements then pushes 100000 elements
           all pushes and pops go onto the single main freelist
           after time is up, all threads push what they have remaining onto
           the main freelist
           we then validate the main freelist
  */

  internal_display_test_name( "Popping and pushing (10 seconds)" );

  cpu_count = abstraction_cpu_count();

  freelist_new( &fs, 100000 * cpu_count, NULL, NULL );

  // TRD : one state per thread; poppers occupy [0, cpu_count), pushers [cpu_count, cpu_count*2)
  pps = malloc( sizeof(struct freelist_test_popping_and_pushing_state) * cpu_count * 2 );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (pps+loop)->fs = fs;
    // TRD : poppers start with an empty local freelist
    freelist_new( &(pps+loop)->local_fs, 0, NULL, NULL );

    (pps+loop+cpu_count)->fs = fs;
    // TRD : pushers start with 100000 local elements to push
    freelist_new( &(pps+loop+cpu_count)->local_fs, 100000, NULL, NULL );
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping_and_pushing_start_popping, pps+loop );
    abstraction_thread_start( &thread_handles[loop+cpu_count], loop, freelist_test_internal_thread_popping_and_pushing_start_pushing, pps+loop+cpu_count );
  }

  for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  // TRD : local freelists are empty by now (threads pushed everything back); delete them
  for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
    freelist_delete( (pps+loop)->local_fs, NULL, NULL );

  free( pps );

  // TRD : main freelist now holds everything: its own 100000*cpu_count plus the pushers' 100000*cpu_count
  vi.min_elements = vi.max_elements = 100000 * cpu_count * 2;

  freelist_query( fs, FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );

  freelist_delete( fs, NULL, NULL );

  // TRD : print the test result
  internal_display_test_result( 1, "freelist", dvs );

  return;
}
void freelist_test_internal_popping( void ) { unsigned int loop, cpu_count, count; thread_state_t *thread_handles; enum data_structure_validity dvs = VALIDITY_VALID; struct freelist_state *fs; struct freelist_element *fe; struct freelist_test_popping_state *ftps; unsigned int *found_count; /* TRD : we create a freelist with 1,000,000 elements the creation function runs in a single thread and creates and pushes those elements onto the freelist each element contains a void pointer which is its element number we then run one thread per CPU where each thread loops, popping as quickly as possible each popped element is pushed onto a thread-local freelist the threads run till the source freelist is empty we then check the thread-local freelists we should find we have every element then tidy up */ internal_display_test_name( "Popping" ); cpu_count = abstraction_cpu_count(); freelist_new( &fs, 1000000, freelist_test_internal_popping_init, NULL ); ftps = malloc( sizeof(struct freelist_test_popping_state) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) { (ftps+loop)->fs = fs; freelist_new( &(ftps+loop)->fs_thread_local, 0, NULL, NULL ); } thread_handles = malloc( sizeof(thread_state_t) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping, ftps+loop ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_wait( thread_handles[loop] ); free( thread_handles ); // TRD : now we check the thread-local freelists found_count = malloc( sizeof(unsigned int) * 1000000 ); for( loop = 0 ; loop < 1000000 ; loop++ ) *(found_count+loop) = 0; for( loop = 0 ; loop < cpu_count ; loop++ ) { while( freelist_pop((ftps+loop)->fs_thread_local, &fe) ) { freelist_get_user_data_from_element( fe, (void **) &count ); (*(found_count+count))++; freelist_push( fs, fe ); } } for( loop = 0 ; loop < 1000000 and dvs == VALIDITY_VALID ; loop++ ) { if( *(found_count+loop) == 0 ) dvs = 
VALIDITY_INVALID_MISSING_ELEMENTS; if( *(found_count+loop) > 1 ) dvs = VALIDITY_INVALID_ADDITIONAL_ELEMENTS; } // TRD : cleanup free( found_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) freelist_delete( (ftps+loop)->fs_thread_local, NULL, NULL ); freelist_delete( fs, NULL, NULL ); // TRD : print the test result internal_display_test_result( 1, "freelist", dvs ); return; }
void test_slist_get_set_user_data( void )
{
  unsigned int loop, cpu_count;

  struct lfds611_slist_state *ss;

  struct lfds611_slist_element *se = NULL;

  struct slist_test_state *sts;

  thread_state_t *thread_handles;

  // TRD : user data packs the writer's thread number into the top byte, its counter into the rest
  lfds611_atom_t thread_and_count, thread, count, *per_thread_counters, *per_thread_drop_flags;

  enum lfds611_data_structure_validity dvs = LFDS611_VALIDITY_VALID;

  /* TRD : create a list of (cpu_count*10) elements, user data 0
           one thread per CPU
           each thread loops, setting user_data to
           ((thread_number << (sizeof(lfds611_atom_t)*8-8)) | count)
           validation is to scan list, count on a per thread basis
           should go down only once (one "drop" per thread is permitted,
           since a writer's counter wraps around the list)
  */

  internal_display_test_name( "Get and set user data" );

  cpu_count = abstraction_cpu_count();

  lfds611_slist_new( &ss, NULL, NULL );

  for( loop = 0 ; loop < cpu_count * 10 ; loop++ )
    lfds611_slist_new_head( ss, NULL );

  sts = malloc( sizeof(struct slist_test_state) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (sts+loop)->ss = ss;
    // TRD : seed each thread's packed value with its thread number in the top byte
    (sts+loop)->thread_and_count = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_start( &thread_handles[loop], loop, slist_test_internal_thread_get_set_user_data, sts+loop );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  // now validate

  per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );
  per_thread_drop_flags = malloc( sizeof(lfds611_atom_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    *(per_thread_counters+loop) = 0;
    *(per_thread_drop_flags+loop) = 0;
  }

  while( dvs == LFDS611_VALIDITY_VALID and NULL != lfds611_slist_get_head_and_then_next(ss, &se) )
  {
    // TRD : lfds611_atom_t is pointer sized, so reading the void pointer into it is safe
    lfds611_slist_get_user_data_from_element( se, (void **) &thread_and_count );

    // TRD : unpack: top byte is the writer's thread number, remainder its counter value
    thread = thread_and_count >> (sizeof(lfds611_atom_t)*8-8);
    count = (thread_and_count << 8) >> 8;

    if( thread >= cpu_count )
    {
      dvs = LFDS611_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    // TRD : first sighting of this thread - just record its counter
    if( per_thread_counters[thread] == 0 )
    {
      per_thread_counters[thread] = count;
      continue;
    }

    per_thread_counters[thread]++;

    // TRD : a second drop in this thread's counter means duplicate/out-of-order data
    if( count < per_thread_counters[thread] and per_thread_drop_flags[thread] == 1 )
    {
      dvs = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
      break;
    }

    // TRD : first drop is expected (writer wrapped around the list) - note it and resync
    if( count < per_thread_counters[thread] and per_thread_drop_flags[thread] == 0 )
    {
      per_thread_drop_flags[thread] = 1;
      per_thread_counters[thread] = count;
      continue;
    }

    // NOTE(review): this branch looks unreachable - both count-dropped cases
    // break or continue above; kept byte-identical, flagging for confirmation
    if( count < per_thread_counters[thread] )
      dvs = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( count >= per_thread_counters[thread] )
      per_thread_counters[thread] = count;
  }

  free( per_thread_drop_flags );

  free( per_thread_counters );

  free( sts );

  lfds611_slist_delete( ss );

  internal_display_test_result( 1, "slist", dvs );

  return;
}
/* TRD : allocation churn test - exercises the queue when elements are
         heap-allocated and freed while the queue is live

         one thread per logical core; each thread loops for the test
         duration, mallocing and enqueuing 1k elements, then dequeuing
         and freeing 1k elements; afterwards the queue must be empty
*/

void test_lfds700_queue_enqueuing_with_malloc_and_dequeuing_with_free( struct lfds700_list_asu_state *list_of_logical_processors )
{
  enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID;

  lfds700_pal_uint_t loop, number_logical_processors;

  struct lfds700_list_asu_element *lasue;

  struct lfds700_misc_prng_state ps;

  struct lfds700_queue_element *qe;

  struct lfds700_queue_state qs;

  struct lfds700_misc_validation_info vi;

  struct test_pal_logical_processor *lp;

  struct util_thread_starter_state *tts;

  struct test_state *ts;

  test_pal_thread_state_t *thread_handles;

  assert( list_of_logical_processors != NULL );
  // TRD : qt can be any value in its range

  /* TRD : one thread per logical core
           each thread loops for ten seconds
           mallocs and enqueues 1k elements, then dequeues and frees 1k elements
  */

  internal_display_test_name( "Enqueuing with malloc dequeuing with free (%d seconds)", TEST_DURATION_IN_SECONDS );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_misc_prng_init( &ps );

  // TRD : the queue needs one dummy element at init; freed later via the cleanup callback
  qe = util_aligned_malloc( sizeof(struct lfds700_queue_element), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  lfds700_queue_init_valid_on_current_logical_core( &qs, qe, &ps, NULL );

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    (ts+loop)->qs = &qs;

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : publish the queue and test state before the worker threads start
  LFDS700_MISC_BARRIER_STORE;
  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_enqueuer_with_malloc_and_dequeuer_with_free, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  // TRD : make the worker threads' stores visible before validating
  LFDS700_MISC_BARRIER_LOAD;

  // TRD : every thread frees what it enqueues, so the queue must end empty
  vi.min_elements = vi.max_elements = 0;

  lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );

  free( ts );

  // TRD : the callback frees the remaining (dummy) element, which was malloced
  lfds700_queue_cleanup( &qs, queue_element_cleanup_callback );

  internal_display_test_result( 1, "queue", dvs );

  return;
}
void test_slist_new_delete_get( void ) { unsigned int loop, cpu_count; struct lfds611_slist_state *ss; struct lfds611_slist_element *se = NULL; struct slist_test_state *sts; thread_state_t *thread_handles; size_t total_create_count = 0, total_delete_count = 0, element_count = 0; enum lfds611_data_structure_validity dvs = LFDS611_VALIDITY_VALID; /* TRD : two threads per CPU first simply alternates between new_head() and new_next() (next on element created by head) second calls get_next, if NULL, then calls get_head, and deletes the element both threads keep count of created and deleted validate is to reconcile created, deleted and remaining in list */ internal_display_test_name( "New head/next, delete and get next" ); cpu_count = abstraction_cpu_count(); lfds611_slist_new( &ss, NULL, NULL ); sts = malloc( sizeof(struct slist_test_state) * cpu_count * 2 ); for( loop = 0 ; loop < cpu_count * 2 ; loop++ ) { (sts+loop)->ss = ss; (sts+loop)->create_count = 0; (sts+loop)->delete_count = 0; } thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 ); for( loop = 0 ; loop < cpu_count ; loop++ ) { abstraction_thread_start( &thread_handles[loop], loop, slist_test_internal_thread_new_delete_get_new_head_and_next, sts+loop ); abstraction_thread_start( &thread_handles[loop+cpu_count], loop, slist_test_internal_thread_new_delete_get_delete_and_get, sts+loop+cpu_count ); } for( loop = 0 ; loop < cpu_count * 2 ; loop++ ) abstraction_thread_wait( thread_handles[loop] ); free( thread_handles ); // TRD : now validate for( loop = 0 ; loop < cpu_count * 2 ; loop++ ) { total_create_count += (sts+loop)->create_count; total_delete_count += (sts+loop)->delete_count; } while( NULL != lfds611_slist_get_head_and_then_next(ss, &se) ) element_count++; if( total_create_count - total_delete_count - element_count != 0 ) dvs = LFDS611_VALIDITY_INVALID_TEST_DATA; free( sts ); lfds611_slist_delete( ss ); internal_display_test_result( 1, "slist", dvs ); return; }
void queue_test_dequeuing( void ) { unsigned int loop, cpu_count; thread_state_t *thread_handles; struct lfds611_queue_state *qs; struct queue_test_dequeuing_state *qtds; struct lfds611_validation_info vi = { 0, 0 }; enum lfds611_data_structure_validity dvs[2]; /* TRD : create a queue with 1,000,000 elements use a single thread to enqueue every element each elements user data is an incrementing counter then run one thread per CPU where each busy-works dequeuing when an element is dequeued, we check (on a per-thread basis) the value deqeued is greater than the element previously dequeued */ internal_display_test_name( "Dequeuing" ); cpu_count = abstraction_cpu_count(); lfds611_queue_new( &qs, 1000000 ); for( loop = 0 ; loop < 1000000 ; loop++ ) lfds611_queue_enqueue( qs, (void *) (lfds611_atom_t) loop ); qtds = malloc( sizeof(struct queue_test_dequeuing_state) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) { (qtds+loop)->qs = qs; (qtds+loop)->error_flag = LOWERED; } thread_handles = malloc( sizeof(thread_state_t) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_dequeuer, qtds+loop ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_wait( thread_handles[loop] ); free( thread_handles ); // TRD : check queue is empty lfds611_queue_query( qs, LFDS611_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs ); // TRD : check for raised error flags for( loop = 0 ; loop < cpu_count ; loop++ ) if( (qtds+loop)->error_flag == RAISED ) dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA; free( qtds ); lfds611_queue_delete( qs, NULL, NULL ); internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] ); return; }
void test_lfds700_list_aos_new_ordered( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID;

  lfds700_pal_uint_t loop, expected_element_number, number_elements_per_thread, number_elements_total, number_logical_processors, offset, temp;

  struct lfds700_list_aos_element *laose = NULL;

  struct lfds700_list_asu_element *lasue = NULL;

  struct lfds700_list_aos_state laoss;

  struct lfds700_misc_prng_state ps;

  struct lfds700_misc_validation_info vi;

  struct test_pal_logical_processor *lp;

  struct test_element *element_array, *element;

  struct test_state *ts;

  struct util_thread_starter_state *tts;

  test_pal_thread_state_t *thread_handles;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : run one thread per logical processor
           we have a single array of up to 10k elements per thread
           this is set to be randomly ordered (but with contigious numbers from 0 to n)
           we give 10k to each thread (a pointer into the array at the correct point)
           which then loops through that array
           calling lfds700_list_aos_insert_element_by_position( LFDS700_LIST_AOS_POSITION_ORDERED )
           verification should show list is sorted
  */

  internal_display_test_name( "New ordered" );

  lfds700_misc_prng_init( &ps );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  // TRD : duplicate keys are rejected, so the ordered insert will fail on an existing key
  lfds700_list_aos_init_valid_on_current_logical_core( &laoss, new_ordered_compare_function, LFDS700_LIST_AOS_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );

  /* TRD : create randomly ordered number array with unique elements

           unique isn't necessary - the list will sort anyway -
           but it permits slightly better validation
  */

  number_elements_per_thread = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );

  // TRD : or the test takes a looooooong time...
  if( number_elements_per_thread > 10000 )
    number_elements_per_thread = 10000;

  number_elements_total = number_elements_per_thread * number_logical_processors;

  element_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_total, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  for( loop = 0 ; loop < number_elements_total ; loop++ )
    (element_array+loop)->element_number = loop;

  /* TRD : shuffle by swapping each slot with a PRNG-chosen slot
           (not a uniform Fisher-Yates shuffle, but randomness quality
           doesn't matter here - only that the input is unsorted)
  */
  for( loop = 0 ; loop < number_elements_total ; loop++ )
  {
    offset = LFDS700_MISC_PRNG_GENERATE( &ps );
    offset %= number_elements_total;

    temp = (element_array + offset)->element_number;
    (element_array + offset)->element_number = (element_array + loop)->element_number;
    (element_array + loop)->element_number = temp;
  }

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  // TRD : each thread gets its own contiguous slice of the shuffled array
  for( loop = 0 ; loop < number_logical_processors ; loop++ )
  {
    (ts+loop)->laoss = &laoss;
    (ts+loop)->element_array = element_array + (loop*number_elements_per_thread);
    (ts+loop)->number_elements_per_thread = number_elements_per_thread;
  }

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : publish list and test state before the inserter threads start
  LFDS700_MISC_BARRIER_STORE;
  lfds700_misc_force_store();

  loop = 0;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, new_ordered_thread, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  free( ts );

  /* TRD : validate the resultant list
           iterate over the list
           we expect to find the list is sorted,
           which means that element_number will increment from zero
  */

  LFDS700_MISC_BARRIER_LOAD;

  vi.min_elements = vi.max_elements = number_elements_total;

  lfds700_list_aos_query( &laoss, LFDS700_LIST_AOS_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );

  if( dvs == LFDS700_MISC_VALIDITY_VALID )
  {
    expected_element_number = 0;

    // TRD : traverse the list and check element numbers increment from zero
    while( dvs == LFDS700_MISC_VALIDITY_VALID and LFDS700_LIST_AOS_GET_START_AND_THEN_NEXT(laoss, laose) )
    {
      element = LFDS700_LIST_AOS_GET_VALUE_FROM_ELEMENT( *laose );

      if( element->element_number != expected_element_number++ )
        dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
    }
  }

  lfds700_list_aos_cleanup( &laoss, NULL );

  util_aligned_free( element_array );

  internal_display_test_result( 1, "list_aos", dvs );

  return;
}
void queue_test_enqueuing( void )
{
  unsigned int loop, cpu_count;

  thread_state_t *thread_handles;

  struct lfds611_queue_state *qs;

  struct queue_test_enqueuing_state *qtes;

  lfds611_atom_t user_data, thread, count, *per_thread_counters;

  // TRD : after the enqueuers exhaust the freelist, the queue must hold exactly 1,000,000 elements
  struct lfds611_validation_info vi = { 1000000, 1000000 };

  enum lfds611_data_structure_validity dvs[2];

  /* TRD : create an empty queue
           with 1,000,000 elements in its freelist
           then run one thread per CPU
           where each thread busy-works, enqueuing elements
           (until there are no more elements)
           each element's void pointer of user data is (thread number | element number)
           where element_number is a thread-local counter starting at 0
           where the thread_number occupies the top byte

           when we're done, we check that all the elements are present
           and increment on a per-thread basis
  */

  internal_display_test_name( "Enqueuing" );

  cpu_count = abstraction_cpu_count();

  lfds611_queue_new( &qs, 1000000 );

  qtes = malloc( sizeof(struct queue_test_enqueuing_state) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (qtes+loop)->qs = qs;
    // TRD : thread number in the top byte; the low bits count from 0
    (qtes+loop)->counter = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_enqueuer, qtes+loop );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  free( qtes );

  /* TRD : first, validate the queue
           then dequeue
           we expect to find element numbers increment on a per thread basis
  */

  lfds611_queue_query( qs, LFDS611_QUEUE_QUERY_VALIDATE, &vi, dvs );

  per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    *(per_thread_counters+loop) = 0;

  while( dvs[0] == LFDS611_VALIDITY_VALID and dvs[1] == LFDS611_VALIDITY_VALID and lfds611_queue_dequeue(qs, (void *) &user_data) )
  {
    // TRD : unpack: top byte is the enqueuer's thread number, remainder its counter
    thread = user_data >> (sizeof(lfds611_atom_t)*8-8);
    count = (user_data << 8) >> 8;

    if( thread >= cpu_count )
    {
      dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    // TRD : per-thread counts must arrive in exact sequence: lower means a duplicate,
    //       higher means a gap, equal advances the expectation
    if( count < per_thread_counters[thread] )
      dvs[0] = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( count > per_thread_counters[thread] )
      dvs[0] = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;

    if( count == per_thread_counters[thread] )
      per_thread_counters[thread]++;
  }

  free( per_thread_counters );

  lfds611_queue_delete( qs, NULL, NULL );

  internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );

  return;
}
void abstraction_test_increment ( void)
{
  unsigned int loop, cpu_count;

  thread_state_t *thread_handles;

  LFDS611_ALIGN (LFDS611_ALIGN_SINGLE_POINTER) volatile lfds611_atom_t shared_counter, atomic_shared_counter;

  /*
   * TRD : here we test lfds611_abstraction_increment
   *
   * first, we run one thread per CPU where each thread increments
   * a shared counter 10,000,000 times - however, this first test
   * does NOT use atomic increment; it uses "++"
   *
   * second, we repeat the exercise, but this time using
   * lfds611_abstraction_increment()
   *
   * if the final value in the first test is less than (10,000,000*cpu_count)
   * then the system is sensitive to non-atomic increments; this means if
   * our atomic version of the test passes, we can have some degree of confidence
   * that it works
   *
   * if the final value in the first test is in fact correct, then we can't know
   * that our atomic version has changed anything
   *
   * and of course if the final value in the atomic test is wrong, we know things
   * are broken
   *
   * NOTE(review): 10000000 * cpu_count is evaluated as unsigned int; on a
   * platform with a very large cpu_count this could wrap - confirm acceptable
   */

  internal_display_test_name ("Atomic increment");

  cpu_count = abstraction_cpu_count ();

  shared_counter = 0;
  atomic_shared_counter = 0;

  // TRD : make the zeroed counters visible before the threads start
  LFDS611_BARRIER_STORE;

  thread_handles = malloc (sizeof (thread_state_t) * cpu_count);

  // TRD : non-atomic
  for (loop = 0; loop < cpu_count; loop++)
    abstraction_thread_start (&thread_handles[loop], loop, abstraction_test_internal_thread_increment, (void *)&shared_counter);

  for (loop = 0; loop < cpu_count; loop++)
    abstraction_thread_wait (thread_handles[loop]);

  // TRD : atomic
  for (loop = 0; loop < cpu_count; loop++)
    abstraction_thread_start (&thread_handles[loop], loop, abstraction_test_internal_thread_atomic_increment, (void *)&atomic_shared_counter);

  for (loop = 0; loop < cpu_count; loop++)
    abstraction_thread_wait (thread_handles[loop]);

  free (thread_handles);

  // TRD : results - pass only if the plain "++" run demonstrably lost increments
  //       while the atomic run did not
  if (shared_counter < (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count))
    puts ("passed");

  // TRD : plain "++" happened to be correct too, so the atomic result proves nothing
  if (shared_counter == (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count))
    puts ("indeterminate");

  if (atomic_shared_counter < (10000000 * cpu_count))
    puts ("failed");

  return;
}
void queue_test_enqueuing_and_dequeuing( void ) { unsigned int loop, subloop, cpu_count; thread_state_t *thread_handles; struct lfds611_queue_state *qs; struct queue_test_enqueuing_and_dequeuing_state *qteds; struct lfds611_validation_info vi = { 0, 0 }; enum lfds611_data_structure_validity dvs[2]; internal_display_test_name( "Enqueuing and dequeuing (10 seconds)" ); cpu_count = abstraction_cpu_count(); lfds611_queue_new( &qs, cpu_count ); qteds = malloc( sizeof(struct queue_test_enqueuing_and_dequeuing_state) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) { (qteds+loop)->qs = qs; (qteds+loop)->thread_number = loop; (qteds+loop)->counter = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8); (qteds+loop)->cpu_count = cpu_count; (qteds+loop)->error_flag = LOWERED; (qteds+loop)->per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count ); for( subloop = 0 ; subloop < cpu_count ; subloop++ ) *((qteds+loop)->per_thread_counters+subloop) = 0; } thread_handles = malloc( sizeof(thread_state_t) * cpu_count ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_enqueuer_and_dequeuer, qteds+loop ); for( loop = 0 ; loop < cpu_count ; loop++ ) abstraction_thread_wait( thread_handles[loop] ); free( thread_handles ); lfds611_queue_query( qs, LFDS611_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs ); for( loop = 0 ; loop < cpu_count ; loop++ ) if( (qteds+loop)->error_flag == RAISED ) dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA; for( loop = 0 ; loop < cpu_count ; loop++ ) free( (qteds+loop)->per_thread_counters ); free( qteds ); lfds611_queue_delete( qs, NULL, NULL ); internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] ); return; }
void test_lfds700_list_asu_new_start( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID;

  lfds700_pal_uint_t loop, number_elements, number_logical_processors, *per_thread_counters, subloop;

  struct lfds700_list_asu_element *lasue = NULL;

  struct lfds700_list_asu_state lasus;

  struct lfds700_misc_validation_info vi;

  struct test_element *element_array, *element;

  struct test_state *ts;

  struct test_pal_logical_processor *lp;

  struct util_thread_starter_state *tts;

  test_pal_thread_state_t *thread_handles;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : run one thread per logical processor
           element count per thread is derived from memory_in_megabytes
           each thread loops, calling lfds700_list_asu_new_element_by_position( LFDS700_LIST_ASU_POSITION_START )
           each data element contains thread_number and element_number
           since every insert is at the start, traversal from the start
           should show element_number DECREASING on a per thread basis
  */

  internal_display_test_name( "New start" );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_list_asu_init_valid_on_current_logical_core( &lasus, NULL, NULL );

  number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );

  element_array = util_aligned_malloc( sizeof(struct test_element) * number_logical_processors * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  // TRD : pre-fill every element with its owning thread number and its sequence number
  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    for( subloop = 0 ; subloop < number_elements ; subloop++ )
    {
      (element_array+(loop*number_elements)+subloop)->thread_number = loop;
      (element_array+(loop*number_elements)+subloop)->element_number = subloop;
    }

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  // TRD : each thread gets its own contiguous slice of the element array
  for( loop = 0 ; loop < number_logical_processors ; loop++ )
  {
    (ts+loop)->lasus = &lasus;
    (ts+loop)->element_array = element_array + (loop*number_elements);
    (ts+loop)->number_elements = number_elements;
  }

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  // TRD : publish the list and test state before the inserter threads start
  LFDS700_MISC_BARRIER_STORE;
  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, new_start_thread, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  free( ts );

  LFDS700_MISC_BARRIER_LOAD;

  /* TRD : validate the resultant list
           iterate over each element
           since each thread inserted its elements at the start in ascending
           order, we expect element numbers to DECREASE on a per thread basis
           (counters start at number_elements - 1 and count down)
  */

  vi.min_elements = vi.max_elements = number_elements * number_logical_processors;

  lfds700_list_asu_query( &lasus, LFDS700_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );

  per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    *(per_thread_counters+loop) = number_elements - 1;

  lasue = NULL;

  while( dvs == LFDS700_MISC_VALIDITY_VALID and LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(lasus, lasue) )
  {
    element = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );

    if( element->thread_number >= number_logical_processors )
    {
      dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    if( element->element_number < per_thread_counters[element->thread_number] )
      dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;

    if( element->element_number > per_thread_counters[element->thread_number] )
      dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( element->element_number == per_thread_counters[element->thread_number] )
      per_thread_counters[element->thread_number]--;
  }

  free( per_thread_counters );

  lfds700_list_asu_cleanup( &lasus, NULL );

  util_aligned_free( element_array );

  internal_display_test_result( 1, "list_asu", dvs );

  return;
}
void test_lfds700_hash_a_fail_and_overwrite_on_existing_key() { enum lfds700_hash_a_insert_result apr; enum lfds700_misc_validity dvs = LFDS700_MISC_VALIDITY_VALID; struct lfds700_hash_a_element hae_one, hae_two; struct lfds700_hash_a_state has; struct lfds700_btree_au_state *baus; struct lfds700_misc_prng_state ps; internal_display_test_name( "Fail and overwrite on existing key" ); lfds700_misc_prng_init( &ps ); baus = util_aligned_malloc( sizeof(struct lfds700_btree_au_state) * 10, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES ); // TRD : fail on overwrite lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_FAIL, NULL ); LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_one, 1 ); LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_one, 0 ); apr = lfds700_hash_a_insert( &has, &hae_one, NULL, &ps ); if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS ) dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA; LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_two, 1 ); LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_two, 1 ); apr = lfds700_hash_a_insert( &has, &hae_two, NULL, &ps ); if( apr != LFDS700_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY ) dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA; lfds700_hash_a_cleanup( &has, NULL ); // TRD : success on overwrite lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_OVERWRITE, NULL ); LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_one, 1 ); LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_one, 1 ); apr = lfds700_hash_a_insert( &has, &hae_one, NULL, &ps ); if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS ) dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA; LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_two, 1 ); LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_two, 1 ); apr = lfds700_hash_a_insert( &has, &hae_two, NULL, &ps ); if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE ) dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA; lfds700_hash_a_cleanup( &has, NULL ); 
util_aligned_free( baus ); // TRD : print the test result internal_display_test_result( 1, "hash_a", dvs ); return; }