Example 1
void queue_test_dequeuing( void )
{
  unsigned int
    loop,
    cpu_count;

  thread_state_t
    *thread_handles;

  struct lfds601_queue_state
    *qs;

  struct queue_test_dequeuing_state
    *qtds;

  struct lfds601_validation_info
    vi = { 0, 0 };

  enum lfds601_data_structure_validity
    dvs[2];

  /* TRD : create a queue with 1,000,000 elements

           use a single thread to enqueue every element
           each element's user data is an incrementing counter

           then run one thread per CPU
           where each thread busy-works dequeuing

           when an element is dequeued, we check (on a per-thread basis) that the
           value dequeued is greater than the value previously dequeued
  */

  internal_display_test_name( "Dequeuing" );

  cpu_count = abstraction_cpu_count();

  lfds601_queue_new( &qs, 1000000 );

  for( loop = 0 ; loop < 1000000 ; loop++ )
    lfds601_queue_enqueue( qs, (void *) (lfds601_atom_t) loop );

  qtds = malloc( sizeof(struct queue_test_dequeuing_state) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (qtds+loop)->qs = qs;
    (qtds+loop)->error_flag = LOWERED;
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_dequeuer, qtds+loop );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  // TRD : check queue is empty
  lfds601_queue_query( qs, LFDS601_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );

  // TRD : check for raised error flags
  for( loop = 0 ; loop < cpu_count ; loop++ )
    if( (qtds+loop)->error_flag == RAISED )
      dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;

  free( qtds );

  lfds601_queue_delete( qs, NULL, NULL );

  internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );

  return;
}
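
The per-CPU worker started above, queue_test_internal_thread_simple_dequeuer, is not shown in this excerpt. The following is a minimal sketch of what it could look like, assuming the test abstraction layer's usual thread_return_t / CALLING_CONVENTION entry convention and a queue_test_dequeuing_state holding only the qs and error_flag members set above; the real worker in the test suite may differ.

thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *user_state )
{
  struct queue_test_dequeuing_state
    *qtds = (struct queue_test_dequeuing_state *) user_state;

  lfds601_atom_t
    current_value,
    previous_value = 0;

  void
    *user_data;

  int
    first_element = 1;

  // TRD : busy-work dequeuing until the queue is empty
  while( lfds601_queue_dequeue(qtds->qs, &user_data) )
  {
    current_value = (lfds601_atom_t) user_data;

    // TRD : every value after the first must be strictly greater than its predecessor
    if( first_element == 0 and current_value <= previous_value )
      qtds->error_flag = RAISED;

    first_element = 0;
    previous_value = current_value;
  }

  return( (thread_return_t) EXIT_SUCCESS );
}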
Example 2
void queue_test_enqueuing( void )
{
  unsigned int
    loop,
    cpu_count;

  thread_state_t
    *thread_handles;

  struct lfds601_queue_state
    *qs;

  struct queue_test_enqueuing_state
    *qtes;

  lfds601_atom_t
    user_data,
    thread,
    count,
    *per_thread_counters;

  struct lfds601_validation_info
    vi = { 1000000, 1000000 };

  enum lfds601_data_structure_validity
    dvs[2];

  /* TRD : create an empty queue with 1,000,000 elements in its freelist
           then run one thread per CPU
           where each thread busy-works, enqueuing elements (until there are no more elements)
           each element's void pointer of user data is (thread number | element number)
           where element_number is a thread-local counter starting at 0
           where the thread_number occupies the top byte

           when we're done, we check that all the elements are present
           and increment on a per-thread basis
  */

  internal_display_test_name( "Enqueuing" );

  cpu_count = abstraction_cpu_count();

  lfds601_queue_new( &qs, 1000000 );

  qtes = malloc( sizeof(struct queue_test_enqueuing_state) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (qtes+loop)->qs = qs;
    (qtes+loop)->counter = (lfds601_atom_t) loop << (sizeof(lfds601_atom_t)*8-8);
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_enqueuer, qtes+loop );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  free( qtes );

  /* TRD : first, validate the queue

           then dequeue
           we expect to find element numbers increment on a per thread basis
  */

  lfds601_queue_query( qs, LFDS601_QUEUE_QUERY_VALIDATE, &vi, dvs );

  per_thread_counters = malloc( sizeof(lfds601_atom_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    *(per_thread_counters+loop) = 0;

  while( dvs[0] == LFDS601_VALIDITY_VALID and dvs[1] == LFDS601_VALIDITY_VALID and lfds601_queue_dequeue(qs, (void *) &user_data) )
  {
    thread = user_data >> (sizeof(lfds601_atom_t)*8-8);
    count = (user_data << 8) >> 8;

    if( thread >= cpu_count )
    {
      dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    if( count < per_thread_counters[thread] )
      dvs[0] = LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( count > per_thread_counters[thread] )
      dvs[0] = LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS;

    if( count == per_thread_counters[thread] )
      per_thread_counters[thread]++;
  }

  free( per_thread_counters );

  lfds601_queue_delete( qs, NULL, NULL );

  internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );

  return;
}
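
As with the dequeuing test, the per-CPU worker is not shown. A minimal sketch, under the same entry-convention assumption, using only the qs and counter members set above: since the top byte of counter already carries the thread number, the worker simply enqueues and increments counter until the queue's internal freelist of elements is exhausted.

thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *user_state )
{
  struct queue_test_enqueuing_state
    *qtes = (struct queue_test_enqueuing_state *) user_state;

  // TRD : enqueue (thread number | element number) values until enqueue fails,
  //       which happens when the queue has no free elements left
  while( lfds601_queue_enqueue(qtes->qs, (void *) qtes->counter) )
    qtes->counter++;

  return( (thread_return_t) EXIT_SUCCESS );
}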
void test_lfds700_ringbuffer_writing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity
    dvs[2] = { LFDS700_MISC_VALIDITY_VALID, LFDS700_MISC_VALIDITY_VALID };

  lfds700_pal_uint_t
    loop,
    number_elements_with_dummy_element,
    number_elements_without_dummy_element,
    number_logical_processors,
    *per_thread_counters;

  test_pal_thread_state_t
    *thread_handles;

  struct lfds700_list_asu_element
    *lasue;

  struct lfds700_misc_prng_state
    ps;

  struct lfds700_ringbuffer_element
    *re_array;

  struct lfds700_ringbuffer_state
    rs;

  struct lfds700_misc_validation_info
    vi;

  struct test_pal_logical_processor
    *lp;

  struct util_thread_starter_state
    *tts;

  struct test_element
    *te,
    *te_array;

  struct test_state
    *ts;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : we create a single ringbuffer
           with n elements
           we create n test elements
           which are thread_number/counter pairs
           init them to safe values
           and fully populate the ringbuffer

           we create one thread per CPU
           where each thread busy-works writing
           for ten seconds; each thread has one extra element,
           which it uses for the first write; after that it
           reuses the element handed back by each overwrite

           the user data in each written element is a combination
           of the thread number and the counter

           after the threads are complete, we validate by
           checking the user data counters increment on a per thread
           basis
  */

  internal_display_test_name( "Writing (%d seconds)", TEST_DURATION_IN_SECONDS );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_misc_prng_init( &ps );

  number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) + sizeof(struct lfds700_ringbuffer_element) );
  number_elements_without_dummy_element = number_elements_with_dummy_element - 1;

  vi.min_elements = number_elements_without_dummy_element;
  vi.max_elements = number_elements_without_dummy_element;

  re_array = util_aligned_malloc( sizeof(struct lfds700_ringbuffer_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  lfds700_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, number_elements_with_dummy_element, &ps, NULL );

  te_array = util_malloc_wrapper( sizeof(struct test_element) * number_elements_without_dummy_element );

  // TRD : init the test elements and write them into the ringbuffer
  for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
  {
    te_array[loop].thread_number = 0;
    te_array[loop].datum = 0;
    lfds700_ringbuffer_write( &rs, NULL, &te_array[loop], NULL, NULL, NULL, &ps );
  }

  ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
  {
    (ts+loop)->rs = &rs;
    (ts+loop)->thread_number = loop;
    (ts+loop)->write_count = 0;
  }

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  LFDS700_MISC_BARRIER_STORE;

  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_simple_writer, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  LFDS700_MISC_BARRIER_LOAD;

  // TRD : now check results
  per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    *(per_thread_counters+loop) = 0;

  lfds700_ringbuffer_query( &rs, LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );

  while( dvs[0] == LFDS700_MISC_VALIDITY_VALID and dvs[1] == LFDS700_MISC_VALIDITY_VALID and lfds700_ringbuffer_read(&rs, NULL, (void **) &te, &ps) )
  {
    if( te->thread_number >= number_logical_processors )
    {
      dvs[0] = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
      break;
    }

    if( per_thread_counters[te->thread_number] == 0 )
      per_thread_counters[te->thread_number] = te->datum;

    if( te->datum < per_thread_counters[te->thread_number] )
      dvs[0] = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;

    if( te->datum >= per_thread_counters[te->thread_number] )
      per_thread_counters[te->thread_number] = te->datum+1;
  }

  free( per_thread_counters );

  lfds700_ringbuffer_cleanup( &rs, NULL );

  free( ts );

  util_aligned_free( re_array );

  free( te_array );

  internal_display_test_result( 2, "queue", dvs[0], "freelist", dvs[1] );

  return;
}
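
The writer thread, thread_simple_writer, is not shown. The sketch below is hypothetical in two respects: it assumes struct test_state embeds a spare struct test_element (the "one extra element" the comment refers to) and it uses a plain void * entry point with a time() loop, whereas the real suite routes the state through util_thread_starter and the test PAL; the write loop itself follows the pattern established by the lfds700_ringbuffer_write call above.

#include <time.h>

/* HYPOTHETICAL sketch - assumes struct test_state has a member
   'struct test_element spare_element'; entry signature simplified */
void thread_simple_writer( void *user_state )
{
  struct test_state
    *ts = (struct test_state *) user_state;

  struct test_element
    *te = &ts->spare_element,
    *overwritten_te;

  enum lfds700_misc_flag
    overwrite_occurred_flag;

  struct lfds700_misc_prng_state
    ps;

  lfds700_pal_uint_t
    counter = 0;

  time_t
    start_time = time( NULL );

  lfds700_misc_prng_init( &ps );

  LFDS700_MISC_BARRIER_LOAD;

  // TRD : busy-work writing for the test duration
  while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
  {
    te->thread_number = ts->thread_number;
    te->datum = counter++;

    lfds700_ringbuffer_write( ts->rs, NULL, te, &overwrite_occurred_flag, NULL, (void **) &overwritten_te, &ps );

    // TRD : the ringbuffer was fully populated before the threads started,
    //       so every write overwrites and hands back an element to reuse
    if( overwrite_occurred_flag == LFDS700_MISC_FLAG_RAISED )
      te = overwritten_te;

    ts->write_count++;
  }

  LFDS700_MISC_BARRIER_STORE;
  lfds700_misc_force_store();
}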
void test_lfds700_freelist_popping( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
{
  enum lfds700_misc_validity
    dvs = LFDS700_MISC_VALIDITY_VALID;

  lfds700_pal_uint_t
    loop,
    number_elements,
    number_logical_processors;

  struct lfds700_list_asu_element
    *lasue;

  struct lfds700_freelist_state
    fs;

  struct lfds700_misc_prng_state
    ps;

  struct lfds700_misc_validation_info
    vi = { 0, 0 };

  struct test_pal_logical_processor
    *lp;

  struct util_thread_starter_state
    *tts;

  struct test_element
    *te_array;

  struct test_state
    *ts;

  test_pal_thread_state_t
    *thread_handles;

  assert( list_of_logical_processors != NULL );
  // TRD : memory_in_megabytes can be any value in its range

  /* TRD : we create a freelist with 1,000,000 elements

           the creation function runs in a single thread and creates
           and pushes those elements onto the freelist

           each element contains a void pointer to the container test element

           we then run one thread per CPU
           where each thread loops, popping as quickly as possible
           each test element has a flag which indicates it has been popped

           the threads run till the source freelist is empty

           we then check the test elements
           every element should have been popped

           then tidy up

           we have no extra code for CAS/GC as we're only popping
  */

  internal_display_test_name( "Popping" );

  lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );

  lfds700_misc_prng_init( &ps );

  lfds700_freelist_init_valid_on_current_logical_core( &fs, NULL );

  number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct test_element);

  te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  for( loop = 0 ; loop < number_elements ; loop++ )
  {
    (te_array+loop)->popped_flag = LOWERED;
    LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+loop)->fe, te_array+loop );
    lfds700_freelist_push( &fs, &(te_array+loop)->fe, &ps );
  }

  ts = util_aligned_malloc( sizeof(struct test_state) * number_logical_processors, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    (ts+loop)->fs = &fs;

  thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );

  util_thread_starter_new( &tts, number_logical_processors );

  LFDS700_MISC_BARRIER_STORE;

  lfds700_misc_force_store();

  loop = 0;
  lasue = NULL;

  while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
  {
    lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
    util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_popping, ts+loop );
    loop++;
  }

  util_thread_starter_run( tts );

  for( loop = 0 ; loop < number_logical_processors ; loop++ )
    test_pal_thread_wait( thread_handles[loop] );

  util_thread_starter_delete( tts );

  free( thread_handles );

  LFDS700_MISC_BARRIER_LOAD;

  lfds700_freelist_query( &fs, LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );

  // TRD : now we check each element has popped_flag set to RAISED
  for( loop = 0 ; loop < number_elements ; loop++ )
    if( (te_array+loop)->popped_flag == LOWERED )
      dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;

  // TRD : cleanup
  lfds700_freelist_cleanup( &fs, NULL );
  util_aligned_free( ts );
  util_aligned_free( te_array );

  // TRD : print the test result
  internal_display_test_result( 1, "freelist", dvs );

  return;
}
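
The popping worker, thread_popping, is not shown. A minimal sketch, again with a simplified void * entry point (the real suite passes the state through util_thread_starter), assuming LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT is the read counterpart of the SET macro used above and that lfds700_freelist_pop takes the same prng state argument as the push:

/* HYPOTHETICAL sketch - entry signature simplified */
void thread_popping( void *user_state )
{
  struct test_state
    *ts = (struct test_state *) user_state;

  struct lfds700_freelist_element
    *fe;

  struct test_element
    *te;

  struct lfds700_misc_prng_state
    ps;

  lfds700_misc_prng_init( &ps );

  LFDS700_MISC_BARRIER_LOAD;

  // TRD : pop as quickly as possible until the freelist is empty,
  //       marking each popped test element as having been seen
  while( lfds700_freelist_pop(ts->fs, &fe, &ps) )
  {
    te = LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
    te->popped_flag = RAISED;
  }

  LFDS700_MISC_BARRIER_STORE;
  lfds700_misc_force_store();
}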
Example 5
void freelist_test_internal_rapid_popping_and_pushing( void )
{
  unsigned int
    loop,
    cpu_count;

  thread_state_t
    *thread_handles;

  struct freelist_state
    *fs;

  struct validation_info
    vi;

  enum data_structure_validity
    dvs;

  /* TRD : in these tests there is a fundamental antagonism between
           how much checking/memory cleanup we do and the
           likelihood of collisions between threads in their lock-free
           operations

           the lock-free operations are very quick; if we do anything
           much at all between operations, we greatly reduce the chance
           of threads colliding

           so some tests do enough checking/cleanup that they can tell
           the freelist is valid and do not leak memory; this test, by
           contrast, does minimal checking - by its very nature it cannot
           do any real checking - but it runs very quickly

           what we do is create a small freelist and then run one thread
           per CPU, where each thread simply pops and then immediately
           pushes

           the test runs for ten seconds

           after the test is done, the only check we do is to traverse
           the freelist, checking for loops and ensuring the number of
           elements is correct
  */

  internal_display_test_name( "Rapid popping and pushing (10 seconds)" );

  cpu_count = abstraction_cpu_count();

  freelist_new( &fs, cpu_count, NULL, NULL );

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_rapid_popping_and_pushing, fs );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  vi.min_elements = cpu_count;
  vi.max_elements = cpu_count;

  freelist_query( fs, FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );

  freelist_delete( fs, NULL, NULL );

  // TRD : print the test result
  internal_display_test_result( 1, "freelist", dvs );

  return;
}
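
The worker here receives the freelist state directly (see the abstraction_thread_start call above). A minimal sketch, assuming the usual thread_return_t / CALLING_CONVENTION entry convention and a plain time() loop for the ten-second run:

#include <time.h>

thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *user_state )
{
  struct freelist_state
    *fs = (struct freelist_state *) user_state;

  struct freelist_element
    *fe;

  time_t
    start_time = time( NULL );

  // TRD : pop and immediately push again, as fast as possible, for ten seconds;
  //       the pop can fail momentarily when every element is held by another thread
  while( time(NULL) < start_time + 10 )
    if( freelist_pop(fs, &fe) )
      freelist_push( fs, fe );

  return( (thread_return_t) EXIT_SUCCESS );
}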
Example 6
void freelist_test_internal_popping_and_pushing( void )
{
  unsigned int
    loop,
    cpu_count;

  thread_state_t
    *thread_handles;

  enum data_structure_validity
    dvs;

  struct freelist_state
    *fs;

  struct freelist_test_popping_and_pushing_state
    *pps;

  struct validation_info
    vi;

  /* TRD : we have two threads per CPU
           the threads loop for ten seconds
           the first thread pushes 100000 elements then pops 100000 elements
           the second thread pops 100000 elements then pushes 100000 elements
           all pushes go onto, and all pops come from, the single main freelist

           after time is up, all threads push what they have remaining onto
           the main freelist

           we then validate the main freelist
  */

  internal_display_test_name( "Popping and pushing (10 seconds)" );

  cpu_count = abstraction_cpu_count();

  freelist_new( &fs, 100000 * cpu_count, NULL, NULL );

  pps = malloc( sizeof(struct freelist_test_popping_and_pushing_state) * cpu_count * 2 );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (pps+loop)->fs = fs;
    freelist_new( &(pps+loop)->local_fs, 0, NULL, NULL );

    (pps+loop+cpu_count)->fs = fs;
    freelist_new( &(pps+loop+cpu_count)->local_fs, 100000, NULL, NULL );
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping_and_pushing_start_popping, pps+loop );
    abstraction_thread_start( &thread_handles[loop+cpu_count], loop, freelist_test_internal_thread_popping_and_pushing_start_pushing, pps+loop+cpu_count );
  }

  for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
    freelist_delete( (pps+loop)->local_fs, NULL, NULL );

  free( pps );

  vi.min_elements = vi.max_elements = 100000 * cpu_count * 2;

  freelist_query( fs, FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );

  freelist_delete( fs, NULL, NULL );

  // TRD : print the test result
  internal_display_test_result( 1, "freelist", dvs );

  return;
}
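
Neither worker is shown in this excerpt. Below is a minimal sketch of the pop-first worker, under the same entry-convention and time() loop assumptions; the push-first worker, freelist_test_internal_thread_popping_and_pushing_start_pushing, would mirror it with the two phases swapped.

#include <time.h>

thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *user_state )
{
  struct freelist_test_popping_and_pushing_state
    *pps = (struct freelist_test_popping_and_pushing_state *) user_state;

  struct freelist_element
    *fe;

  unsigned int
    count;

  time_t
    start_time = time( NULL );

  while( time(NULL) < start_time + 10 )
  {
    // TRD : pop up to 100,000 elements from the main freelist onto the local freelist
    count = 0;

    while( count < 100000 and freelist_pop(pps->fs, &fe) )
    {
      freelist_push( pps->local_fs, fe );
      count++;
    }

    // TRD : then push them all back onto the main freelist
    while( freelist_pop(pps->local_fs, &fe) )
      freelist_push( pps->fs, fe );
  }

  // TRD : when time is up, anything still held locally goes back to the main freelist
  while( freelist_pop(pps->local_fs, &fe) )
    freelist_push( pps->fs, fe );

  return( (thread_return_t) EXIT_SUCCESS );
}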
Example 7
void freelist_test_internal_popping( void )
{
  unsigned int
    loop,
    cpu_count,
    count;

  thread_state_t
    *thread_handles;

  enum data_structure_validity
    dvs = VALIDITY_VALID;

  struct freelist_state
    *fs;

  struct freelist_element
    *fe;

  struct freelist_test_popping_state
    *ftps;

  unsigned int
    *found_count;

  /* TRD : we create a freelist with 1,000,000 elements

           the creation function runs in a single thread and creates
           and pushes those elements onto the freelist

           each element contains a void pointer which is its element number

           we then run one thread per CPU
           where each thread loops, popping as quickly as possible
           each popped element is pushed onto a thread-local freelist

           the threads run till the source freelist is empty

           we then check the thread-local freelists
           we should find we have every element

           then tidy up
  */

  internal_display_test_name( "Popping" );

  cpu_count = abstraction_cpu_count();

  freelist_new( &fs, 1000000, freelist_test_internal_popping_init, NULL );
  ftps = malloc( sizeof(struct freelist_test_popping_state) * cpu_count );
  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (ftps+loop)->fs = fs;
    freelist_new( &(ftps+loop)->fs_thread_local, 0, NULL, NULL );
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping, ftps+loop );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  // TRD : now we check the thread-local freelists
  found_count = malloc( sizeof(unsigned int) * 1000000 );
  for( loop = 0 ; loop < 1000000 ; loop++ )
    *(found_count+loop) = 0;

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    while( freelist_pop((ftps+loop)->fs_thread_local, &fe) )
    {
      freelist_get_user_data_from_element( fe, (void **) &count );
      (*(found_count+count))++;
      freelist_push( fs, fe );
    }
  }

  for( loop = 0 ; loop < 1000000 and dvs == VALIDITY_VALID ; loop++ )
  {
    if( *(found_count+loop) == 0 )
      dvs = VALIDITY_INVALID_MISSING_ELEMENTS;

    if( *(found_count+loop) > 1 )
      dvs = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
  }

  // TRD : cleanup
  free( found_count );
  for( loop = 0 ; loop < cpu_count ; loop++ )
    freelist_delete( (ftps+loop)->fs_thread_local, NULL, NULL );
  freelist_delete( fs, NULL, NULL );

  // TRD : print the test result
  internal_display_test_result( 1, "freelist", dvs );

  return;
}
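
The worker, freelist_test_internal_thread_popping, is not shown, and neither is the freelist_test_internal_popping_init callback (which numbers each element's user data). A minimal sketch of the worker, under the usual entry-convention assumption:

thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *user_state )
{
  struct freelist_test_popping_state
    *ftps = (struct freelist_test_popping_state *) user_state;

  struct freelist_element
    *fe;

  // TRD : pop from the shared freelist as fast as possible until it is empty,
  //       keeping every popped element on this thread's local freelist
  while( freelist_pop(ftps->fs, &fe) )
    freelist_push( ftps->fs_thread_local, fe );

  return( (thread_return_t) EXIT_SUCCESS );
}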
Example 8
void freelist_test_internal_pushing( void )
{
  unsigned int
    loop,
    cpu_count;

  thread_state_t
    *thread_handles;

  enum data_structure_validity
    dvs;

  struct freelist_test_pushing_state
    *ftps;

  struct freelist_element
    *fe;

  struct freelist_state
    *fs,
    *cleanup_fs;

  struct freelist_test_counter_and_thread_number
    *cnt,
    *counter_and_number_trackers;

  struct validation_info
    vi = { 1000000, 1000000 };

  /* TRD : we create an empty freelist, which we will push to

           we then create one freelist per CPU, where this freelist
           contains 1,000,000/cpu_count number of elements and
           each element is an incrementing counter and unique ID
           (from 0 to number of CPUs)

           we then start one thread per CPU, where each thread is
           given one of the populated freelists and pops from that
           to push to the empty freelist

           the reason for this is to achieve memory pre-allocation
           which allows the pushing threads to run at maximum speed

           the threads end when their freelists are empty

           we then fully pop the now populated main freelist (onto
           a second freelist, so we can cleanly free all memory),
           checking that the counts increment on a per unique ID basis
           and that the number of elements we pop equals 1,000,000
           (since each element has an incrementing counter which is
            unique on a per unique ID basis, we can know we didn't lose
            any elements)
  */

  internal_display_test_name( "Pushing" );

  cpu_count = abstraction_cpu_count();

  ftps = malloc( sizeof(struct freelist_test_pushing_state) * cpu_count );

  freelist_new( &fs, 0, NULL, NULL );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (ftps+loop)->thread_number = (atom_t) loop;
    freelist_new( &(ftps+loop)->source_fs, 1000000 / cpu_count, freelist_test_internal_pushing_init, (void *) (atom_t) loop );
    (ftps+loop)->fs = fs;
  }

  thread_handles = malloc( sizeof(thread_state_t) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_pushing, ftps+loop );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    abstraction_thread_wait( thread_handles[loop] );

  free( thread_handles );

  // TRD : now fully pop and verify the main freelist
  freelist_new( &cleanup_fs, 0, NULL, NULL );

  counter_and_number_trackers = malloc( sizeof(struct freelist_test_counter_and_thread_number) * cpu_count );

  for( loop = 0 ; loop < cpu_count ; loop++ )
  {
    (counter_and_number_trackers+loop)->counter = (1000000 / cpu_count) * loop;
    (counter_and_number_trackers+loop)->thread_number = (atom_t) loop;
  }

  freelist_query( fs, FREELIST_QUERY_VALIDATE, &vi, (void *) &dvs );

  while( dvs == VALIDITY_VALID and freelist_pop(fs, &fe) )
  {
    static int count = 0;

    freelist_get_user_data_from_element( fe, (void **) &cnt );

    if( cnt->counter != (counter_and_number_trackers+cnt->thread_number)->counter++ )
      dvs = VALIDITY_INVALID_MISSING_ELEMENTS;

    freelist_push( cleanup_fs, fe );

    count++;
  }

  // TRD : clean up
  free( counter_and_number_trackers );

  for( loop = 0 ; loop < cpu_count ; loop++ )
    freelist_delete( (ftps+loop)->source_fs, NULL, NULL );

  free( ftps );

  freelist_delete( cleanup_fs, freelist_test_internal_pushing_delete, NULL );
  freelist_delete( fs, NULL, NULL );

  // TRD : print the test result
  internal_display_test_result( 1, "freelist", dvs );

  return;
}
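
The pushing worker is not shown. A minimal sketch, under the usual entry-convention assumption: each thread drains its pre-populated source freelist onto the shared freelist, so no allocation happens on the hot path.

thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *user_state )
{
  struct freelist_test_pushing_state
    *ftps = (struct freelist_test_pushing_state *) user_state;

  struct freelist_element
    *fe;

  // TRD : move every element from this thread's source freelist onto the main freelist
  while( freelist_pop(ftps->source_fs, &fe) )
    freelist_push( ftps->fs, fe );

  return( (thread_return_t) EXIT_SUCCESS );
}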
void test_lfds700_hash_a_fail_and_overwrite_on_existing_key()
{
  enum lfds700_hash_a_insert_result
    apr;

  enum lfds700_misc_validity
    dvs = LFDS700_MISC_VALIDITY_VALID;

  struct lfds700_hash_a_element
    hae_one,
    hae_two;

  struct lfds700_hash_a_state
    has;

  struct lfds700_btree_au_state
    *baus;

  struct lfds700_misc_prng_state
    ps;

  internal_display_test_name( "Fail and overwrite on existing key" );

  lfds700_misc_prng_init( &ps );

  baus = util_aligned_malloc( sizeof(struct lfds700_btree_au_state) * 10, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );

  // TRD : fail on overwrite
  lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_FAIL, NULL );

  LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_one, 1 );
  LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_one, 0 );
  apr = lfds700_hash_a_insert( &has, &hae_one, NULL, &ps );

  if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS )
    dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;

  LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_two, 1 );
  LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_two, 1 );
  apr = lfds700_hash_a_insert( &has, &hae_two, NULL, &ps );

  if( apr != LFDS700_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY )
    dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;

  lfds700_hash_a_cleanup( &has, NULL );

  // TRD : success on overwrite
  lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_OVERWRITE, NULL );

  LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_one, 1 );
  LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_one, 1 );
  apr = lfds700_hash_a_insert( &has, &hae_one, NULL, &ps );

  if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS )
    dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;

  LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_two, 1 );
  LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_two, 1 );
  apr = lfds700_hash_a_insert( &has, &hae_two, NULL, &ps );

  if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE )
    dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;

  lfds700_hash_a_cleanup( &has, NULL );

  util_aligned_free( baus );

  // TRD : print the test result
  internal_display_test_result( 1, "hash_a", dvs );

  return;
}
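
The two callbacks handed to lfds700_hash_a_init_valid_on_current_logical_core, key_compare_function and key_hash_function, are not shown. In this test the keys are small integers stored directly in the key pointer, so both callbacks can work on the pointer value itself. The prototypes below (a qsort-style compare and a hash written through an out-parameter) are assumptions about the lfds700 callback shapes, not copied from the header.

/* HYPOTHETICAL sketches - the exact callback prototypes are an assumption */
static int key_compare_function( void const *new_key, void const *existing_key )
{
  lfds700_pal_uint_t
    n = (lfds700_pal_uint_t) new_key,
    e = (lfds700_pal_uint_t) existing_key;

  // TRD : qsort-style ordering on the integer keys
  if( n < e )
    return( -1 );

  if( n > e )
    return( 1 );

  return( 0 );
}

static void key_hash_function( void const *key, lfds700_pal_uint_t *hash )
{
  // TRD : the keys are tiny integers, so the identity hash is fine for a ten-bucket table
  *hash = (lfds700_pal_uint_t) key;
}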