/* Thread entry that stress-tests compare-and-swap on the shared atomics
   (val_32, val_64, val_ptr). Each pass claims every shared slot by swapping
   the thread's marker value to zero, then releases it by swapping zero back,
   yielding while a sibling thread holds the slot. 'arg' points at the
   cas_value_t holding this thread's marker values. Always returns 0. */
void* cas_thread( object_t thread, void* arg )
{
	cas_value_t marker = *(cas_value_t*)arg;
	int pass = 0;

	/* Let sibling threads start before contending */
	thread_sleep( 10 );

	for( ; !thread_should_terminate( thread ) && ( pass < 65535 ); ++pass )
	{
		/* 32-bit slot: claim (marker -> 0) then release (0 -> marker) */
		while( !atomic_cas32( &val_32, marker.val_32, 0 ) )
			thread_yield();
		while( !atomic_cas32( &val_32, 0, marker.val_32 ) )
			thread_yield();

		/* 64-bit slot */
		while( !atomic_cas64( &val_64, marker.val_64, 0 ) )
			thread_yield();
		while( !atomic_cas64( &val_64, 0, marker.val_64 ) )
			thread_yield();

		/* pointer slot */
		while( !atomic_cas_ptr( &val_ptr, marker.val_ptr, 0 ) )
			thread_yield();
		while( !atomic_cas_ptr( &val_ptr, 0, marker.val_ptr ) )
			thread_yield();

		thread_yield();
	}

	return 0;
}
/* Thread entry that pumps the system event stream and aborts the test run
   with exit code -2 if a terminate event arrives. 'arg' is unused.
   Runs until thread_should_terminate() is set; always returns 0. */
void* event_thread( object_t thread, void* arg )
{
	event_block_t* block;
	event_t* event = 0;
	/* Fix: mark the unused parameter, matching the other thread entry
	   points in this code base and silencing -Wunused-parameter */
	FOUNDATION_UNUSED( arg );
	while( !thread_should_terminate( thread ) )
	{
		block = event_stream_process( system_event_stream() );
		event = 0;
		while( ( event = event_next( block, event ) ) )
		{
			switch( event->id )
			{
				case FOUNDATIONEVENT_TERMINATE:
					log_warn( HASH_TEST, WARNING_SUSPICIOUS, "Terminating tests due to event" );
					process_exit( -2 );
					break;

				default:
					break;
			}
		}
		thread_sleep( 10 );
	}
	return 0;
}
/* Profile I/O thread: periodically flushes completed profile blocks and,
   every ~10 wakeups, a system-info block through the _profile_write callback.
   On shutdown it drains any remaining root blocks and emits an
   end-of-stream marker block. 'arg' is unused; always returns 0. */
static void* _profile_io( object_t thread, void* arg )
{
	unsigned int system_info_counter = 0;
	/* Pre-built block re-sent periodically so stream consumers can
	   recover the tick frequency (stored in .start) */
	profile_block_t system_info;
	FOUNDATION_UNUSED( arg );
	memset( &system_info, 0, sizeof( profile_block_t ) );
	system_info.data.id = PROFILE_ID_SYSTEMINFO;
	system_info.data.start = time_ticks_per_second();
	string_copy( system_info.data.name, "sysinfo", 7 );
	while( !thread_should_terminate( thread ) )
	{
		thread_sleep( _profile_wait );

		/* Nothing accumulated since last pass - skip the whole cycle */
		if( !atomic_load32( &_profile_root ) )
			continue;

		profile_begin_block( "profile_io" );

		/* Re-check under the profiling block; root may have been consumed */
		if( atomic_load32( &_profile_root ) )
		{
			profile_begin_block( "process" );

			//This is thread safe in the sense that only completely closed and ended
			//blocks will be put as children to root block, so no additional blocks
			//will ever be added to child subtrees while we process it here
			_profile_process_root_block();

			profile_end_block();
		}

		/* Roughly every 10 wakeups, refresh the system-info block in the stream */
		if( system_info_counter++ > 10 )
		{
			if( _profile_write )
				_profile_write( &system_info, sizeof( profile_block_t ) );
			system_info_counter = 0;
		}

		profile_end_block();
	}

	/* Shutdown: flush whatever is still queued, then mark end of stream */
	if( atomic_load32( &_profile_root ) )
		_profile_process_root_block();

	if( _profile_write )
	{
		profile_block_t terminate;
		memset( &terminate, 0, sizeof( profile_block_t ) );
		terminate.data.id = PROFILE_ID_ENDOFSTREAM;
		_profile_write( &terminate, sizeof( profile_block_t ) );
	}

	return 0;
}
/* Application event pump for the test harness: mirrors foundation lifecycle
   events into the global test flags. On mobile platforms start/terminate
   are recorded; elsewhere a terminate event aborts the process with -2.
   'arg' is unused; always returns 0 once asked to terminate. */
static void* event_thread( object_t thread, void* arg )
{
	event_block_t* pending;
	event_t* ev;
	FOUNDATION_UNUSED( arg );

	while( !thread_should_terminate( thread ) )
	{
		pending = event_stream_process( system_event_stream() );

		/* Walk all events delivered in this batch */
		for( ev = event_next( pending, 0 ); ev; ev = event_next( pending, ev ) )
		{
			switch( ev->id )
			{
				case FOUNDATIONEVENT_START:
#if FOUNDATION_PLATFORM_IOS || FOUNDATION_PLATFORM_ANDROID
					log_debug( HASH_TEST, "Application start event received" );
					_test_should_start = true;
#endif
					break;

				case FOUNDATIONEVENT_TERMINATE:
#if FOUNDATION_PLATFORM_IOS || FOUNDATION_PLATFORM_ANDROID
					log_debug( HASH_TEST, "Application stop/terminate event received" );
					_test_should_terminate = true;
#else
					log_warn( HASH_TEST, WARNING_SUSPICIOUS, "Terminating tests due to event" );
					process_exit( -2 );
#endif
					break;

				case FOUNDATIONEVENT_FOCUS_GAIN:
					_test_have_focus = true;
					break;

				case FOUNDATIONEVENT_FOCUS_LOST:
					_test_have_focus = false;
					break;

				default:
					break;
			}
		}

		thread_sleep( 10 );
	}

	log_debug( HASH_TEST, "Application event thread exiting" );
	return 0;
}
/* Load generator for the profile stream: each pass emits a fixed sequence
   of 12 profile events (messages, nested blocks, lock/wait/signal pairs)
   and bumps _profile_generated_blocks so the test can verify throughput.
   'arg' is unused; always returns 0. */
static void* _profile_stream_thread( object_t thread, void* arg )
{
	FOUNDATION_UNUSED( arg );

	thread_yield();

	while( !thread_should_terminate( thread ) )
	{
		profile_log( "Thread message" );

		/* Outer block with one nested sub-block */
		profile_begin_block( "Thread block" );
		profile_update_block();

		profile_begin_block( "Thread subblock" );
		profile_log( "Sub message" );
		profile_trylock( "Trylock" );
		profile_lock( "Trylock" );
		profile_wait( "Wait" );
		profile_signal( "Signal" );
		thread_sleep( 2 );
		profile_unlock( "Trylock" );
		profile_log( "End sub" );
		profile_end_block();

		/* Lock traffic directly inside the outer block */
		profile_trylock( "Trylock" );
		thread_sleep( 1 );
		profile_lock( "Trylock" );
		thread_sleep( 4 );
		profile_unlock( "Trylock" );

		profile_end_block();

		thread_sleep( 4 );

		/* 12 profile events were generated above */
		atomic_add64( &_profile_generated_blocks, 12 );
	}

	return 0;
}
/* Thread entry that repeatedly decrements the shared atomics val_32 and
   val_64 (256 decrements of each per pass, up to 65535 passes) to stress
   atomic_decr32/atomic_decr64 under contention. 'arg' is unused; always
   returns 0. */
void* dec_thread( object_t thread, void* arg )
{
	int loop = 0;
	int icount;
	/* Fix: mark the unused parameter, matching the other thread entry
	   points in this code base and silencing -Wunused-parameter */
	FOUNDATION_UNUSED( arg );
	while( !thread_should_terminate( thread ) && ( loop < 65535 ) )
	{
		for( icount = 0; icount < 256; ++icount )
		{
			atomic_decr32( &val_32 );
			atomic_decr64( &val_64 );
		}
		++loop;
		thread_yield();
	}
	return 0;
}
/* Application event pump: records start/terminate events into the test
   flags on mobile platforms, and aborts with exit code -2 on terminate
   elsewhere. 'arg' is unused; always returns 0. */
static void* event_thread( object_t thread, void* arg )
{
	event_block_t* block;
	event_t* event = 0;
	/* Fix: mark the unused parameter (consistent with sibling entry points) */
	FOUNDATION_UNUSED( arg );
	while( !thread_should_terminate( thread ) )
	{
		block = event_stream_process( system_event_stream() );
		event = 0;
		while( ( event = event_next( block, event ) ) )
		{
			switch( event->id )
			{
				case FOUNDATIONEVENT_START:
/* Fix: condition read 'FOUNDATION_PLATFORM_IOS || FOUNDATION_PLATFORM_IOS'
   (same macro twice), so Android never took the mobile branch; the sibling
   event thread uses IOS || ANDROID here */
#if FOUNDATION_PLATFORM_IOS || FOUNDATION_PLATFORM_ANDROID
					log_infof( HASH_TEST, "Application start event received" );
					_test_should_start = true;
#endif
					break;

				case FOUNDATIONEVENT_TERMINATE:
/* Fix: same duplicated-macro condition corrected to IOS || ANDROID */
#if FOUNDATION_PLATFORM_IOS || FOUNDATION_PLATFORM_ANDROID
					log_infof( HASH_TEST, "Application terminate event received" );
					_test_should_terminate = true;
#else
					log_warn( HASH_TEST, WARNING_SUSPICIOUS, "Terminating tests due to event" );
					process_exit( -2 );
#endif
					break;

				default:
					break;
			}
		}
		thread_sleep( 10 );
	}
	return 0;
}
/* Event-producer thread for the event stream test: posts randomly sized
   events (optionally with a delivery timestamp payload) until asked to
   terminate or args->end_time is reached. Sleeps args->sleep_time ms
   between posts, or just yields when sleep_time is 0. Returns the number
   of events produced, smuggled through the void* return value. */
static void* producer_thread( object_t thread, void* arg )
{
	uint8_t buffer[256] = {0};
	producer_thread_arg_t* args = arg;
	unsigned int produced = 0;
	tick_t timestamp = 0;
	do
	{
		if( args->sleep_time )
			thread_sleep( (int)args->sleep_time );
		else
			thread_yield();

		/* Random future delivery time, or 0 for immediate delivery */
		timestamp = args->max_delay ? time_current() + random64_range( 0, args->max_delay ) : 0;

		/* Fix: source had 'memcpy( buffer, ×tamp, ... )' - HTML-entity
		   mojibake ('&times;' -> U+00D7) of '&timestamp' */
		memcpy( buffer, &timestamp, sizeof( tick_t ) );

		/* Delayed events need at least the 8-byte timestamp payload */
		event_post( args->stream, random32_range( 1, 65535 ), random32_range( timestamp ? 8 : 0, 256 ), args->id, buffer, timestamp );
		++produced;
	} while( !thread_should_terminate( thread ) && ( time_current() < args->end_time ) );
	return (void*)((uintptr_t)produced);
}
/* Thread entry that stress-tests atomic add primitives: each pass applies
   two mirrored sequences of signed additions to the shared val_32/val_64
   (alternating add and exchange-and-add variants) whose contributions
   cancel out, so the net effect per pass is zero. 'arg' is unused; always
   returns 0. */
void* add_thread( object_t thread, void* arg )
{
	int loop = 0;
	int32_t icount = 0;
	/* Fix: mark the unused parameter, matching the other thread entry
	   points in this code base and silencing -Wunused-parameter */
	FOUNDATION_UNUSED( arg );
	while( !thread_should_terminate( thread ) && ( loop < 65535 ) )
	{
		for( icount = 0; icount < 128; ++icount )
		{
			atomic_add32( &val_32, icount % 2 ? -icount : icount );
			atomic_exchange_and_add64( &val_64, icount % 2 ? -icount : icount );
		}
		for( icount = 0; icount < 128; ++icount )
		{
			atomic_exchange_and_add32( &val_32, icount % 2 ? icount : -icount );
			atomic_add64( &val_64, icount % 2 ? icount : -icount );
		}
		++loop;
		thread_yield();
	}
	return 0;
}