void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) {
    ASSERT( max_thread>=0, NULL );
#if _WIN32||_WIN64
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    client.server = &server;
    MyTeam team( server, size_t(max_thread) );
    MyServer::size_type n_thread = 0;
    for( int iteration=0; iteration<4; ++iteration ) {
        for( size_t i=0; i<team.max_thread; ++i )
            team.info[i].ran = false;
        switch( iteration ) {
            default:
                n_thread = int(max_thread);
                break;
            case 1:
                // No change in number of threads.
                break;
            case 2:
                // Decrease number of threads.
                n_thread = int(max_thread)/2;
                break;
            // Case 3 is the same code as the default, but has the effect of increasing the number of threads.
        }
        team.barrier = 0;
        REMARK( "client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) );
        server.independent_thread_number_changed( n_extra );
        if( checker ) {
            // Give RML time to respond to the change in number of threads.
            Harness::Sleep(1);
        }
        int n_delivered = server.try_increase_load( n_thread, StrictTeam );
        ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" );
        if( n_delivered<0 ) {
            REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered );
            server.independent_thread_number_changed( -n_extra );
            n_delivered = 0;
        } else {
            team.n_thread = n_delivered;
            ::rml::job* job_array[JobArraySize];
            // Plant a sentinel one past the requested range, to detect an out-of-bounds write by get_threads.
            job_array[n_delivered] = (::rml::job*)intptr_t(-1);
            server.get_threads( n_delivered, &team, job_array );
            __TBB_ASSERT( job_array[n_delivered]==(::rml::job*)intptr_t(-1), NULL );
            for( int i=0; i<n_delivered; ++i ) {
                MyJob* j = static_cast<MyJob*>(job_array[i]);
                int s = j->state;
                ASSERT( s==MyJob::idle || s==MyJob::busy, NULL );
            }
            server.independent_thread_number_changed( -n_extra );
            REMARK( "client %d: team size is %d\n", client.client_id(), n_delivered );
            if( checker ) {
                checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra );
            }
            // Protocol requires that the master wait until the workers have called "done_processing".
            while( team.barrier!=n_delivered ) {
                ASSERT( team.barrier>=0, NULL );
                ASSERT( team.barrier<=n_delivered, NULL );
                __TBB_Yield();
            }
            REMARK( "client %d: team completed\n", client.client_id() );
            for( int i=0; i<n_delivered; ++i ) {
                ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" );
            }
        }
        for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) {
            ASSERT( !team.info[i].ran, "thread on team ran with illegal index" );
        }
    }
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif /* _WIN32||_WIN64 */
}
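// A minimal driver sketch for the function above, showing the intended call
// pattern: sweep the oversubscription level (n_extra) for a fixed team size.
// RunFireUpJobsSweep and the sweep bounds are hypothetical, not part of the
// harness; only MyServer, MyClient, Checker, and FireUpJobs come from this file.
void RunFireUpJobsSweep( MyServer& server, MyClient& client, int max_thread ) {
    for( int n_extra=0; n_extra<=4; n_extra+=2 ) {
        // A NULL checker skips the timing-sensitive delivery checks,
        // which FireUpJobs explicitly allows.
        FireUpJobs( server, client, max_thread, n_extra, /*checker=*/NULL );
    }
}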
void FireUpJobs( MyServer& server, MyClient& client, int n_thread, int n_extra, Checker* checker ) {
    REMARK( "client %d: calling adjust_job_count_estimate(%d)\n", client.client_id(), n_thread );
    // Exercise independent_thread_number_changed, even for zero values.
    server.independent_thread_number_changed( n_extra );
#if _WIN32||_WIN64
    ::rml::server::execution_resource_t me;
    server.register_master( me );
#endif /* _WIN32||_WIN64 */
    // Experiments indicate that when oversubscribing, the main thread should wait a little
    // while for the RML worker threads to do some work.
    if( checker ) {
        // Give RML time to respond to the change in number of threads.
        Harness::Sleep(1);
        for( int k=0; k<n_thread; ++k )
            client.job_array[k].processing_count = 0;
    }
    // Close the gate to keep worker threads from returning to RML until a snapshot is taken.
    client.close_the_gate();
    server.adjust_job_count_estimate( n_thread );
    int n_used = 0;
    if( checker ) {
        Harness::Sleep(100);
        for( int k=0; k<n_thread; ++k )
            if( client.job_array[k].processing_count )
                ++n_used;
    }
    // Open the gate.
    client.open_the_gate();
    // Logic further below presumes that jobs never starve, so undo the previous call
    // to independent_thread_number_changed before waiting on those jobs.
    server.independent_thread_number_changed( -n_extra );
    REMARK( "client %d: wait for each job to be processed at least once\n", client.client_id() );
    // Calculate the number of jobs that are expected to get threads.
    int expected = n_thread;
    // Wait for the expected number of jobs to be processed.
#if RML_USE_WCRM
    int default_concurrency = server.default_concurrency();
    if( N_TestConnections>0 ) {
        if( default_concurrency+1>=8 && n_thread<=3 && N_TestConnections<=3
            && (default_concurrency/int(N_TestConnections)-1)>=n_thread ) {
#endif /* RML_USE_WCRM */
            for(;;) {
                int n = 0;
                for( int k=0; k<n_thread; ++k )
                    if( client.job_array[k].processing_count!=0 )
                        ++n;
                if( n>=expected )
                    break;
                server.yield();
            }
#if RML_USE_WCRM
        } else if( n_thread>0 ) {
            // Bounded wait: give up after ~20 ms rather than spinning forever.
            for( int m=0; m<20; ++m ) {
                int n = 0;
                for( int k=0; k<n_thread; ++k )
                    if( client.job_array[k].processing_count!=0 )
                        ++n;
                if( n>=expected )
                    break;
                Harness::Sleep(1);
            }
        }
    }
#endif /* RML_USE_WCRM */
    server.adjust_job_count_estimate( -n_thread );
#if _WIN32||_WIN64
    server.unregister_master( me );
#endif /* _WIN32||_WIN64 */
    // Give RML some time to respond.
    if( checker ) {
        Harness::Sleep(1);
        checker->check_number_of_threads_delivered( n_used, n_thread, n_extra );
    }
}
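// A minimal driver sketch for the variant above, assuming it lives in its own
// test translation unit (the two FireUpJobs definitions share a signature and
// could not coexist in one). RunEstimateSweep and its loop bounds are
// hypothetical; MyServer, MyClient, Checker, and FireUpJobs come from this file.
void RunEstimateSweep( MyServer& server, MyClient& client, int max_thread, Checker* checker ) {
    // Ramp the job-count estimate from 0 up to max_thread; n_extra=1 exercises
    // independent_thread_number_changed with a nonzero delta each round.
    for( int n_thread=0; n_thread<=max_thread; ++n_thread )
        FireUpJobs( server, client, n_thread, /*n_extra=*/1, checker );
}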