/**
 * Initialize from scratch the ensemble members selected in @iens_mask.
 *
 * One arg_pack is allocated per ensemble member (active or not) so that
 * the cleanup loop can free them unconditionally; only active members
 * get a job added to the thread pool.  Blocks until all jobs complete.
 */
void enkf_main_initialize_from_scratch_with_bool_vector(enkf_main_type * enkf_main , const stringlist_type * param_list ,const bool_vector_type * iens_mask , init_mode_type init_mode) {
  const int num_cpu  = 4;
  const int ens_size = enkf_main_get_ensemble_size( enkf_main );
  thread_pool_type * tp = thread_pool_alloc( num_cpu , true );
  arg_pack_type ** arg_list = util_calloc( ens_size , sizeof * arg_list );
  int iens;

  for (iens = 0; iens < ens_size; iens++) {
    arg_pack_type * args = arg_pack_alloc();
    arg_list[iens] = args;

    if (bool_vector_safe_iget(iens_mask , iens)) {
      arg_pack_append_ptr( args , enkf_main );
      arg_pack_append_const_ptr( args , param_list );
      arg_pack_append_int( args , iens );
      arg_pack_append_int( args , init_mode );
      thread_pool_add_job( tp , enkf_main_initialize_from_scratch_mt , args );
    }
  }
  thread_pool_join( tp );

  for (iens = 0; iens < ens_size; iens++)
    arg_pack_free( arg_list[iens] );
  free( arg_list );
  thread_pool_free( tp );
}
/**
 * Initialize an ensemble from the parsed @config instance:
 *   1a. load all eclipse summary cases listed under CASE_LIST (in
 *       parallel, using LOAD_THREADS worker threads),
 *   1b. pick up remaining config settings (NUM_INTERP),
 *   2.  build the time interpolation and sanity-check the ensemble size.
 *
 * The first loaded case is used as the reference case.  Exits hard if
 * fewer than MIN_SIZE realizations were loaded.
 */
void ensemble_init( ensemble_type * ensemble , config_type * config) {

  /*1 : Loading ensembles and settings from the config instance */

  /*1a: Loading the eclipse summary cases. */
  {
    thread_pool_type * tp = thread_pool_alloc( LOAD_THREADS , true );
    {
      int i,j;
      for (i=0; i < config_get_occurences( config , "CASE_LIST"); i++) {
        const stringlist_type * case_list = config_iget_stringlist_ref( config , "CASE_LIST" , i );
        for (j=0; j < stringlist_get_size( case_list ); j++)
          ensemble_load_from_glob( ensemble , stringlist_iget( case_list , j ) , tp);
      }
    }
    thread_pool_join( tp );
    thread_pool_free( tp );
  }

  /* The first loaded case becomes the reference case. */
  {
    const sum_case_type * tmp = vector_iget_const( ensemble->data , 0 );
    ensemble->refcase = tmp->ecl_sum;
  }

  /*1b: Other config settings */
  if (config_item_set( config , "NUM_INTERP" ))
    ensemble->num_interp = config_iget_as_int( config , "NUM_INTERP" , 0 , 0 );

  /*2: Remaining initialization */
  ensemble_init_time_interp( ensemble );
  /* BUGFIX: removed duplicated word ("with with") in the error message. */
  if (vector_get_size( ensemble->data ) < MIN_SIZE )
    util_exit("Sorry - quantiles make no sense with < %d realizations; should have ~> 100.\n" , MIN_SIZE);
}
/**
 * Tear down a thread pool: clear the run flag, wake all workers,
 * detach them, destroy the synchronization primitives and free the
 * pool memory.
 *
 * NOTE(review): the usleep() calls are a timing hack rather than real
 * synchronization - workers are only *detached*, so there is no
 * guarantee they have exited before the pool memory is freed.  A
 * join-based shutdown would be safer; left as-is to avoid changing
 * the shutdown semantics here.
 */
void thread_pool_destroy(t_thread_pool *p) {
  int i = 0;

  p->loop = 0;                                   /* signal workers to stop looping */

  /* wake up all threads so they can observe loop == 0 */
  if (pthread_cond_broadcast(&(p->condvar)) != 0) {
    write(2, "Error pthread_cond_broadcast\n", strlen("Error pthread_cond_broadcast\n"));
    exit(-1);
  }

  usleep(1000);                                  /* give workers a chance to wake */

  /* detach threads.  BUGFIX: the error message previously said
     "pthread_join" although the failing call is pthread_detach. */
  while (i < p->nb_workers) {
    if (pthread_detach(p->threads[i]) != 0) {
      write(2, "Error pthread_detach\n", strlen("Error pthread_detach\n"));
      exit(-1);
    }
    i++;
  }
  usleep(1000);

  /* destroy condvar and mutex */
  pthread_mutex_destroy(&(p->lock));
  pthread_cond_destroy(&(p->condvar));

  /* free internal members and the pool itself.  (Removed the dead
     `p = NULL;` store - it only nulled the local copy of the pointer.) */
  thread_pool_free(p);
  free(p);
}
int main(int argc , char ** argv) { const int queue_timeout = 180; const int submit_timeout = 180; const int status_timeout = 180; const int number_of_jobs = 250; const int submit_threads = number_of_jobs / 10 ; const int status_threads = number_of_jobs + 1; const char * job = util_alloc_abs_path(argv[1]); rng_type * rng = rng_alloc( MZRAN , INIT_CLOCK ); test_work_area_type * work_area = test_work_area_alloc("job_queue"); job_type **jobs = alloc_jobs( rng , number_of_jobs , job); job_queue_type * queue = job_queue_alloc(number_of_jobs, "OK", "ERROR"); queue_driver_type * driver = queue_driver_alloc_local(); job_queue_manager_type * queue_manager = job_queue_manager_alloc( queue ); job_queue_set_driver(queue, driver); job_queue_manager_start_queue(queue_manager, 0, false , true); { thread_pool_type * status_pool = thread_pool_alloc( status_threads , true ); thread_pool_type * submit_pool = thread_pool_alloc( submit_threads , true ); submit_jobs( queue , number_of_jobs , jobs , submit_pool ); status_jobs( queue , number_of_jobs , jobs , status_pool ); if (!thread_pool_try_join( submit_pool , submit_timeout )) util_exit("Joining submit pool failed \n"); thread_pool_free( submit_pool ); job_queue_submit_complete(queue); if (!thread_pool_try_join( status_pool , status_timeout)) util_exit("Joining status pool failed \n"); thread_pool_free( status_pool ); } if (!job_queue_manager_try_wait(queue_manager , queue_timeout)) util_exit("job_queue never completed \n"); job_queue_manager_free(queue_manager); job_queue_free(queue); queue_driver_free(driver); check_jobs( number_of_jobs , jobs ); test_work_area_free(work_area); rng_free( rng ); }
/**
 * GSource finalize callback for an AioContext.
 *
 * Releases the context's thread pool first, then unregisters and cleans
 * up the event notifier, destroys the bottom-half lock and frees the
 * pollfd array.  The order matters: the pool is freed before the
 * notifier it may signal through is torn down.
 */
static void aio_ctx_finalize(GSource *source) {
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);
    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);   /* detach handler */
    event_notifier_cleanup(&ctx->notifier);
    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);                          /* TRUE: free segment too */
}
/** * \brief Entry point of the program. * \param argc number of arguments. * \param argv array of arguments. * \return EXIT_SUCCESS or EXIT_FAILURE. */ int main(int argc, char** argv) { const size_t tasks_size = 20; thread_pool th = NULL; struct thread_pool_task tasks[tasks_size]; (void)argc; (void)argv; fprintf(stdout, "Begin\n"); th = thread_pool_new(10); fprintf(stdout, "Thread pool: %p\n", (void*)th); if(!th) { fprintf(stderr, "Failed to create pool errno=%d\n", errno); exit(EXIT_FAILURE); } for(unsigned int i = 0 ; i < tasks_size ; i++) { tasks[i].data = (void*)(uintptr_t)i; tasks[i].run = fcn_run; tasks[i].cleanup = fcn_cleanup; if(thread_pool_push(th, &tasks[i]) != 0) { fprintf(stderr, "Failed to add task %u\n", i); } } thread_pool_start(th); sleep(1); fprintf(stdout, "Stop stuff\n"); thread_pool_stop(th); fprintf(stdout, "Free stuff\n"); thread_pool_free(&th); fprintf(stdout, "OK\n"); fprintf(stdout, "End\n"); return EXIT_SUCCESS; }
/**
 * Initialize ensemble members in the inclusive range [iens1, iens2]
 * from scratch, splitting the range over num_cpu worker threads.
 *
 * Each worker gets a half-open sub-range [start_iens, end_iens); the
 * last worker absorbs the remainder so the full range is covered.
 * Blocks until all workers have finished.
 */
void enkf_main_initialize_from_scratch(enkf_main_type * enkf_main , const stringlist_type * param_list , int iens1 , int iens2, init_mode_enum init_mode) {
  int num_cpu = 4;
  thread_pool_type * tp = thread_pool_alloc( num_cpu , true );
  int ens_sub_size = (iens2 - iens1 + 1) / num_cpu;
  arg_pack_type ** arg_list = util_calloc( num_cpu , sizeof * arg_list );
  int i;

  printf("Setting up ensemble members from %d to %d", iens1, iens2);
  if (init_mode == INIT_CONDITIONAL) {
    printf(" using conditional initialization (keep existing parameter values).\n");
  } else if (init_mode == INIT_FORCE) {
    printf(" using forced initialization (initialize from scratch).\n");
  } else if (init_mode == INIT_NONE) {
    printf(" not initializing at all.\n");
  }
  fflush( stdout );

  for (i = 0; i < num_cpu; i++) {
    arg_list[i] = arg_pack_alloc();
    arg_pack_append_ptr( arg_list[i] , enkf_main );
    arg_pack_append_const_ptr( arg_list[i] , param_list );
    {
      /* BUGFIX: sub-ranges must be offset by iens1.  The previous code
         used start_iens = i * ens_sub_size, so for iens1 > 0 members
         [0, iens2] were initialized instead of [iens1, iens2]. */
      int start_iens = iens1 + i * ens_sub_size;
      int end_iens   = start_iens + ens_sub_size;
      if (i == (num_cpu - 1)) {
        end_iens = iens2 + 1;        /* Input is upper limit inclusive. */
        if (ens_sub_size == 0)
          start_iens = iens1;        /* Tiny range: the last worker handles everything. */
      }
      arg_pack_append_int( arg_list[i] , start_iens );
      arg_pack_append_int( arg_list[i] , end_iens );
    }
    arg_pack_append_int( arg_list[i] , init_mode );
    thread_pool_add_job( tp , enkf_main_initialize_from_scratch_mt , arg_list[i]);
  }
  thread_pool_join( tp );

  for (i = 0; i < num_cpu; i++)
    arg_pack_free( arg_list[i] );
  free( arg_list );
  thread_pool_free( tp );
  printf("Done setting up ensemble.\n");
}
/**
 * Concurrency test for time_map: run update_time_map through a thread
 * pool, then verify that every entry in the map equals its own index.
 */
void thread_test() {
  time_map_type * time_map = time_map_alloc( );

  {
    const int pool_size = 1000;
    thread_pool_type * tp = thread_pool_alloc( pool_size / 2 , true );

    thread_pool_add_job( tp , update_time_map , time_map );
    thread_pool_join( tp );
    thread_pool_free( tp );
  }

  {
    int index;
    for (index = 0; index < MAP_SIZE; index++)
      test_assert_true( time_map_iget( time_map , index ) == index );
  }

  time_map_free( time_map );
}
/**
 * Multithreaded in-place matrix multiply: delegates to
 * matrix_inplace_matmul_mt2() with a temporary, non-blocking thread
 * pool of @num_threads workers, which is freed again before returning.
 */
void matrix_inplace_matmul_mt1(matrix_type * A, const matrix_type * B , int num_threads){
  thread_pool_type * pool = thread_pool_alloc( num_threads , false );

  matrix_inplace_matmul_mt2( A , B , pool );

  thread_pool_free( pool );
}
/**
 * Main driver loop of the job queue.
 *
 * Repeatedly submits waiting jobs, updates job status and dispatches
 * the DONE / EXIT / KILL handlers, until either:
 *   - num_total_run > 0 jobs have reached a terminal state, or
 *   - num_total_run == 0, all added jobs are complete AND the producer
 *     has signalled queue->submit_complete, or
 *   - an external thread has requested exit via queue->user_exit
 *     (the loop then runs one final pass before leaving).
 *
 * Only one thread may run this at a time (guarded by run_mutex); a
 * concurrent caller aborts hard.  On exit queue->open is set false so
 * the queue must be reset before it can be run again, but it remains
 * queryable.
 */
void job_queue_run_jobs(job_queue_type * queue , int num_total_run, bool verbose) {
  int trylock = pthread_mutex_trylock( &queue->run_mutex );
  if (trylock != 0)
    util_abort("%s: another thread is already running the queue_manager\n",__func__);
  else if (!queue->user_exit) {
    /* OK - we have got an exclusive lock to the run_jobs code. */

    //Check if queue is open. Fails hard if not open
    job_queue_check_open(queue);

    /*
      The number of threads in the thread pool running callbacks. Memory
      consumption can potentially be quite high while running the DONE
      callback - should therefor not use too many threads.
    */
    const int NUM_WORKER_THREADS = 4;
    queue->work_pool = thread_pool_alloc( NUM_WORKER_THREADS , true );

    {
      bool new_jobs = false;
      bool cont = true;
      int phase = 0;

      queue->running = true;
      do {
        bool local_user_exit = false;

        job_list_get_rdlock( queue->job_list );
        /*****************************************************************/
        if (queue->user_exit) {
          /* An external thread has called the job_queue_user_exit()
             function, and we should kill all jobs, do some clearing up
             and go home. Observe that we will go through the queue
             handling codeblock below ONE LAST TIME before exiting. */
          job_queue_user_exit__( queue );
          local_user_exit = true;
        }

        job_queue_check_expired(queue);

        /*****************************************************************/
        {
          bool update_status = job_queue_update_status( queue );
          if (verbose) {
            if (update_status || new_jobs)
              job_queue_print_summary(queue , update_status );
            job_queue_update_spinner( &phase );
          }

          {
            /* Terminal states: success, failure and killed all count
               towards completion. */
            int num_complete = job_queue_status_get_count(queue->status, JOB_QUEUE_SUCCESS) +
                               job_queue_status_get_count(queue->status, JOB_QUEUE_FAILED) +
                               job_queue_status_get_count(queue->status, JOB_QUEUE_IS_KILLED);

            if ((num_total_run > 0) && (num_total_run == num_complete))
              /* The number of jobs completed is equal to the number of
                 jobs we have said we want to run; so we are finished. */
              cont = false;
            else {
              if (num_total_run == 0) {
                /* We have not informed about how many jobs we will run.
                   To check if we are complete we perform the two tests:

                     1. All the jobs which have been added with
                        job_queue_add_job() have completed.

                     2. The user has used job_queue_complete_submit() to
                        signal that no more jobs will be forthcoming. */
                if ((num_complete == job_list_get_size( queue->job_list )) && queue->submit_complete)
                  cont = false;
              }
            }
          }

          if (cont) {
            /* Submitting new jobs */
            int max_submit = 5; /* This is the maximum number of jobs
                                   submitted in one while() { ... } below.
                                   Only to ensure that the waiting time
                                   before a status update is not too long. */
            int total_active = job_queue_status_get_count(queue->status, JOB_QUEUE_PENDING) +
                               job_queue_status_get_count(queue->status, JOB_QUEUE_RUNNING);
            int num_submit_new;

            {
              int max_running = job_queue_get_max_running( queue );
              if (max_running > 0)
                num_submit_new = util_int_min( max_submit , max_running - total_active );
              else
                /* If max_running == 0 that should be interpreted as no
                   limit; i.e. the queue layer will attempt to send an
                   unlimited number of jobs to the driver - the driver
                   can reject the jobs. */
                num_submit_new = util_int_min( max_submit , job_queue_status_get_count(queue->status, JOB_QUEUE_WAITING));
            }

            new_jobs = false;
            if (job_queue_status_get_count(queue->status, JOB_QUEUE_WAITING) > 0)   /* We have waiting jobs at all           */
              if (num_submit_new > 0)                                               /* The queue can allow more running jobs */
                new_jobs = true;

            if (new_jobs) {
              int submit_count = 0;
              int queue_index = 0;

              /* Scan the job list and submit waiting jobs until the
                 per-iteration budget (num_submit_new) is exhausted. */
              while ((queue_index < job_list_get_size( queue->job_list )) && (num_submit_new > 0)) {
                job_queue_node_type * node = job_list_iget_job( queue->job_list , queue_index );
                if (job_queue_node_get_status(node) == JOB_QUEUE_WAITING) {
                  {
                    submit_status_type submit_status = job_queue_submit_job(queue , queue_index);

                    if (submit_status == SUBMIT_OK) {
                      num_submit_new--;
                      submit_count++;
                    } else if ((submit_status == SUBMIT_DRIVER_FAIL) || (submit_status == SUBMIT_QUEUE_CLOSED))
                      /* Driver rejected or queue closed - stop submitting
                         for this iteration. */
                      break;
                  }
                }
                queue_index++;
              }
            }

            {
              /* Checking for complete / exited / overtime jobs */
              int queue_index;
              for (queue_index = 0; queue_index < job_list_get_size( queue->job_list ); queue_index++) {
                job_queue_node_type * node = job_list_iget_job( queue->job_list , queue_index );

                switch (job_queue_node_get_status(node)) {
                  case(JOB_QUEUE_DONE):
                    job_queue_handle_DONE(queue, node);
                    break;
                  case(JOB_QUEUE_EXIT):
                    job_queue_handle_EXIT(queue, node);
                    break;
                  case(JOB_QUEUE_DO_KILL_NODE_FAILURE):
                    job_queue_handle_DO_KILL_NODE_FAILURE(queue, node);
                    break;
                  case(JOB_QUEUE_DO_KILL):
                    job_queue_handle_DO_KILL(queue, node);
                    break;
                  default:
                    break;
                }
              }
            }
          } else
            /* print an updated status to stdout before exiting. */
            if (verbose)
              job_queue_print_summary(queue , true);
        }
        job_list_unlock( queue->job_list );

        if (local_user_exit)
          cont = false;   /* This is how we signal that we want to get out . */
        else {
          util_yield();
          job_list_reader_wait( queue->job_list , queue->usleep_time , 8 * queue->usleep_time);
        }
      } while ( cont );
    }
    if (verbose)
      printf("\n");

    /* Drain the callback worker pool before tearing it down. */
    thread_pool_join( queue->work_pool );
    thread_pool_free( queue->work_pool );
  }

  /*
    Set the queue's "open" flag to false to signal that the queue is not
    ready to be used in a new job_queue_run_jobs or job_queue_add_job
    method call as it has not been reset yet. Not resetting the queue
    here implies that the queue object is still available for queries
    after this method has finished
  */
  queue->open = false;
  queue->running = false;
  pthread_mutex_unlock( &queue->run_mutex );
}
/**
 * Interactive menu callback: prompt the user for a set of realizations
 * and load their results from the forward model, using a small thread
 * pool.  Afterwards the runpath list is exported and any per-member
 * load messages are displayed.
 *
 * @arg is an enkf_main instance passed as void* (menu callback signature).
 */
void enkf_tui_run_manual_load__( void * arg ) {
  enkf_main_type * enkf_main = enkf_main_safe_cast( arg );
  enkf_fs_type * fs = enkf_main_get_fs( enkf_main );
  const int last_report = -1;
  const int ens_size = enkf_main_get_ensemble_size( enkf_main );
  int step1,step2;
  bool_vector_type * iactive = bool_vector_alloc( 0 , false );
  run_mode_type run_mode = ENSEMBLE_EXPERIMENT;

  enkf_main_init_run(enkf_main , run_mode);   /* This is ugly */

  step1 = 0;
  step2 = last_report;  /** Observe that for the summary data it will
                            load all the available data anyway. */

  /* Ask the user which realizations to load (empty input = all). */
  {
    char * prompt = util_alloc_sprintf("Which realizations to load (Ex: 1,3-5) <Enter for all> [M to return to menu] : [ensemble size:%d] : " , ens_size);
    char * select_string;

    util_printf_prompt(prompt , PROMPT_LEN , '=' , "=> ");
    select_string = util_alloc_stdin_line();

    enkf_tui_util_sscanf_active_list( iactive , select_string , ens_size );

    util_safe_free( select_string );
    free( prompt );
  }

  if (bool_vector_count_equal( iactive , true )) {
    int iens;
    arg_pack_type ** arg_list = util_calloc( ens_size , sizeof * arg_list );
    thread_pool_type * tp = thread_pool_alloc( 4 , true );   /* num_cpu - HARD coded. */

    /* One arg_pack per member (active or not) so cleanup below can free
       them unconditionally; only active members get a load job.  The
       positional indices of the appended values are relied on further
       down (index 6 = message list). */
    for (iens = 0; iens < ens_size; iens++) {
      arg_pack_type * arg_pack = arg_pack_alloc();
      arg_list[iens] = arg_pack;

      if (bool_vector_iget(iactive , iens)) {
        enkf_state_type * enkf_state = enkf_main_iget_state( enkf_main , iens );
        arg_pack_append_ptr( arg_pack , enkf_state);                                        /* 0: */
        arg_pack_append_ptr( arg_pack , fs );                                               /* 1: */
        arg_pack_append_int( arg_pack , step1 );      /* 2: This will be the load start parameter for the run_info struct. */
        arg_pack_append_int( arg_pack , step1 );      /* 3: Step1 */
        arg_pack_append_int( arg_pack , step2 );      /* 4: Step2 For summary data it will load the whole goddamn thing anyway.*/
        arg_pack_append_bool( arg_pack , true );      /* 5: Interactive */
        arg_pack_append_owned_ptr( arg_pack , stringlist_alloc_new() , stringlist_free__);  /* 6: List of interactive mode messages. */
        thread_pool_add_job( tp , enkf_state_load_from_forward_model_mt , arg_pack);
      }
    }

    thread_pool_join( tp );
    thread_pool_free( tp );
    printf("\n");

    /* Register the runpath of every loaded member and export the list. */
    {
      qc_module_type * qc_module = enkf_main_get_qc_module( enkf_main );
      runpath_list_type * runpath_list = qc_module_get_runpath_list( qc_module );
      for (iens = 0; iens < ens_size; iens++) {
        if (bool_vector_iget(iactive , iens)) {
          const enkf_state_type * state = enkf_main_iget_state( enkf_main , iens );
          runpath_list_add( runpath_list , iens , enkf_state_get_run_path( state ) , enkf_state_get_eclbase( state ));
        }
      }
      qc_module_export_runpath_list( qc_module );
    }

    /* Display any messages collected during the load (arg pack slot 6). */
    for (iens = 0; iens < ens_size; iens++) {
      if (bool_vector_iget(iactive , iens)) {
        stringlist_type * msg_list = arg_pack_iget_ptr( arg_list[iens] , 6 );
        if (stringlist_get_size( msg_list ))
          enkf_tui_display_load_msg( iens , msg_list );
      }
    }

    for (iens = 0; iens < ens_size; iens++)
      arg_pack_free( arg_list[iens]);
    free( arg_list );
  }
  bool_vector_free( iactive );
}
int gurls( FILE* fin, uint32_t threads ) { struct thread_pool *pool; uint32_t i; char url[1024]; char saftey_ext[5] = "txt\0"; /* validate threads */ threads = threads > 30 ? 30 : threads == 0 ? 5 : threads; pool = thread_pool_new( threads ); if( !pool ) { return -1; } /* initialize curl */ curl_global_init( CURL_GLOBAL_ALL ); for( i = 0; fgets(url, 1024, fin) ; i++ ) { gurl_url_t* u; char* aux; if( *url == '\n' || *url == '\0' ) { break; } if( !is_url(url) ) { i--; continue; } /* create url object */ u = malloc( sizeof(gurl_url_t) ); if( !u ) { break; } /* remove trailing newline */ for( aux = url; aux-url < 1024 && *aux != '\n' && *aux != '\0'; aux++ ); *aux = '\0'; /* fill up url object */ strncpy( u->url, url, 1024 ); aux = get_extension( url ); if( !aux ) aux = saftey_ext; snprintf( u->filename, 1024, "file-%010d.%s", i, aux ); thread_pool_push( pool, gurl_url_download, u ); } /* send term signal to children */ thread_pool_terminate( pool ); /* cleanup */ thread_pool_free( pool ); curl_global_cleanup(); return 0; }