/**
 * Destroy the threadpool.
 *
 * Signals every worker thread to leave its loop, gives idle threads up to
 * one second to exit, then keeps waking any remaining threads until all are
 * gone. Finally destroys the job queue and frees the per-thread structures
 * and the pool itself.
 *
 * Fix: the original dereferenced thpool_p unconditionally; a NULL pool now
 * returns immediately (consistent with the ThPool variant in this file).
 *
 * @param thpool_p the pool to destroy; NULL is silently ignored
 */
void thpool_destroy(thpool_* thpool_p){
    /* Nothing to destroy */
    if (thpool_p == NULL)
        return;

    volatile int threads_total = thpool_p->num_threads_alive;

    /* End each thread's infinite loop */
    threads_keepalive = 0;

    /* Give one second to kill idle threads */
    double TIMEOUT = 1.0;
    time_t start, end;
    double tpassed = 0.0;
    time(&start);
    while (tpassed < TIMEOUT && thpool_p->num_threads_alive){
        bsem_post_all(thpool_p->jobqueue.has_jobs);
        time(&end);
        tpassed = difftime(end, start);
    }

    /* Poll remaining threads */
    while (thpool_p->num_threads_alive){
        bsem_post_all(thpool_p->jobqueue.has_jobs);
        sleep(1);
    }

    /* Job queue cleanup */
    jobqueue_destroy(&thpool_p->jobqueue);

    /* Deallocs */
    int n;
    for (n = 0; n < threads_total; n++){
        thread_destroy(thpool_p->threads[n]);
    }
    free(thpool_p->threads);
    free(thpool_p);
}
/**
 * @brief Tear down a thread pool.
 *
 * Waits for the currently running threads to finish, then releases the
 * remaining work groups, the job queue, the per-thread structures and the
 * pool itself.
 *
 * @example
 * int main() {
 *     threadpool thpool1 = thpool_init(2);
 *     threadpool thpool2 = thpool_init(2);
 *     ..
 *     thpool_destroy(thpool1);
 *     ..
 *     return 0;
 * }
 *
 * @param thpool the pool to destroy; NULL is silently ignored
 * @return nothing
 */
static void thpool_destroy(ThPool* thpool) {
    if (thpool == NULL)
        return;

    volatile int alive_at_start = thpool->num_threads_alive;

    /* ask every worker thread to leave its loop */
    thpool->threads_keepalive = 0;

    /* wake idle threads and give them up to one second to exit */
    const double kill_timeout = 1.0;
    time_t t0;
    time(&t0);
    double waited = 0.0;
    while (waited < kill_timeout && thpool->num_threads_alive) {
        bsem_post_all(thpool->jobqueue->has_jobs);
        time_t now;
        time(&now);
        waited = difftime(now, t0);
    }

    /* keep waking stragglers until every thread has exited */
    while (thpool->num_threads_alive) {
        bsem_post_all(thpool->jobqueue->has_jobs);
        sleep(1);
    }

    /* release any remaining work groups */
    thpool_cleanup(thpool);

    /* tear down the job queue */
    jobqueue_destroy(thpool);
    free(thpool->jobqueue);

    /* free the per-thread structures and the pool itself */
    for (int n = 0; n < alive_at_start; n++) {
        thread_destroy(thpool->threads[n]);
    }
    free(thpool->threads);
    free(thpool);

    if (cppadcg_pool_verbose) {
        fprintf(stdout, "thpool_destroy(): thread pool destroyed\n");
    }
}
/**
 * Push several (already allocated) jobs onto the queue under a single lock,
 * then wake all waiting threads once.
 */
static void jobqueue_multipush(JobQueue* queue, Job* newjob[], int nJobs) {
    pthread_mutex_lock(&queue->rwmutex);
    for (int k = 0; k < nJobs; ++k) {
        jobqueue_push_internal(queue, newjob[k]);
    }
    bsem_post_all(queue->has_jobs);
    pthread_mutex_unlock(&queue->rwmutex);
}
/**
 * Split the given jobs among the pool's threads, balancing the load using
 * the average elapsed time of each job.
 *
 * One WorkGroup is built per thread. When the timings changed (or no
 * assignment exists yet, signalled by jobs2thread[0] < 0) a fresh
 * job-to-thread assignment is computed greedily; otherwise the previous
 * assignment in jobs2thread is reused. The groups are then prepended to the
 * job queue under a single lock.
 *
 * On success the Job objects in newjobs are copied into the groups and
 * freed (ownership is taken); on failure newjobs is left untouched.
 *
 * Fixes over the previous version:
 *  - no longer leaks n_jobs/groups/durations on allocation failure;
 *  - returns early when there are no jobs (the old code then wrote through
 *    groups[num_threads - 1] with num_threads == 0, i.e. groups[-1]);
 *  - the per-group allocations are now checked.
 *
 * @param thpool             the thread pool
 * @param newjobs            array of nJobs allocated jobs (freed on success)
 * @param avgElapsed         average elapsed time of each job
 * @param jobs2thread        in/out map from job index to thread index
 * @param nJobs              number of jobs in newjobs
 * @param lastElapsedChanged whether avgElapsed changed since the last call
 * @return 0 on success, -1 on memory-allocation failure
 */
static int jobqueue_push_static_jobs(ThPool* thpool, Job* newjobs[], const float avgElapsed[], int jobs2thread[], int nJobs, int lastElapsedChanged) {
    float total_duration, target_duration, next_duration, best_duration;
    int i, j, iBest;
    int added;
    int created = 0; /* number of fully-built work groups (for cleanup) */
    int num_threads = thpool->num_threads;
    int* n_jobs = NULL;
    float* durations = NULL;
    WorkGroup** groups = NULL;
    WorkGroup* group;

    /* never create more groups than there are jobs */
    if (nJobs < num_threads)
        num_threads = nJobs;

    /* nothing to schedule: avoids malloc(0) below and, crucially, the
     * out-of-bounds groups[num_threads - 1] write when num_threads == 0 */
    if (num_threads <= 0)
        return 0;

    n_jobs = (int*) malloc(num_threads * sizeof(int));
    if (n_jobs == NULL) {
        fprintf(stderr, "jobqueue_push_static_jobs(): Could not allocate memory\n");
        goto fail;
    }

    groups = (WorkGroup**) malloc(num_threads * sizeof(WorkGroup*));
    if (groups == NULL) {
        fprintf(stderr, "jobqueue_push_static_jobs(): Could not allocate memory\n");
        goto fail;
    }

    for (i = 0; i < num_threads; ++i) {
        n_jobs[i] = 0;
    }

    total_duration = 0;
    for (i = 0; i < nJobs; ++i) {
        total_duration += avgElapsed[i];
    }

    if (nJobs > 0 && (lastElapsedChanged || jobs2thread[0] < 0)) {
        /* timings changed (or first call): compute a fresh assignment */
        durations = (float*) malloc(num_threads * sizeof(float));
        if (durations == NULL) {
            fprintf(stderr, "jobqueue_push_static_jobs(): Could not allocate memory\n");
            goto fail;
        }
        for (i = 0; i < num_threads; ++i) {
            durations[i] = 0;
        }

        /* greedy first-fit: place each job in the first thread still below
         * the target duration, otherwise in the least-loaded thread */
        target_duration = total_duration / num_threads;
        for (j = 0; j < nJobs; ++j) {
            added = 0;
            for (i = 0; i < num_threads; ++i) {
                next_duration = durations[i] + avgElapsed[j];
                if (next_duration < target_duration) {
                    durations[i] = next_duration;
                    n_jobs[i]++;
                    jobs2thread[j] = i;
                    added = 1;
                    break;
                }
            }
            if (!added) {
                best_duration = durations[0] + avgElapsed[j];
                iBest = 0;
                for (i = 1; i < num_threads; ++i) {
                    next_duration = durations[i] + avgElapsed[j];
                    if (next_duration < best_duration) {
                        best_duration = next_duration;
                        iBest = i;
                    }
                }
                durations[iBest] = best_duration;
                n_jobs[iBest]++;
                jobs2thread[j] = iBest;
            }
        }
    } else {
        /* reuse the previous job-to-thread assignment */
        for (j = 0; j < nJobs; ++j) {
            n_jobs[jobs2thread[j]]++;
        }
    }

    /* create the work groups (one per thread) */
    for (created = 0; created < num_threads; ++created) {
        group = (WorkGroup*) malloc(sizeof(WorkGroup));
        if (group == NULL) {
            fprintf(stderr, "jobqueue_push_static_jobs(): Could not allocate memory\n");
            goto fail;
        }
        group->size = 0;
        group->jobs = (Job*) malloc(n_jobs[created] * sizeof(Job));
        /* malloc(0) may legitimately return NULL; only a real failure aborts */
        if (group->jobs == NULL && n_jobs[created] > 0) {
            fprintf(stderr, "jobqueue_push_static_jobs(): Could not allocate memory\n");
            free(group);
            goto fail;
        }
        groups[created] = group;
    }
    for (i = 0; i < num_threads - 1; ++i) {
        groups[i]->prev = groups[i + 1];
    }
    groups[num_threads - 1]->prev = NULL;

    /* place the jobs in their work groups (ownership of newjobs is taken) */
    for (j = 0; j < nJobs; ++j) {
        group = groups[jobs2thread[j]];
        group->jobs[group->size] = *newjobs[j]; // copy
        group->size++;
        free(newjobs[j]);
    }

    if (cppadcg_pool_verbose) {
        if (durations != NULL) {
            for (i = 0; i < num_threads; ++i) {
                fprintf(stdout, "jobqueue_push_static_jobs(): work group %i with %i jobs for %e s\n", i, groups[i]->size, durations[i]);
            }
        } else {
            for (i = 0; i < num_threads; ++i) {
                fprintf(stdout, "jobqueue_push_static_jobs(): work group %i with %i jobs\n", i, groups[i]->size);
            }
        }
    }

    /* prepend the chain of groups to the queue under a single lock */
    pthread_mutex_lock(&thpool->jobqueue->rwmutex);
    groups[num_threads - 1]->prev = thpool->jobqueue->group_front;
    thpool->jobqueue->group_front = groups[0];
    bsem_post_all(thpool->jobqueue->has_jobs);
    pthread_mutex_unlock(&thpool->jobqueue->rwmutex);

    /* clean up */
    free(durations);
    free(n_jobs);
    free(groups);
    return 0;

fail:
    /* release everything built so far; newjobs remains owned by the caller */
    for (i = 0; i < created; ++i) {
        free(groups[i]->jobs);
        free(groups[i]);
    }
    free(durations);
    free(n_jobs);
    free(groups);
    return -1;
}