void li_tasklet_pool_set_threads(liTaskletPool *pool, gint threads) {
	if (threads < 0) threads = -1;

	if (pool->threads == threads) return;

	if (NULL != pool->threadpool) {
		if (pool->threads > 0 && threads > 0) {
			/* pool was exclusive, stays exclusive. just change the number of threads */
			g_thread_pool_set_max_threads(pool->threadpool, threads, NULL);
			pool->threads = g_thread_pool_get_num_threads(pool->threadpool);
			/* as we already had exclusive threads running, pool->threads should be > 0 */
			return;
		}
		/* stop old pool */
		g_thread_pool_free(pool->threadpool, FALSE, TRUE);
		pool->threadpool = NULL;
	}

	if (threads != 0) {
		pool->threadpool = g_thread_pool_new(run_tasklet, pool, threads, (threads > 0), NULL);
		if (threads > 0) {
			/* exclusive pool, see how many threads we got */
			threads = g_thread_pool_get_num_threads(pool->threadpool);
			if (threads == 0) {
				/* couldn't get exclusive threads, share threads instead */
				g_thread_pool_free(pool->threadpool, FALSE, TRUE);
				pool->threadpool = g_thread_pool_new(run_tasklet, pool, -1, FALSE, NULL);
				threads = -1;
			}
		}
	}

	pool->threads = threads;
}
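The function above falls back from an exclusive pool to a shared one when dedicated threads cannot be created. A minimal standalone sketch of that fallback, not taken from the project above: the GLib calls are real (an error can only be reported by g_thread_pool_new() when exclusive is TRUE), but `demo_task` and `demo_new_pool_with_fallback` are hypothetical names used only for illustration.

#include <glib.h>

/* hypothetical worker, for illustration only */
static void
demo_task (gpointer data, gpointer user_data)
{
  g_print ("task %d\n", GPOINTER_TO_INT (data));
}

static GThreadPool *
demo_new_pool_with_fallback (gint threads)
{
  GError *error = NULL;
  /* exclusive=TRUE asks for `threads` dedicated threads up front */
  GThreadPool *pool = g_thread_pool_new (demo_task, NULL, threads, TRUE, &error);

  if (error != NULL) {
    /* could not create all exclusive threads: drop the partial pool and
     * fall back to GLib's shared thread reservoir, as the snippet above does */
    g_clear_error (&error);
    if (pool != NULL)
      g_thread_pool_free (pool, FALSE, TRUE);
    pool = g_thread_pool_new (demo_task, NULL, -1, FALSE, NULL);
  }
  return pool;
}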
static void
test_thread_pools (void)
{
  GThreadPool *pool1, *pool2, *pool3;
  guint runs;
  guint i;

  pool1 = g_thread_pool_new ((GFunc)test_thread_pools_entry_func, NULL, 3, FALSE, NULL);
  pool2 = g_thread_pool_new ((GFunc)test_thread_pools_entry_func, NULL, 5, TRUE, NULL);
  pool3 = g_thread_pool_new ((GFunc)test_thread_pools_entry_func, NULL, 7, TRUE, NULL);

  runs = 300;
  for (i = 0; i < runs; i++)
    {
      g_thread_pool_push (pool1, GUINT_TO_POINTER (i + 1), NULL);
      g_thread_pool_push (pool2, GUINT_TO_POINTER (i + 1), NULL);
      g_thread_pool_push (pool3, GUINT_TO_POINTER (i + 1), NULL);
      leftover_task_counter += 3;
    }

  g_thread_pool_free (pool1, TRUE, TRUE);
  g_thread_pool_free (pool2, FALSE, TRUE);
  g_thread_pool_free (pool3, FALSE, TRUE);

  g_assert (runs * 3 == abs_thread_counter + leftover_task_counter);
  g_assert (running_thread_counter == 0);
}
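For reference, the two booleans passed to g_thread_pool_free() in the test above control shutdown behaviour: `immediate` decides whether still-queued tasks are discarded (TRUE, as for pool1) or processed first (FALSE, as for pool2/pool3), and `wait_` decides whether the call blocks until the pool has finished. A minimal sketch, assuming a hypothetical no-op worker `noop_task`:

#include <glib.h>

/* hypothetical no-op worker, for illustration only */
static void
noop_task (gpointer data, gpointer user_data)
{
  (void) data;
  (void) user_data;
}

static void
demo_free_modes (void)
{
  GThreadPool *p;

  /* immediate=TRUE, wait=TRUE: discard queued tasks, wait for running ones */
  p = g_thread_pool_new (noop_task, NULL, 2, FALSE, NULL);
  g_thread_pool_push (p, GINT_TO_POINTER (1), NULL);
  g_thread_pool_free (p, TRUE, TRUE);

  /* immediate=FALSE, wait=TRUE: drain the whole queue, then return */
  p = g_thread_pool_new (noop_task, NULL, 2, FALSE, NULL);
  g_thread_pool_push (p, GINT_TO_POINTER (2), NULL);
  g_thread_pool_free (p, FALSE, TRUE);
}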
void
ccnet_job_manager_free (CcnetJobManager *mgr)
{
    g_hash_table_destroy (mgr->jobs);
    g_thread_pool_free (mgr->thread_pool, TRUE, FALSE);
    g_free (mgr);
}
/**
 * Uninitialize testing stuff
 */
void
Informatics_done_testing (void)
{
  Informatics_stop_testing (0, 0);

  if (active) {
    mutex_free (active);
  }
  if (unlink_mutex) {
    mutex_free (unlink_mutex);
  }
  if (suspended) {
    mutex_free (suspended);
  }
  if (lck_mutex) {
    mutex_free (lck_mutex);
  }
  if (pool) {
    g_thread_pool_free (pool, FALSE, FALSE);
  }
}
static void
tracker_extract_finalize (GObject *object)
{
	TrackerExtractPrivate *priv;

	priv = TRACKER_EXTRACT_GET_PRIVATE (object);

	/* FIXME: Shutdown modules? */

	g_hash_table_destroy (priv->single_thread_extractors);
	g_thread_pool_free (priv->thread_pool, TRUE, FALSE);

	if (!priv->disable_summary_on_finalize) {
		report_statistics (object);
	}

#ifdef HAVE_LIBSTREAMANALYZER
	tracker_topanalyzer_shutdown ();
#endif /* HAVE_STREAMANALYZER */

	g_hash_table_destroy (priv->statistics_data);

	g_mutex_clear (&priv->task_mutex);

	G_OBJECT_CLASS (tracker_extract_parent_class)->finalize (object);
}
static void
kms_agnostic_bin2_dispose (GObject * object)
{
  KmsAgnosticBin2 *self = KMS_AGNOSTIC_BIN2 (object);

  GST_DEBUG_OBJECT (object, "dispose");

  KMS_AGNOSTIC_BIN2_LOCK (self);
  g_thread_pool_free (self->priv->remove_pool, FALSE, FALSE);

  if (self->priv->input_bin_src_caps) {
    gst_caps_unref (self->priv->input_bin_src_caps);
    self->priv->input_bin_src_caps = NULL;
  }

  if (self->priv->input_caps) {
    gst_caps_unref (self->priv->input_caps);
    self->priv->input_caps = NULL;
  }

  KMS_AGNOSTIC_BIN2_UNLOCK (self);

  /* chain up */
  G_OBJECT_CLASS (kms_agnostic_bin2_parent_class)->dispose (object);
}
void engine_teardownWorkerThreads(Engine* engine) {
	MAGIC_ASSERT(engine);
	if(engine->workerPool) {
		g_thread_pool_free(engine->workerPool, FALSE, TRUE);
		engine->workerPool = NULL;
	}
}
/**
 * a_background_uninit:
 *
 * Uninitialize background feature.
 */
void a_background_uninit()
{
	stop_all_threads = TRUE;
	// Wait until these threads stop
	g_thread_pool_free ( thread_pool_remote, TRUE, TRUE );
	// Don't wait for these
	g_thread_pool_free ( thread_pool_local, TRUE, FALSE );
#ifdef HAVE_LIBMAPNIK
	g_thread_pool_free ( thread_pool_local_mapnik, TRUE, FALSE );
#endif

	gtk_list_store_clear ( bgstore );
	g_object_unref ( bgstore );

	gtk_widget_destroy ( bgwindow );
}
void pocketvox_controller_start(PocketvoxController *controller)
{
	GList *modules = NULL;
	gint i, n_threads;
	GThreadPool *thread_pool = NULL;

	g_return_if_fail(NULL != controller);

	controller->priv = G_TYPE_INSTANCE_GET_PRIVATE (controller,
			TYPE_POCKETVOX_CONTROLLER, PocketvoxControllerPrivate);
	PocketvoxControllerPrivate *priv = controller->priv;

	modules = g_hash_table_get_values(priv->modules);

	// Create a GThreadPool to make dictionary loading smoother
	n_threads = g_get_num_processors();
	thread_pool = g_thread_pool_new((GFunc)pocketvox_module_build_dictionnary,
			NULL, n_threads, TRUE, NULL);

	for(i = 0; i < g_list_length(modules); i++)
	{
		g_thread_pool_push(thread_pool,
				(PocketvoxModule *)g_list_nth_data(modules, i), NULL);
	}

	g_thread_pool_free(thread_pool, FALSE, TRUE);
	g_list_free(modules);

	priv->loop = g_main_loop_new(NULL, FALSE);
	g_main_loop_run(priv->loop);
}
/**
 * @brief Free any memory associated with a scheduler_t.
 *
 * This will stop the interface if it is currently running, and free all the
 * memory associated with the different regular expression and similar
 * structures.
 *
 * @param scheduler
 */
void scheduler_destroy(scheduler_t* scheduler)
{
  // TODO interface close
  // TODO repo close

  event_loop_destroy();

  if(scheduler->main_log)
  {
    log_destroy(scheduler->main_log);
    main_log = NULL;
  }

  if(scheduler->process_name)  g_free(scheduler->process_name);
  if(scheduler->sysconfig)     fo_config_free(scheduler->sysconfig);
  if(scheduler->sysconfigdir)  g_free(scheduler->sysconfigdir);
  if(scheduler->host_queue)    g_list_free(scheduler->host_queue);
  if(scheduler->workers)       g_thread_pool_free(scheduler->workers, FALSE, TRUE);

  if(scheduler->email_subject) g_free(scheduler->email_subject);
  if(scheduler->email_command) g_free(scheduler->email_command);

  g_sequence_free(scheduler->job_queue);

  g_regex_unref(scheduler->parse_agent_msg);
  g_regex_unref(scheduler->parse_db_email);
  g_regex_unref(scheduler->parse_interface_cmd);

  g_tree_unref(scheduler->meta_agents);
  g_tree_unref(scheduler->agents);
  g_tree_unref(scheduler->host_list);
  g_tree_unref(scheduler->job_list);

  g_free(scheduler);
}
/**
 * Stop one thread pool
 */
void stop_thread_pool(const char *name, GThreadPool **pool)
{
	log_message(DEBUG, DEBUG_AREA_MAIN, "Stopping thread pool '%s'", name);
	g_thread_pool_free(*pool, TRUE, TRUE);
	*pool = NULL;
}
void dp_deep_step(DpDeepInfo *hdeepinfo)
{
	int individ_id;
	gboolean immediate_stop = FALSE;
	gboolean wait_finish = TRUE;
	DpPopulation *population = hdeepinfo->population;
	DpPopulation *trial = hdeepinfo->trial;
	GError *gerror = NULL;

	if ( hdeepinfo->max_threads > 0 ) {
		if ( hdeepinfo->gthreadpool == NULL ) {
			hdeepinfo->gthreadpool = g_thread_pool_new ((GFunc) dp_deep_step_func,
			    (gpointer) hdeepinfo, hdeepinfo->max_threads,
			    hdeepinfo->exclusive, &gerror);
		}
		if ( gerror != NULL ) {
			g_error("%s", gerror->message);
		}
		for ( individ_id = 0; individ_id < population->size; individ_id++ ) {
			g_thread_pool_push (hdeepinfo->gthreadpool,
			    GINT_TO_POINTER(individ_id + 1), &gerror);
			if ( gerror != NULL ) {
				g_error("%s", gerror->message);
			}
		}
		g_thread_pool_free (hdeepinfo->gthreadpool, immediate_stop, wait_finish);
		hdeepinfo->gthreadpool = NULL;
	} else {
		for ( individ_id = 0; individ_id < population->size; individ_id++ ) {
			dp_deep_step_func (GINT_TO_POINTER(individ_id + 1), (gpointer) hdeepinfo);
		}
	}

	dp_population_update(trial, 0, trial->size);
	trial->iter = population->iter + 1;
	hdeepinfo->population = trial;
	hdeepinfo->trial = population;
}
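The create/push/free sequence above is effectively a parallel-for with a join: freeing with immediate=FALSE and wait=TRUE only returns once every pushed item has been processed. A minimal sketch of the same idiom, assuming a hypothetical worker `process_item`:

#include <glib.h>

/* hypothetical worker, for illustration only */
static void
process_item (gpointer data, gpointer user_data)
{
	g_print("processing item %d\n", GPOINTER_TO_INT(data));
}

static void
parallel_for (int n, int max_threads)
{
	GThreadPool *pool = g_thread_pool_new (process_item, NULL,
	    max_threads, FALSE, NULL);
	int i;

	/* push i + 1 so no work item is the NULL pointer */
	for ( i = 0; i < n; i++ ) {
		g_thread_pool_push (pool, GINT_TO_POINTER(i + 1), NULL);
	}

	/* acts as a join: process the whole queue, wait for all workers */
	g_thread_pool_free (pool, FALSE, TRUE);
}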
static void
_gck_call_base_finalize (GckCallClass *klass)
{
	GMainContext *context;
	GSource *src;

	if (klass->thread_pool) {
		g_assert (g_thread_pool_unprocessed (klass->thread_pool) == 0);
		g_thread_pool_free (klass->thread_pool, FALSE, TRUE);
		klass->thread_pool = NULL;
	}

	if (klass->completed_id) {
		context = g_main_context_default ();
		g_return_if_fail (context);

		src = g_main_context_find_source_by_id (context, klass->completed_id);
		g_assert (src);
		g_source_destroy (src);
		klass->completed_id = 0;
	}

	if (klass->completed_queue) {
		/* the queue must be drained before it is torn down */
		g_assert (g_async_queue_length (klass->completed_queue) == 0);
		g_async_queue_unref (klass->completed_queue);
		klass->completed_queue = NULL;
	}
}
DpPopulation *dp_evaluation_population_init(DpEvaluationCtrl *hevalctrl, int size, double noglobal_eps)
{
	DpPopulation *pop;
	int i, istart = 0;
	//gboolean immediate_stop = FALSE;
	gboolean immediate_stop = TRUE;
	//gboolean wait_finish = TRUE;
	gboolean wait_finish = FALSE;
	GError *gerror = NULL;
	GMainContext *gcontext = g_main_context_default();
	gulong microseconds = G_USEC_PER_SEC / 1000;

	pop = dp_population_new(size, hevalctrl->eval->size,
	    hevalctrl->eval_target->size, hevalctrl->eval_target->precond_size,
	    hevalctrl->seed);

	if ( noglobal_eps == 0 ) {
		dp_evaluation_individ_set(hevalctrl, pop->individ[0]);
		pop->individ[0]->user_data = dp_target_eval_get_user_data(hevalctrl->eval_target);
		istart = 1;
		pop->individ[0]->cost = G_MAXDOUBLE;
	}

	for ( i = istart; i < size; i++ ) {
		dp_evaluation_individ_scramble(hevalctrl, pop->individ[i], noglobal_eps);
		pop->individ[i]->user_data = dp_target_eval_get_user_data(hevalctrl->eval_target);
		pop->individ[i]->cost = G_MAXDOUBLE;
	}

#ifdef MPIZE
	/* MPI initialization steps */
	int world_id = 0, world_count = 1;
	MPI_Comm_size(MPI_COMM_WORLD, &world_count);
	MPI_Comm_rank(MPI_COMM_WORLD, &world_id);
	int ind_per_node = (int)ceil(pop->size / world_count);
	int ind_per_last_node = pop->size - ind_per_node * (world_count - 1);
	dp_population_mpi_distribute(pop, world_id, world_count);
#endif

	if ( hevalctrl->eval_max_threads > 0 ) {
		hevalctrl->gthreadpool = g_thread_pool_new ((GFunc) dp_evaluation_population_init_func,
		    (gpointer) hevalctrl, hevalctrl->eval_max_threads,
		    hevalctrl->exclusive, &gerror);
		if ( gerror != NULL ) {
			g_error("%s", gerror->message);
		}
		for ( i = pop->slice_a; i < pop->slice_b; i++ ) {
			g_thread_pool_push (hevalctrl->gthreadpool, (gpointer)(pop->individ[i]), &gerror);
			if ( gerror != NULL ) {
				g_error("%s", gerror->message);
			}
		}
		/* poll until the queue is drained, then free without waiting */
		while ( g_thread_pool_unprocessed (hevalctrl->gthreadpool) > 0 ) {
			g_main_context_iteration(gcontext, FALSE);
			g_usleep (microseconds);
		}
		g_thread_pool_free (hevalctrl->gthreadpool, immediate_stop, wait_finish);
	} else {
		for ( i = pop->slice_a; i < pop->slice_b; i++ ) {
			dp_evaluation_population_init_func ((gpointer)(pop->individ[i]), (gpointer) hevalctrl);
		}
	}

#ifdef MPIZE
	dp_population_mpi_gather(pop, world_id, world_count);
#endif

	dp_population_update(pop, 0, pop->size);
	return pop;
}
void tpm_backend_thread_end(TPMBackendThread *tbt)
{
    if (tbt->pool) {
        g_thread_pool_push(tbt->pool, (gpointer)TPM_BACKEND_CMD_END, NULL);
        g_thread_pool_free(tbt->pool, FALSE, TRUE);
        tbt->pool = NULL;
    }
}
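The snippet above queues a sentinel command before freeing, so the worker thread gets a chance to shut down whatever it guards before the pool is destroyed. A minimal sketch of the same poison-pill pattern; `SHUTDOWN_SENTINEL` and `demo_worker` are hypothetical names, and the pattern assumes a single-threaded pool, since only one worker will ever dequeue the sentinel:

#include <glib.h>

#define SHUTDOWN_SENTINEL GINT_TO_POINTER(-1)

static void
demo_worker(gpointer data, gpointer user_data)
{
    if (data == SHUTDOWN_SENTINEL) {
        /* release per-worker resources here */
        return;
    }
    /* ... handle a normal work item ... */
}

static void
demo_shutdown(GThreadPool *pool)
{
    /* queue the sentinel, then drain the queue and wait until the
     * worker has seen it (immediate=FALSE keeps it in the queue) */
    g_thread_pool_push(pool, SHUTDOWN_SENTINEL, NULL);
    g_thread_pool_free(pool, FALSE, TRUE);
}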
void
test_func_mongo_ssl_multithread (void)
{
  guint i;

  // 1. Many threads sharing the same context previously set up
  GThreadPool *thread_pool = g_thread_pool_new (ssl_query_thread,
                                                config.ssl_settings,
                                                THREAD_POOL_SIZE, TRUE, NULL);
  for (i = 0; i < THREAD_POOL_SIZE; ++i)
    g_thread_pool_push (thread_pool, config.ssl_settings, NULL);

  g_thread_pool_free (thread_pool, FALSE, TRUE);

  // 2. Many threads sharing the same context, each manipulating the context
  srand (time (NULL));
  thread_pool = g_thread_pool_new (ssl_ping_thread, config.ssl_settings,
                                   THREAD_POOL_SIZE, TRUE, NULL);
  for (i = 0; i < THREAD_POOL_SIZE; ++i)
    g_thread_pool_push (thread_pool, config.ssl_settings, NULL);

  g_thread_pool_free (thread_pool, FALSE, TRUE);
}
/**
 * @brief Stop the fill thread of a resource
 *
 * @param resource The resource whose fill thread should be stopped
 *
 * This function takes care of stopping the fill thread for a
 * resource, either to pause it or before freeing it. It does so by
 * first setting the @ref Resource::fill_pool attribute to NULL, then
 * stopping the pool, dropping queued tasks and waiting for the
 * running one to complete.
 */
static void r_stop_fill(Resource *resource)
{
    GThreadPool *pool;

    if ( (pool = resource->fill_pool) ) {
        g_atomic_pointer_set(&resource->fill_pool, NULL);
        g_thread_pool_free(pool, true, true);
    }
}
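The atomic pointer swap above publishes the shutdown to the worker before the pool is torn down. A minimal sketch of that handoff, assuming a hypothetical `Demo` struct whose worker re-checks the pointer before doing work; the GLib atomics and pool calls are real, the rest is illustrative:

#include <glib.h>

typedef struct {
    GThreadPool *pool;   /* swapped to NULL to signal shutdown */
} Demo;

static void
demo_fill(gpointer data, gpointer user_data)
{
    Demo *d = user_data;

    /* re-check before each unit of work: once the stopper has swapped
     * the pointer to NULL, bail out instead of producing more */
    if (g_atomic_pointer_get(&d->pool) == NULL)
        return;

    /* ... produce one buffer ... */
}

static void
demo_stop(Demo *d)
{
    GThreadPool *pool = d->pool;

    if (pool != NULL) {
        /* publish the shutdown first, then drop queued tasks and wait
         * for any running task to observe the NULL and return */
        g_atomic_pointer_set(&d->pool, NULL);
        g_thread_pool_free(pool, TRUE, TRUE);
    }
}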
void nsp_jobs_free(NspJobs *jobs)
{
	if (jobs == NULL) {
		return;
	}

	g_thread_pool_free(jobs->pool, FALSE, FALSE);
}
/**
 * @brief Disconnect and clean up clients
 *
 * This function is not under the CLEANUP_DESTRUCTOR conditional and is
 * actually called during shutdown to ensure that all the clients are
 * sent disconnections, rather than dropping connections and waiting
 * for a timeout.
 */
void clients_cleanup()
{
    clients_each(client_disconnect, NULL);

#ifdef CLEANUP_DESTRUCTOR
    g_ptr_array_free(clients_list, true);
    g_thread_pool_free(client_threads, true, false);
    g_mutex_free(clients_list_lock);
#endif
}
static void rtp_fill_pool_free(RTP_session *session)
{
    Resource *resource = session->track->parent;

    g_mutex_lock(resource->lock);
    resource->eor = true;
    g_mutex_unlock(resource->lock);

    g_thread_pool_free(session->fill_pool, true, true);
    session->fill_pool = NULL;

    resource->eor = false;
}
static void
g_threaded_socket_service_finalize (GObject *object)
{
  GThreadedSocketService *service = G_THREADED_SOCKET_SERVICE (object);

  g_thread_pool_free (service->priv->thread_pool, FALSE, TRUE);

  G_OBJECT_CLASS (g_threaded_socket_service_parent_class)
    ->finalize (object);
}
/**
 * Suspend all testing
 *
 * @return zero on success, non-zero otherwise
 */
int
Informatics_SuspendTesting (void)
{
  mutex_lock (suspended);

  /* It's the simplest way to free the pool */
  g_thread_pool_free (pool, FALSE, TRUE);
  create_testing_pool ();

  return 0;
}
/**
 * a_background_uninit:
 *
 * Uninitialize background feature.
 */
void a_background_uninit()
{
	/* wait until all running threads stop */
	stop_all_threads = TRUE;
	g_thread_pool_free ( thread_pool, TRUE, TRUE );

	gtk_list_store_clear ( bgstore );
	g_object_unref ( bgstore );

	gtk_widget_destroy ( bgwindow );
}
static gboolean manage_fuse_mt (GIOChannel *source, GIOCondition condition, gpointer data)
{
	int res;
	char *buf;
	size_t bufsize;
	struct fuse *fuse;
	struct fuse_session *se;
	struct fuse_chan *ch;
	GThreadPool *pool;
	GError *error;
	ThreadsData *info;

	fuse = (struct fuse*) data;

	error = NULL;
	pool = g_thread_pool_new (manage_request, fuse, -1, FALSE, &error);
	if (pool == NULL) {
		g_warning ("Unable to start thread pool: %s", error->message);
		g_error_free (error);
		return FALSE;
	}

	se = fuse_get_session (fuse);
	ch = fuse_session_next_chan (se, NULL);
	bufsize = fuse_chan_bufsize (ch);

	while (1) {
		buf = (char*) malloc (bufsize);
		res = fuse_chan_recv (&ch, buf, bufsize);

		if (res == -EINTR) {
			free (buf);
			continue;
		}
		else if (res <= 0) {
			free (buf);
			break;
		}

		info = do_threads_data (buf, res);

		error = NULL;
		g_thread_pool_push (pool, info, &error);
		if (error != NULL) {
			g_warning ("Unable to start processing request: %s", error->message);
			g_error_free (error);
			free_threads_data (info);
		}
	}

	g_thread_pool_free (pool, TRUE, TRUE);
	return FALSE;
}
static void
default_cleanup (GstTaskPool * pool)
{
  GST_OBJECT_LOCK (pool);
  if (pool->pool) {
    /* Shut down all the threads. We still process the ones scheduled
     * because the unref happens in the thread function.
     * Also wait for currently running ones to finish. */
    g_thread_pool_free (pool->pool, FALSE, TRUE);
    pool->pool = NULL;
  }
  GST_OBJECT_UNLOCK (pool);
}
void push_vm_destroy(push_vm_t *vm, push_bool_t kill_all)
{
	g_return_if_null(vm);

	if (kill_all) {
		push_vm_kill_all(vm);
	}

	g_thread_pool_free(vm->threads, kill_all, TRUE);
	g_mutex_free(vm->mutex);
	g_cond_free(vm->wait_cond);
	g_slice_free(push_vm_t, vm);
}
int emc_start_server(struct emc_server_context *ctx)
{
	log_printf(DEBUG_LEVEL_INFO, "INFO EMC server ready");

	while (ctx->continue_processing)
		ev_loop(loop, 0 /* or: EVLOOP_NONBLOCK */);

	log_printf(DEBUG_LEVEL_INFO, "INFO EMC server shutting down");

	g_thread_pool_free(ctx->pool_tls_handshake, TRUE, TRUE);
	g_thread_pool_free(ctx->pool_reader, TRUE, TRUE);

	g_async_queue_unref(ctx->work_queue);
	g_mutex_free(ctx->tls_client_list_mutex);
	g_tree_destroy(ctx->nuauth_directory);

	ev_default_destroy();

	emc_close_servers(ctx);

	return 0;
}
static void
test_thread_stop_unused (void)
{
  GThreadPool *pool;
  guint i;
  guint limit = 100;

  /* Spawn a few threads. */
  g_thread_pool_set_max_unused_threads (-1);
  pool = g_thread_pool_new ((GFunc) g_usleep, NULL, -1, FALSE, NULL);

  for (i = 0; i < limit; i++)
    g_thread_pool_push (pool, GUINT_TO_POINTER (1000), NULL);

  DEBUG_MSG (("[unused] ===> pushed %d threads onto the idle pool", limit));

  /* Wait for the threads to migrate. */
  g_usleep (G_USEC_PER_SEC);

  DEBUG_MSG (("[unused] current threads %d", test_count_threads ()));
  DEBUG_MSG (("[unused] stopping unused threads"));

  g_thread_pool_stop_unused_threads ();

  for (i = 0; i < 5; i++)
    {
      if (g_thread_pool_get_num_unused_threads () == 0 &&
          test_count_threads () == 0)
        break;

      DEBUG_MSG (("[unused] waiting ONE second for threads to die"));

      /* Some time for threads to die. */
      g_usleep (G_USEC_PER_SEC);
    }

  DEBUG_MSG (("[unused] stopped idle threads, %d remain, %d threads still exist",
              g_thread_pool_get_num_unused_threads (),
              test_count_threads ()));

  g_assert (g_thread_pool_get_num_unused_threads () == test_count_threads ());
  g_assert (g_thread_pool_get_num_unused_threads () == 0);

  g_thread_pool_set_max_unused_threads (MAX_THREADS);

  DEBUG_MSG (("[unused] cleaning up thread pool"));
  g_thread_pool_free (pool, FALSE, TRUE);
}
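The unused-thread functions exercised above are process-global knobs for non-exclusive pools: idle threads are returned to a shared set rather than exiting, up to the configured maximum. A minimal sketch of the same calls in isolation (`demo_unused_threads` is a hypothetical name):

#include <glib.h>

static void
demo_unused_threads (void)
{
  /* keep at most 2 idle threads around between bursts of work */
  g_thread_pool_set_max_unused_threads (2);

  /* ... shared (exclusive=FALSE) pools come and go here ... */

  /* retire all currently idle threads immediately */
  g_thread_pool_stop_unused_threads ();

  g_print ("%u idle threads remain\n",
           g_thread_pool_get_num_unused_threads ());
}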
void run_actual_probes(void)
{
	GThreadPool *gtpool = NULL;

	gtpool = g_thread_pool_new(probe, NULL, 10, TRUE, NULL);
	if (gtpool == NULL) {
		LOG(LOG_ERR, "could not create threadpool");
		return;
	}

	g_hash_table_foreach(cache, add_probe, gtpool);
	g_thread_pool_free(gtpool, FALSE, TRUE);
}
API void freeOpenGLLodMap(OpenGLLodMap *lodmap)
{
	g_hash_table_remove(maps, lodmap->quadtree);
	g_thread_pool_free(lodmap->loadingPool, false, true);
	freeQuadtree(lodmap->quadtree);

	deleteOpenGLMaterial("lodmap");
	freeOpenGLPrimitive(lodmap->heightmap);
	freeVector(lodmap->viewerPosition);
	freeOpenGLLodMapDataSource(lodmap->source);
	free(lodmap);
}