/* Returns TRUE when @collision refers to the same context as @context and
 * both contexts currently have a live thread attached; FALSE otherwise
 * (including when either argument is NULL). */
static gboolean
_check_collision (GstGLContext * context, GstGLContext * collision)
{
  GThread *ctx_thread = NULL;
  GThread *other_thread = NULL;
  gboolean collides = FALSE;

  if (context == NULL || collision == NULL)
    return FALSE;

  ctx_thread = gst_gl_context_get_thread (context);
  other_thread = gst_gl_context_get_thread (collision);

  /* only a pair of contexts that both have active threads can collide */
  if (ctx_thread != NULL && other_thread != NULL)
    collides = (collision == context);

  if (ctx_thread)
    g_thread_unref (ctx_thread);
  if (other_thread)
    g_thread_unref (other_thread);

  return collides;
}
void rtcdc_loop(struct rtcdc_peer_connection *peer) { if (peer == NULL) return; while (!peer->initialized) g_usleep(50000); GThread *thread_ice = g_thread_new("ICE thread", &ice_thread, peer); GThread *thread_sctp = g_thread_new("SCTP thread", &sctp_thread, peer); GThread *thread_startup = g_thread_new("Startup thread", &startup_thread, peer); struct ice_transport *ice = peer->transport->ice; g_main_loop_run(ice->loop); peer->exit_thread = TRUE; g_thread_join(thread_ice); g_thread_join(thread_sctp); g_thread_join(thread_startup); g_thread_unref(thread_ice); g_thread_unref(thread_sctp); g_thread_unref(thread_startup); }
/* Finds the GstGLContext in @display's list that belongs to @thread (or
 * the first live context when @thread is NULL), pruning dead weak refs
 * along the way.  Caller must hold the display lock.
 * Returns: (transfer full): the matching context, or NULL. */
static GstGLContext *
_get_gl_context_for_thread_unlocked (GstGLDisplay * display, GThread * thread)
{
  GstGLContext *context = NULL;
  GList *prev = NULL, *l = display->priv->contexts;

  while (l) {
    GWeakRef *ref = l->data;
    GThread *context_thread;

    context = g_weak_ref_get (ref);
    if (!context) {
      /* remove dead contexts */
      g_weak_ref_clear (l->data);
      display->priv->contexts = g_list_delete_link (display->priv->contexts, l);
      l = prev ? prev->next : display->priv->contexts;
      continue;
    }

    context_thread = gst_gl_context_get_thread (context);
    /* Fixed: skip contexts whose thread does NOT match the requested one.
     * The previous '==' comparison was inverted and made this function
     * return a context belonging to a *different* thread. */
    if (thread != NULL && thread != context_thread) {
      if (context_thread)
        g_thread_unref (context_thread);
      gst_object_unref (context);
      prev = l;
      l = l->next;
      continue;
    }

    if (context_thread)
      g_thread_unref (context_thread);
    return context;
  }

  return NULL;
}
/* Tears down a client's worker: closes both channels, releases the thread
 * handles and the client structure, then terminates the calling thread.
 * NOTE(review): the final g_thread_exit() implies this runs on the worker
 * thread itself — confirm against callers. */
void mmsvc_core_worker_exit(Client client)
{
	LOGD("Enter");
	if (!client) {
		LOGE("Error - null client");
		return;
	}

	mmsvc_core_connection_close(client->ch[MUSED_CHANNEL_MSG].fd);
	mmsvc_core_connection_close(client->ch[MUSED_CHANNEL_DATA].fd);

	if (!client->ch[MUSED_CHANNEL_MSG].p_gthread) {
		/* NOTE(review): this early return leaks `client` and skips
		 * g_thread_exit() — verify this path is truly unreachable. */
		LOGE("Error - null p_gthread");
		return;
	}
	LOGD("%p thread exit\n", client->ch[MUSED_CHANNEL_MSG].p_gthread);
	/* drop the thread handles; presumably nobody joins these threads —
	 * TODO confirm they were created with g_thread_new() and never joined */
	g_thread_unref(client->ch[MUSED_CHANNEL_MSG].p_gthread);
	if (client->ch[MUSED_CHANNEL_DATA].p_gthread)
		g_thread_unref(client->ch[MUSED_CHANNEL_DATA].p_gthread);
	MMSVC_FREE(client);

	mmsvc_core_config_get_instance()->free();
	LOGD("Leave");
	/* terminate the calling (worker) thread */
	g_thread_exit(NULL);
}
/* Spawns the success- and fail-ping worker threads over the shared SSL
 * context and waits for both to finish. */
gpointer
ssl_ping_thread (gpointer _c)
{
  mongo_ssl_ctx *c = (mongo_ssl_ctx *) _c;
  GThread *s, *f;

  s = g_thread_new ("ping_success", ssl_ping_success_thread, c);
  f = g_thread_new ("ping_fail", ssl_ping_fail_thread, c);

  /* Fixed: g_thread_join() already consumes the reference returned by
   * g_thread_new(); the previous g_thread_unref() calls after the joins
   * double-unreffed freed GThreads. */
  g_thread_join (s);
  g_thread_join (f);

  return NULL;
}
/* GObject dispose handler: shuts down the loop thread.  When invoked from
 * another thread it schedules quit_main_loop() as an idle source and joins
 * the loop thread; when invoked from the loop thread itself it quits
 * directly and just releases the thread reference (joining oneself would
 * deadlock). */
static void
kms_loop_dispose (GObject * obj)
{
  KmsLoop *self = KMS_LOOP (obj);

  GST_DEBUG_OBJECT (obj, "Dispose");

  KMS_LOOP_LOCK (self);
  if (self->priv->thread != NULL) {
    if (g_thread_self () != self->priv->thread) {
      GThread *aux = self->priv->thread;

      /* ask the loop to quit, then join outside the lock so the idle
       * source can actually run; g_thread_join() also drops the
       * g_thread_new() reference, so no extra unref is needed here */
      kms_loop_idle_add (self, (GSourceFunc) quit_main_loop, self);
      self->priv->thread = NULL;
      KMS_LOOP_UNLOCK (self);
      g_thread_join (aux);
      KMS_LOOP_LOCK (self);
    } else {
      /* self thread does not need to wait for itself */
      quit_main_loop (self);
      /* nobody will join this thread, so release the reference manually */
      g_thread_unref (self->priv->thread);
      self->priv->thread = NULL;
    }
  }
  KMS_LOOP_UNLOCK (self);

  G_OBJECT_CLASS (kms_loop_parent_class)->dispose (obj);
}
/**
 * pk_backend_job_thread_setup:
 *
 * Thread entry point used by pk_backend_job_thread_create(): runs the
 * caller-supplied function between the backend start/stop hooks, marks
 * the job finished, then releases the thread reference and the helper.
 **/
static gpointer
pk_backend_job_thread_setup (gpointer thread_data)
{
	PkBackendJobThreadHelper *helper = (PkBackendJobThreadHelper *) thread_data;

	/* run original function with automatic locking */
	pk_backend_thread_start (helper->backend, helper->job, helper->func);
	helper->func (helper->job, helper->job->priv->params, helper->user_data);
	pk_backend_job_finished (helper->job);
	pk_backend_thread_stop (helper->backend, helper->job, helper->func);

	/* set idle IO priority */
#ifdef PK_BUILD_DAEMON
	if (helper->job->priv->background == TRUE) {
		g_debug ("setting ioprio class to idle");
		pk_ioprio_set_idle (0);
	}
#endif

	/* unref the thread here as it holds a reference itself and we do
	 * not need to join() this at any stage */
	g_thread_unref (helper->job->priv->thread);

	/* destroy helper */
	g_object_unref (helper->job);
	if (helper->destroy_func != NULL)
		helper->destroy_func (helper->user_data);
	g_free (helper);

	/* no return value */
	return NULL;
}
/**
 * pk_backend_job_thread_create:
 * @func: (scope call): function to run in the new thread
 *
 * Runs @func in a detached "PK-Backend" thread; @destroy_func (if any) is
 * invoked on @user_data by the worker when the job completes.
 **/
gboolean
pk_backend_job_thread_create (PkBackendJob *job,
			      PkBackendJobThreadFunc func,
			      gpointer user_data,
			      GDestroyNotify destroy_func)
{
	PkBackendJobThreadHelper *helper = NULL;

	g_return_val_if_fail (PK_IS_BACKEND_JOB (job), FALSE);
	g_return_val_if_fail (func != NULL, FALSE);
	g_return_val_if_fail (pk_is_thread_default (), FALSE);

	/* create a helper object to allow us to call a _setup() function */
	helper = g_new0 (PkBackendJobThreadHelper, 1);
	helper->job = g_object_ref (job);
	helper->backend = job->priv->backend;
	helper->func = func;
	helper->user_data = user_data;
	/* Fixed: destroy_func was never stored, so the worker thread's
	 * `helper->destroy_func` check always saw NULL and the destroy
	 * notify was silently skipped */
	helper->destroy_func = destroy_func;

	/* create a thread and unref it immediately as we do not need to join()
	 * this at any stage */
	g_thread_unref (g_thread_new ("PK-Backend", pk_backend_job_thread_setup, helper));
	return TRUE;
}
/* Stops a stats worker thread (via an EXIT command), waits for it, and
 * frees the worker's log handles, command queue, and the worker itself.
 * Both @thread and @worker may be NULL. */
static void _kqtime_freeStatsThreadWorkerHelper(GThread* thread,
		KQTimeStatsWorker* worker) {
	if(thread) {
		/* tell the thread to exit */
		if(worker && worker->commands) {
			KQTimeCommand* exitCommand = g_new0(KQTimeCommand, 1);
			exitCommand->type = KQTIME_CMD_EXIT;
			g_async_queue_push(worker->commands, exitCommand);
		}

		/* wait for the thread to exit.  Fixed: g_thread_join() already
		 * releases the g_thread_new() reference, so the old extra
		 * g_thread_unref() was a double-unref. */
		g_thread_join(thread);
	}

	if(worker) {
		/* Fixed: the log-file cleanup used to dereference worker before
		 * this NULL check; it now lives inside the guarded block */
		if(worker->logFile) {
			fclose(worker->logFile);
		}
		if(worker->gzLogFile) {
			gzflush(worker->gzLogFile, Z_FINISH);
			gzclose(worker->gzLogFile);
		}
		if(worker->commands) {
			g_async_queue_unref(worker->commands);
		}
		g_free(worker);
	}
}
/* Applies a new native window handle to the window.  If a GL context is
 * current on this thread it is deactivated around the swap and
 * re-activated afterwards. */
static void
_set_window_handle_cb (GstSetWindowHandleCb * data)
{
  GstGLWindowClass *window_class = GST_GL_WINDOW_GET_CLASS (data->window);
  GstGLContext *context = gst_gl_window_get_context (data->window);
  GThread *context_thread = NULL;

  /* deactivate the context while the handle is being replaced */
  if (context != NULL) {
    context_thread = gst_gl_context_get_thread (context);
    if (context_thread != NULL) {
      /* This is only thread safe iff the context thread == g_thread_self() */
      g_assert (context_thread == g_thread_self ());
      gst_gl_context_activate (context, FALSE);
    }
  }

  window_class->set_window_handle (data->window, data->handle);

  /* restore the previously-active context */
  if (context != NULL && context_thread != NULL)
    gst_gl_context_activate (context, TRUE);

  if (context != NULL)
    gst_object_unref (context);
  if (context_thread != NULL)
    g_thread_unref (context_thread);
}
/* Controller accept loop: listens on the controller port and hands each
 * accepted socket to the TCP-control handler until the server shuts down. */
static gpointer
gst_switch_server_controller (GstSwitchServer *srv)
{
  GSocket *socket;
  GError *error = NULL;  /* Fixed: was uninitialized; GLib requires *error
                          * to be NULL before passing &error */
  gint bound_port;

  srv->controller_socket = gst_switch_server_listen (srv,
      srv->controller_port, &bound_port);
  if (!srv->controller_socket) {
    return NULL;
  }

  while (srv->controller_thread && srv->controller_socket && srv->cancellable) {
    socket = g_socket_accept (srv->controller_socket, srv->cancellable, &error);
    if (!socket) {
      ERROR ("accept: %s", error ? error->message : "unknown error");
      /* Fixed: clear the error so it is neither leaked nor passed back
       * into g_socket_accept() still set on the next iteration */
      g_clear_error (&error);
      continue;
    }
    gst_switch_server_allow_tcp_control (srv, socket);
  }

  GST_SWITCH_SERVER_LOCK_CONTROLLER (srv);
  /* release the server's handle on this (never-joined) thread */
  g_thread_unref (srv->controller_thread);
  srv->controller_thread = NULL;
  GST_SWITCH_SERVER_UNLOCK_CONTROLLER (srv);
  return NULL;
}
/* Audio accept loop: listens on the audio port and serves each accepted
 * socket as an audio stream until the server shuts down. */
static gpointer
gst_switch_server_audio_acceptor (GstSwitchServer *srv)
{
  GSocket *socket;
  GError *error = NULL;  /* Fixed: was uninitialized; GLib requires *error
                          * to be NULL before passing &error */
  gint bound_port;

  srv->audio_acceptor_socket = gst_switch_server_listen (srv,
      srv->audio_acceptor_port, &bound_port);
  if (!srv->audio_acceptor_socket) {
    return NULL;
  }

  while (srv->audio_acceptor && srv->audio_acceptor_socket && srv->cancellable) {
    socket = g_socket_accept (srv->audio_acceptor_socket, srv->cancellable,
        &error);
    if (!socket) {
      ERROR ("accept: %s", error ? error->message : "unknown error");
      /* Fixed: clear the error so it is neither leaked nor passed back
       * into g_socket_accept() still set on the next iteration */
      g_clear_error (&error);
      continue;
    }
    gst_switch_server_serve (srv, socket, GST_SERVE_AUDIO_STREAM);
  }

  GST_SWITCH_SERVER_LOCK_AUDIO_ACCEPTOR (srv);
  /* release the server's handle on this (never-joined) thread */
  g_thread_unref (srv->audio_acceptor);
  srv->audio_acceptor = NULL;
  GST_SWITCH_SERVER_UNLOCK_AUDIO_ACCEPTOR (srv);
  return NULL;
}
/* Pushes EOS on the given stream's pad.  Active pads take the event
 * synchronously; non-active pads would block, so EOS is pushed from a
 * helper thread instead (best-effort, see comments). */
static void
input_selector_push_eos (gint stream, gboolean active)
{
  GstPad *pad = (stream == 1) ? stream1_pad : stream2_pad;

  if (active) {
    fail_unless (gst_pad_push_event (pad, gst_event_new_eos ()));
  } else {
    GThread *pusher;

    /* The non-active pads will block when receving eos, so we need to do it
     * from a separate thread. This makes this test racy, but it should only
     * cause false positives, not false negatives */
    pusher = g_thread_new ("selector-test-push-eos",
        (GThreadFunc) input_selector_do_push_eos, pad);

    /* Sleep half a second to give the helper time to run; there is no way
     * to know when the EOS has reached input-selector and blocked there,
     * so this merely reduces the chance of a false positive */
    g_usleep (0.5 * G_USEC_PER_SEC);
    g_thread_unref (pusher);
  }

  input_selector_check_eos (active);
}
/* Spawns a detached worker thread that runs @cb with @ctx; @free_ctx is
 * used by the worker to release @ctx.  Returns IOTCON_ERROR_NONE on
 * success or an IOTCON_ERROR_* code. */
static int _ocprocess_worker_start(_ocprocess_cb cb, void *ctx, _free_context free_ctx)
{
	GError *error = NULL; /* Fixed: was uninitialized; GLib requires *error
	                       * to be NULL before passing &error */
	GThread *thread;
	struct icd_ioty_worker *worker;

	RETV_IF(NULL == cb, IOTCON_ERROR_INVALID_PARAMETER);

	worker = calloc(1, sizeof(struct icd_ioty_worker));
	if (NULL == worker) {
		ERR("calloc() Fail(%d)", errno);
		return IOTCON_ERROR_OUT_OF_MEMORY;
	}

	worker->cb = cb;
	worker->ctx = ctx;
	worker->free_ctx = free_ctx;

	/* TODO : consider thread pool mechanism */
	thread = g_thread_try_new("worker_thread", _ocprocess_worker_thread, worker,
			&error);
	if (NULL == thread) {
		ERR("g_thread_try_new() Fail(%s)", error->message);
		g_error_free(error);
		free(worker);
		return IOTCON_ERROR_SYSTEM;
	}

	/* DO NOT join thread. It was already detached by calling g_thread_unref() */
	g_thread_unref(thread);

	/* DO NOT FREE worker. It MUST be freed in the _ocprocess_worker_thread() */
	return IOTCON_ERROR_NONE;
}
/* Releases all resources owned by a WebdavBackendSearchClosure: its
 * running flag, its (optional) search thread handle, and the closure
 * struct itself. */
static void
closure_destroy (WebdavBackendSearchClosure *closure)
{
	GThread *search_thread = closure->thread;

	e_flag_free (closure->running);

	if (search_thread != NULL)
		g_thread_unref (search_thread);

	g_free (closure);
}
void ArScriptRun(void) { if(main_thread != NULL) { g_thread_unref(main_thread); } main_thread = g_thread_new("main_runtime",main_runtime,NULL); }
/**
 * Frees server's structure
 * @param server_struct is the structure to be freed
 *
 * NOTE(review): g_thread_unref() only drops the handle reference — it does
 * not stop or wait for the data/meta threads. If they can still be running
 * here they may use freed state afterwards; confirm they have already
 * terminated, or consider g_thread_join() instead.
 */
void free_server_struct_t(server_struct_t *server_struct)
{
    if (server_struct != NULL)
        {
            /* stop the HTTP daemon before tearing anything else down */
            MHD_stop_daemon(server_struct->d);
            print_debug(_("\tMHD daemon stopped.\n"));
            free_variable(server_struct->backend); /** we need a backend function to be called to free th backend structure */
            print_debug(_("\tbackend variable freed.\n"));
            g_thread_unref(server_struct->data_thread);
            print_debug(_("\tdata thread unrefed.\n"));
            g_thread_unref(server_struct->meta_thread);
            print_debug(_("\tmeta thread unrefed.\n"));
            free_options_t(server_struct->opt);
            print_debug(_("\toption structure freed.\n"));
            free_variable(server_struct);
            print_debug(_("\tmain structure freed.\n"));
        }
}
/* Destructor: quits the main loop (if any) and waits for the loop thread
 * to finish. */
cDBusMainLoop::~cDBusMainLoop(void)
{
  if (_loop != NULL)
     g_main_loop_quit(_loop);

  if (_thread != NULL) {
     /* Fixed: g_thread_join() already releases the reference returned by
      * g_thread_new(); the previous extra g_thread_unref() unreffed an
      * already-freed GThread (double-unref). */
     g_thread_join(_thread);
     _thread = NULL;
     }
}
/* Helper thread to create a SCTP association that will use this DTLS stack.
 * The spawner never joins this thread, so on every exit path the thread
 * releases its own GThread reference via g_thread_unref(g_thread_self())
 * (detach-style cleanup). */
void *janus_dtls_sctp_setup_thread(void *data) {
	if(data == NULL) {
		JANUS_LOG(LOG_ERR, "No DTLS stack??\n");
		/* release our own thread reference before bailing out */
		g_thread_unref(g_thread_self());
		return NULL;
	}
	janus_dtls_srtp *dtls = (janus_dtls_srtp *)data;
	if(dtls->sctp == NULL) {
		JANUS_LOG(LOG_ERR, "No SCTP stack??\n");
		g_thread_unref(g_thread_self());
		return NULL;
	}
	janus_sctp_association *sctp = (janus_sctp_association *)dtls->sctp;
	/* Do the accept/connect stuff now */
	JANUS_LOG(LOG_VERB, "[%"SCNu64"] Started thread: setup of the SCTP association\n", sctp->handle_id);
	janus_sctp_association_setup(sctp);
	/* done: drop our own thread reference on the success path too */
	g_thread_unref(g_thread_self());
	return NULL;
}
void Socket::connect(int socket, in_addr_t sa, int port, gpointer data, on_connected_func func) { ConnectData *connect_data = new ConnectData(); connect_data->sd = socket; connect_data->sa = sa; connect_data->port = port; connect_data->data = data; connect_data->func = func; g_thread_unref(g_thread_new("connect_thread", connect_thread, connect_data)); }
/* Test thread: runs the insert worker, then repeatedly queries for one of
 * its documents to verify queries work alongside writer threads over SSL,
 * and finally runs the delete worker. */
gpointer
ssl_query_thread (gpointer _c)
{
  mongo_ssl_ctx *c = (mongo_ssl_ctx *) _c;
  guint tries;
  bson *test_doc = NULL;
  gchar *test_string;
  GThread *writer = g_thread_new ("insert", ssl_insert_thread, c);
  GThread *deleter;
  mongo_sync_connection *conn =
      mongo_sync_ssl_connect (config.primary_host, config.primary_port, TRUE, c);
  gboolean success = FALSE;

  g_thread_join (writer);
  sleep (1);

  for (tries = 1; tries <= THREAD_POOL_SIZE; ++tries) {
    test_doc = bson_new ();
    test_string = g_strdup_printf ("%s:%d", "ssl_insert_thread", tries);
    bson_append_string (test_doc, test_string, "ok", -1);
    bson_finish (test_doc);

    if (mongo_sync_cmd_query (conn, config.ns, 0, 0, 1, test_doc, NULL) != NULL)
      success = TRUE;

    /* Fixed: free the probe document on every iteration — the old code
     * broke out of the loop before freeing on the success path, leaking
     * both allocations */
    bson_free (test_doc);
    g_free (test_string);

    if (success)
      break;

    sleep (1);
  }

  ok (success, "mongo_sync_cmd_query () works with writer threads over SSL");

  deleter = g_thread_new ("delete", ssl_delete_thread, conn);
  /* Fixed: g_thread_join() already consumes the g_thread_new() reference;
   * the previous g_thread_unref() calls on `deleter` and `writer` were
   * double-unrefs of freed GThreads */
  g_thread_join (deleter);

  mongo_sync_disconnect (conn);
  return NULL;
}
GST_END_TEST
/* Builds videotestsrc -> agnosticbin -> (fakesink | valve -> vp8dec ->
 * fakesink2), toggles the valve from a helper thread, and runs a main loop
 * guarded by a 10-second timeout check. */
GST_START_TEST (valve_test)
{
  GstElement *pipeline = gst_pipeline_new (__FUNCTION__);
  GstElement *videotestsrc = gst_element_factory_make ("videotestsrc", NULL);
  GstElement *fakesink = gst_element_factory_make ("fakesink", NULL);
  GstElement *agnosticbin = gst_element_factory_make ("agnosticbin", NULL);
  GstElement *valve = gst_element_factory_make ("valve", NULL);
  GstElement *decoder = gst_element_factory_make ("vp8dec", NULL);
  GstElement *fakesink2 = gst_element_factory_make ("fakesink", "fakesink");
  gboolean ret;
  GstBus *bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  GThread *thread;

  /* stash cross-references the callbacks look up later */
  g_object_set_data (G_OBJECT (pipeline), VALVE_KEY, valve);

  loop = g_main_loop_new (NULL, TRUE);

  g_object_set (G_OBJECT (pipeline), "async-handling", TRUE, NULL);
  g_object_set (G_OBJECT (videotestsrc), "is-live", TRUE, NULL);

  gst_bus_add_signal_watch (bus);
  g_signal_connect (bus, "message", G_CALLBACK (bus_msg), pipeline);
  g_object_set_data (G_OBJECT (fakesink2), DECODER_KEY, decoder);
  g_object_set_data (G_OBJECT (decoder), AGNOSTIC_KEY, agnosticbin);

  mark_point ();
  gst_bin_add_many (GST_BIN (pipeline), videotestsrc, agnosticbin, fakesink,
      valve, decoder, fakesink2, NULL);
  mark_point ();
  ret = gst_element_link_many (videotestsrc, agnosticbin, fakesink, NULL);
  fail_unless (ret);
  mark_point ();
  ret = gst_element_link_many (agnosticbin, valve, decoder, fakesink2, NULL);
  fail_unless (ret);
  mark_point ();

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* fire-and-forget toggler; the thread handle is not joined */
  thread = g_thread_new ("toggle", toggle_thread, pipeline);
  g_thread_unref (thread);

  g_timeout_add_seconds (10, timeout_check, pipeline);

  mark_point ();
  g_main_loop_run (loop);
  mark_point ();

  gst_element_set_state (pipeline, GST_STATE_NULL);

  gst_bus_remove_signal_watch (bus);
  g_object_unref (pipeline);
  g_object_unref (bus);
  g_main_loop_unref (loop);
}
/* GSocketService "incoming" handler: wraps the connection and hands it to
 * a detached handler thread.  Returns FALSE so the service keeps the
 * default handling. */
gboolean
incoming_callback (GSocketService *service,
                   GSocketConnection *connection,
                   GObject *source_object,
                   gpointer user_data)
{
    g_print("Received Connection from client!\n");
    g_object_ref(connection);

    Connection* con = malloc(sizeof *con);
    if (con == NULL) {
        /* Fixed: the old code passed an unchecked malloc() result to the
         * handler thread, which would dereference NULL on OOM */
        g_object_unref(connection);
        return FALSE;
    }
    con->gSockConnection = connection;

    /* detached handler thread: unref the handle immediately */
    GThread* handlerThread = g_thread_new(NULL, connectionHandler, con);
    g_thread_unref(handlerThread);
    return FALSE;
}
/* Per-connection worker: authorizes the client, spawns a shell with piped
 * stdio, bridges it to the connection with three helper threads, and
 * cleans up when the client disconnects. */
gpointer connectionHandler(gpointer connection)
{
    if (!isAthorized(connection)) {
        connection_close(connection);
        free(connection);
        return NULL;
    }

    gint shell_stdin;
    gint shell_stdout;
    gint shell_stderr;

#ifdef __WIN32__
    gchar* argv[] = {"cmd.exe", NULL};
#elif __UNIX__
    gchar* argv[] = {"zsh", NULL};
#endif

    GError* error = NULL;
    gboolean success = g_spawn_async_with_pipes(".", argv, NULL,
        G_SPAWN_SEARCH_PATH, NULL, NULL, NULL,
        &shell_stdin, &shell_stdout, &shell_stderr, &error);
    if (!success) {
        /* Fixed: the message was passed as the format string (a
         * format-string bug whenever it contains '%') */
        g_error("%s", error->message);
        return NULL;
    }

    Client* client = client_new(shell_stdin, shell_stdout, shell_stderr, connection);
    client->isShellActive = TRUE;

    GThread* readingThread = g_thread_new(NULL, (GThreadFunc) client_reading_loop, client);
    GThread* writingOutThread = g_thread_new(NULL, (GThreadFunc) client_writing_shell_out_loop, client);
    GThread* writingErrThread = g_thread_new(NULL, (GThreadFunc) client_writing_shell_err_loop, client);
    g_print("threads started\n");

    /* Fixed: g_thread_join() already drops the g_thread_new() reference;
     * the previous g_thread_unref() calls after the joins were
     * double-unrefs of freed GThreads */
    g_thread_join(readingThread);
    g_thread_join(writingOutThread);
    g_thread_join(writingErrThread);

    g_print("Client disconnected\n");
    client_free(client);
    return NULL;
}
/* Appends @msg to the session queue and processes it on a dedicated
 * thread; the queue item carries the callback and user data. */
static void
soup_session_sync_queue_message (SoupSession *session, SoupMessage *msg,
				 SoupSessionCallback callback, gpointer user_data)
{
	SoupMessageQueueItem *item =
		soup_session_append_queue_item (session, msg, FALSE, FALSE,
						callback, user_data);

	/* fire-and-forget worker: nobody joins it, so release the handle */
	g_thread_unref (g_thread_new ("SoupSessionSync:queue_message",
				      queue_message_thread, item));
}
/* Runs @worker_func on a detached thread and arranges for @finish_func to
 * be called with its result (see pragha_async_worker). */
void
pragha_async_launch (GThreadFunc worker_func, GSourceFunc finish_func, gpointer user_data)
{
	AsyncSimple *as = g_slice_new0 (AsyncSimple);

	as->func_w = worker_func;
	as->func_f = finish_func;
	as->userdata = user_data;
	as->finished_data = NULL;

	/* detached worker: drop our thread handle right away */
	GThread *worker = g_thread_new ("Launch async", pragha_async_worker, as);
	g_thread_unref (worker);
}
/* Entry point: resolves the program directory, starts the create-queue
 * consumer thread, owns the D-Bus name, and runs the main loop until quit. */
int main(int argc, char *argv[])
{
	guint id;

	/* make prog_path absolute (relative to cwd when needed) */
	prog_path = g_path_get_dirname(argv[0]);
	if (!g_path_is_absolute(prog_path)) {
		gchar *rel_path = prog_path;
		prog_path = g_build_filename(
			g_get_current_dir(), rel_path, NULL);
		g_free(rel_path);
	}

	loop = g_main_loop_new(NULL, FALSE);

	create_queue = g_async_queue_new();
	create_thread = g_thread_new(
		"create_queue", create_queue_consumer_thread, NULL);

	id = g_bus_own_name(
		G_BUS_TYPE_SESSION,
		API_MNGR_NAME,
		G_BUS_NAME_OWNER_FLAGS_ALLOW_REPLACEMENT |
		G_BUS_NAME_OWNER_FLAGS_REPLACE,
		on_bus_acquired, NULL, on_bus_lost, NULL, NULL);

	g_main_loop_run(loop);

	/* NOTE(review): g_thread_unref() does not stop or wait for the
	 * consumer thread; if it can outlive this point it may still touch
	 * create_queue after the unref below — confirm its exit condition,
	 * or consider g_thread_join() instead. */
	g_async_queue_unref(create_queue);
	create_queue = NULL;
	g_thread_unref(create_thread);
	create_thread = NULL;

	g_free(prog_path);
	g_bus_unown_name(id);
	g_main_loop_unref(loop);
	return 0;
}
/**
 * e_alert_sink_submit_thread_job:
 * @alert_sink: an #EAlertSink instance
 * @description: user-friendly description of the job, to be shown in UI
 * @alert_ident: in case of an error, this alert identificator is used
 *    for EAlert construction
 * @alert_arg_0: (allow-none): in case of an error, use this string as
 *    the first argument to the EAlert construction; the second argument
 *    is the actual error message; can be #NULL, in which case only
 *    the error message is passed to the EAlert construction
 * @func: function to be run in a dedicated thread
 * @user_data: (allow-none): custom data passed into @func; can be #NULL
 * @free_user_data: (allow-none): function to be called on @user_data,
 *    when the job is over; can be #NULL
 *
 * Runs the @func in a dedicated thread. Any error is propagated to UI.
 * The cancellable passed into the @func is a #CamelOperation, thus
 * the caller can overwrite progress and description message on it.
 *
 * Returns: (transfer full): Newly created #EActivity on success.
 *    The caller is responsible to g_object_unref() it when done with it.
 *
 * Note: The @free_user_data, if set, is called in the main thread.
 *
 * Note: This function should be called only from the main thread.
 *
 * Since: 3.16
 **/
EActivity *
e_alert_sink_submit_thread_job (EAlertSink *alert_sink,
				const gchar *description,
				const gchar *alert_ident,
				const gchar *alert_arg_0,
				EAlertSinkThreadJobFunc func,
				gpointer user_data,
				GDestroyNotify free_user_data)
{
	EActivity *activity;
	GCancellable *cancellable;
	EAlertSinkThreadJobData *job_data;
	GThread *thread;

	g_return_val_if_fail (E_IS_ALERT_SINK (alert_sink), NULL);
	g_return_val_if_fail (description != NULL, NULL);
	g_return_val_if_fail (func != NULL, NULL);

	activity = e_activity_new ();
	cancellable = camel_operation_new ();

	e_activity_set_alert_sink (activity, alert_sink);
	e_activity_set_cancellable (activity, cancellable);
	e_activity_set_text (activity, description);
	camel_operation_push_message (cancellable, "%s", description);

	/* job_data ownership moves to the thread (or to the timeout
	 * callback when thread creation fails) */
	job_data = g_new0 (EAlertSinkThreadJobData, 1);
	job_data->activity = g_object_ref (activity);
	job_data->alert_ident = g_strdup (alert_ident);
	job_data->alert_arg_0 = g_strdup (alert_arg_0);
	job_data->error = NULL;
	job_data->func = func;
	job_data->user_data = user_data;
	job_data->free_user_data = free_user_data;

	thread = g_thread_try_new (G_STRFUNC, e_alert_sink_thread_job, job_data, &job_data->error);

	g_object_unref (cancellable);

	if (thread) {
		/* detached: the job thread is never joined, drop our reference */
		g_thread_unref (thread);
	} else {
		/* thread creation failed: report the stored error from an
		 * idle/timeout callback on the main loop */
		g_prefix_error (&job_data->error, _("Failed to create a thread: "));
		g_timeout_add (1, e_alert_sink_thread_job_done_cb, job_data);
	}

	return activity;
}
void Socket::resolve(std::string& host, gpointer data, on_resolved_func func) { initWinSock(); std::map<std::string, in_addr_t>::iterator iter; iter = dns_map.find(host); if (iter != dns_map.end()) { func(data, true, iter->second); return; } DnsQueryData *query_data = new DnsQueryData(); query_data->host = host; query_data->data = data; query_data->func = func; g_thread_unref(g_thread_new("dns_thread", dns_thread, query_data)); }
/* "Connect" button handler for the connection dialog: opens either a
 * serial-port tuner connection (synchronously) or a network connection
 * (on a background thread), updating configuration and the window title. */
static void connection_dialog_connect(GtkWidget *widget, gpointer data)
{
    const gchar *hostname = gtk_entry_get_text(GTK_ENTRY(gtk_bin_get_child(GTK_BIN(e_host))));
    const gchar *port = gtk_entry_get_text(GTK_ENTRY(e_port));
    const gchar *password = gtk_entry_get_text(GTK_ENTRY(e_password));
    gint result;
    gintptr fd;

    if(gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(r_serial)))
    {
        /* Serial port */
        gchar *serial = gtk_combo_box_text_get_active_text(GTK_COMBO_BOX_TEXT(c_serial));
        if(serial)
        {
            /* lock the dialog while the open is attempted */
            connection_dialog_unlock(FALSE);
            g_snprintf(ui.window_title, 100, "%s / %s", APP_NAME, serial);
            conf_update_string_const(&conf.serial, serial);
            conf.network = FALSE;
            result = tuner_open_serial(serial, &fd);
            if(result == CONN_SUCCESS)
                connection_dialog_connected(TUNER_THREAD_SERIAL, fd);
            else
                connection_serial_state(result);
            g_free(serial);
        }
    }
    else if(strlen(hostname) && atoi(port) > 0)
    {
        /* Network */
        connection_dialog_unlock(FALSE);
        connecting = conn_new(hostname, port, password);
        g_snprintf(ui.window_title, 100, "%s / %s", APP_NAME, hostname);
        conf_add_host(hostname);
        conf.port = atoi(port);
        conf.network = TRUE;
        if(gtk_toggle_button_get_active(GTK_TOGGLE_BUTTON(c_password)))
        {
            conf_update_string_const(&conf.password, password);
        }
        /* detached socket-open worker; its handle is dropped immediately */
        g_thread_unref(g_thread_new("open_socket", tuner_open_socket, connecting));
    }
}