/* Worker thread that drives periodic account synchronization.
 *
 * Runs a state machine driven by the atomic mgr->priv->sync_request
 * field, which other threads update to pause, restart, or stop:
 *   - SYNC_REQ_PAUSE:   nap in 1 s increments until the request changes
 *   - SYNC_REQ_START:   5 s grace period, then request a restart
 *   - SYNC_REQ_RUN:     wait ~30 s (abandoning the wait early if the
 *                       request changes), then fall through to RESTART
 *   - SYNC_REQ_RESTART: run one sync cycle, bailing out between steps
 *                       if the request changed in the meantime
 *   - SYNC_REQ_STOP:    terminate the thread
 *
 * @param data EeeAccountsManager owning this thread.
 * @return always NULL.
 */
static gpointer sync_thread_func(gpointer data)
{
    EeeAccountsManager *mgr = data;
    int i;

    g_return_val_if_fail(IS_EEE_ACCOUNTS_MANAGER(mgr), NULL);

    while (TRUE)
    {
loop:
        switch (g_atomic_int_get(&mgr->priv->sync_request))
        {
        case SYNC_REQ_PAUSE:
            /* Paused: poll once a second for a new request. */
            g_usleep(1000000);
            g_thread_yield();
            break;

        case SYNC_REQ_START:
            /* Give the application time to settle before the first sync. */
            g_usleep(5000000);
            g_atomic_int_set(&mgr->priv->sync_request, SYNC_REQ_RESTART);
            break;

        case SYNC_REQ_RUN:
            /* Sleep ~30 s between cycles, re-checking the request once a
             * second so state changes take effect promptly. */
            for (i = 0; i < 30; i++)
            {
                g_usleep(1000000);
                if (g_atomic_int_get(&mgr->priv->sync_request) != SYNC_REQ_RUN)
                {
                    goto loop;
                }
            }
            /* fallthrough: timer expired, start the next cycle */

        case SYNC_REQ_RESTART:
            g_atomic_int_set(&mgr->priv->sync_request, SYNC_REQ_RUN);
            run_idle(sync_starter, mgr);
            /* Abort the cycle if the request changed while starting. */
            if (g_atomic_int_get(&mgr->priv->sync_request) != SYNC_REQ_RUN)
            {
                break;
            }
            eee_accounts_manager_sync_phase1(mgr);
            if (g_atomic_int_get(&mgr->priv->sync_request) != SYNC_REQ_RUN)
            {
                break;
            }
            run_idle(sync_completer, mgr);
            break;

        case SYNC_REQ_STOP:
            return NULL;
        }
    }
    return NULL;
}
/* nbd-ops -p pid -f fsname -c command */ int main(int argc, char *argv[]) { GError *error = NULL; GOptionContext *context; context = g_option_context_new("- tapdisk ops..."); g_option_context_add_main_entries(context, entries, NULL); g_option_context_set_help_enabled(context, TRUE); g_option_group_set_error_hook(g_option_context_get_main_group(context), (GOptionErrorFunc)error_func); if (!g_option_context_parse(context, &argc, &argv, &error)) { g_message("option parsing failed: %s\n", error->message); exit(EXIT_FAILURE); } g_message("fsname: %s\n", fsname); g_message("cmd: %s\n", cmd); g_message("param1: %d\n" ,param1); g_option_context_free(context); char ctrl_region_file[128]; sprintf(ctrl_region_file,"%s%s%s","/tmp/",fsname,"-ctrl"); int fd = open(ctrl_region_file,O_RDWR); if(fd == -1){ return -1; } // int offset = sysconf(_SC_PAGE_SIZE); char *addr = mmap(NULL,sizeof(CTRL_REGION_T), PROT_WRITE, MAP_SHARED, fd, 0); if (addr == MAP_FAILED) { g_message("%s -- mmap failed\n", __func__); exit(EXIT_FAILURE); } g_assert(addr !=NULL); CTRL_REGION_T *ctrl_region; ctrl_region = (CTRL_REGION_T*)addr; gint *_is_start_clean = &ctrl_region->is_start_clean; gint *_copy_waterlevel = &ctrl_region->copy_waterlevel; if(0 == strncmp(cmd,"start_merge",strlen("start_merge"))){ g_message("cmd is srart merge : %s",cmd); g_atomic_int_set(_is_start_clean,1); }else if(0 == strncmp(cmd,"stop_merge",strlen("stop_merge"))){ g_message("cmd is stop merge : %s",cmd); g_atomic_int_set(_is_start_clean,0); }else if (0 == strncmp(cmd, "set_copy_waterlevel", strlen("set_copy_waterlevel"))){ g_message("cmd is set copy waterlevel: %s", cmd); g_atomic_int_set(_copy_waterlevel,param1); }else if (0 == strncmp(cmd, "query_stat", strlen("query_stat"))){ g_message("cmd is query_stat: %s", cmd); g_message("is_start_clean:%d", ctrl_region->is_start_clean); g_message("copy_waterlevel:%d",ctrl_region->copy_waterlevel); }else{ g_assert(0); } return 0; }
/* Thread-pool worker: compress the byte range [startPos, endPos) of
 * tp_arg->filen into tp_arg->tmpFilen with maximum zlib compression.
 *
 * Progress (0-100) is published atomically in tp_arg->processed when
 * verbose is set; tp_arg->error is raised on any failure and
 * tp_arg->done is raised when the worker finishes either way.
 * The loop also stops early when _recieved_SIGINT is set.
 */
void file_read_func_zlib(gpointer data, gpointer user_data)
{
    tp_args_t *tp_arg = (tp_args_t *) data;
    gchar buf[READBUFZ];
    size_t nread = 0, currPos = tp_arg->startPos, readSz = READBUFZ;
    gint processed = 0;
    int written = 0;
    FILE *fd = fopen(tp_arg->filen, "r");
    gzFile zfd = gzopen(tp_arg->tmpFilen, "wb9");

    if (!fd) {
        perror("couldn't open input file");
        /* Close the compressed stream if it was opened (the original
         * leaked it on this path). */
        if (zfd)
            gzclose(zfd);
        g_atomic_int_set(&(tp_arg->error), TRUE);
        g_atomic_int_set(&(tp_arg->done), TRUE);
        return;
    }
    if (!zfd) {
        perror("couldn't open tempfile");
        /* Likewise, do not leak the input stream. */
        fclose(fd);
        g_atomic_int_set(&(tp_arg->error), TRUE);
        g_atomic_int_set(&(tp_arg->done), TRUE);
        return;
    }

    fseek(fd, tp_arg->startPos, SEEK_SET);

    while (currPos < tp_arg->endPos && !feof(fd) && !_recieved_SIGINT) {
        /* Read a chunk, clipping the final read to the range end. */
        if (currPos + readSz > tp_arg->endPos)
            readSz = tp_arg->endPos - currPos;
        nread = fread(&(buf[0]), 1, readSz, fd);
        currPos += nread;

        if (tp_arg->verbose) {
            processed = (currPos - tp_arg->startPos) /
                (double)(tp_arg->endPos - tp_arg->startPos) * 100;
            g_atomic_int_set(&(tp_arg->processed), processed);
        }

        /* zip and write the chunk */
        written = gzwrite(zfd, &(buf[0]), nread);
        if ((size_t)written != nread) {
            /* %zu matches size_t; the original's %d was a format
             * mismatch (undefined behavior). */
            fprintf(stderr,
                "Zlib Error. Supposed to write %zu bytes but only wrote %d\n",
                nread, written);
            g_atomic_int_set(&(tp_arg->error), TRUE);
            break;
        }
    }

    /* clean up */
    fclose(fd);
    gzclose(zfd);
    g_atomic_int_set(&(tp_arg->done), TRUE);
}
/* Plugin implementation */

/* Initialize the VideoCall plugin.
 *
 * Reads <config_path>/<package>.cfg for the optional "general/events"
 * setting, creates the global sessions table, mutex and message queue,
 * stores the gateway callback, then spawns the watchdog and handler
 * threads.  On thread-creation failure the 'initialized' flag is
 * cleared again.
 *
 * @param callback    gateway callbacks used to contact the Janus core
 * @param config_path directory containing the plugin config file
 * @return 0 on success; -1 on invalid arguments, while still stopping,
 *         or when a thread could not be launched
 */
int janus_videocall_init(janus_callbacks *callback, const char *config_path) {
	if(g_atomic_int_get(&stopping)) {
		/* Still stopping from before */
		return -1;
	}
	if(callback == NULL || config_path == NULL) {
		/* Invalid arguments */
		return -1;
	}
	/* Read configuration */
	char filename[255];
	g_snprintf(filename, 255, "%s/%s.cfg", config_path, JANUS_VIDEOCALL_PACKAGE);
	JANUS_LOG(LOG_VERB, "Configuration file: %s\n", filename);
	janus_config *config = janus_config_parse(filename);
	if(config != NULL) {
		janus_config_print(config);
		janus_config_item *events = janus_config_get_item_drilldown(config, "general", "events");
		if(events != NULL && events->value != NULL)
			notify_events = janus_is_true(events->value);
		if(!notify_events && callback->events_is_enabled()) {
			JANUS_LOG(LOG_WARN, "Notification of events to handlers disabled for %s\n", JANUS_VIDEOCALL_NAME);
		}
	}
	/* NOTE(review): destroy is called even when parsing failed
	 * (config == NULL) — presumably it tolerates NULL; confirm. */
	janus_config_destroy(config);
	config = NULL;
	sessions = g_hash_table_new(g_str_hash, g_str_equal);
	janus_mutex_init(&sessions_mutex);
	messages = g_async_queue_new_full((GDestroyNotify) janus_videocall_message_free);
	/* This is the callback we'll need to invoke to contact the gateway */
	gateway = callback;
	g_atomic_int_set(&initialized, 1);
	GError *error = NULL;
	/* Start the sessions watchdog */
	watchdog = g_thread_try_new("videocall watchdog", &janus_videocall_watchdog, NULL, &error);
	if(error != NULL) {
		/* Roll back the initialized flag before bailing out. */
		g_atomic_int_set(&initialized, 0);
		JANUS_LOG(LOG_ERR, "Got error %d (%s) trying to launch the VideoCall watchdog thread...\n", error->code, error->message ? error->message : "??");
		return -1;
	}
	/* Launch the thread that will handle incoming messages */
	handler_thread = g_thread_try_new("videocall handler", janus_videocall_handler, NULL, &error);
	if(error != NULL) {
		g_atomic_int_set(&initialized, 0);
		JANUS_LOG(LOG_ERR, "Got error %d (%s) trying to launch the VideoCall handler thread...\n", error->code, error->message ? error->message : "??");
		return -1;
	}
	JANUS_LOG(LOG_INFO, "%s initialized!\n", JANUS_VIDEOCALL_NAME);
	return 0;
}
/* events sent to this element directly, mainly from the application */

/* Handle events sent straight to the element.
 *
 * EOS only raises the pending_eos flag (the streaming thread acts on
 * it); serialized custom/tag events are queued under the object lock
 * for in-band insertion; OOB custom events are pushed downstream
 * immediately; everything else is dropped.  Consumes the event ref.
 */
static gboolean
gst_decklink_src_send_event (GstElement * element, GstEvent * event)
{
  GstDecklinkSrc *src;
  gboolean result = FALSE;

  src = GST_DECKLINK_SRC (element);

  GST_DEBUG_OBJECT (src, "handling event %p %" GST_PTR_FORMAT, event, event);

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_EOS:
      g_atomic_int_set (&src->pending_eos, TRUE);
      GST_INFO_OBJECT (src, "EOS pending");
      result = TRUE;
      /* The original had a duplicated, unreachable second `break;`
       * here; removed. */
      break;
    case GST_EVENT_TAG:
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    case GST_EVENT_CUSTOM_BOTH:
      /* Insert TAG, CUSTOM_DOWNSTREAM, CUSTOM_BOTH in the dataflow */
      GST_OBJECT_LOCK (src);
      src->pending_events = g_list_append (src->pending_events, event);
      g_atomic_int_set (&src->have_events, TRUE);
      GST_OBJECT_UNLOCK (src);
      event = NULL;
      result = TRUE;
      break;
    case GST_EVENT_CUSTOM_DOWNSTREAM_OOB:
    case GST_EVENT_CUSTOM_BOTH_OOB:
      /* insert a random custom event into the pipeline */
      GST_DEBUG_OBJECT (src, "pushing custom OOB event downstream");
      result = gst_pad_push_event (src->videosrcpad, gst_event_ref (event));
      result |= gst_pad_push_event (src->audiosrcpad, event);
      /* we gave away the ref to the event in the push */
      event = NULL;
      break;
    case GST_EVENT_CUSTOM_UPSTREAM:
      /* drop */
    case GST_EVENT_SEGMENT:
      /* sending random SEGMENT downstream can break sync - drop */
    default:
      GST_LOG_OBJECT (src, "dropping %s event", GST_EVENT_TYPE_NAME (event));
      break;
  }

  /* if we still have a ref to the event, unref it now */
  if (event)
    gst_event_unref (event);

  return result;
}
void ps_gstsink_destroy (void) { if (!g_atomic_int_get(&initialized)) return; g_atomic_int_set(&stopping, 1); g_async_queue_push (messages, &exit_message); if (handler_thread != NULL) { g_thread_join (handler_thread); handler_thread = NULL; } if (watchdog != NULL) { g_thread_join (watchdog); watchdog = NULL; } usleep(500000); ps_mutex_lock(&sessions_mutex); /* Cleanup session data */ GHashTableIter iter; gpointer value; g_hash_table_iter_init (&iter, sessions); while (g_hash_table_iter_next (&iter, NULL, &value)) { ps_gstsink_session * session = value; if (!session->destroyed && session->vplayer != NULL) { ps_video_player * player = session->vplayer; gst_object_unref (player->vbus); gst_element_set_state (player->vpipeline, GST_STATE_NULL); if (gst_element_get_state (player->vpipeline, NULL, NULL, GST_CLOCK_TIME_NONE) == GST_STATE_CHANGE_FAILURE) { PS_LOG (LOG_ERR, "Unable to stop GSTREAMER video player..!!\n"); } gst_object_unref (GST_OBJECT(player->vpipeline)); } /*g_hash_table_iter_remove (&iter); session->handle = NULL; g_free(session); session = NULL;*/ g_hash_table_remove(sessions, session->handle); old_sessions = g_list_append(old_sessions, session); } g_hash_table_destroy(sessions); ps_mutex_unlock(&sessions_mutex); g_async_queue_unref(messages); messages = NULL; sessions = NULL; g_atomic_int_set(&initialized, 0); g_atomic_int_set(&stopping, 0); PS_LOG(LOG_INFO, "%s destroyed!\n", PS_GSTSINK_NAME); }
/* Push the events every source pad must emit before data can flow:
 * stream-start (once), pending caps, the segment (unless a flushing
 * seek is in progress), and any changed tags.  Pending work is tracked
 * by the atomic flags and fields in GstAggregatorPrivate. */
static inline void
_push_mandatory_events (GstAggregator * self)
{
  GstAggregatorPrivate *priv = self->priv;

  if (g_atomic_int_get (&self->priv->send_stream_start)) {
    gchar s_id[32];

    GST_INFO_OBJECT (self, "pushing stream start");
    /* stream-start (FIXME: create id based on input ids) */
    g_snprintf (s_id, sizeof (s_id), "agg-%08x", g_random_int ());
    if (!gst_pad_push_event (self->srcpad, gst_event_new_stream_start (s_id))) {
      GST_WARNING_OBJECT (self->srcpad, "Sending stream start event failed");
    }
    g_atomic_int_set (&self->priv->send_stream_start, FALSE);
  }

  if (self->priv->srccaps) {
    GST_INFO_OBJECT (self, "pushing caps: %" GST_PTR_FORMAT,
        self->priv->srccaps);
    if (!gst_pad_push_event (self->srcpad,
            gst_event_new_caps (self->priv->srccaps))) {
      GST_WARNING_OBJECT (self->srcpad, "Sending caps event failed");
    }
    /* srccaps is consumed here: drop the stored ref and clear it so
     * the caps are only pushed once. */
    gst_caps_unref (self->priv->srccaps);
    self->priv->srccaps = NULL;
  }

  if (g_atomic_int_get (&self->priv->send_segment)) {
    /* While a flush-seek is pending, hold the segment back; the seek
     * path installs the correct one. */
    if (!g_atomic_int_get (&self->priv->flush_seeking)) {
      GstEvent *segev = gst_event_new_segment (&self->segment);

      /* Reuse the stored seqnum so downstream can correlate the
       * segment with the originating seek. */
      if (!self->priv->seqnum)
        self->priv->seqnum = gst_event_get_seqnum (segev);
      else
        gst_event_set_seqnum (segev, self->priv->seqnum);

      GST_DEBUG_OBJECT (self, "pushing segment %" GST_PTR_FORMAT, segev);
      gst_pad_push_event (self->srcpad, segev);
      g_atomic_int_set (&self->priv->send_segment, FALSE);
    }
  }

  if (priv->tags && priv->tags_changed) {
    gst_pad_push_event (self->srcpad,
        gst_event_new_tag (gst_tag_list_ref (priv->tags)));
    priv->tags_changed = FALSE;
  }
}
/* Push-mode (de)activation handler for the pad.
 *
 * Activation resets the last pad-push result to GST_FLOW_OK and, when
 * the pad is linked and the component is ready, resumes both OMX ports
 * and starts the src-pad streaming task.  Deactivation flips the
 * result to GST_FLOW_WRONG_STATE so the task bails out, pauses the
 * ports to unblock any waiting loops, then stops the task.
 *
 * @return TRUE on success of the start/stop operation.
 */
static gboolean
activate_push (GstPad * pad, gboolean active)
{
  gboolean result = TRUE;
  GstOmxBaseFilter *self;

  self = GST_OMX_BASE_FILTER (gst_pad_get_parent (pad));

  if (active) {
    GST_DEBUG_OBJECT (self, "activate");

    /* task may carry on */
    g_atomic_int_set (&self->last_pad_push_return, GST_FLOW_OK);

    /* we do not start the task yet if the pad is not connected */
    if (gst_pad_is_linked (pad)) {
      if (self->ready) {
        /** @todo link callback function also needed */
        g_omx_port_resume (self->in_port);
        g_omx_port_resume (self->out_port);

        GST_INFO_OBJECT (self, "start srcpad task");
        result = gst_pad_start_task (pad, output_loop, pad);
      }
    }
  } else {
    GST_DEBUG_OBJECT (self, "deactivate");

    /* persuade task to bail out */
    g_atomic_int_set (&self->last_pad_push_return, GST_FLOW_WRONG_STATE);

    if (self->ready) {
      /** @todo disable this until we properly reinitialize the buffers. */
#if 0
      /* flush all buffers */
      OMX_SendCommand (self->gomx->omx_handle, OMX_CommandFlush, OMX_ALL, NULL);
#endif

      /* unlock loops */
      g_omx_port_pause (self->in_port);
      g_omx_port_pause (self->out_port);
    }

    /* make sure streaming finishes */
    result = gst_pad_stop_task (pad);
  }

  /* balance the ref taken by gst_pad_get_parent() above */
  gst_object_unref (self);

  return result;
}
/* Immediate-watcher callback shared by all NUM_TASKS tasks.
 *
 * Verifies two scheduler properties:
 *   1. Immediates for the SAME task never run concurrently — checked
 *      via the atomic in_an_immediate flag plus the "sleeping" object
 *      data used as a second canary around a deliberate sleep.
 *   2. Immediates for DIFFERENT tasks may overlap; observed overlaps
 *      are counted in saw_another_immediate_in_an_immediate_count.
 *
 * Returns FALSE so the watcher fires only once.
 */
static gboolean
on_immediate_for_many_tasks(HrtTask *task,
                            HrtWatcherFlags flags,
                            void *data)
{
    void *sleeping;
    TestFixture *fixture = data;
    int i, j;

    g_assert(flags == HRT_WATCHER_FLAG_NONE);

    /* Find our slot: which of the fixture's tasks is running us. */
    for (i = 0; i < NUM_TASKS; ++i) {
        if (fixture->tasks[i].task == task)
            break;
    }
    g_assert(i < NUM_TASKS);

    /* Our own task must not already be inside an immediate. */
    g_assert(g_atomic_int_get(&fixture->tasks[i].in_an_immediate) == 0);

    /* Record whether any OTHER task is currently inside an immediate;
     * cross-task overlap is allowed and expected occasionally. */
    for (j = 0; j < NUM_TASKS; ++j) {
        if (g_atomic_int_get(&fixture->tasks[j].in_an_immediate)) {
            g_assert(i != j);
            fixture->tasks[i].saw_another_immediate_in_an_immediate_count += 1;
            break;
        }
    }

    g_atomic_int_set(&fixture->tasks[i].in_an_immediate, 1);

    /* Use a flag to verify that multiple immediates don't run at once
     * on same task. */
    sleeping = g_object_get_data(G_OBJECT(task), "sleeping");
    g_assert(sleeping == NULL);
    g_object_set_data(G_OBJECT(task), "sleeping", GINT_TO_POINTER(1));

    /* Sleep long enough that a second immediate on this task would
     * overlap if the scheduler (incorrectly) allowed it. */
    g_usleep(G_USEC_PER_SEC / 20);

    sleeping = g_object_get_data(G_OBJECT(task), "sleeping");
    g_assert(sleeping != NULL);
    g_object_set_data(G_OBJECT(task), "sleeping", NULL);

    g_atomic_int_set(&fixture->tasks[i].in_an_immediate, 0);

    fixture->tasks[i].immediates_run_count += 1;

    return FALSE;
}
/*
 * _handle_command_line()
 *
 * Parse the command line options out of argc/argv:
 *   gdaemon_glib [-u|--userid name] [-f|--forcepid] [-p|--pidfile fname]
 *                [-d|--debug XX] [-v|--version] [-h|--help]
 *
 * On a parse error or --version the exit flag is raised (set to 0,
 * which stops the main loop's wait condition) and EXIT_FAILURE is
 * returned; otherwise a default pid filename is applied if none was
 * given and EXIT_SUCCESS is returned.  The caller owns *context on
 * the success path.
 */
static gint _handle_command_line(int argc, char **argv, GOptionContext **context)
{
    GError *parse_error = NULL;
    GOptionEntry option_table[] = {
        {"userid", 'u', G_OPTION_FLAG_IN_MAIN, G_OPTION_ARG_STRING,
         &gd_pch_effective_userid, "Runtime userid", "name"},
        {"pidfile", 'p', G_OPTION_FLAG_IN_MAIN, G_OPTION_ARG_STRING,
         &gd_pch_pid_filename, "PID Filename", PACKAGE_PIDFILE},
        {"debug", 'd', G_OPTION_FLAG_IN_MAIN, G_OPTION_ARG_INT,
         &i_debug, "Turn on debug messages", "[0|1]"},
        {"forcepid", 'f', G_OPTION_FLAG_IN_MAIN, G_OPTION_ARG_NONE,
         &gd_b_force, "Force overwite of pid file", "cleanup after prior errors"},
        {"version", 'v', G_OPTION_FLAG_IN_MAIN, G_OPTION_ARG_NONE,
         &gd_b_version, "Program version info", NULL},
        {NULL}
    };

    /* Get command line parms */
    *context = g_option_context_new(" => SHR Phone FSO Daemon");
    g_option_context_add_main_entries(*context, option_table, NULL);
    g_option_context_set_ignore_unknown_options(*context, FALSE);

    if (!(g_option_context_parse(*context, &argc, &argv, &parse_error))) {
        g_warning("Parse command line failed: %s", parse_error->message);
        g_option_context_free(*context);
        g_error_free(parse_error);
        g_atomic_int_set(&gd_flag_exit, 0); /* flag an exit */
        return (EXIT_FAILURE);
    }

    if (gd_b_version) {
        g_print("SHR PhoneFSO Daemon\n%s Version %s\n%s\n\n",
                PACKAGE_NAME, PACKAGE_VERSION, "GPLv2 (2009) the SHR Team");
        g_option_context_free(*context);
        g_atomic_int_set(&gd_flag_exit, 0); /* flag an exit */
        return (EXIT_FAILURE);
    }

    /* Fall back to the packaged default pid filename. */
    if (gd_pch_pid_filename == NULL)
        gd_pch_pid_filename = PACKAGE_PIDFILE;

    return (EXIT_SUCCESS);
}
/* GObject constructed vfunc: finish setup after properties are set.
 *
 * Configures the ssh session's user from the credentials, attaches a
 * custom GSource to drive I/O dispatch on the transport's context,
 * then hands the CockpitSshData block off to a background connect
 * thread.  self->data is cleared here: ownership moves to the thread
 * until the connection completes. */
static void
cockpit_ssh_transport_constructed (GObject *object)
{
  CockpitSshTransport *self = COCKPIT_SSH_TRANSPORT (object);
  CockpitSshData *data;

  static GSourceFuncs source_funcs = {
    cockpit_ssh_source_prepare,
    cockpit_ssh_source_check,
    cockpit_ssh_source_dispatch,
    NULL,
  };

  G_OBJECT_CLASS (cockpit_ssh_transport_parent_class)->constructed (object);

  g_return_if_fail (self->data->creds != NULL);
  g_warn_if_fail (ssh_options_set (self->data->session, SSH_OPTIONS_USER,
                                   cockpit_creds_get_user (self->data->creds)) == 0);

  /* I/O dispatch source bound to this transport's main context. */
  self->io = g_source_new (&source_funcs, sizeof (CockpitSshSource));
  ((CockpitSshSource *)self->io)->transport = self;
  g_source_attach (self->io, self->data->context);

  /* Setup for connect thread */
  self->connect_fd = ssh_get_fd (self->data->session);
  g_atomic_int_set (&self->connecting, 1);
  self->data->connecting = &self->connecting;

  /* Transfer data to the connect thread; self->data must not be used
   * from this thread while the connection is in flight. */
  data = self->data;
  self->data = NULL;

  self->connect_thread = g_thread_new ("ssh-transport-connect",
                                       cockpit_ssh_connect_thread, data);

  g_debug ("%s: constructed", self->logname);
}
/* Bus watch used while stopping a test: once the pipeline itself
 * reports reaching PLAYING with nothing pending, stop and release the
 * stream transmitter, clear the running flag and quit the main loop.
 * Any other message is delegated to the generic error callback. */
static gboolean
_bus_stop_stream_cb (GstBus *bus, GstMessage *message, gpointer user_data)
{
  FsStreamTransmitter *transmitter = user_data;
  GstState old_state, new_state, pending_state;
  gboolean pipeline_state_change;

  pipeline_state_change =
      GST_MESSAGE_TYPE (message) == GST_MESSAGE_STATE_CHANGED &&
      G_OBJECT_TYPE (GST_MESSAGE_SRC (message)) == GST_TYPE_PIPELINE;

  if (!pipeline_state_change)
    return bus_error_callback (bus, message, user_data);

  gst_message_parse_state_changed (message, &old_state, &new_state,
      &pending_state);

  if (new_state != GST_STATE_PLAYING)
    return TRUE;

  if (pending_state != GST_STATE_VOID_PENDING)
    ts_fail ("New state playing, but pending is %d", pending_state);

  GST_DEBUG ("Stopping stream transmitter");
  fs_stream_transmitter_stop (transmitter);
  g_object_unref (transmitter);
  GST_DEBUG ("Stopped stream transmitter");

  g_atomic_int_set (&running, FALSE);
  g_main_loop_quit (loop);

  return TRUE;
}
/* Reset the aggregator's segment/flush/tag bookkeeping and give the
 * subclass a chance to flush its own state. */
static GstFlowReturn
_flush (GstAggregator * self)
{
  GstAggregatorPrivate *priv = self->priv;
  GstAggregatorClass *agg_class = GST_AGGREGATOR_GET_CLASS (self);
  GstFlowReturn result = GST_FLOW_OK;

  GST_DEBUG_OBJECT (self, "Flushing everything");

  /* A fresh segment must be pushed after the flush; seek and tag
   * state are cleared as well. */
  g_atomic_int_set (&priv->send_segment, TRUE);
  g_atomic_int_set (&priv->flush_seeking, FALSE);
  g_atomic_int_set (&priv->tags_changed, FALSE);

  if (agg_class->flush != NULL)
    result = agg_class->flush (self);

  return result;
}
/* Completion callback for the asynchronous D-Bus Prompt() call.
 *
 * On success the closure is marked as prompting and the prompted flag
 * is raised; the operation then completes later via signal.  On error
 * the error is stored on the async result and the operation completes
 * immediately — unless the prompter vanished, in which case the error
 * is expected and discarded first.  Drops the ref held on the result. */
static void
on_prompt_prompted (GObject *source,
                    GAsyncResult *result,
                    gpointer user_data)
{
	GSimpleAsyncResult *async = G_SIMPLE_ASYNC_RESULT (user_data);
	PerformClosure *closure = g_simple_async_result_get_op_res_gpointer (async);
	SecretPrompt *self = SECRET_PROMPT (source);
	GError *error = NULL;
	GVariant *reply;

	reply = g_dbus_proxy_call_finish (G_DBUS_PROXY (self), result, &error);
	if (reply != NULL)
		g_variant_unref (reply);

	/* If the prompter went away, failure is expected — ignore it. */
	if (closure->vanished)
		g_clear_error (&error);

	if (error == NULL) {
		/* Prompt is showing; completion arrives via the signal. */
		closure->prompting = TRUE;
		g_atomic_int_set (&self->pv->prompted, 1);
	} else {
		g_simple_async_result_take_error (async, error);
		perform_prompt_complete (async, TRUE);
	}

	g_object_unref (async);
}
void janus_serial_create_session(janus_plugin_session *handle, int *error) { if(g_atomic_int_get(&stopping) || !g_atomic_int_get(&initialized)) { *error = -1; return; } janus_serial_session *session = (janus_serial_session *)calloc(1, sizeof(janus_serial_session)); if(session == NULL) { JANUS_LOG(LOG_FATAL, "Memory error!\n"); *error = -2; return; } session->handle = handle; session->has_audio = FALSE; session->has_video = FALSE; session->audio_active = TRUE; session->video_active = TRUE; session->bitrate = 0; /* No limit */ session->destroyed = 0; g_atomic_int_set(&session->hangingup, 0); handle->plugin_handle = session; janus_mutex_lock(&sessions_mutex); g_hash_table_insert(sessions, handle, session); janus_mutex_unlock(&sessions_mutex); return; }
/* Window-close callback: record an error on the GL context and raise
 * the to_quit flag so the rendering path stops cleanly. */
static void
gst_glimage_sink_on_close (GstGLImageSink * gl_sink)
{
  gst_gl_context_set_error (gl_sink->context, "Output window was closed");

  g_atomic_int_set (&gl_sink->to_quit, 1);
}
void janus_videocall_create_session(janus_plugin_session *handle, int *error) { if(g_atomic_int_get(&stopping) || !g_atomic_int_get(&initialized)) { *error = -1; return; } janus_videocall_session *session = (janus_videocall_session *)g_malloc0(sizeof(janus_videocall_session)); if(session == NULL) { JANUS_LOG(LOG_FATAL, "Memory error!\n"); *error = -2; return; } session->handle = handle; session->has_audio = FALSE; session->has_video = FALSE; session->audio_active = TRUE; session->video_active = TRUE; session->bitrate = 0; /* No limit */ session->peer = NULL; session->username = NULL; janus_mutex_init(&session->rec_mutex); session->destroyed = 0; g_atomic_int_set(&session->hangingup, 0); handle->plugin_handle = session; return; }
void YapServerDeadlockPriv::setThreadTimeout(const gint intervalInMs) { if (g_atomic_int_get(&deadlockThreadTimerSourceTimeoutMs) < intervalInMs) { g_atomic_int_set(&deadlockThreadTimerSourceTimeoutMs, intervalInMs); } }
/* GObject finalize for IrisScheduler.
 *
 * Raises the in_finalize flag first so worker threads can notice the
 * shutdown, releases every thread in thread_list while holding the
 * scheduler mutex, drops the round-robin structure and the mutex, and
 * finally chains up to the parent finalize. */
static void
iris_scheduler_finalize (GObject *object)
{
  IrisScheduler *scheduler;
  IrisSchedulerPrivate *priv;

  scheduler = IRIS_SCHEDULER (object);
  priv = scheduler->priv;

  /* For the benefit of our threads */
  g_atomic_int_set (&scheduler->in_finalize, TRUE);

  g_mutex_lock (priv->mutex);

  /* Release all of our threads */
  g_list_foreach (priv->thread_list, release_thread, scheduler);
  g_list_free (priv->thread_list);

  g_mutex_unlock (priv->mutex);

  if (priv->rrobin != NULL)
    iris_rrobin_unref (priv->rrobin);

  g_mutex_free (priv->mutex);

  G_OBJECT_CLASS (iris_scheduler_parent_class)->finalize (object);
}
/* GObject property setter for the network time provider: port,
 * bind address (NULL resets to the default), provided clock, and the
 * atomically-updated active flag. */
static void
gst_net_time_provider_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstNetTimeProvider *self = GST_NET_TIME_PROVIDER (object);
  GstClock **clock_p = &self->clock;

  switch (prop_id) {
    case PROP_PORT:
      self->port = g_value_get_int (value);
      break;
    case PROP_ADDRESS:{
      const gchar *new_address = g_value_get_string (value);

      g_free (self->address);
      /* A NULL string means "reset to the default address". */
      self->address = g_strdup (new_address != NULL ?
          new_address : DEFAULT_ADDRESS);
      break;
    }
    case PROP_CLOCK:
      gst_object_replace ((GstObject **) clock_p,
          (GstObject *) g_value_get_object (value));
      break;
    case PROP_ACTIVE:
      g_atomic_int_set (&self->active.active, g_value_get_boolean (value));
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
/**
 * Advance a sequencer that isn't using the system timer.
 * @param seq Sequencer object
 * @param msec Time to advance sequencer to (absolute time since sequencer start).
 * @since 1.1.0
 */
void fluid_sequencer_process(fluid_sequencer_t* seq, unsigned int msec)
{
	fluid_evt_entry* entry;

	/* Detach the whole preQueue under the lock so producers are only
	 * blocked for an instant. */
	fluid_mutex_lock(seq->mutex);
	entry = seq->preQueue;
	seq->preQueue = NULL;
	seq->preQueueLast = NULL;
	fluid_mutex_unlock(seq->mutex);

	/* Apply the queued inserts and removals in arrival order. */
	while (entry != NULL) {
		fluid_evt_entry* following = entry->next;

		if (entry->entryType == FLUID_EVT_ENTRY_REMOVE)
			_fluid_seq_queue_remove_entries_matching(seq, entry);
		else
			_fluid_seq_queue_insert_entry(seq, entry);

		entry = following;
	}

	/* Publish the new time and dispatch everything now due. */
	g_atomic_int_set(&seq->currentMs, msec);
	_fluid_seq_queue_send_queued_events(seq);
}
/*
 * _thread_handle_signals()
 *
 * Trap linux signals for the whole multi-threaded application.
 *
 * Params:
 *    main_loop  -- closing this shuts down the app orderly
 *
 * Returns/Affects:
 *    returns and/or set the atomic gint gd_flag_exit
 *    returns last signal
 */
static gpointer _thread_handle_signals(gpointer main_loop)
{
    sigset_t signal_set;
    siginfo_t signal_info;
    gint sig = 0;
    gint rval = 0;

    sigfillset (&signal_set);
    g_debug("signal handler: startup successful");

    while (g_atomic_int_get(&gd_flag_exit)) {
        /* wait for any and all signals */
        sig = sigwaitinfo (&signal_set, &signal_info);
        /* BUG FIX: sigwaitinfo() returns -1 on error and a positive
         * signal number on success — it never returns 0, so the old
         * `if (!sig)` check could never detect a failure. */
        if (sig == -1) {
            g_message("signal handler: sigwaitinfo() returned an error => {%s}",
                      g_strerror(errno));
            continue;
        }

        /* when we get this far, we've caught a signal */
        rval = _process_signals ( &signal_info );
        g_atomic_int_set(&gd_flag_exit, rval);
    } /* end-while */

    g_main_loop_quit(main_loop);

    pthread_sigmask (SIG_UNBLOCK, &signal_set, NULL);
    g_debug("signal handler: shutdown complete");

    g_thread_exit ( GINT_TO_POINTER(sig) );

    return (NULL);
}
/* GObject property setter for the DSP video encoder.  The encoding
 * quality may only be changed while the element is in the NULL state;
 * otherwise a warning is logged and the value is ignored. */
static void set_property(GObject *obj, guint prop_id,
			 const GValue *value, GParamSpec *pspec)
{
	GstDspVEnc *self = GST_DSP_VENC(obj);

	switch (prop_id) {
	case ARG_QUALITY:
		if (GST_STATE(self) != GST_STATE_NULL) {
			GST_WARNING_OBJECT(self, "encoding quality property can be set only in NULL state");
			break;
		}
		g_atomic_int_set(&self->quality, g_value_get_uint(value));
		break;
	default:
		G_OBJECT_WARN_INVALID_PROPERTY_ID(obj, prop_id, pspec);
		break;
	}
}
/* Test driver: register the functional ffplayer suite with CUnit and
 * run it through the AGS functional-test harness.  Compiles to a
 * no-op returning 0 when libinstpatch support is absent. */
int main(int argc, char **argv)
{
#ifdef AGS_WITH_LIBINSTPATCH
  /* initialize the CUnit test registry */
  if(CU_initialize_registry() != CUE_SUCCESS){
    return CU_get_error();
  }

  /* add a suite to the registry */
  pSuite = CU_add_suite("AgsFuncitonalFFPlayerTest\0",
			ags_functional_ffplayer_test_init_suite,
			ags_functional_ffplayer_test_clean_suite);
  if(pSuite == NULL){
    CU_cleanup_registry();
    return CU_get_error();
  }

  g_atomic_int_set(&is_available, FALSE);

  ags_test_init(&argc, &argv, AGS_FUNCTIONAL_FFPLAYER_TEST_CONFIG);
  ags_functional_test_util_do_run(argc, argv,
				  ags_functional_ffplayer_test_add_test,
				  &is_available);

  /* Wait for the harness thread to wind down. */
  pthread_join(ags_functional_test_util_self()[0], NULL);

  return(-1);
#else
  return(0);
#endif
}
/* Idle callback run on the main context: request a new src pad on the
 * tee and wire up a queue -> appsink branch behind a blocking pad
 * probe so the actual link happens with dataflow stopped.  Sets the
 * global error flag when the tee cannot be found. */
static gboolean
connect_branch (gpointer pipeline)
{
  GstElement *tee, *branch_queue, *app_sink;
  GstPad *tee_src;

  tee = gst_bin_get_by_name (GST_BIN (pipeline), "tee");
  if (tee == NULL) {
    g_atomic_int_set (&error, TRUE);
    goto end;
  }

  branch_queue = gst_element_factory_make ("queue", NULL);
  app_sink = gst_element_factory_make ("appsink", NULL);

  g_object_set (G_OBJECT (app_sink), "emit-signals", TRUE, "sync", FALSE, NULL);
  g_signal_connect_data (G_OBJECT (app_sink), "new-sample",
      G_CALLBACK (new_sample), NULL, NULL, 0);

  gst_bin_add_many (GST_BIN (pipeline), branch_queue, app_sink, NULL);
  gst_element_link (branch_queue, app_sink);
  gst_element_sync_state_with_parent (branch_queue);
  gst_element_sync_state_with_parent (app_sink);

  /* The probe's destroy-notify releases the queue ref we pass here. */
  tee_src = gst_element_get_request_pad (tee, "src_%u");
  gst_pad_add_probe (tee_src, GST_PAD_PROBE_TYPE_BLOCKING, link_to_tee,
      g_object_ref (branch_queue), g_object_unref);

  g_object_unref (tee);

end:
  return G_SOURCE_REMOVE;
}
/**
 * iris_scheduler_queue:
 * @scheduler: An #IrisScheduler
 * @func: An #IrisCallback
 * @data: data for @func
 * @destroy_notify: an optional callback after execution to free data
 *
 * Queues a new work item to be executed by one of the scheduler's work
 * threads.
 *
 * @destroy_notify, if non-%NULL, should <emphasis>only</emphasis> handle
 * freeing data. If the work is unqueued and does not run, @destroy_notify will
 * still be called, and could potentially not execute for a long time after
 * @func completes.
 *
 * The order in which the items will be executed is impossible to
 * guarantee, since threads can preempt each other at any point. One way to
 * ensure ordered processing is to use an #IrisReceiver that has been set as
 * 'exclusive' using iris_arbiter_coordinate(), so that only one message will
 * be processed at a time (and in the order that they were posted).
 *
 */
void
iris_scheduler_queue (IrisScheduler  *scheduler,
                      IrisCallback    func,
                      gpointer        data,
                      GDestroyNotify  destroy_notify)
{
	IrisSchedulerPrivate *priv;

	g_return_if_fail (scheduler != NULL);

	priv = scheduler->priv;

	/* Lazy initialization of the scheduler. By holding off until we
	 * need this, we attempt to reduce our total thread usage.
	 *
	 * Double-checked: the cheap unsynchronized read skips the lock on
	 * the common already-initialized path; the atomic re-check under
	 * the mutex ensures prepare runs exactly once. */
	if (G_UNLIKELY (!priv->initialized)) {
		g_mutex_lock (priv->mutex);
		if (G_LIKELY (!g_atomic_int_get (&priv->initialized))) {
			iris_scheduler_manager_prepare (scheduler);
			g_atomic_int_set (&priv->initialized, TRUE);
		}
		g_mutex_unlock (priv->mutex);
	}

	/* Delegate the actual enqueue to the concrete scheduler class. */
	IRIS_SCHEDULER_GET_CLASS (scheduler)->queue (scheduler, func, data, destroy_notify);
}
/* Bus handler for the test pipeline: on error, dump a dot graph, raise
 * the global error flag and quit; on warning, just dump a graph; on
 * EOS, quit the main loop. */
static void
bus_message (GstBus * bus, GstMessage * msg, gpointer pipe)
{
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_ERROR:{
      gchar *dot_name = g_strdup_printf ("error-%s", GST_OBJECT_NAME (pipe));

      GST_ERROR ("Error: %" GST_PTR_FORMAT, msg);
      GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipe),
          GST_DEBUG_GRAPH_SHOW_ALL, dot_name);
      g_free (dot_name);

      GST_ERROR ("Error received on bus in pipeline: %s",
          GST_OBJECT_NAME (pipe));
      g_atomic_int_set (&error, 1);
      g_main_loop_quit (loop);
      break;
    }
    case GST_MESSAGE_WARNING:{
      gchar *dot_name = g_strdup_printf ("warning-%s", GST_OBJECT_NAME (pipe));

      GST_WARNING ("Warning: %" GST_PTR_FORMAT, msg);
      GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipe),
          GST_DEBUG_GRAPH_SHOW_ALL, dot_name);
      g_free (dot_name);
      break;
    }
    case GST_MESSAGE_EOS:
      GST_DEBUG ("Received eos event");
      g_main_loop_quit (loop);
      break;
    default:
      break;
  }
}
static MMServer *_mmsvc_core_create_new_server_from_fd(int fd[], int type) { MMServer *server; int i; LOGD("Enter"); server = malloc(sizeof(MMServer)); g_return_val_if_fail(server != NULL, NULL); server->fd = fd[MUSED_CHANNEL_MSG]; server->data_fd = fd[MUSED_CHANNEL_DATA]; server->type = type; server->stop = 0; server->retval = 0; /*initiate server */ g_atomic_int_set(&server->running, 1); for (i = 0; i < MUSED_CHANNEL_MAX; i++) { if (!_mmsvc_core_attach_server(fd[i], _mmsvc_core_connection_handler, (gpointer) i)) { LOGD("Fail to attach server fd %d", fd[i]); MMSVC_FREE(server); return NULL; } } LOGD("Leave"); return server; }
/** Schedule immediate synchronization if necessary.
 *
 * 'Necessary' means:
 * - calendar is online
 * - calendar is in immediate sync mode
 * - last connection to the 3E server was successfull
 *
 * @param cb 3E calendar backend.
 */
void e_cal_backend_3e_do_immediate_sync(ECalBackend3e *cb)
{
    /* Guard clause: bail out unless the backend says a sync is
     * warranted right now. */
    if (!e_cal_backend_3e_calendar_needs_immediate_sync(cb))
        return;

    g_atomic_int_set(&cb->priv->sync_request, SYNC_NOW);
}
/* Clock callback fired when a pad has not delivered data in time:
 * mark the pad unresponsive and nudge the aggregator's queue so it
 * can carry on without that pad.  Returns TRUE to keep the periodic
 * clock id alive, FALSE when no pad was supplied. */
static gboolean
_unresponsive_timeout (GstClock * clock, GstClockTime time,
    GstClockID id, gpointer user_data)
{
  GstAggregatorPad *pad;
  GstAggregator *aggregator;

  if (user_data == NULL)
    return FALSE;

  pad = GST_AGGREGATOR_PAD (user_data);

  /* avoid holding the last reference to the parent element here */
  PAD_LOCK_EVENT (pad);
  aggregator = GST_AGGREGATOR (gst_pad_get_parent (GST_PAD (pad)));

  GST_DEBUG_OBJECT (pad, "marked unresponsive");
  g_atomic_int_set (&pad->unresponsive, TRUE);

  if (aggregator != NULL) {
    QUEUE_PUSH (aggregator);
    gst_object_unref (aggregator);
  }
  PAD_UNLOCK_EVENT (pad);

  return TRUE;
}