/* Action handler: attach the per-IP throttle pool for the request's remote
 * address to the virtual request's outgoing throttle state.
 * Always returns LI_HANDLER_GO_ON (throttling is best-effort; a failed pool
 * lookup simply leaves the request unthrottled). */
static liHandlerResult core_handle_throttle_ip(liVRequest *vr, gpointer param, gpointer *context) {
    throttle_ip_pools *pools = param;
    liThrottleState *state = vr_get_throttle_out_state(vr);
    UNUSED(context);

    if (NULL != state) {
        /* look up (or create) the pool entry for this client IP; presumably
         * create_ip_pool() returns with references held on both pools and
         * entry (the failure branch below drops exactly those) -- confirm */
        refcounted_pool_entry *entry = create_ip_pool(vr->wrk->srv, pools, &vr->coninfo->remote_addr);
        if (NULL != entry) {
            if (!li_throttle_add_pool(vr->wrk, state, entry->pool)) {
                /* we already had a reference */
                g_atomic_int_add(&pools->refcount, -1);
                LI_FORCE_ASSERT(g_atomic_int_get(&pools->refcount) > 0);
                g_atomic_int_add(&entry->refcount, -1);
                LI_FORCE_ASSERT(g_atomic_int_get(&entry->refcount) > 0);
            } else {
                /* record the (pools, addr) pair in the per-request plugin
                 * context so the references taken above can be released when
                 * the request ends */
                GArray *vr_ip_pools = (GArray*) g_ptr_array_index(vr->plugin_ctx, pools->plugin_id);
                vr_ip_pools_entry ventry;
                if (NULL == vr_ip_pools) {
                    /* first pool for this request: lazily create the array */
                    vr_ip_pools = g_array_new(FALSE, TRUE, sizeof(vr_ip_pools_entry));
                    g_ptr_array_index(vr->plugin_ctx, pools->plugin_id) = vr_ip_pools;
                }
                ventry.pools = pools;
                /* copy the address: the coninfo one may not outlive us */
                ventry.remote_addr_copy = li_sockaddr_dup(vr->coninfo->remote_addr);
                g_array_append_val(vr_ip_pools, ventry);
            }
        }
    }
    return LI_HANDLER_GO_ON;
}
/* Allocate a new buffer from @pool through the subclass alloc_buffer vfunc,
 * honouring the pool's max_buffers limit via an optimistic atomic counter.
 *
 * Returns GST_FLOW_OK on success (with *@buffer set), GST_FLOW_NOT_SUPPORTED
 * if the class provides no alloc function, GST_FLOW_EOS when max_buffers is
 * reached, or the subclass's error code. On every error path the counter
 * increment is rolled back (except no_function, which never incremented). */
static GstFlowReturn
do_alloc_buffer (GstBufferPool * pool, GstBuffer ** buffer,
    GstBufferPoolAcquireParams * params)
{
  GstBufferPoolPrivate *priv = pool->priv;
  GstFlowReturn result;
  gint cur_buffers, max_buffers;
  GstBufferPoolClass *pclass;

  pclass = GST_BUFFER_POOL_GET_CLASS (pool);

  if (G_UNLIKELY (!pclass->alloc_buffer))
    goto no_function;

  max_buffers = priv->max_buffers;

  /* increment the allocation counter; g_atomic_int_add() returns the
   * value before the addition */
  cur_buffers = g_atomic_int_add (&priv->cur_buffers, 1);
  if (max_buffers && cur_buffers >= max_buffers)
    goto max_reached;

  result = pclass->alloc_buffer (pool, buffer, params);
  if (G_UNLIKELY (result != GST_FLOW_OK))
    goto alloc_failed;

  /* lock all metadata and mark as pooled, we want this to remain on
   * the buffer and we want to remove any other metadata that gets added
   * later */
  gst_buffer_foreach_meta (*buffer, mark_meta_pooled, pool);

  /* un-tag memory, this is how we expect the buffer when it is
   * released again */
  GST_BUFFER_FLAG_UNSET (*buffer, GST_BUFFER_FLAG_TAG_MEMORY);

  GST_LOG_OBJECT (pool, "allocated buffer %d/%d, %p", cur_buffers,
      max_buffers, *buffer);

  return result;

  /* ERRORS */
no_function:
  {
    GST_ERROR_OBJECT (pool, "no alloc function");
    return GST_FLOW_NOT_SUPPORTED;
  }
max_reached:
  {
    GST_DEBUG_OBJECT (pool, "max buffers reached");
    /* undo the optimistic increment above */
    g_atomic_int_add (&priv->cur_buffers, -1);
    return GST_FLOW_EOS;
  }
alloc_failed:
  {
    GST_WARNING_OBJECT (pool, "alloc function failed");
    g_atomic_int_add (&priv->cur_buffers, -1);
    return result;
  }
}
/* Smoke test for GLib atomic integer and pointer primitives.
 * Relies on g_atomic_int_add() returning the value *before* the addition
 * (checked explicitly below). Returns 0 on success; a failed g_assert
 * aborts the process. */
int
main (int argc, char *argv[])
{
  gint i;
  gint atomic = -5;
  gpointer atomic_pointer = NULL;
  /* NULL minus one wraps to the all-bits-set (largest) address */
  gpointer biggest_pointer = (gpointer)((gsize)atomic_pointer - 1);

  /* -5 + 15 increments == 10 */
  for (i = 0; i < 15; i++)
    g_atomic_int_inc (&atomic);
  g_assert (atomic == 10);
  /* dec_and_test only reports TRUE on the transition to exactly zero */
  for (i = 0; i < 9; i++)
    g_assert (!g_atomic_int_dec_and_test (&atomic));
  g_assert (g_atomic_int_dec_and_test (&atomic));
  g_assert (atomic == 0);

  /* add returns the pre-addition value */
  g_assert (g_atomic_int_add (&atomic, 5) == 0);
  g_assert (atomic == 5);
  g_assert (g_atomic_int_add (&atomic, -10) == 5);
  g_assert (atomic == -5);
  g_atomic_int_add (&atomic, 20);
  g_assert (atomic == 15);
  g_atomic_int_add (&atomic, -35);
  g_assert (atomic == -20);
  g_assert (atomic == g_atomic_int_get (&atomic));

  /* compare-and-exchange succeeds only when the current value matches */
  g_assert (g_atomic_int_compare_and_exchange (&atomic, -20, 20));
  g_assert (atomic == 20);
  g_assert (!g_atomic_int_compare_and_exchange (&atomic, 42, 12));
  g_assert (atomic == 20);
  g_assert (g_atomic_int_compare_and_exchange (&atomic, 20, G_MAXINT));
  g_assert (atomic == G_MAXINT);
  g_assert (g_atomic_int_compare_and_exchange (&atomic, G_MAXINT, G_MININT));
  g_assert (atomic == G_MININT);

  /* same contract for the pointer-sized variants */
  g_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer, NULL,
      biggest_pointer));
  g_assert (atomic_pointer == biggest_pointer);
  g_assert (atomic_pointer == g_atomic_pointer_get (&atomic_pointer));
  g_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer,
      biggest_pointer, NULL));
  g_assert (atomic_pointer == NULL);

  return 0;
}
/* GstElement release_pad vfunc: remove a request sink pad from the
 * interleaver. Updates the channel counters, drops the pad's channel
 * position, renumbers the remaining pads, refreshes the src caps for the
 * new channel count (or clears them when no channels remain), and finally
 * detaches the pad from the collectpads and the element.
 * Note the nested locking: the collectpads object lock is held across the
 * whole update, with the element object lock taken inside for the pad
 * renumbering pass. */
static void
gst_interleave_release_pad (GstElement * element, GstPad * pad)
{
  GstInterleave *self = GST_INTERLEAVE (element);
  GList *l;
  GstAudioChannelPosition position;

  g_return_if_fail (GST_IS_INTERLEAVE_PAD (pad));

  /* Take lock to make sure we're not changing this when processing buffers */
  GST_OBJECT_LOCK (self->collect);

  g_atomic_int_add (&self->channels, -1);

  /* only pads that negotiated caps counted as "configured" */
  if (gst_pad_has_current_caps (pad))
    g_atomic_int_add (&self->configured_sinkpads_counter, -1);

  position = GST_INTERLEAVE_PAD_CAST (pad)->channel;
  g_value_array_remove (self->input_channel_positions, position);

  /* Update channel numbers */
  GST_OBJECT_LOCK (self);
  for (l = GST_ELEMENT_CAST (self)->sinkpads; l != NULL; l = l->next) {
    GstInterleavePad *ipad = GST_INTERLEAVE_PAD (l->data);

    /* every pad after the removed one shifts down by one channel */
    if (GST_INTERLEAVE_PAD_CAST (pad)->channel < ipad->channel)
      ipad->channel--;
  }
  GST_OBJECT_UNLOCK (self);

  /* Update the src caps if we already have them */
  if (self->sinkcaps) {
    if (self->channels > 0) {
      GstCaps *srccaps;
      GstStructure *s;

      srccaps = gst_caps_copy (self->sinkcaps);
      s = gst_caps_get_structure (srccaps, 0);

      gst_structure_set (s, "channels", G_TYPE_INT, self->channels, NULL);
      gst_interleave_set_channel_positions (self, s);

      gst_pad_set_active (self->src, TRUE);
      gst_pad_set_caps (self->src, srccaps);
      gst_caps_unref (srccaps);
    } else {
      /* last channel gone: forget the negotiated sink caps entirely */
      gst_caps_replace (&self->sinkcaps, NULL);
    }
  }

  GST_OBJECT_UNLOCK (self->collect);

  gst_collect_pads_remove_pad (self->collect, pad);
  gst_element_remove_pad (element, pad);
}
void test_atomic (void) { gint i; gint atomic = -5; gpointer atomic_pointer = NULL; gpointer biggest_pointer = (gpointer)((gsize)atomic_pointer - 1); for (i = 0; i < 15; i++) g_atomic_int_inc (&atomic); cut_assert_equal_int (10, atomic); for (i = 0; i < 9; i++) cut_assert (!g_atomic_int_dec_and_test (&atomic)); cut_assert (g_atomic_int_dec_and_test (&atomic)); cut_assert_equal_int (0, atomic); cut_assert_equal_int (0, g_atomic_int_exchange_and_add (&atomic, 5)); cut_assert_equal_int (5, atomic); cut_assert_equal_int (5, g_atomic_int_exchange_and_add (&atomic, -10)); cut_assert_equal_int (-5, atomic); g_atomic_int_add (&atomic, 20); cut_assert_equal_int (15, atomic); g_atomic_int_add (&atomic, -35); cut_assert_equal_int (-20, atomic); cut_assert_equal_int (atomic, g_atomic_int_get (&atomic)); cut_assert (g_atomic_int_compare_and_exchange (&atomic, -20, 20)); cut_assert_equal_int (20, atomic); cut_assert (!g_atomic_int_compare_and_exchange (&atomic, 42, 12)); cut_assert_equal_int (20, atomic); cut_assert (g_atomic_int_compare_and_exchange (&atomic, 20, G_MAXINT)); cut_assert_equal_int (G_MAXINT, atomic); cut_assert (g_atomic_int_compare_and_exchange (&atomic, G_MAXINT, G_MININT)); cut_assert_equal_int (G_MININT, atomic); cut_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer, NULL, biggest_pointer)); cut_assert_equal_pointer (biggest_pointer, atomic_pointer); cut_assert_equal_pointer (atomic_pointer, g_atomic_pointer_get (&atomic_pointer)); cut_assert (g_atomic_pointer_compare_and_exchange (&atomic_pointer, biggest_pointer, NULL)); cut_assert (biggest_pointer); }
/* Allocate a new buffer from @pool through the subclass alloc_buffer vfunc,
 * honouring the pool's max_buffers limit via an optimistic atomic counter.
 *
 * Returns GST_FLOW_OK on success (with *@buffer set), GST_FLOW_NOT_SUPPORTED
 * if the class provides no alloc function, GST_FLOW_EOS when max_buffers is
 * reached, or the subclass's error code. On every error path the counter
 * increment is rolled back (no_function never incremented).
 *
 * Fix: the allocation log printed @buffer -- the address of the GstBuffer**
 * output parameter -- instead of *@buffer, the buffer that was actually
 * allocated (matches the success-path log of the sibling implementation). */
static GstFlowReturn
do_alloc_buffer (GstBufferPool * pool, GstBuffer ** buffer,
    GstBufferPoolAcquireParams * params)
{
  GstBufferPoolPrivate *priv = pool->priv;
  GstFlowReturn result;
  gint cur_buffers, max_buffers;
  GstBufferPoolClass *pclass;

  pclass = GST_BUFFER_POOL_GET_CLASS (pool);

  if (G_UNLIKELY (!pclass->alloc_buffer))
    goto no_function;

  max_buffers = priv->max_buffers;

  /* increment the allocation counter; g_atomic_int_add() returns the
   * value before the addition */
  cur_buffers = g_atomic_int_add (&priv->cur_buffers, 1);
  if (max_buffers && cur_buffers >= max_buffers)
    goto max_reached;

  result = pclass->alloc_buffer (pool, buffer, params);
  if (G_UNLIKELY (result != GST_FLOW_OK))
    goto alloc_failed;

  /* lock metadata and mark it as pooled so it survives on the buffer */
  gst_buffer_foreach_meta (*buffer, mark_meta_pooled, pool);

  GST_LOG_OBJECT (pool, "allocated buffer %d/%d, %p", cur_buffers,
      max_buffers, *buffer);

  return result;

  /* ERRORS */
no_function:
  {
    GST_ERROR_OBJECT (pool, "no alloc function");
    return GST_FLOW_NOT_SUPPORTED;
  }
max_reached:
  {
    GST_DEBUG_OBJECT (pool, "max buffers reached");
    /* undo the optimistic increment above */
    g_atomic_int_add (&priv->cur_buffers, -1);
    return GST_FLOW_EOS;
  }
alloc_failed:
  {
    GST_WARNING_OBJECT (pool, "alloc function failed");
    g_atomic_int_add (&priv->cur_buffers, -1);
    return result;
  }
}
/* Per-chunk callback for a block download. Writes @content (@clen bytes)
 * to the block store, updates the task/global byte counters, applies the
 * configured download rate limit, and on @end closes and commits the block
 * and kicks off the next transfer.
 * Returns 0 on success, -1 on error (client->info->result is set too). */
static int
save_block_content_cb (char *content, int clen, int end, void *cbarg)
{
    BlockTxClient *client = cbarg;
    TransferTask *task = client->info->task;
    int n;

    n = seaf_block_manager_write_block (seaf->block_mgr, client->block,
                                        content, clen);
    if (n < 0) {
        seaf_warning ("Failed to write block %s.\n", client->curr_block_id);
        client->info->result = BLOCK_CLIENT_FAILED;
        return -1;
    }

    /* Update global transferred bytes. */
    g_atomic_int_add (&(task->tx_bytes), clen);
    g_atomic_int_add (&(seaf->sync_mgr->recv_bytes), clen);

    /* Throttle: sleep 100ms per round while the global received-bytes
     * counter exceeds the configured download limit (the counter is
     * presumably reset elsewhere on a timer -- confirm).
     * NOTE(review): G_USLEEP looks like a project macro; glib's sleep
     * function is g_usleep() -- confirm it is defined. */
    while (1) {
        gint recv_bytes = g_atomic_int_get (&(seaf->sync_mgr->recv_bytes));
        if (seaf->sync_mgr->download_limit > 0 &&
            recv_bytes > seaf->sync_mgr->download_limit) {
            G_USLEEP (100000);
        } else {
            break;
        }
    }

    if (end) {
        /* Last chunk of the block: close and commit it. */
        seaf_block_manager_close_block (seaf->block_mgr, client->block);

        if (seaf_block_manager_commit_block (seaf->block_mgr,
                                             client->block) < 0) {
            seaf_warning ("Failed to commit block %s.\n",
                          client->curr_block_id);
            client->info->result = BLOCK_CLIENT_FAILED;
            return -1;
        }

        seaf_block_manager_block_handle_free (seaf->block_mgr, client->block);
        /* Set this handle to invalid. */
        client->block = NULL;

        seaf_debug ("Get block %s succeeded.\n", client->curr_block_id);

        if (transfer_next_block (client) < 0)
            return -1;
    }

    return 0;
}
/**
 * fm_init
 * @config: (allow-none): configuration file data
 *
 * Initializes libfm data. This API should be always called before any
 * other Libfm function is called. It is idempotent.
 *
 * Returns: %FALSE in case of duplicate call.
 *
 * Since: 0.1.0
 */
gboolean fm_init(FmConfig* config)
{
    /* atomic "first caller wins" guard: both variants return the value of
     * init_done *before* the addition, so only the first call sees 0 */
#if GLIB_CHECK_VERSION(2, 30, 0)
    if (g_atomic_int_add(&init_done, 1) != 0)
#else
    if (g_atomic_int_exchange_and_add(&init_done, 1) != 0)
#endif
        return FALSE; /* duplicate call */

#ifdef ENABLE_NLS
    bindtextdomain(GETTEXT_PACKAGE, PACKAGE_LOCALE_DIR);
    bind_textdomain_codeset(GETTEXT_PACKAGE, "UTF-8");
#endif
#if !GLIB_CHECK_VERSION(2, 36, 0)
    g_type_init();
#endif
#if !GLIB_CHECK_VERSION(2, 32, 0)
    g_thread_init(NULL);
#endif
    g_thread_pool_set_max_idle_time(10000); /* is 10 sec enough? */

    if(config)
        fm_config = (FmConfig*)g_object_ref(config);
    else
    {
        /* create default config object */
        fm_config = fm_config_new();
        fm_config_load_from_file(fm_config, NULL);
    }

#ifdef USE_UDISKS
    /* extension point should be added before any other GIO monitor call
       otherwise it will be ignored by GIO because GIO initializes it once */
    _fm_udisks_init();
#endif
    /* subsystem init: the ordering constraints below are load-bearing */
    _fm_file_init();
    _fm_path_init();
    _fm_icon_init();
    _fm_monitor_init();
    _fm_mime_type_init();
    _fm_file_info_init(); /* should be called only after _fm_mime_type_init() */
    _fm_folder_init();
    _fm_archiver_init();
    _fm_thumbnailer_init(); // must be called after mime-types are initialized
    _fm_thumbnail_loader_init();
    _fm_terminal_init();
    /* should be called after config initialization */
    _fm_templates_init();
    _fm_folder_config_init();
#ifdef HAVE_ACTIONS
    /* generated by vala */
    _fm_file_actions_init();
#endif

    fm_qdata_id = g_quark_from_static_string("fm_qdata_id");
    return TRUE;
}
/* bq_producer_ref:
 * Atomically take an extra reference on @producer. The producer must be
 * non-NULL and still alive (ref_count > 0) -- refusing to resurrect a
 * dead object. Returns @producer so the call can be chained. */
BufferQueue_Producer *bq_producer_ref(BufferQueue_Producer *producer)
{
    g_assert(producer && g_atomic_int_get(&producer->ref_count) > 0);

    g_atomic_int_add(&producer->ref_count, 1);

    return producer;
}
/* Plugin callback: the WebRTC PeerConnection for @handle went away.
 * Pushes a "done" event to the browser and resets the session's media
 * controls to their defaults.
 * g_atomic_int_add() returns the previous value of session->hangingup, so
 * only the first concurrent caller proceeds past that guard. */
void janus_source_hangup_media(janus_plugin_session *handle) {
	JANUS_LOG(LOG_INFO, "No WebRTC media anymore\n");
	/* bail out while the plugin is shutting down or not yet up */
	if (g_atomic_int_get(&stopping) || !g_atomic_int_get(&initialized))
		return;
	janus_source_session *session = (janus_source_session *)handle->plugin_handle;
	if (!session) {
		JANUS_LOG(LOG_ERR, "No session associated with this handle...\n");
		return;
	}
	if (session->destroyed)
		return;
	/* first caller sees 0 and continues; later callers see non-zero */
	if (g_atomic_int_add(&session->hangingup, 1))
		return;
	/* Send an event to the browser and tell it's over */
	json_t *event = json_object();
	json_object_set_new(event, "source", json_string("event"));
	json_object_set_new(event, "result", json_string("done"));
	int ret = gateway->push_event(handle, &janus_source_plugin, NULL, event, NULL);
	JANUS_LOG(LOG_VERB, " >> Pushing event: %d (%s)\n", ret, janus_get_api_error(ret));
	json_decref(event);
	/* Reset controls */
	session->audio_active = TRUE;
	session->video_active = TRUE;
	session->bitrate = 0;
}
/* Fetch the next fragment of the current M3U8 playlist, stamp it with
 * duration/timestamp, typefind its caps when needed, and push it onto
 * demux->queue for the streaming task.
 * Returns TRUE on success; FALSE at end of playlist (sets end_of_playlist)
 * or on a failed download (rolls back the client sequence number).
 *
 * Fixes:
 *  1. "&timestamp" had been corrupted to "&times;tamp"-style mojibake,
 *     which is not valid C -- restored the address-of expression.
 *  2. Caps leak: gst_caps_replace() takes its own reference on @caps, so
 *     the reference returned by gst_type_find_helper_for_buffer() leaked
 *     whenever the caps changed. The unref now happens unconditionally. */
static gboolean
gst_hls_demux_get_next_fragment (GstHLSDemux * demux)
{
  GstBuffer *buf;
  guint avail;
  const gchar *next_fragment_uri;
  GstClockTime duration;
  GstClockTime timestamp;
  gboolean discont;

  if (!gst_m3u8_client_get_next_fragment (demux->client, &discont,
          &next_fragment_uri, &duration, &timestamp)) {
    GST_INFO_OBJECT (demux, "This playlist doesn't contain more fragments");
    demux->end_of_playlist = TRUE;
    gst_task_start (demux->task);
    return FALSE;
  }

  GST_INFO_OBJECT (demux, "Fetching next fragment %s", next_fragment_uri);

  if (!gst_hls_demux_fetch_location (demux, next_fragment_uri)) {
    /* FIXME: The gst_m3u8_get_next_fragment increments the sequence number
       but another thread might call get_next_fragment and this decrement
       will not redownload the failed fragment, but might duplicate the
       download of a succeeded fragment */
    g_atomic_int_add (&demux->client->sequence, -1);
    return FALSE;
  }

  avail = gst_adapter_available (demux->download);
  buf = gst_adapter_take_buffer (demux->download, avail);
  GST_BUFFER_DURATION (buf) = duration;
  GST_BUFFER_TIMESTAMP (buf) = timestamp;

  /* We actually need to do this every time we switch bitrate */
  if (G_UNLIKELY (demux->do_typefind)) {
    GstCaps *caps = gst_type_find_helper_for_buffer (NULL, buf, NULL);

    if (!demux->input_caps || !gst_caps_is_equal (caps, demux->input_caps)) {
      gst_caps_replace (&demux->input_caps, caps);
      /* gst_pad_set_caps (demux->srcpad, demux->input_caps); */
      GST_INFO_OBJECT (demux, "Input source caps: %" GST_PTR_FORMAT,
          demux->input_caps);
      demux->do_typefind = FALSE;
    }
    /* gst_caps_replace() ref'd the caps above; drop the typefind
     * reference in both branches (previously leaked on change) */
    gst_caps_unref (caps);
  }
  gst_buffer_set_caps (buf, demux->input_caps);

  if (discont) {
    GST_DEBUG_OBJECT (demux, "Marking fragment as discontinuous");
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
  }

  g_queue_push_tail (demux->queue, buf);
  gst_task_start (demux->task);
  gst_adapter_clear (demux->download);
  return TRUE;
}
/* Refill the per-worker throttle magazines of @pool after @time_diff
 * milliseconds, distributing the pool-wide refill proportionally to each
 * worker's current connection count.
 * NOTE(review): the S_ prefix suggests a locking precondition (must be
 * called with some server/pool lock held) -- confirm against callers. */
static void S_throttle_pool_rearm_workers(liThrottlePool *pool, guint worker_count, guint time_diff) {
	guint i;
	gint64 connections = 0;
	gint64 wrk_connections[worker_count]; /* VLA: one slot per worker */
	gint64 fill;

	/* snapshot each worker's connection count and total them */
	for (i = 0; i < worker_count; ++i) {
		wrk_connections[i] = g_atomic_int_get((gint*) &pool->workers[i].connections);
		connections += wrk_connections[i];
	}
	if (0 == connections) return; /* no connections anywhere: nothing to refill */

	/* clamp: never account more than one second at a time, and never
	 * refill beyond the configured burst */
	time_diff = MIN(time_diff, 1000);
	fill = MIN((guint64) pool->burst, ((guint64) pool->rate * time_diff) / 1000u);

	throttle_debug("rearm workers: refill %i after %u (or more) msecs (rate %u, burst %u)\n",
		(guint) fill, (guint) time_diff, pool->rate, pool->burst);

	for (i = 0; i < worker_count; ++i) {
		gint wrk_fill;
		if (0 == wrk_connections[i]) continue; /* idle worker: skip */
		/* proportional share of the pool refill for this worker */
		wrk_fill = (fill * wrk_connections[i]) / connections;
		throttle_debug("rearm worker %u: refill %u\n", i, wrk_fill);
		g_atomic_int_add(&pool->workers[i].magazine, wrk_fill);
	}
}
/* Detach @pstate from the worker's waiting queue if it is linked.
 * pool_link.data doubles as the "is registered" flag: clearing it marks
 * the state as unregistered. Also drops the worker's connection count. */
static void throttle_unregister(liThrottlePoolWorkerState *pwstate, liThrottlePoolState *pstate) {
    if (NULL == pstate->pool_link.data) return; /* not registered */

    g_queue_unlink(&pwstate->waiting, &pstate->pool_link);
    pstate->pool_link.data = NULL;
    g_atomic_int_add((gint*) &pwstate->connections, -1);
}
/*
 * Create a event source object for connection. The object
 * will be added to "Event Context" for polling.
 *
 * @conn: connection to wrap (must be non-NULL)
 * @funcs: per-watch callback table (must be non-NULL)
 * @size: size of the concrete watch struct; must be >= sizeof(HmWatch)
 *        since g_source_new() allocates @size bytes and the watch is
 *        embedded at the start of that allocation
 *
 * Returns the new watch; also bumps the global total_watch_count,
 * which is balanced in the source finalizer.
 */
__export HmWatch *hm_listen_watch_create(HmConnection *conn, HmWatchFuncs *funcs, gint size)
{
	GSource *source;
	HmWatch *watch;

	G_ASSERT(conn != NULL && funcs != NULL && size >= sizeof(HmWatch));

	/* the HmWatch overlays the head of the GSource allocation */
	source = g_source_new(&hm_watch_funcs, size);
	watch = (HmWatch*)source;

	watch->buffer = NULL;
	watch->lock = g_mutex_new();
	watch->conn = conn;
	watch->funcs = funcs;
	/* poll the connection's fd for readability */
	watch->r_fd.fd = hm_connection_get_fd(conn);
	watch->r_fd.events = READ_COND;
	watch->r_fd.revents = 0;
	hm_watch_init_time(watch);
	watch->w_pending = 0;
	watch->killed = 0;
	watch->heavy_io_load = hm_connection_is_heavy(conn);
	watch->block_size = hm_connection_get_buffer_size(conn);

	hm_watch_set_callback(watch, hm_watch_listen_dispatch, NULL);
	g_source_add_poll(source, &watch->r_fd);

	g_atomic_int_add(&total_watch_count, 1);
	return watch;
}
/**
 * gst_rtsp_session_prevent_expire:
 * @session: a #GstRTSPSession
 *
 * Bump the expiry guard counter so that @session is kept from expiring.
 */
void
gst_rtsp_session_prevent_expire (GstRTSPSession * session)
{
  g_return_if_fail (GST_IS_RTSP_SESSION (session));

  /* presumably balanced by a matching "allow expire" call -- confirm */
  g_atomic_int_add (&session->priv->expire_count, 1);
}
/*
 * Event source function - destructor of the source, called
 * before the source object destroyed. we decrease the reference
 * count of ioc channel since the source object owes the count
 * of it.
 */
static void hm_watch_finalize(GSource *source)
{
	HmWatch *watch;

	G_ASSERT(source != NULL);

	/* balances the increment done when the watch was created */
	g_atomic_int_add(&total_watch_count, -1);

	watch = (HmWatch*)source;
	hm_debug(
		"Net IO '%p' finalized. total %d left.",
		NET_IO(watch), g_atomic_int_get(&total_watch_count)
	);

	/* let the concrete watch implementation clean up first */
	if (watch->funcs && watch->funcs->finalize)
	{
		(*watch->funcs->finalize)(watch);
	}

	hm_watch_destroy_private(watch->priv_data);
	watch->priv_data = NULL;

	if (watch->conn) /* we need to flush the rest of data!!! */
	{
		hm_connection_close(watch->conn);
	}

	if (watch->buffer)
	{
		hm_net_buf_free(watch->buffer);
	}

	g_mutex_free(watch->lock);
}
/* GObject instance init: set defaults, build the renderer's private
 * pipeline (named "media-renderer-<id>" from an atomic global counter so
 * every instance gets a unique name), attach a bus watch on OWR's main
 * context, and initialize the renderer lock. sink/src are created later. */
static void owr_media_renderer_init(OwrMediaRenderer *renderer)
{
    OwrMediaRendererPrivate *priv;
    GstBus *bus;
    GSource *bus_source;
    gchar *bin_name;

    renderer->priv = priv = OWR_MEDIA_RENDERER_GET_PRIVATE(renderer);

    priv->media_type = DEFAULT_MEDIA_TYPE;
    priv->source = DEFAULT_SOURCE;
    priv->disabled = DEFAULT_DISABLED;

    /* g_atomic_int_add() returns the pre-increment value -> unique ids */
    bin_name = g_strdup_printf("media-renderer-%u", g_atomic_int_add(&unique_bin_id, 1));
    priv->pipeline = gst_pipeline_new(bin_name);
    g_free(bin_name);

#ifdef OWR_DEBUG
    g_signal_connect(priv->pipeline, "deep-notify", G_CALLBACK(_owr_deep_notify), NULL);
#endif

    priv->sink = NULL;
    priv->src = NULL;

    /* NOTE(review): bus from gst_pipeline_get_bus() is never unreffed
     * here -- looks like a reference leak, confirm against dispose() */
    bus = gst_pipeline_get_bus(GST_PIPELINE(priv->pipeline));
    bus_source = gst_bus_create_watch(bus);
    g_source_set_callback(bus_source, (GSourceFunc) bus_call, priv->pipeline, NULL);
    g_source_attach(bus_source, _owr_get_main_context());
    g_source_unref(bus_source);

    g_mutex_init(&priv->media_renderer_lock);
}
/* GObject instance init for NmpSysMsg: assign a fresh message id, set all
 * routing slots to their defaults, allocate the private payload struct and
 * bump the global live-object counter (balanced in dispose()).
 * Every ~1000 constructions a debug line with the current object total is
 * printed.
 * Fix: corrected the typo "OJBECTS" -> "OBJECTS" in the debug message. */
static void nmp_sysmsg_init(NmpSysMsg *self)
{
    G_ASSERT(self != NULL);

    INIT_MSGID(self->msg_id);
    self->flags = FLG_SYSMSG_FORWARD;
    self->packet_layer.seq = 0;
    self->from_io = NULL;
    self->dst = BUSSLOT_POS_MAX;
    self->src = BUSSLOT_POS_MAX;
    self->orig = BUSSLOT_POS_MAX;
    self->priv = nmp_new0(NmpSysMsgPriv, 1);
    BUG_ON(!self->priv); /* glib says never */
    self->user_data = NULL;
    self->user_size = 0;

    g_atomic_int_add(&total_sysmsg_count, 1);

    { //@{debug}
        /* NOTE(review): print_tag is incremented without atomics while the
         * object counter above is atomic; harmless for a debug print but
         * technically racy -- confirm intent. */
        static guint print_tag = 0;
        if (++print_tag % 1000 == 0)
        {
            nmp_print(
                "===== NOW TOTAL SYSMSG OBJECTS: %d =====",
                g_atomic_int_get(&total_sysmsg_count)
            );
        }
    }
}
/* Build the GStreamer bin for an image renderer: a single appsink
 * (max-buffers=1, drop=TRUE, qos=TRUE) exposed through a "sink" ghost pad.
 * Bin names are made unique via the atomic unique_bin_id counter.
 * Must be called at most once per renderer (asserts priv->appsink unset);
 * the created appsink is cached in priv->appsink. */
static GstElement *owr_image_renderer_get_element(OwrMediaRenderer *renderer)
{
    OwrImageRenderer *image_renderer;
    OwrImageRendererPrivate *priv;
    GstElement *renderer_bin;
    GstElement *sink;
    GstPad *ghostpad, *sinkpad;
    gchar *bin_name;

    g_assert(renderer);
    image_renderer = OWR_IMAGE_RENDERER(renderer);
    priv = image_renderer->priv;

    g_assert(!priv->appsink); /* one-shot: element must not exist yet */

    /* g_atomic_int_add() returns the pre-increment value -> unique names */
    bin_name = g_strdup_printf("image-renderer-bin-%u", g_atomic_int_add(&unique_bin_id, 1));
    renderer_bin = gst_bin_new(bin_name);
    g_free(bin_name);

    sink = gst_element_factory_make("appsink", "image-renderer-appsink");
    g_assert(sink);
    priv->appsink = sink;
    /* keep only the newest sample; drop under pressure */
    g_object_set(sink, "max-buffers", 1, "drop", TRUE, "qos", TRUE,
        "enable-last-sample", FALSE, NULL);

    gst_bin_add_many(GST_BIN(renderer_bin), sink, NULL);

    /* expose the appsink's sink pad on the bin boundary */
    sinkpad = gst_element_get_static_pad(sink, "sink");
    g_assert(sinkpad);
    ghostpad = gst_ghost_pad_new("sink", sinkpad);
    gst_pad_set_active(ghostpad, TRUE);
    gst_element_add_pad(renderer_bin, ghostpad);
    gst_object_unref(sinkpad);

    return renderer_bin;
}
/* GObject dispose: balances the live-object counter bumped in the
 * instance init, then chains up to the parent class. */
static void nmp_sysmsg_dispose(GObject *object)
{
    g_atomic_int_add(&total_sysmsg_count, -1);

    G_OBJECT_CLASS(nmp_sysmsg_parent_class)->dispose(object);
}
/* Open a new FTP control connection to @addr.
 * Allocates the connection struct, assigns a unique debug id from an
 * atomic counter, connects the command stream and tunes the socket
 * (nodelay + keepalive). On connect failure, frees everything, leaves the
 * error in @error and returns NULL.
 * The caller still has to read the server's welcome message:
 * waiting_for_reply is preset to TRUE. */
GVfsFtpConnection *
g_vfs_ftp_connection_new (GSocketConnectable *addr,
                          GCancellable *      cancellable,
                          GError **           error)
{
  GVfsFtpConnection *conn;

  g_return_val_if_fail (G_IS_SOCKET_CONNECTABLE (addr), NULL);

  conn = g_slice_new0 (GVfsFtpConnection);
  conn->client = g_socket_client_new ();
  /* unique id for log correlation; add returns the pre-increment value */
  conn->debug_id = g_atomic_int_add (&debug_id, 1);
  conn->commands = G_IO_STREAM (g_socket_client_connect (conn->client,
                                                         addr,
                                                         cancellable,
                                                         error));
  if (conn->commands == NULL)
    {
      /* connect failed: roll back the partial allocation */
      g_object_unref (conn->client);
      g_slice_free (GVfsFtpConnection, conn);
      return NULL;
    }

  conn->connection = G_SOCKET_CONNECTION (conn->commands);
  enable_nodelay (conn->connection);
  enable_keepalive (conn->connection);
  create_input_stream (conn);
  /* The first thing that needs to happen is receiving the welcome message */
  conn->waiting_for_reply = TRUE;

  return conn;
}
/* Hand out process-unique SCTP association ids.
 * g_atomic_int_add() returns the value before the addition, so ids start
 * at 0 and grow monotonically; safe to call from multiple threads. */
static guint
get_sctp_association_id (void)
{
  static guint assoc_id = 0;

  return g_atomic_int_add (&assoc_id, 1);
}
/* Guard helper: aborts if it is ever entered more than once.
 * g_atomic_int_add() returns the value prior to the addition, so only
 * the very first caller observes 0. */
static void
assert_singleton_execution3 (void)
{
  static volatile int seen_execution = 0;
  int previous = g_atomic_int_add (&seen_execution, 1);

  if (previous != 0)
    g_error ("%s: function executed more than once", G_STRFUNC);
}
/**
 * g_hash_table_ref:
 * @hash_table: a valid #GHashTable.
 *
 * Atomically adds one to the reference count of @hash_table.
 * This function is MT-safe and may be called from any thread.
 *
 * Return value: @hash_table, unchanged, for convenience.
 *
 * Since: 2.10
 **/
GHashTable*
g_hash_table_ref (GHashTable *hash_table)
{
  /* reject NULL tables and tables whose last reference is already gone */
  g_return_val_if_fail (hash_table != NULL, NULL);
  g_return_val_if_fail (hash_table->ref_count > 0, hash_table);

  g_atomic_int_add (&hash_table->ref_count, 1);
  return hash_table;
}
/**
 * gtk_css_section_ref:
 * @section: a #GtkCssSection
 *
 * Increments the reference count on @section.
 *
 * Returns: @section itself.
 *
 * Since: 3.2
 **/
/* Fix: "&section->ref_count" had been corrupted by HTML-entity mangling
 * ("&sect;" rendered as the section-sign character), which is not valid
 * C -- restored the address-of expression. */
GtkCssSection *
gtk_css_section_ref (GtkCssSection *section)
{
  g_return_val_if_fail (section != NULL, NULL);

  g_atomic_int_add (&section->ref_count, 1);

  return section;
}
/* hm_net_ref:
 * Atomically take an extra reference on @net. The object must be
 * non-NULL and still alive (ref_count > 0).
 * Returns @net so the call can be chained. */
__export HmNet *hm_net_ref(HmNet *net)
{
	G_ASSERT(net != NULL && g_atomic_int_get(&net->ref_count) > 0);

	g_atomic_int_add(&net->ref_count, 1);

	return net;
}
/**
 * ostree_remote_ref:
 * @remote: an #OstreeRemote
 *
 * Increase the reference count on the given @remote.
 *
 * Returns: (transfer full): a copy of @remote, for convenience
 * Since: 2017.6
 */
OstreeRemote *
ostree_remote_ref (OstreeRemote *remote)
{
  gint previous;

  g_return_val_if_fail (remote != NULL, NULL);

  /* g_atomic_int_add() returns the count *before* the addition; a
   * non-positive previous count would mean we ref'd a dead object */
  previous = g_atomic_int_add (&remote->ref_count, 1);
  g_assert (previous > 0);

  return remote;
}
/* Plugin callback: the WebRTC PeerConnection for @handle was torn down.
 * Closes any active audio/video recordings (under rec_mutex), notifies the
 * call peer (if any) that the call was hung up remotely, reports to the
 * event handlers, and resets the session's media controls to defaults.
 * g_atomic_int_add() returns the previous value of session->hangingup, so
 * only the first concurrent caller runs the teardown. */
void janus_videocall_hangup_media(janus_plugin_session *handle) {
	JANUS_LOG(LOG_INFO, "No WebRTC media anymore\n");
	/* bail out while the plugin is shutting down or not yet up */
	if(g_atomic_int_get(&stopping) || !g_atomic_int_get(&initialized))
		return;
	janus_videocall_session *session = (janus_videocall_session *)handle->plugin_handle;
	if(!session) {
		JANUS_LOG(LOG_ERR, "No session associated with this handle...\n");
		return;
	}
	if(session->destroyed)
		return;
	/* first caller sees 0 and continues; later callers see non-zero */
	if(g_atomic_int_add(&session->hangingup, 1))
		return;
	/* Get rid of the recorders, if available */
	janus_mutex_lock(&session->rec_mutex);
	if(session->arc) {
		janus_recorder_close(session->arc);
		JANUS_LOG(LOG_INFO, "Closed audio recording %s\n", session->arc->filename ? session->arc->filename : "??");
		janus_recorder_free(session->arc);
	}
	session->arc = NULL;
	if(session->vrc) {
		janus_recorder_close(session->vrc);
		JANUS_LOG(LOG_INFO, "Closed video recording %s\n", session->vrc->filename ? session->vrc->filename : "??");
		janus_recorder_free(session->vrc);
	}
	session->vrc = NULL;
	janus_mutex_unlock(&session->rec_mutex);
	if(session->peer) {
		/* Send event to our peer too */
		json_t *call = json_object();
		json_object_set_new(call, "videocall", json_string("event"));
		json_t *calling = json_object();
		json_object_set_new(calling, "event", json_string("hangup"));
		json_object_set_new(calling, "username", json_string(session->username));
		json_object_set_new(calling, "reason", json_string("Remote WebRTC hangup"));
		json_object_set_new(call, "result", calling);
		/* tear down the peer's PeerConnection before telling them why */
		gateway->close_pc(session->peer->handle);
		int ret = gateway->push_event(session->peer->handle, &janus_videocall_plugin, NULL, call, NULL);
		JANUS_LOG(LOG_VERB, " >> Pushing event to peer: %d (%s)\n", ret, janus_get_api_error(ret));
		json_decref(call);
		/* Also notify event handlers */
		if(notify_events && gateway->events_is_enabled()) {
			json_t *info = json_object();
			json_object_set_new(info, "event", json_string("hangup"));
			json_object_set_new(info, "reason", json_string("Remote WebRTC hangup"));
			gateway->notify_event(&janus_videocall_plugin, session->peer->handle, info);
		}
	}
	session->peer = NULL;
	/* Reset controls */
	session->has_audio = FALSE;
	session->has_video = FALSE;
	session->audio_active = TRUE;
	session->video_active = TRUE;
	session->bitrate = 0;
}
/* GThreadPool worker body: run one operation's process() vfunc over its
 * assigned ROI/level, record failure in the shared ThreadData, and
 * decrement the shared pending counter when done. */
static void
thread_process (gpointer thread_data, gpointer unused)
{
  ThreadData *td = thread_data;
  gboolean ok;

  ok = td->klass->process (td->operation, td->input, td->aux, td->aux2,
                           td->output, &td->roi, td->level);
  if (!ok)
    td->success = FALSE;

  g_atomic_int_add (td->pending, -1);
}
/**
 * gtk_widget_path_ref:
 * @path: a #GtkWidgetPath
 *
 * Atomically adds one to the reference count of @path.
 *
 * Returns: @path, unchanged, for convenience.
 *
 * Since: 3.2
 **/
GtkWidgetPath *
gtk_widget_path_ref (GtkWidgetPath *path)
{
  g_return_val_if_fail (path != NULL, path);

  g_atomic_int_add (&path->ref_count, 1);
  return path;
}