/* Synchronous wait implementation for GstTestClock (GstClockClass::wait).
 * Blocks the calling thread until the test harness processes the given
 * clock entry and moves it out of the BUSY state.  Takes the clock's
 * object lock for the duration of the wait. */
static GstClockReturn
gst_test_clock_wait (GstClock * clock, GstClockEntry * entry,
    GstClockTimeDiff * jitter)
{
  GstTestClock *test_clock = GST_TEST_CLOCK (clock);
  GstTestClockPrivate *priv = GST_TEST_CLOCK_GET_PRIVATE (test_clock);

  GST_OBJECT_LOCK (test_clock);

  GST_CAT_DEBUG_OBJECT (GST_CAT_TEST_CLOCK, test_clock,
      "requesting synchronous clock notification at %" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_CLOCK_ENTRY_TIME (entry)));

  /* An entry that was already unscheduled must not be waited on. */
  if (GST_CLOCK_ENTRY_STATUS (entry) == GST_CLOCK_UNSCHEDULED)
    goto was_unscheduled;

  /* Register the entry with the test clock unless it is already pending. */
  if (gst_test_clock_lookup_entry_context (test_clock, entry) == NULL)
    gst_test_clock_add_entry (test_clock, entry, jitter);

  GST_CLOCK_ENTRY_STATUS (entry) = GST_CLOCK_BUSY;

  /* Predicate loop guards against spurious wakeups; the processing side
   * changes the entry status and signals entry_processed_cond. */
  while (GST_CLOCK_ENTRY_STATUS (entry) == GST_CLOCK_BUSY)
    g_cond_wait (&priv->entry_processed_cond,
        GST_OBJECT_GET_LOCK (test_clock));

  GST_OBJECT_UNLOCK (test_clock);

  /* NOTE(review): the status is re-read after dropping the lock; this
   * assumes nothing mutates the entry once it leaves BUSY — confirm. */
  return GST_CLOCK_ENTRY_STATUS (entry);

  /* ERRORS */
was_unscheduled:
  {
    GST_CAT_DEBUG_OBJECT (GST_CAT_TEST_CLOCK, test_clock,
        "entry was unscheduled");
    GST_OBJECT_UNLOCK (test_clock);
    return GST_CLOCK_UNSCHEDULED;
  }
}
/* Dispatch source_func to main_context and block the calling thread until
 * the main thread has run it.  Used when an operation needs a user decision
 * (e.g. a dialog) before it can continue: the worker thread parks on a
 * condition that the UI thread sets once the answer is available.
 * Serialized through the main_context_sync lock so at most one such request
 * is pending at a time. */
static void
invoke_main_context_sync (GMainContext *main_context,
                          GSourceFunc source_func,
                          gpointer user_data)
{
  ContextInvokeData invoke_data;

  /* One caller at a time — prevents several dialogs appearing at once. */
  G_LOCK (main_context_sync);

  g_mutex_init (&invoke_data.mutex);
  g_cond_init (&invoke_data.cond);
  invoke_data.source_func = source_func;
  invoke_data.user_data = user_data;
  invoke_data.completed = FALSE;

  g_mutex_lock (&invoke_data.mutex);

  g_main_context_invoke (main_context,
                         invoke_main_context_source_func_wrapper,
                         &invoke_data);

  /* Predicate loop: tolerate spurious wakeups until the wrapper flips
   * 'completed' under the same mutex. */
  while (!invoke_data.completed)
    g_cond_wait (&invoke_data.cond, &invoke_data.mutex);

  g_mutex_unlock (&invoke_data.mutex);

  G_UNLOCK (main_context_sync);

  g_mutex_clear (&invoke_data.mutex);
  g_cond_clear (&invoke_data.cond);
}
// Detach the stream backed by |sourceBufferPrivate| from the WebKit media
// source: unlink it from the stream list under the object lock, signal EOS
// on its appsrc, release its track info (directly when on the main thread,
// otherwise dispatched to the main thread while this thread blocks on
// streamCondition), and finally schedule deferred deletion of the struct.
void PlaybackPipeline::removeSourceBuffer(RefPtr<SourceBufferPrivateGStreamer> sourceBufferPrivate)
{
    GST_DEBUG_OBJECT(m_webKitMediaSrc.get(), "Element removed from MediaSource");
    GST_OBJECT_LOCK(m_webKitMediaSrc.get());
    WebKitMediaSrcPrivate* priv = m_webKitMediaSrc->priv;
    Stream* stream = 0;
    GList *l;

    // Find and unlink the matching stream while holding the object lock.
    for (l = priv->streams; l; l = l->next) {
        Stream *tmp = static_cast<Stream*>(l->data);
        if (tmp->sourceBuffer == sourceBufferPrivate.get()) {
            stream = tmp;
            priv->streams = g_list_remove(priv->streams, stream);
            break;
        }
    }
    GST_OBJECT_UNLOCK(m_webKitMediaSrc.get());

    if (stream) {
        if (stream->appsrc)
            gst_app_src_end_of_stream(GST_APP_SRC(stream->appsrc));
        if (stream->type != Invalid) {
            if (WTF::isMainThread())
                releaseStreamTrackInfo(stream->parent, stream);
            else {
                // Hop to the main thread and wait for it to release the
                // track info before continuing.
                WTF::GMutexLocker<GMutex> lock(stream->parent->priv->streamMutex);
                GRefPtr<WebKitMediaSrc> protector(stream->parent);
                stream->parent->priv->timeoutSource.schedule([protector, stream] { releaseStreamTrackInfo(protector.get(), stream); });
                // NOTE(review): single g_cond_wait with no predicate loop —
                // a spurious wakeup would let us proceed before the lambda
                // ran; confirm the signalling side or guard with a flag.
                g_cond_wait(&stream->parent->priv->streamCondition, &stream->parent->priv->streamMutex);
            }
        }
        // Free the Stream struct after 300 ms — presumably to let in-flight
        // users of it drain first; confirm against freeStreamLater.
        g_timeout_add(300, (GSourceFunc)freeStreamLater, stream);
    }
}
/* Dedicated COM lifecycle thread for the Decklink sink (Windows).
 * Initializes COM with the multithreaded apartment model, signals waiters
 * that initialization completed, then parks until com_uninitialize is
 * signalled, at which point it uninitializes COM and reports completion.
 *
 * Fix: corrected misspelled log messages ("intialized succesfully" /
 * "unintialized succesfully"). */
static void
gst_decklink_sink_com_thread (GstDecklinkSink * sink)
{
  HRESULT res;

  g_mutex_lock (sink->com_init_lock);

  /* Initialize COM with an MTA for this process.  This thread will be the
   * first one to enter the apartment and the last one to leave it,
   * uninitializing COM properly on exit. */
  res = CoInitializeEx (0, COINIT_MULTITHREADED);
  if (res == S_FALSE)
    GST_WARNING_OBJECT (sink,
        "COM has been already initialized in the same process");
  else if (res == RPC_E_CHANGED_MODE)
    GST_WARNING_OBJECT (sink, "The concurrency model of COM has changed.");
  else
    GST_INFO_OBJECT (sink, "COM initialized successfully");

  sink->comInitialized = TRUE;

  /* Signal other threads waiting on this condition that COM was initialized */
  g_cond_signal (sink->com_initialized);

  g_mutex_unlock (sink->com_init_lock);

  /* Wait until the uninitialize condition is met to leave the COM apartment.
   * NOTE(review): single g_cond_wait with no predicate — a spurious wakeup
   * would uninitialize COM early.  Adding a guarded flag requires a struct
   * change; confirm with the thread signalling com_uninitialize. */
  g_mutex_lock (sink->com_deinit_lock);
  g_cond_wait (sink->com_uninitialize, sink->com_deinit_lock);

  CoUninitialize ();
  GST_INFO_OBJECT (sink, "COM uninitialized successfully");
  sink->comInitialized = FALSE;
  g_cond_signal (sink->com_uninitialized);
  g_mutex_unlock (sink->com_deinit_lock);
}
/* Worker thread driving a buffering xform.  Implements a small state
 * machine protected by priv->state_lock: the controlling side requests
 * WANT_BUFFER / WANT_SEEK / WANT_STOP and this thread acknowledges each
 * transition by signalling state_cond back. */
static gpointer
xmms_ringbuf_xform_thread (gpointer data)
{
  xmms_xform_t *xform = (xmms_xform_t *)data;
  xmms_ringbuf_priv_t *priv;

  priv = xmms_xform_private_data_get (xform);

  g_mutex_lock (&priv->state_lock);
  while (priv->state != STATE_WANT_STOP) {
    if (priv->state == STATE_WANT_BUFFER) {
      priv->state = STATE_BUFFERING;
      g_cond_signal (&priv->state_cond);
      /* Fill the ringbuffer with the lock dropped; re-check the state every
       * round because the controller may change it while we are unlocked.
       * (fill() presumably blocks on ringbuffer space — confirm.) */
      while (priv->state == STATE_BUFFERING) {
        g_mutex_unlock (&priv->state_lock);
        fill (xform, priv);
        g_mutex_lock (&priv->state_lock);
      }
    } else if (priv->state == STATE_WANT_SEEK) {
      seek (xform, priv);
      /* Reset pending-seek parameters, announce completion, then wait
       * until the requester consumes SEEK_DONE. */
      priv->state = STATE_SEEK_DONE;
      priv->seek_whence = XMMS_XFORM_SEEK_CUR;
      priv->seek_offset = 0;
      g_cond_signal (&priv->state_cond);
      while (priv->state == STATE_SEEK_DONE) {
        g_cond_wait (&priv->state_cond, &priv->state_lock);
      }
    }
    XMMS_DBG ("thread: state: %d", priv->state);
  }
  /* Acknowledge the stop request before exiting. */
  priv->state = STATE_IS_STOPPED;
  g_cond_signal (&priv->state_cond);
  g_mutex_unlock (&priv->state_lock);

  return NULL;
}
/* Worker thread: waits for the go/abort decision, then computes the OFA
 * audio fingerprint and publishes it under the shared mutex. */
static gpointer
xmms_ofa_thread (gpointer arg)
{
  xmms_ofa_data_t *ofa = (xmms_ofa_data_t *) arg;
  const char *print;
  gboolean aborted;

  /* Block until the producer moves us out of the WAIT state. */
  g_mutex_lock (&ofa->mutex);
  while (ofa->thread_state == XMMS_OFA_WAIT)
    g_cond_wait (&ofa->cond, &ofa->mutex);
  aborted = (ofa->thread_state == XMMS_OFA_ABORT);
  g_mutex_unlock (&ofa->mutex);

  if (aborted)
    return NULL;

  XMMS_DBG ("Calculating fingerprint... (will consume CPU)");

  /* bytes_to_read / 2 — presumably 16-bit samples, so this is the sample
   * count ofa_create_print expects; verify against the OFA API. */
  print = ofa_create_print (ofa->buf,
#if G_BYTE_ORDER == G_BIG_ENDIAN
                            OFA_BIG_ENDIAN,
#else
                            OFA_LITTLE_ENDIAN,
#endif
                            ofa->bytes_to_read / 2, 44100, 1);

  /* Publish the result and flip the state so pollers can pick it up. */
  g_mutex_lock (&ofa->mutex);
  ofa->thread_state = XMMS_OFA_DONE;
  ofa->fp = g_strdup (print);
  g_mutex_unlock (&ofa->mutex);

  XMMS_DBG ("Fingerprint calculated: %s", print);

  return NULL;
}
/* Tear down a SchroAsync: flag shutdown, wake workers until none remain
 * running, join the threads, then release every primitive and buffer. */
void
schro_async_free (SchroAsync * async)
{
  int idx;
  int thread_count;

  /* Flag shutdown and keep nudging workers until none report running. */
  g_mutex_lock (async->mutex);
  async->stop = DIE;
  while (async->n_threads_running > 0) {
    g_cond_signal (async->thread_cond);
    g_cond_wait (async->app_cond, async->mutex);
  }
  g_mutex_unlock (async->mutex);

  /* All workers have left their loops; reap the actual threads. */
  thread_count = async->n_threads;
  for (idx = 0; idx < thread_count; idx++)
    g_thread_join (async->threads[idx].thread);

  g_mutex_free (async->mutex);
  g_cond_free (async->app_cond);
  g_cond_free (async->thread_cond);
  schro_free (async->threads);
  schro_free (async);
}
/* Queue a command for the Spotify worker thread, wake it, and block until
 * the command has been executed.  Returns the command's result code. */
static int
run_spot_cmd (GstSpotSrc *spot, enum spot_cmd cmd, gint64 opt)
{
  struct spot_work *spot_work;
  int ret;

  /* create work struct */
  spot_work = g_new0 (struct spot_work, 1);
  spot_work->spot_cond = g_cond_new ();
  spot_work->spot_mutex = g_mutex_new ();
  spot_work->cmd = cmd;
  spot_work->ret = 0;
  spot_work->opt = opt;

  /* add work struct to list of works */
  g_mutex_lock (spot->process_events_mutex);
  spot->spot_works = g_list_append (spot->spot_works, spot_work);
  g_mutex_unlock (spot->process_events_mutex);

  /* wait for processing — spot_mutex is taken before broadcasting, so the
   * worker cannot signal spot_cond before we are waiting, assuming it also
   * takes spot_mutex around its signal (TODO confirm).
   * NOTE(review): single g_cond_wait with no completion flag — a spurious
   * wakeup would read a stale ret; a 'done' field in struct spot_work
   * checked in a while loop would make this robust. */
  g_mutex_lock (spot_work->spot_mutex);
  GST_CAT_DEBUG_OBJECT (gst_spot_src_debug_threads, spot, "Broadcast process_events_cond");
  g_cond_broadcast (spot->process_events_cond);
  g_cond_wait (spot_work->spot_cond, spot_work->spot_mutex);
  g_mutex_unlock (spot_work->spot_mutex);

  /* save return value */
  ret = spot_work->ret;

  /* remove work struct */
  g_cond_free (spot_work->spot_cond);
  g_mutex_free (spot_work->spot_mutex);
  g_free (spot_work);

  return ret;
}
/* Thread body for the multi-threaded template formatting stress test.
 * Unpacks (message, template, expected output) from the argument array,
 * waits for the shared start signal, then formats the template 10000 times
 * and verifies every result against the expected string. */
static gpointer
format_template_thread(gpointer s)
{
  gpointer *argv = (gpointer *) s;
  LogMessage *msg = argv[0];
  LogTemplate *templ = argv[1];
  const gchar *expected = argv[2];
  GString *formatted;
  gint iter;

  /* Start barrier: all worker threads begin formatting together. */
  g_mutex_lock(thread_lock);
  while (!thread_start)
    g_cond_wait(thread_ping, thread_lock);
  g_mutex_unlock(thread_lock);

  formatted = g_string_sized_new(0);
  for (iter = 0; iter < 10000; iter++)
    {
      log_template_format(templ, msg, NULL, LTZ_SEND, 5555, NULL, formatted);
      assert_string(formatted->str, expected, "multi-threaded formatting yielded invalid result (iteration: %d)", iter);
    }
  g_string_free(formatted, TRUE);
  return NULL;
}
/* Block until the reply for the queued request entry |ent| arrives, then
 * return its receive buffer (may be NULL, e.g. on connection loss).
 * Two strategies: with a dedicated I/O thread, sleep on the per-thread
 * condition while draining this thread's own work queue; otherwise iterate
 * the link main loop, switching strategies if threaded I/O gets enabled
 * mid-wait. */
GIOPRecvBuffer *
giop_recv_buffer_get (GIOPMessageQueueEntry *ent)
{
  GIOPThread *tdata = giop_thread_self ();

 thread_switch:
  if (giop_thread_io ()) {
    ent_lock (ent);
    for (; !check_got (ent); ) {
      if (!giop_thread_queue_empty_T (tdata)) {
        /* Work is pending for this thread: process it instead of sleeping,
         * which keeps re-entrant invocations from deadlocking. */
        ent_unlock (ent);
        giop_thread_queue_process (tdata);
        ent_lock (ent);
      } else
        g_cond_wait (tdata->incoming, tdata->lock);
    }
    ent_unlock (ent);
  } else {
    /* non-threaded */
    while (!ent->buffer && ent->cnx &&
           (ent->cnx->parent.status != LINK_DISCONNECTED) &&
           !giop_thread_io ())
      link_main_iteration (TRUE);
    /* Threaded I/O may have been switched on under our feet: retry. */
    if (giop_thread_io ())
      goto thread_switch;
  }

  giop_thread_queue_tail_wakeup (tdata);
  giop_recv_list_destroy_queue_entry (ent);

  return ent->buffer;
}
/* Run the Lua closure stored in upvalue 1, making sure it executes on the
 * GTK main thread.  When already on the GUI thread, run it directly;
 * otherwise dispatch it to the default main context and block until the
 * callback reports completion.  Returns the number of Lua results on
 * success, or raises the error left on the stack by the callback.
 *
 * Fix: the GCond was initialized with g_cond_init() but never released;
 * added the matching g_cond_clear() (GLib requires pairing them). */
int dt_lua_gtk_wrap(lua_State*L)
{
  lua_pushvalue(L,lua_upvalueindex(1));
  lua_insert(L,1);
  if(pthread_equal(darktable.control->gui_thread, pthread_self())) {
    return dt_lua_do_chunk_raise(L,lua_gettop(L)-1,LUA_MULTRET);
  } else {
    gtk_wrap_communication communication;
    g_mutex_init(&communication.end_mutex);
    g_cond_init(&communication.end_cond);
    communication.L = L;
    g_mutex_lock(&communication.end_mutex);
    g_main_context_invoke(NULL,dt_lua_gtk_wrap_callback,&communication);
    /* NOTE(review): single g_cond_wait with no guarded predicate — a
     * spurious wakeup would read retval before the callback ran; a 'done'
     * flag in gtk_wrap_communication would make this robust — confirm. */
    g_cond_wait(&communication.end_cond,&communication.end_mutex);
    g_mutex_unlock(&communication.end_mutex);
    g_mutex_clear(&communication.end_mutex);
    g_cond_clear(&communication.end_cond);
    if(communication.retval == LUA_OK) {
      return lua_gettop(L);
    } else {
      return lua_error(L);
    }
  }
}
/* Marshal one PAM conversation message to the GUI (main) thread via an
 * idle callback and block this auth thread until a response is available.
 * Returns TRUE when the handler did NOT request an interrupt of the PAM
 * stack (i.e. authentication should continue). */
static gboolean
gs_auth_run_message_handler (struct pam_closure *c,
                             GSAuthMessageStyle style,
                             const char *msg,
                             char **resp)
{
  GsAuthMessageHandlerData data;

  data.closure = c;
  data.style = style;
  data.msg = msg;
  data.resp = resp;
  data.should_interrupt_stack = TRUE;

  g_mutex_lock (message_handler_mutex);

  /* Queue the callback in the gui (the main) thread */
  g_idle_add ((GSourceFunc) gs_auth_queued_message_handler, &data);

  if (gs_auth_get_verbose ()) {
    g_message ("Waiting for respose to message style %d: '%s'", style, msg);
  }

  /* Wait for the response.
   * NOTE(review): single g_cond_wait with no predicate loop — a spurious
   * wakeup would return before the idle handler ran, and since |data| is
   * on this stack the handler would then write into a dead frame.  A
   * 'handled' flag checked in a while loop would be safer — confirm with
   * the code that signals message_handled_condition. */
  g_cond_wait (message_handled_condition, message_handler_mutex);
  g_mutex_unlock (message_handler_mutex);

  if (gs_auth_get_verbose ()) {
    g_message ("Got respose to message style %d: interrupt:%d", style, data.should_interrupt_stack);
  }

  return data.should_interrupt_stack == FALSE;
}
/**
 * Same as #xmms_ringbuf_read but blocks until you have all the data you want.
 *
 * @sa xmms_ringbuf_read
 */
guint
xmms_ringbuf_read_wait (xmms_ringbuf_t *ringbuf, gpointer data, guint len,
                        GMutex *mtx)
{
	guint8 *dest = data;
	guint total = 0;

	g_return_val_if_fail (ringbuf, 0);
	g_return_val_if_fail (data, 0);
	g_return_val_if_fail (len > 0, 0);
	g_return_val_if_fail (mtx, 0);

	for (;;) {
		guint chunk = xmms_ringbuf_read (ringbuf, dest + total, len - total);

		total += chunk;

		/* Done when everything was read, or no more data will ever
		 * arrive (end of stream). */
		if (total == len || ringbuf->eos)
			break;

		/* Sleep only when the buffer was completely empty;
		 * used_cond is presumably signalled by the writer when new
		 * data arrives — confirm. */
		if (!chunk)
			g_cond_wait (ringbuf->used_cond, mtx);
	}

	return total;
}
/* Interactive ICE example worker thread: creates a libnice agent, gathers
 * local candidates, exchanges candidate lines with the remote peer over
 * stdin/stdout, waits for negotiation, then relays stdin lines to the
 * remote until Ctrl-D.  Waits on the global gather/negotiate condition
 * variables that the agent signal callbacks set. */
static void *
example_thread(void *data)
{
  NiceAgent *agent;
  NiceCandidate *local, *remote;
  GIOChannel* io_stdin;
  guint stream_id;
  gchar *line = NULL;
  int rval;

#ifdef G_OS_WIN32
  io_stdin = g_io_channel_win32_new_fd(_fileno(stdin));
#else
  io_stdin = g_io_channel_unix_new(fileno(stdin));
#endif
  /* Non-blocking stdin so the read loops can poll exit_thread. */
  g_io_channel_set_flags (io_stdin, G_IO_FLAG_NONBLOCK, NULL);

  // Create the nice agent
  agent = nice_agent_new(g_main_loop_get_context (gloop), NICE_COMPATIBILITY_RFC5245);
  if (agent == NULL)
    g_error("Failed to create agent");

  // Set the STUN settings and controlling mode
  if (stun_addr) {
    g_object_set(agent, "stun-server", stun_addr, NULL);
    g_object_set(agent, "stun-server-port", stun_port, NULL);
  }
  g_object_set(agent, "controlling-mode", controlling, NULL);

  // Connect to the signals
  g_signal_connect(agent, "candidate-gathering-done", G_CALLBACK(cb_candidate_gathering_done), NULL);
  g_signal_connect(agent, "new-selected-pair", G_CALLBACK(cb_new_selected_pair), NULL);
  g_signal_connect(agent, "component-state-changed", G_CALLBACK(cb_component_state_changed), NULL);

  // Create a new stream with one component
  stream_id = nice_agent_add_stream(agent, 1);
  if (stream_id == 0)
    g_error("Failed to add stream");

  // Attach to the component to receive the data
  // Without this call, candidates cannot be gathered
  nice_agent_attach_recv(agent, stream_id, 1, g_main_loop_get_context (gloop), cb_nice_recv, NULL);

  // Start gathering local candidates
  if (!nice_agent_gather_candidates(agent, stream_id))
    g_error("Failed to start candidate gathering");

  g_debug("waiting for candidate-gathering-done signal...");

  /* Predicate loop; cb_candidate_gathering_done signals gather_cond. */
  g_mutex_lock(&gather_mutex);
  while (!exit_thread && !candidate_gathering_done)
    g_cond_wait(&gather_cond, &gather_mutex);
  g_mutex_unlock(&gather_mutex);
  if (exit_thread)
    goto end;

  // Candidate gathering is done. Send our local candidates on stdout
  printf("Copy this line to remote client:\n");
  printf("\n ");
  print_local_data(agent, stream_id, 1);
  printf("\n");

  // Listen on stdin for the remote candidate list
  printf("Enter remote data (single line, no wrapping):\n");
  printf("> ");
  fflush (stdout);
  while (!exit_thread) {
    GIOStatus s = g_io_channel_read_line (io_stdin, &line, NULL, NULL, NULL);
    if (s == G_IO_STATUS_NORMAL) {
      // Parse remote candidate list and set it on the agent
      rval = parse_remote_data(agent, stream_id, 1, line);
      if (rval == EXIT_SUCCESS) {
        g_free (line);
        break;
      } else {
        fprintf(stderr, "ERROR: failed to parse remote data\n");
        printf("Enter remote data (single line, no wrapping):\n");
        printf("> ");
        fflush (stdout);
      }
      g_free (line);
    } else if (s == G_IO_STATUS_AGAIN) {
      /* Non-blocking channel had no data yet; back off briefly. */
      g_usleep (100000);
    }
  }

  g_debug("waiting for state READY or FAILED signal...");
  g_mutex_lock(&negotiate_mutex);
  while (!exit_thread && !negotiation_done)
    g_cond_wait(&negotiate_cond, &negotiate_mutex);
  g_mutex_unlock(&negotiate_mutex);
  if (exit_thread)
    goto end;

  // Get current selected candidate pair and print IP address used
  if (nice_agent_get_selected_pair (agent, stream_id, 1, &local, &remote)) {
    gchar ipaddr[INET6_ADDRSTRLEN];

    nice_address_to_string(&local->addr, ipaddr);
    printf("\nNegotiation complete: ([%s]:%d,", ipaddr, nice_address_get_port(&local->addr));
    nice_address_to_string(&remote->addr, ipaddr);
    printf(" [%s]:%d)\n", ipaddr, nice_address_get_port(&remote->addr));
  }

  // Listen to stdin and send data written to it
  printf("\nSend lines to remote (Ctrl-D to quit):\n");
  printf("> ");
  fflush (stdout);
  while (!exit_thread) {
    GIOStatus s = g_io_channel_read_line (io_stdin, &line, NULL, NULL, NULL);

    if (s == G_IO_STATUS_NORMAL) {
      nice_agent_send(agent, stream_id, 1, strlen(line), line);
      g_free (line);
      printf("> ");
      fflush (stdout);
    } else if (s == G_IO_STATUS_AGAIN) {
      g_usleep (100000);
    } else {
      // Ctrl-D was pressed.
      nice_agent_send(agent, stream_id, 1, 1, "\0");
      break;
    }
  }

end:
  g_io_channel_unref (io_stdin);
  g_object_unref(agent);
  g_main_loop_quit (gloop);

  return NULL;
}
/* Chain function for the video sink pad: blends the incoming video buffer
 * with the current mask buffer and pushes the result downstream.
 * Blocks on mask_cond until a mask is available or shutdown is requested.
 *
 * Fix: the wait on mask_cond used a single 'if' — a spurious wakeup with
 * the mask still unset made the function fall through to the shutdown path
 * and return GST_FLOW_FLUSHING incorrectly.  Replaced with a predicate
 * loop over (mask == NULL && !shutdown). */
static GstFlowReturn
gst_shape_wipe_video_sink_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buffer)
{
  GstShapeWipe *self = GST_SHAPE_WIPE (parent);
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *mask = NULL, *outbuf = NULL;
  GstClockTime timestamp;
  gboolean new_outbuf = FALSE;
  GstVideoFrame inframe, outframe, maskframe;

  if (G_UNLIKELY (GST_VIDEO_INFO_FORMAT (&self->vinfo) ==
          GST_VIDEO_FORMAT_UNKNOWN))
    goto not_negotiated;

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  timestamp =
      gst_segment_to_stream_time (&self->segment, GST_FORMAT_TIME, timestamp);

  /* Sync controlled properties (e.g. mask position) to this timestamp. */
  if (GST_CLOCK_TIME_IS_VALID (timestamp))
    gst_object_sync_values (GST_OBJECT (self), timestamp);

  GST_LOG_OBJECT (self,
      "Blending buffer with timestamp %" GST_TIME_FORMAT " at position %f",
      GST_TIME_ARGS (timestamp), self->mask_position);

  g_mutex_lock (&self->mask_mutex);
  if (self->shutdown)
    goto shutdown;

  /* Wait for a mask in a predicate loop so spurious wakeups are ignored;
   * the mask pad chain signals mask_cond when a mask arrives, and shutdown
   * also signals it to abort the wait. */
  while (self->mask == NULL && !self->shutdown)
    g_cond_wait (&self->mask_cond, &self->mask_mutex);

  if (self->mask == NULL || self->shutdown) {
    goto shutdown;
  } else {
    mask = gst_buffer_ref (self->mask);
  }
  g_mutex_unlock (&self->mask_mutex);

  if (!gst_shape_wipe_do_qos (self, GST_BUFFER_TIMESTAMP (buffer)))
    goto qos;

  /* Try to blend inplace, if it's not possible
   * get a new buffer from downstream. */
  if (!gst_buffer_is_writable (buffer)) {
    outbuf = gst_buffer_new_allocate (NULL, gst_buffer_get_size (buffer), NULL);
    gst_buffer_copy_into (outbuf, buffer, GST_BUFFER_COPY_METADATA, 0, -1);
    new_outbuf = TRUE;
  } else {
    outbuf = buffer;
  }

  gst_video_frame_map (&inframe, &self->vinfo, buffer,
      new_outbuf ? GST_MAP_READ : GST_MAP_READWRITE);
  gst_video_frame_map (&outframe, &self->vinfo, outbuf,
      new_outbuf ? GST_MAP_WRITE : GST_MAP_READWRITE);
  gst_video_frame_map (&maskframe, &self->minfo, mask, GST_MAP_READ);

  /* Pick the blend routine from the negotiated video format and the mask
   * bit depth (8 or 16 bit grayscale). */
  switch (GST_VIDEO_INFO_FORMAT (&self->vinfo)) {
    case GST_VIDEO_FORMAT_AYUV:
    case GST_VIDEO_FORMAT_ARGB:
    case GST_VIDEO_FORMAT_ABGR:
      if (self->mask_bpp == 16)
        gst_shape_wipe_blend_argb_16 (self, &inframe, &maskframe, &outframe);
      else
        gst_shape_wipe_blend_argb_8 (self, &inframe, &maskframe, &outframe);
      break;
    case GST_VIDEO_FORMAT_BGRA:
    case GST_VIDEO_FORMAT_RGBA:
      if (self->mask_bpp == 16)
        gst_shape_wipe_blend_bgra_16 (self, &inframe, &maskframe, &outframe);
      else
        gst_shape_wipe_blend_bgra_8 (self, &inframe, &maskframe, &outframe);
      break;
    default:
      g_assert_not_reached ();
      break;
  }

  gst_video_frame_unmap (&outframe);
  gst_video_frame_unmap (&inframe);
  gst_video_frame_unmap (&maskframe);

  gst_buffer_unref (mask);
  if (new_outbuf)
    gst_buffer_unref (buffer);

  ret = gst_pad_push (self->srcpad, outbuf);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto push_failed;

  return ret;

  /* Errors */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "No valid caps yet");
    gst_buffer_unref (buffer);
    return GST_FLOW_NOT_NEGOTIATED;
  }
shutdown:
  {
    GST_DEBUG_OBJECT (self, "Shutting down");
    gst_buffer_unref (buffer);
    return GST_FLOW_FLUSHING;
  }
qos:
  {
    GST_DEBUG_OBJECT (self, "Dropping buffer because of QoS");
    gst_buffer_unref (buffer);
    gst_buffer_unref (mask);
    return GST_FLOW_OK;
  }
push_failed:
  {
    GST_ERROR_OBJECT (self, "Pushing buffer downstream failed: %s",
        gst_flow_get_name (ret));
    return ret;
  }
}
/* Render one buffer into the shared-memory pipe.
 * Runs under the object lock; every wait on self->cond re-checks
 * self->unlock and bounces through gst_base_sink_wait_preroll() so
 * flushing and state changes can interrupt the waits. */
static GstFlowReturn
gst_shm_sink_render (GstBaseSink * bsink, GstBuffer * buf)
{
  GstShmSink *self = GST_SHM_SINK (bsink);
  int rv = 0;
  GstMapInfo map;
  gboolean need_new_memory = FALSE;
  GstFlowReturn ret = GST_FLOW_OK;
  GstMemory *memory = NULL;
  GstBuffer *sendbuf = NULL;
  gsize written_bytes;

  GST_OBJECT_LOCK (self);

  /* Optionally block until at least one client is connected. */
  while (self->wait_for_connection && !self->clients) {
    g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
    if (self->unlock) {
      GST_OBJECT_UNLOCK (self);
      ret = gst_base_sink_wait_preroll (bsink);
      if (ret == GST_FLOW_OK)
        GST_OBJECT_LOCK (self);
      else
        return ret;
    }
  }

  /* Block until the element reports it can render this timestamp. */
  while (!gst_shm_sink_can_render (self, GST_BUFFER_TIMESTAMP (buf))) {
    g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
    if (self->unlock) {
      GST_OBJECT_UNLOCK (self);
      ret = gst_base_sink_wait_preroll (bsink);
      if (ret == GST_FLOW_OK)
        GST_OBJECT_LOCK (self);
      else
        return ret;
    }
  }

  /* The buffer can only go out zero-copy if it is a single GstMemory that
   * came from this sink's own shm allocator; otherwise a copy is needed. */
  if (gst_buffer_n_memory (buf) > 1) {
    GST_LOG_OBJECT (self, "Buffer %p has %d GstMemory, we only support a single" " one, need to do a memcpy", buf, gst_buffer_n_memory (buf));
    need_new_memory = TRUE;
  } else {
    memory = gst_buffer_peek_memory (buf, 0);
    if (memory->allocator != GST_ALLOCATOR (self->allocator)) {
      need_new_memory = TRUE;
      GST_LOG_OBJECT (self, "Memory in buffer %p was not allocated by " "%" GST_PTR_FORMAT ", will memcpy", buf, memory->allocator);
    }
  }

  if (need_new_memory) {
    /* A buffer bigger than the whole shm area can never be sent. */
    if (gst_buffer_get_size (buf) > sp_writer_get_max_buf_size (self->pipe)) {
      gsize area_size = sp_writer_get_max_buf_size (self->pipe);
      GST_ELEMENT_ERROR (self, RESOURCE, NO_SPACE_LEFT, (NULL), ("Shared memory area of size %" G_GSIZE_FORMAT " is smaller than" "buffer of size %" G_GSIZE_FORMAT, area_size, gst_buffer_get_size (buf)));
      goto error;
    }

    /* Wait for shm space to become free. */
    while ((memory = gst_shm_sink_allocator_alloc_locked (self->allocator, gst_buffer_get_size (buf), &self->params)) == NULL) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock) {
        GST_OBJECT_UNLOCK (self);
        ret = gst_base_sink_wait_preroll (bsink);
        if (ret == GST_FLOW_OK)
          GST_OBJECT_LOCK (self);
        else
          return ret;
      }
    }

    /* Re-check the connection requirement — it may have changed while the
     * lock was dropped above.  On this early-return path the freshly
     * allocated memory must be released. */
    while (self->wait_for_connection && !self->clients) {
      g_cond_wait (&self->cond, GST_OBJECT_GET_LOCK (self));
      if (self->unlock) {
        GST_OBJECT_UNLOCK (self);
        ret = gst_base_sink_wait_preroll (bsink);
        if (ret == GST_FLOW_OK) {
          GST_OBJECT_LOCK (self);
        } else {
          gst_memory_unref (memory);
          return ret;
        }
      }
    }

    if (!gst_memory_map (memory, &map, GST_MAP_WRITE)) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to map memory"));
      goto error;
    }

    GST_DEBUG_OBJECT (self, "Copying %" G_GSIZE_FORMAT " bytes into map of size %" G_GSIZE_FORMAT " bytes.", gst_buffer_get_size (buf), map.size);
    written_bytes = gst_buffer_extract (buf, 0, map.data, map.size);
    GST_DEBUG_OBJECT (self, "Copied %" G_GSIZE_FORMAT " bytes.", written_bytes);
    gst_memory_unmap (memory, &map);

    /* Wrap the shm memory in a new buffer, carrying over metadata. */
    sendbuf = gst_buffer_new ();
    if (!gst_buffer_copy_into (sendbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1)) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to copy data into send buffer"));
      gst_buffer_unref (sendbuf);
      goto error;
    }
    gst_buffer_append_memory (sendbuf, memory);
  } else {
    sendbuf = gst_buffer_ref (buf);
  }

  if (!gst_buffer_map (sendbuf, &map, GST_MAP_READ)) {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to map data into send buffer"));
    goto error;
  }

  /* Make the memory readonly as of now as we've sent it to the other side
   * We know it's not mapped for writing anywhere as we just mapped it for
   * reading */
  rv = sp_writer_send_buf (self->pipe, (char *) map.data, map.size, sendbuf);
  if (rv == -1) {
    GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to send data over SHM"));
    gst_buffer_unmap (sendbuf, &map);
    goto error;
  }

  gst_buffer_unmap (sendbuf, &map);

  GST_OBJECT_UNLOCK (self);

  /* rv == 0: nothing was sent (no clients), so drop our reference. */
  if (rv == 0) {
    GST_DEBUG_OBJECT (self, "No clients connected, unreffing buffer");
    gst_buffer_unref (sendbuf);
  }

  return ret;

error:
  GST_OBJECT_UNLOCK (self);
  return GST_FLOW_ERROR;
}
/* GstBaseSrc::create implementation for appsrc: pop the next buffer the
 * application queued, blocking on priv->cond until data arrives, EOS is
 * reached, or flushing starts.  In random-access mode a changed offset
 * first triggers a "seek-data" emission. */
static GstFlowReturn
gst_app_src_create (GstBaseSrc * bsrc, guint64 offset, guint size,
    GstBuffer ** buf)
{
  GstAppSrc *appsrc = GST_APP_SRC_CAST (bsrc);
  GstAppSrcPrivate *priv = appsrc->priv;
  GstFlowReturn ret;

  GST_OBJECT_LOCK (appsrc);
  /* Keep the segment duration in sync with the app-provided size and post
   * a duration-changed message when it changed (bytes format only). */
  if (G_UNLIKELY (priv->size != bsrc->segment.duration &&
          bsrc->segment.format == GST_FORMAT_BYTES)) {
    GST_DEBUG_OBJECT (appsrc,
        "Size changed from %" G_GINT64_FORMAT " to %" G_GINT64_FORMAT,
        bsrc->segment.duration, priv->size);
    bsrc->segment.duration = priv->size;
    GST_OBJECT_UNLOCK (appsrc);

    gst_element_post_message (GST_ELEMENT (appsrc),
        gst_message_new_duration_changed (GST_OBJECT (appsrc)));
  } else {
    GST_OBJECT_UNLOCK (appsrc);
  }

  g_mutex_lock (&priv->mutex);
  /* check flushing first */
  if (G_UNLIKELY (priv->flushing))
    goto flushing;

  if (priv->stream_type == GST_APP_STREAM_TYPE_RANDOM_ACCESS) {
    /* if we are dealing with a random-access stream, issue a seek if the offset
     * changed. */
    if (G_UNLIKELY (priv->offset != offset)) {
      gboolean res;

      /* do the seek */
      res = gst_app_src_emit_seek (appsrc, offset);

      if (G_UNLIKELY (!res))
        /* failing to seek is fatal */
        goto seek_error;

      priv->offset = offset;
      priv->is_eos = FALSE;
    }
  }

  while (TRUE) {
    /* return data as long as we have some */
    if (!g_queue_is_empty (priv->queue)) {
      guint buf_size;

      if (priv->new_caps) {
        gst_app_src_do_negotiate (bsrc);
        priv->new_caps = FALSE;
      }

      *buf = g_queue_pop_head (priv->queue);
      buf_size = gst_buffer_get_size (*buf);

      GST_DEBUG_OBJECT (appsrc, "we have buffer %p of size %u", *buf, buf_size);

      priv->queued_bytes -= buf_size;

      /* only update the offset when in random_access mode */
      if (priv->stream_type == GST_APP_STREAM_TYPE_RANDOM_ACCESS)
        priv->offset += buf_size;

      /* signal that we removed an item */
      g_cond_broadcast (&priv->cond);

      /* see if we go lower than the empty-percent */
      if (priv->min_percent && priv->max_bytes) {
        if (priv->queued_bytes * 100 / priv->max_bytes <= priv->min_percent)
          /* ignore flushing state, we got a buffer and we will return it now.
           * Errors will be handled in the next round */
          gst_app_src_emit_need_data (appsrc, size);
      }
      ret = GST_FLOW_OK;
      break;
    } else {
      gst_app_src_emit_need_data (appsrc, size);

      /* we can be flushing now because we released the lock above */
      if (G_UNLIKELY (priv->flushing))
        goto flushing;

      /* if we have a buffer now, continue the loop and try to return it. In
       * random-access mode (where a buffer is normally pushed in the above
       * signal) we can still be empty because the pushed buffer got flushed or
       * when the application pushes the requested buffer later, we support both
       * possibilities. */
      if (!g_queue_is_empty (priv->queue))
        continue;

      /* no buffer yet, maybe we are EOS, if not, block for more data. */
    }

    /* check EOS */
    if (G_UNLIKELY (priv->is_eos))
      goto eos;

    /* nothing to return, wait a while for new data or flushing. */
    g_cond_wait (&priv->cond, &priv->mutex);
  }
  g_mutex_unlock (&priv->mutex);

  return ret;

  /* ERRORS */
flushing:
  {
    GST_DEBUG_OBJECT (appsrc, "we are flushing");
    g_mutex_unlock (&priv->mutex);
    return GST_FLOW_FLUSHING;
  }
eos:
  {
    GST_DEBUG_OBJECT (appsrc, "we are EOS");
    g_mutex_unlock (&priv->mutex);
    return GST_FLOW_EOS;
  }
seek_error:
  {
    g_mutex_unlock (&priv->mutex);
    GST_ELEMENT_ERROR (appsrc, RESOURCE, READ, ("failed to seek"),
        GST_ERROR_SYSTEM);
    return GST_FLOW_ERROR;
  }
}
/* Queue a buffer on the appsrc.  When the queue holds at least max-bytes,
 * emits "enough-data" (once per call) and, if "block" is set, waits on
 * priv->cond until space frees up.  With steal_ref the caller's reference
 * is taken over; otherwise an extra ref is added.  Returns FLUSHING/EOS
 * when the source cannot accept data. */
static GstFlowReturn
gst_app_src_push_buffer_full (GstAppSrc * appsrc, GstBuffer * buffer,
    gboolean steal_ref)
{
  gboolean first = TRUE;
  GstAppSrcPrivate *priv;

  g_return_val_if_fail (GST_IS_APP_SRC (appsrc), GST_FLOW_ERROR);
  g_return_val_if_fail (GST_IS_BUFFER (buffer), GST_FLOW_ERROR);

  priv = appsrc->priv;

  g_mutex_lock (&priv->mutex);

  while (TRUE) {
    /* can't accept buffers when we are flushing or EOS */
    if (priv->flushing)
      goto flushing;

    if (priv->is_eos)
      goto eos;

    if (priv->max_bytes && priv->queued_bytes >= priv->max_bytes) {
      GST_DEBUG_OBJECT (appsrc,
          "queue filled (%" G_GUINT64_FORMAT " >= %" G_GUINT64_FORMAT ")",
          priv->queued_bytes, priv->max_bytes);

      if (first) {
        gboolean emit;

        emit = priv->emit_signals;
        /* only signal on the first push */
        g_mutex_unlock (&priv->mutex);

        /* Callback takes precedence over the signal; both notify the app
         * that the queue is full. */
        if (priv->callbacks.enough_data)
          priv->callbacks.enough_data (appsrc, priv->user_data);
        else if (emit)
          g_signal_emit (appsrc, gst_app_src_signals[SIGNAL_ENOUGH_DATA], 0,
              NULL);

        g_mutex_lock (&priv->mutex);
        /* continue to check for flushing/eos after releasing the lock */
        first = FALSE;
        continue;
      }
      if (priv->block) {
        GST_DEBUG_OBJECT (appsrc, "waiting for free space");
        /* we are filled, wait until a buffer gets popped or when we
         * flush. */
        g_cond_wait (&priv->cond, &priv->mutex);
      } else {
        /* no need to wait for free space, we just pump more data into the
         * queue hoping that the caller reacts to the enough-data signal and
         * stops pushing buffers. */
        break;
      }
    } else
      break;
  }

  GST_DEBUG_OBJECT (appsrc, "queueing buffer %p", buffer);
  if (!steal_ref)
    gst_buffer_ref (buffer);
  g_queue_push_tail (priv->queue, buffer);
  priv->queued_bytes += gst_buffer_get_size (buffer);
  /* Wake any consumer blocked in create() waiting for data. */
  g_cond_broadcast (&priv->cond);
  g_mutex_unlock (&priv->mutex);

  return GST_FLOW_OK;

  /* ERRORS */
flushing:
  {
    GST_DEBUG_OBJECT (appsrc, "refuse buffer %p, we are flushing", buffer);
    if (steal_ref)
      gst_buffer_unref (buffer);
    g_mutex_unlock (&priv->mutex);
    return GST_FLOW_FLUSHING;
  }
eos:
  {
    GST_DEBUG_OBJECT (appsrc, "refuse buffer %p, we are EOS", buffer);
    if (steal_ref)
      gst_buffer_unref (buffer);
    g_mutex_unlock (&priv->mutex);
    return GST_FLOW_EOS;
  }
}
/* Main loop of an audio output thread: executes commands posted by the
 * player (enable/disable/open/close/pause/drain/cancel/kill) and plays
 * chunks from the music pipe when idle.  Runs with ao->mutex held except
 * around plugin calls that may block. */
static gpointer
audio_output_task(gpointer arg)
{
	struct audio_output *ao = arg;

	g_mutex_lock(ao->mutex);

	while (1) {
		switch (ao->command) {
		case AO_COMMAND_NONE:
			break;

		case AO_COMMAND_ENABLE:
			ao_enable(ao);
			ao_command_finished(ao);
			break;

		case AO_COMMAND_DISABLE:
			ao_disable(ao);
			ao_command_finished(ao);
			break;

		case AO_COMMAND_OPEN:
			ao_open(ao);
			ao_command_finished(ao);
			break;

		case AO_COMMAND_REOPEN:
			ao_reopen(ao);
			ao_command_finished(ao);
			break;

		case AO_COMMAND_CLOSE:
			assert(ao->open);
			assert(ao->pipe != NULL);

			ao_close(ao, false);
			ao_command_finished(ao);
			break;

		case AO_COMMAND_PAUSE:
			if (!ao->open) {
				/* the output has failed after
				   audio_output_all_pause() has
				   submitted the PAUSE command; bail
				   out */
				ao_command_finished(ao);
				break;
			}

			ao_pause(ao);
			/* don't "break" here: this might cause
			   ao_play() to be called when command==CLOSE
			   ends the paused state - "continue" checks
			   the new command first */
			continue;

		case AO_COMMAND_DRAIN:
			if (ao->open) {
				assert(ao->chunk == NULL);
				assert(music_pipe_peek(ao->pipe) == NULL);

				/* drop the lock while the plugin drains;
				   the call may block. */
				g_mutex_unlock(ao->mutex);
				ao_plugin_drain(ao->plugin, ao->data);
				g_mutex_lock(ao->mutex);
			}

			ao_command_finished(ao);
			continue;

		case AO_COMMAND_CANCEL:
			ao->chunk = NULL;

			if (ao->open) {
				/* drop the lock while the plugin cancels;
				   the call may block. */
				g_mutex_unlock(ao->mutex);
				ao_plugin_cancel(ao->plugin, ao->data);
				g_mutex_lock(ao->mutex);
			}

			ao_command_finished(ao);
			continue;

		case AO_COMMAND_KILL:
			ao->chunk = NULL;
			ao_command_finished(ao);
			g_mutex_unlock(ao->mutex);
			return NULL;
		}

		if (ao->open && ao->allow_play && ao_play(ao))
			/* don't wait for an event if there are more
			   chunks in the pipe */
			continue;

		/* Sleep only while no new command is pending; ao_play() and
		   command posting signal ao->cond. */
		if (ao->command == AO_COMMAND_NONE)
			g_cond_wait(ao->cond, ao->mutex);
	}
}
/*
 * Core of the multiqueue output-order test.
 *
 * Builds a multiqueue with NPADS request pads; the first @n_linked outputs
 * accept buffers (FLOW_OK) and the remaining ones report NOT_LINKED.
 * NBUFFERS sequence-numbered buffers are pushed in a fixed pattern and the
 * dummy sink pads (mq_dummypad_chain) verify that buffers arriving on
 * not-linked pads are delivered only after earlier buffers on the linked
 * pads.
 *
 * Fix: the EOS-wait loop and the cleanup loop used a magic literal '5'
 * instead of NPADS; they now use NPADS so they cannot silently drift.
 *
 * @param n_linked  number of outputs (0..NPADS) that behave as linked
 */
static void
run_output_order_test (gint n_linked)
{
  GstElement *pipe;
  GstElement *mq;
  GstPad *inputpads[5];
  GstPad *sinkpads[5];
  struct PadData pad_data[5];
  guint32 max_linked_id;
  guint32 eos_seen;
  GMutex *mutex;
  GCond *cond;
  gint i;
  const gint NPADS = 5;
  const gint NBUFFERS = 1000;

  mutex = g_mutex_new ();
  cond = g_cond_new ();

  pipe = gst_bin_new ("testbin");

  mq = gst_element_factory_make ("multiqueue", NULL);
  fail_unless (mq != NULL);
  gst_bin_add (GST_BIN (pipe), mq);

  /* No limits, so the queue never throttles the pushing thread */
  g_object_set (mq,
      "max-size-bytes", (guint) 0,
      "max-size-buffers", (guint) 0,
      "max-size-time", (guint64) 0,
      "extra-size-bytes", (guint) 0,
      "extra-size-buffers", (guint) 0,
      "extra-size-time", (guint64) 0, NULL);

  /* Construct NPADS dummy output pads. The first 'n_linked' return FLOW_OK,
   * the rest return NOT_LINKED. The not-linked ones check the expected
   * ordering of output buffers */
  for (i = 0; i < NPADS; i++) {
    GstPad *mq_srcpad, *mq_sinkpad;
    gchar *name;

    name = g_strdup_printf ("dummysrc%d", i);
    inputpads[i] = gst_pad_new (name, GST_PAD_SRC);
    g_free (name);
    gst_pad_set_getcaps_function (inputpads[i], mq_dummypad_getcaps);

    mq_sinkpad = gst_element_get_request_pad (mq, "sink%d");
    fail_unless (mq_sinkpad != NULL);
    gst_pad_link (inputpads[i], mq_sinkpad);

    gst_pad_set_active (inputpads[i], TRUE);

    mq_srcpad = mq_sinkpad_to_srcpad (mq, mq_sinkpad);

    name = g_strdup_printf ("dummysink%d", i);
    sinkpads[i] = gst_pad_new (name, GST_PAD_SINK);
    g_free (name);
    gst_pad_set_chain_function (sinkpads[i], mq_dummypad_chain);
    gst_pad_set_event_function (sinkpads[i], mq_dummypad_event);
    gst_pad_set_getcaps_function (sinkpads[i], mq_dummypad_getcaps);

    pad_data[i].pad_num = i;
    pad_data[i].max_linked_id_ptr = &max_linked_id;
    pad_data[i].eos_count_ptr = &eos_seen;
    pad_data[i].is_linked = (i < n_linked ? TRUE : FALSE);
    pad_data[i].n_linked = n_linked;
    pad_data[i].cond = cond;
    pad_data[i].mutex = mutex;
    pad_data[i].first_buf = TRUE;
    gst_pad_set_element_private (sinkpads[i], pad_data + i);

    gst_pad_link (mq_srcpad, sinkpads[i]);
    gst_pad_set_active (sinkpads[i], TRUE);

    gst_object_unref (mq_sinkpad);
    gst_object_unref (mq_srcpad);
  }

  /* Run the test. Push NBUFFERS buffers through the multiqueue in a
   * pattern */
  max_linked_id = 0;
  eos_seen = 0;
  gst_element_set_state (pipe, GST_STATE_PLAYING);

  for (i = 0; i < NBUFFERS; i++) {
    const guint8 pad_pattern[] =
        { 0, 0, 0, 0, 1, 1, 2, 1, 0, 2, 3, 2, 3, 1, 4 };
    const guint n = sizeof (pad_pattern) / sizeof (guint8);
    guint8 cur_pad;
    GstBuffer *buf;
    GstFlowReturn ret;

    cur_pad = pad_pattern[i % n];

    buf = gst_buffer_new_and_alloc (4);
    g_static_mutex_lock (&_check_lock);
    fail_if (buf == NULL);
    g_static_mutex_unlock (&_check_lock);

    /* buffers carry a 1-based sequence number so the sinks can verify
     * ordering */
    GST_WRITE_UINT32_BE (GST_BUFFER_DATA (buf), i + 1);
    GST_BUFFER_TIMESTAMP (buf) = (i + 1) * GST_SECOND;
    ret = gst_pad_push (inputpads[cur_pad], buf);

    g_static_mutex_lock (&_check_lock);
    if (pad_data[cur_pad].is_linked) {
      fail_unless (ret == GST_FLOW_OK,
          "Push on pad %d returned %d when FLOW_OK was expected",
          cur_pad, ret);
    } else {
      /* Expect OK initially, then NOT_LINKED when the srcpad starts
       * pushing */
      fail_unless (ret == GST_FLOW_OK || ret == GST_FLOW_NOT_LINKED,
          "Push on pad %d returned %d when FLOW_OK or NOT_LINKED was expected",
          cur_pad, ret);
    }
    g_static_mutex_unlock (&_check_lock);
  }

  for (i = 0; i < NPADS; i++) {
    gst_pad_push_event (inputpads[i], gst_event_new_eos ());
  }

  /* Wait until EOS has been seen on every one of the NPADS pads
   * (previously a hard-coded '5') */
  g_mutex_lock (mutex);
  while (eos_seen < (guint32) NPADS) {
    g_cond_wait (cond, mutex);
  }
  g_mutex_unlock (mutex);

  /* Clean up all NPADS pads (previously a hard-coded '5') */
  for (i = 0; i < NPADS; i++) {
    GstPad *mq_input = gst_pad_get_peer (inputpads[i]);

    gst_pad_unlink (inputpads[i], mq_input);
    gst_element_release_request_pad (mq, mq_input);
    gst_object_unref (mq_input);
    gst_object_unref (inputpads[i]);
    gst_object_unref (sinkpads[i]);
  }

  gst_element_set_state (pipe, GST_STATE_NULL);
  gst_object_unref (pipe);

  g_cond_free (cond);
  g_mutex_free (mutex);
}
/**
 * gcr_importer_import:
 * @importer: the importer
 * @cancellable: a #GCancellable, or %NULL
 * @error: the location to place an error on failure, or %NULL
 *
 * Import the queued items in the importer. This call will block
 * until the operation completes.
 *
 * If the implementation provides import_sync, it is called directly;
 * otherwise the import_async/import_finish pair is driven to completion
 * from this thread.
 *
 * Returns: whether the items were imported successfully or not
 */
gboolean
gcr_importer_import (GcrImporter *importer, GCancellable *cancellable,
    GError **error)
{
	gboolean result;
	ImportClosure *closure;
	GcrImporterIface *iface;

	g_return_val_if_fail (GCR_IS_IMPORTER (importer), FALSE);
	g_return_val_if_fail (cancellable == NULL || G_IS_CANCELLABLE (cancellable), FALSE);
	g_return_val_if_fail (error == NULL || *error == NULL, FALSE);

	iface = GCR_IMPORTER_GET_INTERFACE (importer);

	/* Prefer a native synchronous implementation when available */
	if (iface->import_sync)
		return (iface->import_sync) (importer, cancellable, error);

	g_return_val_if_fail (iface->import_async != NULL, FALSE);
	g_return_val_if_fail (iface->import_finish != NULL, FALSE);

	/* cond + mutex guard closure->complete; on_import_async_complete
	 * sets it (and closure->error) when the async call finishes */
	closure = g_new0 (ImportClosure, 1);
	closure->cond = g_new (GCond, 1);
	g_cond_init (closure->cond);
	closure->mutex = g_new (GMutex, 1);
	g_mutex_init (closure->mutex);
	closure->context = g_main_context_get_thread_default ();
	g_mutex_lock (closure->mutex);

	(iface->import_async) (importer, cancellable, on_import_async_complete, closure);

	/*
	 * Handle the case where we've been called from within the main context
	 * or in the case where the main context is not running. This approximates
	 * the behavior of a modal dialog.
	 */
	if (g_main_context_acquire (closure->context)) {
		while (!closure->complete) {
			/* drop the lock while iterating so the completion
			 * callback can take it and flip closure->complete */
			g_mutex_unlock (closure->mutex);
			g_main_context_iteration (closure->context, TRUE);
			g_mutex_lock (closure->mutex);
		}
		g_main_context_release (closure->context);

	/*
	 * Handle the case where we're in a different thread than the main
	 * context and a main loop is running.
	 */
	} else {
		while (!closure->complete)
			g_cond_wait (closure->cond, closure->mutex);
	}

	g_mutex_unlock (closure->mutex);

	/* any failure was stored in closure->error by the completion callback */
	result = (closure->error == NULL);
	if (closure->error)
		g_propagate_error (error, closure->error);

	g_cond_clear (closure->cond);
	g_free (closure->cond);
	g_mutex_clear (closure->mutex);
	g_free (closure->mutex);
	g_free (closure);

	return result;
}
/**
 * Allocate space for the blob (not necessarily in memory!)
 *
 * @param[in] self this
 * @param[in] req_size required space
 *
 * @warning Caller shall hold a write lock on the blob!
 *
 * In-memory blobs are resized in power-of-two steps derived from the
 * current allocation; file-backed (or never-allocated) blobs are sized
 * exactly.  If the blob system does not grant the size delta directly,
 * the request is queued to the maintenance thread and this thread
 * blocks on cond_reply until a verdict arrives.
 **/
static void
z_blob_alloc(ZBlob *self, gint64 req_size)
{
  gchar *newdata;
  gint err;
  gint64 req_alloc_size, alloc_req;
  gboolean alloc_granted;

  z_enter();
  g_assert(self);
  g_assert(req_size >= 0);
  /* determine the allocation size */
  if ((self->alloc_size <= 0) || self->is_in_file)
    {
      /* exact sizing for file-backed or first-time allocations */
      req_alloc_size = req_size;
    }
  else
    {
      /* First run (if shrinking reqd): go just below the requested size */
      req_alloc_size = self->alloc_size;
      while (req_alloc_size > req_size)
        {
          req_alloc_size >>= 1;
        }
      /* Second run: find next available size */
      while (req_alloc_size < req_size)
        {
          req_alloc_size <<= 1;
        }
    }
  /* just return if the allocation needn't change */
  if (req_alloc_size == self->alloc_size)
    z_return();

  alloc_req = req_alloc_size - self->alloc_size;
  /* ask the blob system whether the size delta fits its global budget */
  g_mutex_lock(self->system->mtx_blobsys);
  self->alloc_req = alloc_req;
  alloc_granted = z_blob_check_alloc(self);
  g_mutex_unlock(self->system->mtx_blobsys);
  if (!alloc_granted)
    {
      /* not granted directly: queue the request and wait for the
       * maintenance thread to set 'replied' (verdict in 'approved') */
      self->approved = FALSE;
      self->replied = FALSE;
      g_mutex_lock(self->mtx_reply);
      g_async_queue_push(self->system->req_queue, self);
      while (!self->replied)
        g_cond_wait(self->cond_reply, self->mtx_reply);
      g_mutex_unlock(self->mtx_reply);
      alloc_granted = self->approved;
    }
  /* a denied allocation is fatal here */
  g_assert(alloc_granted);
  if (self->is_in_file)
    {
      err = ftruncate(self->fd, req_alloc_size);
      if (err < 0)
        z_log(NULL, CORE_ERROR, 3, "Error truncating blob file, ftruncate() failed; file='%s', error='%s'", self->filename, g_strerror(errno));
    }
  else
    {
      newdata = g_renew(gchar, self->data, req_alloc_size);
      /* zero only the newly grown tail */
      if (self->alloc_size < req_alloc_size && newdata)
        memset(newdata + self->alloc_size, 0, req_alloc_size - self->alloc_size);
      self->data = newdata;
    }
  self->alloc_size = req_alloc_size;
  /* clamp the logical size when the allocation shrank */
  if (self->size > req_alloc_size)
    self->size = req_alloc_size;
  self->stat.alloc_count++;
  self->stat.last_accessed = time(NULL);
  z_return();
}
/*
 * Receive one datagram on @sock, validate it as a STUN message against
 * @newagent (falling back to @oldagent), and answer BINDING requests.
 * Before sending, synchronizes with the test harness via stun_mutex_ptr /
 * stun_signal_ptr and waits until send_stun is cleared.
 *
 * @param sock      UDP socket to receive from / send on
 * @param oldagent  agent for pre-RFC5389 clients
 * @param newagent  agent for RFC5389 clients
 * @return 0 when the response was sent completely, -1 on receive/send
 *         error or when packets are being deliberately dropped
 *
 * Fixes over the previous version:
 *  - sendto() failure was undetected: 'len' is size_t, so a -1 return
 *    became SIZE_MAX and 'len < buf_len' was false, yielding ret == 0.
 *  - the mutex trylock was performed inside g_assert(), so building with
 *    G_DISABLE_ASSERT would skip the lock and unbalance the unlock.
 */
static int
dgram_process (int sock, StunAgent *oldagent, StunAgent *newagent)
{
  union {
    struct sockaddr_storage storage;
    struct sockaddr addr;
  } addr;
  socklen_t addr_len;
  uint8_t buf[STUN_MAX_MESSAGE_SIZE];
  size_t buf_len = 0;
  size_t len = 0;
  StunMessage request;
  StunMessage response;
  StunValidationStatus validation;
  StunAgent *agent = NULL;
  gint ret;

  addr_len = sizeof (struct sockaddr_in);

recv_packet:
  len = recvfrom (sock, buf, sizeof (buf), 0, &addr.addr, &addr_len);

  if (drop_stun_packets) {
    g_debug ("Dropping STUN packet as requested");
    return -1;
  }

  if (len == (size_t) -1) {
    return -1;
  }

  /* Try the RFC5389 agent first, fall back to the old agent */
  validation = stun_agent_validate (newagent, &request, buf, len, NULL, 0);

  if (validation == STUN_VALIDATION_SUCCESS) {
    agent = newagent;
  } else {
    validation = stun_agent_validate (oldagent, &request, buf, len, NULL, 0);
    agent = oldagent;
  }

  /* Unknown attributes */
  if (validation == STUN_VALIDATION_UNKNOWN_REQUEST_ATTRIBUTE) {
    buf_len = stun_agent_build_unknown_attributes_error (agent, &response,
        buf, sizeof (buf), &request);
    goto send_buf;
  }

  /* Mal-formatted packets: silently wait for the next one */
  if (validation != STUN_VALIDATION_SUCCESS ||
      stun_message_get_class (&request) != STUN_REQUEST) {
    goto recv_packet;
  }

  switch (stun_message_get_method (&request)) {
    case STUN_BINDING:
      stun_agent_init_response (agent, &response, buf, sizeof (buf), &request);
      if (stun_message_has_cookie (&request))
        stun_message_append_xor_addr (&response,
            STUN_ATTRIBUTE_XOR_MAPPED_ADDRESS, &addr.storage, addr_len);
      else
        stun_message_append_addr (&response, STUN_ATTRIBUTE_MAPPED_ADDRESS,
            &addr.addr, addr_len);
      break;

    case STUN_SHARED_SECRET:
    case STUN_ALLOCATE:
    case STUN_SET_ACTIVE_DST:
    case STUN_CONNECT:
    case STUN_OLD_SET_ACTIVE_DST:
    case STUN_IND_DATA:
    case STUN_IND_CONNECT_STATUS:
    case STUN_CHANNELBIND:
    default:
      /* everything except BINDING gets a 400 Bad Request */
      if (!stun_agent_init_error (agent, &response, buf, sizeof (buf),
              &request, STUN_ERROR_BAD_REQUEST)) {
        g_debug ("STUN error message not initialized properly");
        g_assert_not_reached ();
      }
  }

  buf_len = stun_agent_finish_message (agent, &response, NULL, 0);

send_buf:
  g_cancellable_cancel (global_cancellable);
  g_debug ("Ready to send a STUN response");
  /* Acquire the lock outside the assertion so it still happens when
   * assertions are compiled out (G_DISABLE_ASSERT) */
  if (!g_mutex_trylock (stun_mutex_ptr))
    g_assert_not_reached ();
  got_stun_packet = TRUE;
  while (send_stun) {
    g_debug ("Waiting for signal. State is %d", global_lagent_state);
    g_cond_wait (stun_signal_ptr, stun_mutex_ptr);
  }
  g_mutex_unlock (stun_mutex_ptr);
  len = sendto (sock, buf, buf_len, 0, &addr.addr, addr_len);
  g_debug ("STUN response sent");
  drop_stun_packets = TRUE;
  /* sendto() returns -1 on failure; with an unsigned 'len' that is
   * (size_t)-1, which 'len < buf_len' alone would treat as success */
  ret = (len == (size_t) -1 || len < buf_len) ? -1 : 0;
  return ret;
}
/*
 * GstPushSrc::create() implementation: block until the capture callback
 * queues an audio packet (or we are flushing), wrap its samples into a
 * GstBuffer without copying, and keep a running sample counter to smooth
 * timestamps across small jitter, based on audiobasesrc.
 */
static GstFlowReturn
gst_decklink_audio_src_create (GstPushSrc * bsrc, GstBuffer ** buffer)
{
  GstDecklinkAudioSrc *self = GST_DECKLINK_AUDIO_SRC_CAST (bsrc);
  GstFlowReturn flow_ret = GST_FLOW_OK;
  const guint8 *data;
  glong sample_count;
  gsize data_size;
  CapturePacket *p;
  AudioPacket *ap;
  GstClockTime timestamp, duration;
  GstClockTime start_time, end_time;
  guint64 start_offset, end_offset;
  gboolean discont = FALSE;

  /* wait for a captured packet or a flush request */
  g_mutex_lock (&self->lock);
  while (g_queue_is_empty (&self->current_packets) && !self->flushing) {
    g_cond_wait (&self->cond, &self->lock);
  }

  p = (CapturePacket *) g_queue_pop_head (&self->current_packets);
  g_mutex_unlock (&self->lock);

  /* NOTE(review): 'flushing' is re-read here without the lock; presumably
   * a stale read is harmless because the flush path also signals the cond
   * above - confirm.  'p' can only be NULL on the flushing path. */
  if (self->flushing) {
    if (p)
      capture_packet_free (p);
    GST_DEBUG_OBJECT (self, "Flushing");
    return GST_FLOW_FLUSHING;
  }

  p->packet->GetBytes ((gpointer *) & data);
  sample_count = p->packet->GetSampleFrameCount ();
  data_size = self->info.bpf * sample_count;

  /* zero-copy wrap of the DeckLink packet; audio_packet_free drops the
   * refs taken below when the GstBuffer is destroyed */
  ap = (AudioPacket *) g_malloc0 (sizeof (AudioPacket));
  *buffer =
      gst_buffer_new_wrapped_full ((GstMemoryFlags) GST_MEMORY_FLAG_READONLY,
      (gpointer) data, data_size, 0, data_size, ap,
      (GDestroyNotify) audio_packet_free);
  ap->packet = p->packet;
  p->packet->AddRef ();
  ap->input = self->input->input;
  ap->input->AddRef ();

  timestamp = p->capture_time;

  // Jitter and discontinuity handling, based on audiobasesrc
  start_time = timestamp;

  // Convert to the sample numbers
  start_offset =
      gst_util_uint64_scale (start_time, self->info.rate, GST_SECOND);

  end_offset = start_offset + sample_count;
  end_time = gst_util_uint64_scale_int (end_offset, GST_SECOND,
      self->info.rate);

  duration = end_time - start_time;

  if (self->next_offset == (guint64) - 1) {
    /* first buffer after start or resync */
    discont = TRUE;
  } else {
    guint64 diff, max_sample_diff;

    // Check discont
    if (start_offset <= self->next_offset)
      diff = self->next_offset - start_offset;
    else
      diff = start_offset - self->next_offset;

    max_sample_diff =
        gst_util_uint64_scale_int (self->alignment_threshold, self->info.rate,
        GST_SECOND);

    // Discont!
    if (G_UNLIKELY (diff >= max_sample_diff)) {
      if (self->discont_wait > 0) {
        /* only declare a discont once it persisted for discont_wait */
        if (self->discont_time == GST_CLOCK_TIME_NONE) {
          self->discont_time = start_time;
        } else if (start_time - self->discont_time >= self->discont_wait) {
          discont = TRUE;
          self->discont_time = GST_CLOCK_TIME_NONE;
        }
      } else {
        discont = TRUE;
      }
    } else if (G_UNLIKELY (self->discont_time != GST_CLOCK_TIME_NONE)) {
      // we have had a discont, but are now back on track!
      self->discont_time = GST_CLOCK_TIME_NONE;
    }
  }

  if (discont) {
    // Have discont, need resync and use the capture timestamps
    if (self->next_offset != (guint64) - 1)
      GST_INFO_OBJECT (self, "Have discont. Expected %"
          G_GUINT64_FORMAT ", got %" G_GUINT64_FORMAT,
          self->next_offset, start_offset);
    GST_BUFFER_FLAG_SET (*buffer, GST_BUFFER_FLAG_DISCONT);
    self->next_offset = end_offset;
  } else {
    // No discont, just keep counting
    self->discont_time = GST_CLOCK_TIME_NONE;
    timestamp =
        gst_util_uint64_scale (self->next_offset, GST_SECOND, self->info.rate);
    self->next_offset += sample_count;
    duration =
        gst_util_uint64_scale (self->next_offset, GST_SECOND,
        self->info.rate) - timestamp;
  }

  GST_BUFFER_TIMESTAMP (*buffer) = timestamp;
  GST_BUFFER_DURATION (*buffer) = duration;

  GST_DEBUG_OBJECT (self,
      "Outputting buffer %p with timestamp %" GST_TIME_FORMAT
      " and duration %" GST_TIME_FORMAT, *buffer,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (*buffer)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (*buffer)));

  capture_packet_free (p);

  return flow_ret;
}
/**
 * gst_bus_post:
 * @bus: a #GstBus to post on
 * @message: The #GstMessage to post
 *
 * Post a message on the given bus. Ownership of the message
 * is taken by the bus.
 *
 * Returns: TRUE if the message could be posted, FALSE if the bus is flushing.
 *
 * MT safe.
 */
gboolean
gst_bus_post (GstBus * bus, GstMessage * message)
{
  GstBusSyncReply reply = GST_BUS_PASS;
  GstBusSyncHandler handler;
  gboolean emit_sync_message;
  gpointer handler_data;

  g_return_val_if_fail (GST_IS_BUS (bus), FALSE);
  g_return_val_if_fail (GST_IS_MESSAGE (message), FALSE);

  GST_DEBUG_OBJECT (bus, "[msg %p] posting on bus, type %s, %" GST_PTR_FORMAT
      " from source %" GST_PTR_FORMAT,
      message, GST_MESSAGE_TYPE_NAME (message), message->structure,
      message->src);

  GST_OBJECT_LOCK (bus);
  /* check if the bus is flushing */
  if (GST_OBJECT_FLAG_IS_SET (bus, GST_BUS_FLUSHING))
    goto is_flushing;

  /* snapshot the handler state under the lock; the handler itself is
   * invoked after unlocking */
  handler = bus->sync_handler;
  handler_data = bus->sync_handler_data;
  emit_sync_message = bus->priv->num_sync_message_emitters > 0;
  GST_OBJECT_UNLOCK (bus);

  /* first call the sync handler if it is installed */
  if (handler)
    reply = handler (bus, message, handler_data);

  /* emit sync-message if requested to do so via
     gst_bus_enable_sync_message_emission. terrible but effective */
  if (emit_sync_message && reply != GST_BUS_DROP
      && handler != gst_bus_sync_signal_handler)
    gst_bus_sync_signal_handler (bus, message, NULL);

  /* now see what we should do with the message */
  switch (reply) {
    case GST_BUS_DROP:
      /* drop the message */
      GST_DEBUG_OBJECT (bus, "[msg %p] dropped", message);
      break;
    case GST_BUS_PASS:
      /* pass the message to the async queue, refcount passed in the queue */
      GST_DEBUG_OBJECT (bus, "[msg %p] pushing on async queue", message);
      g_mutex_lock (bus->queue_lock);
      g_queue_push_tail (bus->queue, message);
      g_cond_broadcast (bus->priv->queue_cond);
      g_mutex_unlock (bus->queue_lock);
      GST_DEBUG_OBJECT (bus, "[msg %p] pushed on async queue", message);

      gst_bus_wakeup_main_context (bus);

      break;
    case GST_BUS_ASYNC:
    {
      /* async delivery, we need a mutex and a cond to block
       * on */
      GMutex *lock = g_mutex_new ();
      GCond *cond = g_cond_new ();

      GST_MESSAGE_COND (message) = cond;
      GST_MESSAGE_GET_LOCK (message) = lock;

      GST_DEBUG_OBJECT (bus, "[msg %p] waiting for async delivery", message);

      /* now we lock the message mutex, send the message to the async
       * queue. When the message is handled by the app and destroyed,
       * the cond will be signalled and we can continue */
      g_mutex_lock (lock);
      g_mutex_lock (bus->queue_lock);
      g_queue_push_tail (bus->queue, message);
      g_cond_broadcast (bus->priv->queue_cond);
      g_mutex_unlock (bus->queue_lock);

      gst_bus_wakeup_main_context (bus);

      /* now block till the message is freed.
       * NOTE(review): a single g_cond_wait with no predicate re-check;
       * relies on the cond being signalled only from the message's
       * destruction - confirm no spurious-wakeup exposure here */
      g_cond_wait (cond, lock);
      g_mutex_unlock (lock);

      GST_DEBUG_OBJECT (bus, "[msg %p] delivered asynchronously", message);

      g_mutex_free (lock);
      g_cond_free (cond);
      break;
    }
    default:
      g_warning ("invalid return from bus sync handler");
      break;
  }
  return TRUE;

  /* ERRORS */
is_flushing:
  {
    GST_DEBUG_OBJECT (bus, "bus is flushing");
    gst_message_unref (message);
    GST_OBJECT_UNLOCK (bus);

    return FALSE;
  }
}
/*
 * Drain the Android MediaCodec audio decoder: queue an empty input buffer
 * flagged END_OF_STREAM and block on drain_cond until the codec loop
 * reports that the EOS buffer reached the output port.
 *
 * Returns GST_FLOW_OK on success (or when there is nothing to drain),
 * GST_FLOW_FLUSHING when flushing interrupted the drain,
 * GST_FLOW_ERROR otherwise.
 */
static GstFlowReturn
gst_amc_audio_dec_drain (GstAmcAudioDec * self)
{
  GstFlowReturn ret;
  gint idx;
  GError *err = NULL;

  GST_DEBUG_OBJECT (self, "Draining codec");
  if (!self->started) {
    GST_DEBUG_OBJECT (self, "Codec not started yet");
    return GST_FLOW_OK;
  }

  /* Don't send drain buffer twice, this doesn't work */
  if (self->drained) {
    GST_DEBUG_OBJECT (self, "Codec is drained already");
    return GST_FLOW_OK;
  }

  /* Make sure to release the base class stream lock, otherwise
   * _loop() can't call _finish_frame() and we might block forever
   * because no input buffers are released */
  GST_AUDIO_DECODER_STREAM_UNLOCK (self);
  /* Send an EOS buffer to the component and let the base
   * class drop the EOS event. We will send it later when
   * the EOS buffer arrives on the output port.
   * Wait at most 0.5s here. */
  idx = gst_amc_codec_dequeue_input_buffer (self->codec, 500000, &err);
  GST_AUDIO_DECODER_STREAM_LOCK (self);

  if (idx >= 0) {
    GstAmcBuffer *buf;
    GstAmcBufferInfo buffer_info;

    buf = gst_amc_codec_get_input_buffer (self->codec, idx, &err);
    if (buf) {
      GST_AUDIO_DECODER_STREAM_UNLOCK (self);
      g_mutex_lock (&self->drain_lock);
      self->draining = TRUE;

      /* empty buffer, timestamped with the last upstream timestamp
       * converted to microseconds, flagged as end-of-stream */
      memset (&buffer_info, 0, sizeof (buffer_info));
      buffer_info.size = 0;
      buffer_info.presentation_time_us =
          gst_util_uint64_scale (self->last_upstream_ts, 1, GST_USECOND);
      buffer_info.flags |= BUFFER_FLAG_END_OF_STREAM;

      gst_amc_buffer_set_position_and_limit (buf, NULL, 0, 0);
      gst_amc_buffer_free (buf);
      buf = NULL;

      if (gst_amc_codec_queue_input_buffer (self->codec, idx, &buffer_info,
              &err)) {
        GST_DEBUG_OBJECT (self, "Waiting until codec is drained");
        /* NOTE(review): single g_cond_wait with no predicate loop; a
         * spurious wakeup would end the drain early - confirm the
         * signalling side fires only when draining completes */
        g_cond_wait (&self->drain_cond, &self->drain_lock);
        GST_DEBUG_OBJECT (self, "Drained codec");
        ret = GST_FLOW_OK;
      } else {
        GST_ERROR_OBJECT (self, "Failed to queue input buffer");
        if (self->flushing) {
          g_clear_error (&err);
          ret = GST_FLOW_FLUSHING;
        } else {
          GST_ELEMENT_WARNING_FROM_ERROR (self, err);
          ret = GST_FLOW_ERROR;
        }
      }

      self->drained = TRUE;
      self->draining = FALSE;
      g_mutex_unlock (&self->drain_lock);
      GST_AUDIO_DECODER_STREAM_LOCK (self);
    } else {
      GST_ERROR_OBJECT (self, "Failed to get buffer for EOS: %d", idx);
      if (err)
        GST_ELEMENT_WARNING_FROM_ERROR (self, err);
      ret = GST_FLOW_ERROR;
    }
  } else {
    GST_ERROR_OBJECT (self, "Failed to acquire buffer for EOS: %d", idx);
    if (err)
      GST_ELEMENT_WARNING_FROM_ERROR (self, err);
    ret = GST_FLOW_ERROR;
  }

  /* discard any partially assembled output */
  gst_adapter_flush (self->output_adapter,
      gst_adapter_available (self->output_adapter));

  return ret;
}
/*
 * Test driver: starts a local STUN server thread, creates two ICE agents
 * bound to 127.0.0.1 (lagent controlling, ragent controlled), runs the
 * test scenarios and tears everything down.
 */
int
main (void)
{
  NiceAgent *lagent = NULL, *ragent = NULL;
  GThread *stun_thread = NULL;
  NiceAddress baseaddr;
  GSource *src;
  int sock;

  global_cancellable = g_cancellable_new ();
  src = g_cancellable_source_new (global_cancellable);
  g_source_set_dummy_callback (src);
  g_source_attach (src, NULL);

  sock = listen_socket (&stun_port);

  if (sock == -1) {
    g_assert_not_reached ();
  }

  stun_thread = g_thread_new ("listen for STUN requests",
      stun_thread_func, GINT_TO_POINTER (sock));

  // Once the thread is forked, we want to listen for a signal
  // that the socket was opened successfully
  g_mutex_lock (stun_thread_mutex_ptr);
  /* NOTE(review): no predicate re-check around this wait (a spurious
   * wakeup or a signal sent before we block would race), and the mutex
   * is never unlocked afterwards - confirm both are intentional for
   * this test harness */
  g_cond_wait (stun_thread_signal_ptr, stun_thread_mutex_ptr);

  lagent = nice_agent_new (NULL, NICE_COMPATIBILITY_RFC5245);
  ragent = nice_agent_new (NULL, NICE_COMPATIBILITY_RFC5245);

  g_object_set (G_OBJECT (lagent), "ice-tcp", FALSE, NULL);
  g_object_set (G_OBJECT (ragent), "ice-tcp", FALSE, NULL);

  /* lagent drives the session, ragent answers */
  g_object_set (G_OBJECT (lagent), "controlling-mode", TRUE, NULL);
  g_object_set (G_OBJECT (ragent), "controlling-mode", FALSE, NULL);

  g_object_set (G_OBJECT (lagent), "upnp", USE_UPNP, NULL);
  g_object_set (G_OBJECT (ragent), "upnp", USE_UPNP, NULL);
  /* only lagent talks to the local STUN server started above */
  g_object_set (G_OBJECT (lagent), "stun-server", "127.0.0.1", NULL);
  g_object_set (G_OBJECT (lagent), "stun-server-port", stun_port, NULL);

  /* let each agent's callbacks find its peer */
  g_object_set_data (G_OBJECT (lagent), "other-agent", ragent);
  g_object_set_data (G_OBJECT (ragent), "other-agent", lagent);

  g_assert (nice_address_set_from_string (&baseaddr, "127.0.0.1"));
  nice_agent_add_local_address (lagent, &baseaddr);
  nice_agent_add_local_address (ragent, &baseaddr);

  g_signal_connect (G_OBJECT (lagent), "candidate-gathering-done",
      G_CALLBACK (cb_candidate_gathering_done), LEFT_AGENT);
  g_signal_connect (G_OBJECT (ragent), "candidate-gathering-done",
      G_CALLBACK (cb_candidate_gathering_done), RIGHT_AGENT);
  g_signal_connect (G_OBJECT (lagent), "component-state-changed",
      G_CALLBACK (cb_component_state_changed), LEFT_AGENT);
  g_signal_connect (G_OBJECT (ragent), "component-state-changed",
      G_CALLBACK (cb_component_state_changed), RIGHT_AGENT);

  /* the actual test scenarios */
  standard_test (lagent, ragent);
  bad_credentials_test (lagent, ragent);
  bad_candidate_test (lagent, ragent);
  new_candidate_test (lagent, ragent);

  // Do this to make sure the STUN thread exits
  exit_stun_thread = TRUE;
  drop_stun_packets = TRUE;
  send_dummy_data ();

  /* weak pointers are cleared when the agents are finalized, which is
   * what WAIT_UNTIL_UNSET checks below */
  g_object_add_weak_pointer (G_OBJECT (lagent), (gpointer *) &lagent);
  g_object_add_weak_pointer (G_OBJECT (ragent), (gpointer *) &ragent);

  g_object_unref (lagent);
  g_object_unref (ragent);

  g_thread_join (stun_thread);
  g_object_unref (global_cancellable);

  g_source_destroy (src);
  g_source_unref (src);

  WAIT_UNTIL_UNSET (lagent, NULL);
  WAIT_UNTIL_UNSET (ragent, NULL);

  return 0;
}
/*
 * GstBaseSink::event() implementation: queue SEGMENT events for the
 * application, turn EOS into the "eos" callback/signal once all queued
 * buffers are consumed, and flush the internal queue on FLUSH_STOP.
 * Always chains up to the parent class afterwards.
 */
static gboolean
gst_app_sink_event (GstBaseSink * sink, GstEvent * event)
{
  GstAppSink *appsink = GST_APP_SINK_CAST (sink);
  GstAppSinkPrivate *priv = appsink->priv;

  switch (event->type) {
    case GST_EVENT_SEGMENT:
      g_mutex_lock (&priv->mutex);
      GST_DEBUG_OBJECT (appsink, "receiving SEGMENT");
      g_queue_push_tail (priv->queue, gst_event_ref (event));
      /* keep the segment around for preroll until the first buffer */
      if (!priv->preroll)
        gst_event_copy_segment (event, &priv->preroll_segment);
      g_mutex_unlock (&priv->mutex);
      break;
    case GST_EVENT_EOS:{
      gboolean emit = TRUE;

      /* mark EOS and wake any thread blocked on the cond */
      g_mutex_lock (&priv->mutex);
      GST_DEBUG_OBJECT (appsink, "receiving EOS");
      priv->is_eos = TRUE;
      g_cond_signal (&priv->cond);
      g_mutex_unlock (&priv->mutex);

      /* NOTE(review): the mutex is dropped and re-taken here between
       * signalling and waiting - presumably to let a consumer observe
       * is_eos first; confirm before collapsing into one critical
       * section */
      g_mutex_lock (&priv->mutex);
      /* wait until all buffers are consumed or we're flushing.
       * Otherwise we might signal EOS before all buffers are
       * consumed, which is a bit confusing for the application */
      while (priv->num_buffers > 0 && !priv->flushing)
        g_cond_wait (&priv->cond, &priv->mutex);
      if (priv->flushing)
        emit = FALSE;
      g_mutex_unlock (&priv->mutex);

      if (emit) {
        /* emit EOS now */
        if (priv->callbacks.eos)
          priv->callbacks.eos (appsink, priv->user_data);
        else
          g_signal_emit (appsink, gst_app_sink_signals[SIGNAL_EOS], 0);
      }
      break;
    }
    case GST_EVENT_FLUSH_START:
      /* we don't have to do anything here, the base class will call unlock
       * which will make sure we exit the _render method */
      GST_DEBUG_OBJECT (appsink, "received FLUSH_START");
      break;
    case GST_EVENT_FLUSH_STOP:
      g_mutex_lock (&priv->mutex);
      GST_DEBUG_OBJECT (appsink, "received FLUSH_STOP");
      gst_app_sink_flush_unlocked (appsink);
      g_mutex_unlock (&priv->mutex);
      break;
    default:
      break;
  }
  return GST_BASE_SINK_CLASS (parent_class)->event (sink, event);
}
// Block the calling thread on this condition variable.  The caller must
// already hold `mutex`; g_cond_wait() atomically releases it while
// waiting and re-acquires it before returning.
// NOTE(review): spurious wakeups are possible with GCond - callers are
// expected to re-check their predicate in a loop around this call.
void ThreadCondition::wait(Mutex& mutex)
{
    g_cond_wait(m_condition.get(), mutex.impl().get());
}
/*
 * GstBaseSink::render() implementation: enqueue @buffer for the
 * application.  When the queue is full, either drop the oldest buffer
 * (the "drop" behaviour) or block until the application consumes one,
 * honouring unlock (preroll) and flush requests.  The buffer is ref'd
 * before queueing, so the caller's reference is untouched.
 */
static GstFlowReturn
gst_app_sink_render (GstBaseSink * psink, GstBuffer * buffer)
{
  GstFlowReturn ret;
  GstAppSink *appsink = GST_APP_SINK_CAST (psink);
  GstAppSinkPrivate *priv = appsink->priv;
  gboolean emit;

restart:
  g_mutex_lock (&priv->mutex);
  if (priv->flushing)
    goto flushing;

  /* queue holding caps event might have been FLUSHed,
   * but caps state still present in pad caps */
  if (G_UNLIKELY (!priv->last_caps &&
          gst_pad_has_current_caps (GST_BASE_SINK_PAD (psink)))) {
    priv->last_caps = gst_pad_get_current_caps (GST_BASE_SINK_PAD (psink));
    GST_DEBUG_OBJECT (appsink, "activating pad caps %" GST_PTR_FORMAT,
        priv->last_caps);
  }

  GST_DEBUG_OBJECT (appsink, "pushing render buffer %p on queue (%d)",
      buffer, priv->num_buffers);

  while (priv->max_buffers > 0 && priv->num_buffers >= priv->max_buffers) {
    if (priv->drop) {
      GstBuffer *old;

      /* we need to drop the oldest buffer and try again */
      if ((old = dequeue_buffer (appsink))) {
        GST_DEBUG_OBJECT (appsink, "dropping old buffer %p", old);
        gst_buffer_unref (old);
      }
    } else {
      GST_DEBUG_OBJECT (appsink, "waiting for free space, length %d >= %d",
          priv->num_buffers, priv->max_buffers);

      if (priv->unlock) {
        /* we are asked to unlock, call the wait_preroll method */
        g_mutex_unlock (&priv->mutex);
        if ((ret = gst_base_sink_wait_preroll (psink)) != GST_FLOW_OK)
          goto stopping;

        /* we are allowed to continue now */
        goto restart;
      }

      /* wait for a buffer to be removed or flush */
      g_cond_wait (&priv->cond, &priv->mutex);
      if (priv->flushing)
        goto flushing;
    }
  }
  /* we need to ref the buffer when pushing it in the queue */
  g_queue_push_tail (priv->queue, gst_buffer_ref (buffer));
  priv->num_buffers++;
  g_cond_signal (&priv->cond);
  emit = priv->emit_signals;
  g_mutex_unlock (&priv->mutex);

  /* an installed callback takes precedence over the signal */
  if (priv->callbacks.new_sample) {
    ret = priv->callbacks.new_sample (appsink, priv->user_data);
  } else {
    ret = GST_FLOW_OK;
    if (emit)
      g_signal_emit (appsink, gst_app_sink_signals[SIGNAL_NEW_SAMPLE], 0,
          &ret);
  }
  return ret;

flushing:
  {
    GST_DEBUG_OBJECT (appsink, "we are flushing");
    g_mutex_unlock (&priv->mutex);
    return GST_FLOW_FLUSHING;
  }
stopping:
  {
    GST_DEBUG_OBJECT (appsink, "we are stopping");
    return ret;
  }
}