static void gst_decklink_audio_src_got_packet (GstElement * element, IDeckLinkAudioInputPacket * packet, GstClockTime capture_time, gboolean discont) { GstDecklinkAudioSrc *self = GST_DECKLINK_AUDIO_SRC_CAST (element); GstDecklinkVideoSrc *videosrc = NULL; GST_LOG_OBJECT (self, "Got audio packet at %" GST_TIME_FORMAT, GST_TIME_ARGS (capture_time)); g_mutex_lock (&self->input->lock); if (self->input->videosrc) videosrc = GST_DECKLINK_VIDEO_SRC_CAST (gst_object_ref (self->input->videosrc)); g_mutex_unlock (&self->input->lock); if (videosrc) { gst_decklink_video_src_convert_to_external_clock (videosrc, &capture_time, NULL); gst_object_unref (videosrc); GST_LOG_OBJECT (self, "Actual timestamp %" GST_TIME_FORMAT, GST_TIME_ARGS (capture_time)); } g_mutex_lock (&self->lock); if (!self->flushing) { CapturePacket *p; while (g_queue_get_length (&self->current_packets) >= self->buffer_size) { p = (CapturePacket *) g_queue_pop_head (&self->current_packets); GST_WARNING_OBJECT (self, "Dropping old packet at %" GST_TIME_FORMAT, GST_TIME_ARGS (p->capture_time)); capture_packet_free (p); } p = (CapturePacket *) g_malloc0 (sizeof (CapturePacket)); p->packet = packet; p->capture_time = capture_time; p->discont = discont; packet->AddRef (); g_queue_push_tail (&self->current_packets, p); g_cond_signal (&self->cond); } g_mutex_unlock (&self->lock); }
/* GstPushSrc::create() — pop the next queued audio packet and wrap it
 * in a zero-copy GstBuffer.
 *
 * Timestamping follows audiobasesrc: as long as the capture timestamps
 * stay within alignment_threshold of the expected sample position we
 * output perfectly contiguous timestamps derived from the sample
 * counter (next_offset); on a sustained discontinuity (longer than
 * discont_wait) we resync to the capture timestamps and flag DISCONT.
 *
 * Returns: GST_FLOW_OK with *buffer set, or GST_FLOW_FLUSHING. */
static GstFlowReturn
gst_decklink_audio_src_create (GstPushSrc * bsrc, GstBuffer ** buffer)
{
  GstDecklinkAudioSrc *self = GST_DECKLINK_AUDIO_SRC_CAST (bsrc);
  GstFlowReturn flow_ret = GST_FLOW_OK;
  const guint8 *data;
  glong sample_count;
  gsize data_size;
  CapturePacket *p;
  AudioPacket *ap;
  GstClockTime timestamp, duration;
  GstClockTime start_time, end_time;
  guint64 start_offset, end_offset;
  gboolean discont = FALSE;
  gboolean flushing;

  /* Wait until a packet is queued or we are told to flush. */
  g_mutex_lock (&self->lock);
  while (g_queue_is_empty (&self->current_packets) && !self->flushing) {
    g_cond_wait (&self->cond, &self->lock);
  }
  p = (CapturePacket *) g_queue_pop_head (&self->current_packets);
  /* FIX: snapshot the flushing flag while still holding the lock.
   * Previously self->flushing was re-read after unlocking; if it was
   * TRUE under the lock (so p is NULL, queue empty) but got reset to
   * FALSE by another thread before the unlocked check, we would fall
   * through and dereference the NULL packet below. */
  flushing = self->flushing;
  g_mutex_unlock (&self->lock);

  if (flushing) {
    if (p)
      capture_packet_free (p);
    GST_DEBUG_OBJECT (self, "Flushing");
    return GST_FLOW_FLUSHING;
  }

  /* Wrap the DeckLink packet memory read-only; audio_packet_free()
   * releases the packet and input refs when the buffer is disposed. */
  p->packet->GetBytes ((gpointer *) & data);
  sample_count = p->packet->GetSampleFrameCount ();
  data_size = self->info.bpf * sample_count;

  ap = (AudioPacket *) g_malloc0 (sizeof (AudioPacket));
  *buffer =
      gst_buffer_new_wrapped_full ((GstMemoryFlags) GST_MEMORY_FLAG_READONLY,
      (gpointer) data, data_size, 0, data_size, ap,
      (GDestroyNotify) audio_packet_free);
  ap->packet = p->packet;
  p->packet->AddRef ();
  ap->input = self->input->input;
  ap->input->AddRef ();

  timestamp = p->capture_time;

  // Jitter and discontinuity handling, based on audiobasesrc
  start_time = timestamp;

  // Convert to the sample numbers
  start_offset = gst_util_uint64_scale (start_time, self->info.rate,
      GST_SECOND);
  end_offset = start_offset + sample_count;
  end_time = gst_util_uint64_scale_int (end_offset, GST_SECOND,
      self->info.rate);
  duration = end_time - start_time;

  if (self->next_offset == (guint64) - 1) {
    /* First buffer (or after resync request): always a discont. */
    discont = TRUE;
  } else {
    guint64 diff, max_sample_diff;

    // Check discont
    if (start_offset <= self->next_offset)
      diff = self->next_offset - start_offset;
    else
      diff = start_offset - self->next_offset;

    max_sample_diff = gst_util_uint64_scale_int (self->alignment_threshold,
        self->info.rate, GST_SECOND);

    // Discont!
    if (G_UNLIKELY (diff >= max_sample_diff)) {
      /* Only resync after the misalignment persists for discont_wait,
       * so a single jittery packet does not break continuity. */
      if (self->discont_wait > 0) {
        if (self->discont_time == GST_CLOCK_TIME_NONE) {
          self->discont_time = start_time;
        } else if (start_time - self->discont_time >= self->discont_wait) {
          discont = TRUE;
          self->discont_time = GST_CLOCK_TIME_NONE;
        }
      } else {
        discont = TRUE;
      }
    } else if (G_UNLIKELY (self->discont_time != GST_CLOCK_TIME_NONE)) {
      // we have had a discont, but are now back on track!
      self->discont_time = GST_CLOCK_TIME_NONE;
    }
  }

  if (discont) {
    // Have discont, need resync and use the capture timestamps
    if (self->next_offset != (guint64) - 1)
      GST_INFO_OBJECT (self, "Have discont. Expected %"
          G_GUINT64_FORMAT ", got %" G_GUINT64_FORMAT,
          self->next_offset, start_offset);
    GST_BUFFER_FLAG_SET (*buffer, GST_BUFFER_FLAG_DISCONT);
    self->next_offset = end_offset;
  } else {
    // No discont, just keep counting
    self->discont_time = GST_CLOCK_TIME_NONE;
    timestamp = gst_util_uint64_scale (self->next_offset, GST_SECOND,
        self->info.rate);
    self->next_offset += sample_count;
    duration = gst_util_uint64_scale (self->next_offset, GST_SECOND,
        self->info.rate) - timestamp;
  }

  GST_BUFFER_TIMESTAMP (*buffer) = timestamp;
  GST_BUFFER_DURATION (*buffer) = duration;

  GST_DEBUG_OBJECT (self,
      "Outputting buffer %p with timestamp %" GST_TIME_FORMAT " and duration %"
      GST_TIME_FORMAT, *buffer, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (*buffer)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (*buffer)));

  capture_packet_free (p);

  return flow_ret;
}
static void gst_decklink_audio_src_got_packet (GstElement * element, IDeckLinkAudioInputPacket * packet, GstClockTime capture_time, GstClockTime packet_time, gboolean no_signal) { GstDecklinkAudioSrc *self = GST_DECKLINK_AUDIO_SRC_CAST (element); GstClockTime timestamp; GST_LOG_OBJECT (self, "Got audio packet at %" GST_TIME_FORMAT " / %" GST_TIME_FORMAT ", no signal %d", GST_TIME_ARGS (capture_time), GST_TIME_ARGS (packet_time), no_signal); g_mutex_lock (&self->input->lock); if (self->input->videosrc) { GstDecklinkVideoSrc *videosrc = GST_DECKLINK_VIDEO_SRC_CAST (gst_object_ref (self->input->videosrc)); if (videosrc->drop_no_signal_frames && no_signal) { g_mutex_unlock (&self->input->lock); return; } if (videosrc->first_time == GST_CLOCK_TIME_NONE) videosrc->first_time = packet_time; if (videosrc->skip_first_time > 0 && packet_time - videosrc->first_time < videosrc->skip_first_time) { GST_DEBUG_OBJECT (self, "Skipping frame as requested: %" GST_TIME_FORMAT " < %" GST_TIME_FORMAT, GST_TIME_ARGS (packet_time), GST_TIME_ARGS (videosrc->skip_first_time + videosrc->first_time)); g_mutex_unlock (&self->input->lock); return; } if (videosrc->output_stream_time) timestamp = packet_time; else timestamp = gst_clock_adjust_with_calibration (NULL, packet_time, videosrc->current_time_mapping.xbase, videosrc->current_time_mapping.b, videosrc->current_time_mapping.num, videosrc->current_time_mapping.den); } else { timestamp = capture_time; } g_mutex_unlock (&self->input->lock); GST_LOG_OBJECT (self, "Converted times to %" GST_TIME_FORMAT, GST_TIME_ARGS (timestamp)); g_mutex_lock (&self->lock); if (!self->flushing) { CapturePacket *p; while (g_queue_get_length (&self->current_packets) >= self->buffer_size) { p = (CapturePacket *) g_queue_pop_head (&self->current_packets); GST_WARNING_OBJECT (self, "Dropping old packet at %" GST_TIME_FORMAT, GST_TIME_ARGS (p->timestamp)); capture_packet_free (p); } p = (CapturePacket *) g_malloc0 (sizeof (CapturePacket)); p->packet = packet; 
p->timestamp = timestamp; p->no_signal = no_signal; packet->AddRef (); g_queue_push_tail (&self->current_packets, p); g_cond_signal (&self->cond); } g_mutex_unlock (&self->lock); }