/* This will be called "at the start of acquisition into each image buffer."
 * If acquisition blocks because we don't copy buffers fast enough, the number
 * of times this function is called will be less than the IMAQ cumulative
 * buffer count.
 *
 * Records the pipeline clock time of each frame start into src->times[] (a
 * ring of src->bufsize slots) so the streaming thread can timestamp buffers.
 * Also latches the wall-clock start time and the element base time on the
 * first frame. Always returns 1 so IMAQ rearms the callback. */
uInt32
gst_niimaqsrc_frame_start_callback (SESSION_ID sid, IMG_ERR err,
    IMG_SIGNAL_TYPE signal_type, uInt32 signal_identifier, void *userdata)
{
  GstNiImaqSrc *src = GST_NIIMAQSRC (userdata);
  GstClockTime abstime;
  /* NOTE(review): this ring index is a function-static, so it is shared by
   * every GstNiImaqSrc instance in the process and never reset on restart —
   * looks like it should live in the src struct; confirm only one instance
   * is ever used at a time. */
  static guint32 index = 0;

  g_mutex_lock (&src->mutex);

  /* time hasn't been read yet, this frame will be dropped */
  if (src->times[index] != GST_CLOCK_TIME_NONE) {
    g_mutex_unlock (&src->mutex);
    return 1;
  }

  /* get clock time
   * NOTE(review): GST_ELEMENT_CLOCK() can be NULL before the element is in
   * PLAYING — presumably acquisition only runs once a clock is set; verify. */
  abstime = gst_clock_get_time (GST_ELEMENT_CLOCK (src));
  src->times[index] = abstime;

  /* latch wall-clock capture start (UTC) on the very first frame */
  if (G_UNLIKELY (src->start_time == NULL))
    src->start_time = gst_date_time_new_now_utc ();

  /* first frame, use as element base time */
  if (src->base_time == GST_CLOCK_TIME_NONE)
    src->base_time = abstime;

  index = (index + 1) % src->bufsize;

  g_mutex_unlock (&src->mutex);

  /* return 1 to rearm the callback */
  return 1;
}
/* Stamp @buf with a running-time timestamp derived from the element clock
 * (minus one frame duration of latency), plus offset/duration metadata.
 * The very first buffer is flagged DISCONT. */
static void
gst_cel_video_src_timestamp_buffer (GstCelVideoSrc * self, GstBuffer * buf)
{
  GstClock *clock;
  GstClockTime timestamp = GST_CLOCK_TIME_NONE;

  GST_OBJECT_LOCK (self);
  clock = GST_ELEMENT_CLOCK (self);
  if (clock != NULL) {
    GstClockTime base_time = GST_ELEMENT (self)->base_time;

    timestamp = gst_clock_get_time (clock);
    /* clock time -> running time, clamped at zero */
    timestamp = (timestamp > base_time) ? timestamp - base_time : 0;
    /* account for one frame duration of capture latency */
    timestamp = (timestamp > self->duration) ? timestamp - self->duration : 0;
  }
  GST_OBJECT_UNLOCK (self);

  GST_BUFFER_OFFSET (buf) = self->offset;
  GST_BUFFER_OFFSET_END (buf) = self->offset + 1;
  GST_BUFFER_TIMESTAMP (buf) = timestamp;
  GST_BUFFER_DURATION (buf) = self->duration;

  if (self->offset == 0)
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);

  self->offset++;
}
/* Count one captured frame and, once per second of clock time, publish the
 * frame count as the "fps" property (notify emitted outside the lock). */
static void
gst_cel_video_src_update_statistics (GstCelVideoSrc * self)
{
  GstClock *clock;
  GstClockTime now;

  /* take a ref on the clock under the object lock */
  GST_OBJECT_LOCK (self);
  clock = GST_ELEMENT_CLOCK (self);
  if (clock != NULL)
    gst_object_ref (clock);
  GST_OBJECT_UNLOCK (self);

  if (clock == NULL)
    return;

  now = gst_clock_get_time (clock);
  gst_object_unref (clock);

  self->count++;

  if (!GST_CLOCK_TIME_IS_VALID (self->last_sampling)) {
    /* first sample: just start the measurement window */
    self->last_sampling = now;
    return;
  }

  if (now - self->last_sampling < GST_SECOND)
    return;

  GST_OBJECT_LOCK (self);
  self->fps = self->count;
  GST_OBJECT_UNLOCK (self);

  g_object_notify (G_OBJECT (self), "fps");

  self->last_sampling = now;
  self->count = 0;
}
/* Count one captured frame and, once per second of clock time, publish the
 * frame count as the "fps" property (notify emitted outside the lock). */
static void
gst_ks_video_src_update_statistics (GstKsVideoSrc * self)
{
  GstKsVideoSrcPrivate *priv = GST_KS_VIDEO_SRC_GET_PRIVATE (self);
  GstClock *clock;
  GstClockTime now;

  /* take a ref on the clock under the object lock */
  GST_OBJECT_LOCK (self);
  clock = GST_ELEMENT_CLOCK (self);
  if (clock != NULL)
    gst_object_ref (clock);
  GST_OBJECT_UNLOCK (self);

  if (clock == NULL)
    return;

  now = gst_clock_get_time (clock);
  gst_object_unref (clock);

  priv->count++;

  if (!GST_CLOCK_TIME_IS_VALID (priv->last_sampling)) {
    /* first sample: just start the measurement window */
    priv->last_sampling = now;
    return;
  }

  if (now - priv->last_sampling < GST_SECOND)
    return;

  GST_OBJECT_LOCK (self);
  priv->fps = priv->count;
  GST_OBJECT_UNLOCK (self);

  g_object_notify (G_OBJECT (self), "fps");

  priv->last_sampling = now;
  priv->count = 0;
}
/* Compute the running-time timestamp for @sbuf. When the pipeline clock is
 * the GstSystemClock (which here is backed by the CoreAudio/CoreVideo host
 * clock), prefer the host-time attachment on the sample buffer; otherwise
 * fall back to sampling the clock directly. Returns GST_CLOCK_TIME_NONE if
 * the element has no clock. */
static GstClockTime
gst_mio_video_src_get_timestamp (GstMIOVideoSrc * self, CMSampleBufferRef sbuf)
{
  GstClock *clock;
  GstClockTime base_time;
  GstClockTime ts = GST_CLOCK_TIME_NONE;

  /* snapshot clock (ref'd) and base time under the object lock */
  GST_OBJECT_LOCK (self);
  clock = GST_ELEMENT_CLOCK (self);
  if (clock != NULL)
    gst_object_ref (clock);
  base_time = GST_ELEMENT_CAST (self)->base_time;
  GST_OBJECT_UNLOCK (self);

  if (G_UNLIKELY (clock == NULL))
    return GST_CLOCK_TIME_NONE;

  if (G_TYPE_FROM_INSTANCE (clock) == GST_TYPE_SYSTEM_CLOCK) {
    CFNumberRef number;
    UInt64 host_time;

    /* use the host-time attached to the sample buffer when available */
    number = self->ctx->cm->CMGetAttachment (sbuf,
        *self->ctx->mio->kTundraSampleBufferAttachmentKey_HostTime, NULL);
    if (number != NULL &&
        CFNumberGetValue (number, kCFNumberSInt64Type, &host_time)) {
      ts = gst_util_uint64_scale_int (host_time,
          self->cv_ratio_n, self->cv_ratio_d);
    }
  }

  if (!GST_CLOCK_TIME_IS_VALID (ts))
    ts = gst_clock_get_time (clock);

  /* clock time -> running time, clamped at zero */
  ts = (ts > base_time) ? ts - base_time : 0;

  gst_object_unref (clock);

  return ts;
}
/* Set buffer timing and offset data.
 *
 * Stamps p_buf's PTS with the pipeline running time (clock time minus the
 * element base time) or GST_CLOCK_TIME_NONE when no clock is set. DTS,
 * duration and offsets are left unset. Also tracks the previous timestamp
 * for the trace logs. */
static void set_gstbuf_time_and_offset(GstAndroidVideoSource * p_src, GstBuffer * p_buf)
{
    GstElement *p_element;
    GstClock *p_clock;
    GstClockTime now;
    GstClockTime base_time;

    GA_LOGTRACE("ENTER %s --xx--> thread(%ld)", __FUNCTION__, pthread_self());

    p_element = GST_ELEMENT_CAST(p_src);

    /* Snapshot the clock (ref'd) and base_time atomically under the lock */
    GST_OBJECT_LOCK(p_element);
    p_clock = GST_ELEMENT_CLOCK(p_element);
    if (p_clock) {
        gst_object_ref(p_clock);
        base_time = p_element->base_time;
        GA_LOGTRACE("%s: base_time is: %llu", __FUNCTION__, base_time);
    } else {
        base_time = GST_CLOCK_TIME_NONE;
    }
    GST_OBJECT_UNLOCK(p_element);

    if (p_clock) {
        /* Sample the clock exactly once. The previous code called
         * gst_clock_get_time() a second time just for the trace log, which
         * costs an extra (possibly slaved/remote) clock query and could log
         * a different value than the one actually used for the PTS. */
        GstClockTime abs_time = gst_clock_get_time(p_clock);
        GA_LOGTRACE("%s: gst_clock_get_time returns: %llu", __FUNCTION__, abs_time);
        /* Wrap around is not considered a problem due to the clock being 64 bit (famous last words? :-) ) */
        now = abs_time - base_time;
        gst_object_unref(p_clock);
        p_clock = NULL;
    } else {
        now = GST_CLOCK_TIME_NONE;
    }

    GST_BUFFER_PTS(p_buf) = now;
    GST_BUFFER_DTS(p_buf) = GST_CLOCK_TIME_NONE;
    GST_BUFFER_DURATION(p_buf) = GST_CLOCK_TIME_NONE;
    GST_BUFFER_OFFSET(p_buf) = GST_BUFFER_OFFSET_NONE;
    GST_BUFFER_OFFSET_END(p_buf) = GST_BUFFER_OFFSET_NONE;

    GA_LOGTRACE("%s: setting presentation timestamp (GstBuffer) to: %llu (%"GST_TIME_FORMAT")", __FUNCTION__, now, GST_TIME_ARGS(now));
    GA_LOGTRACE("%s: m_prev_timestamp: %llu (%"GST_TIME_FORMAT")", __FUNCTION__, p_src->m_prev_timestamp, GST_TIME_ARGS(p_src->m_prev_timestamp));
    GA_LOGTRACE("%s: timestamp diff: %llu (%"GST_TIME_FORMAT")", __FUNCTION__, now - p_src->m_prev_timestamp, GST_TIME_ARGS(now - p_src->m_prev_timestamp));

    p_src->m_prev_timestamp = now;

    GA_LOGTRACE("EXIT %s", __FUNCTION__);
    return;
}
/* Timestamp @buf with the pipeline running time adjusted by one frame
 * duration of latency. Muxed streams are passed through untimestamped.
 * Always returns TRUE. */
static gboolean
gst_ks_video_src_timestamp_buffer (GstKsVideoSrc * self, GstBuffer * buf,
    GstClockTime presentation_time)
{
  GstKsVideoSrcPrivate *priv = GST_KS_VIDEO_SRC_GET_PRIVATE (self);
  GstClockTime duration;
  GstClock *clock;
  GstClockTime timestamp, base_time;

  /* Don't timestamp muxed streams */
  if (gst_ks_video_device_stream_is_muxed (priv->device)) {
    duration = GST_CLOCK_TIME_NONE;
    timestamp = GST_CLOCK_TIME_NONE;
  } else {
    duration = gst_ks_video_device_get_duration (priv->device);

    /* snapshot clock (ref'd) and base time under the object lock */
    GST_OBJECT_LOCK (self);
    clock = GST_ELEMENT_CLOCK (self);
    if (clock != NULL) {
      gst_object_ref (clock);
      base_time = GST_ELEMENT (self)->base_time;
    } else {
      timestamp = GST_CLOCK_TIME_NONE;
    }
    GST_OBJECT_UNLOCK (self);

    if (clock != NULL) {
      /* running time according to the pipeline clock, minus one frame of
       * capture latency (clamped at zero) */
      timestamp = gst_clock_get_time (clock) - base_time;
      timestamp = (timestamp > duration) ? timestamp - duration : 0;

      gst_object_unref (clock);
      clock = NULL;
    }
  }

  GST_BUFFER_PTS (buf) = timestamp;
  GST_BUFFER_DTS (buf) = GST_CLOCK_TIME_NONE;
  GST_BUFFER_DURATION (buf) = duration;

  return TRUE;
}
/* Produce one captured buffer. Starts the capture pipeline lazily on the
 * first call, applies any pending configuration changes, then asks the
 * capture backend to fill a buffer using the pipeline clock/base time for
 * timestamping. */
static GstFlowReturn
gst_rpi_cam_src_create (GstPushSrc * parent, GstBuffer ** buf)
{
  GstRpiCamSrc *src = GST_RPICAMSRC (parent);
  GstFlowReturn ret;
  GstClock *clock = NULL;
  GstClockTime base_time;

  /* lazy start on the first buffer */
  if (!src->started) {
    g_mutex_lock (&src->config_lock);
    raspi_capture_update_config (src->capture_state, &src->capture_config,
        FALSE);
    src->capture_config.change_flags = 0;
    g_mutex_unlock (&src->config_lock);

    if (!raspi_capture_start (src->capture_state))
      return GST_FLOW_ERROR;
    src->started = TRUE;
  }

  /* snapshot clock (ref'd) and base time under the object lock */
  GST_OBJECT_LOCK (src);
  clock = GST_ELEMENT_CLOCK (src);
  if (clock != NULL)
    gst_object_ref (clock);
  base_time = GST_ELEMENT_CAST (src)->base_time;
  GST_OBJECT_UNLOCK (src);

  /* push any pending config changes down to the capture backend */
  g_mutex_lock (&src->config_lock);
  if (src->capture_config.change_flags) {
    raspi_capture_update_config (src->capture_state, &src->capture_config,
        TRUE);
    src->capture_config.change_flags = 0;
  }
  g_mutex_unlock (&src->config_lock);

  /* FIXME: Use custom allocator */
  ret = raspi_capture_fill_buffer (src->capture_state, buf, clock, base_time);
  if (*buf != NULL) {
    GST_LOG_OBJECT (src, "Made buffer of size %" G_GSIZE_FORMAT,
        gst_buffer_get_size (*buf));
  }

  if (clock != NULL)
    gst_object_unref (clock);

  return ret;
}
/* Fill @outbuf with the current VNC framebuffer contents.
 *
 * Requests a (possibly incremental) framebuffer update, iterates the RFB
 * decoder until the update is complete, copies the frame into the output
 * buffer and timestamps it with the pipeline running time. */
static GstFlowReturn
gst_rfb_src_fill (GstPushSrc * psrc, GstBuffer * outbuf)
{
  GstRfbSrc *src = GST_RFB_SRC (psrc);
  RfbDecoder *decoder = src->decoder;
  GstMapInfo info;
  GstClock *clock;
  GstClockTime base_time = GST_CLOCK_TIME_NONE;

  rfb_decoder_send_update_request (decoder, src->incremental_update,
      decoder->offset_x, decoder->offset_y, decoder->rect_width,
      decoder->rect_height);

  while (decoder->state != NULL) {
    if (!rfb_decoder_iterate (decoder)) {
      if (decoder->error != NULL) {
        GST_ELEMENT_ERROR (src, RESOURCE, READ,
            ("Error on VNC connection to host %s on port %d: %s",
                src->host, src->port, decoder->error->message), (NULL));
      } else {
        GST_ELEMENT_ERROR (src, RESOURCE, READ,
            ("Error on setup VNC connection to host %s on port %d", src->host,
                src->port), (NULL));
      }
      return GST_FLOW_ERROR;
    }
  }

  if (!gst_buffer_map (outbuf, &info, GST_MAP_WRITE)) {
    GST_ELEMENT_ERROR (src, RESOURCE, WRITE,
        ("Could not map the output frame"), (NULL));
    return GST_FLOW_ERROR;
  }

  memcpy (info.data, decoder->frame, info.size);

  /* The previous code dereferenced GST_ELEMENT_CLOCK() directly, without the
   * object lock, a ref, or a NULL check — crashing if the element has no
   * clock and racing against clock changes. Snapshot it safely instead. */
  GST_OBJECT_LOCK (src);
  clock = GST_ELEMENT_CLOCK (src);
  if (clock != NULL) {
    gst_object_ref (clock);
    base_time = GST_ELEMENT_CAST (src)->base_time;
  }
  GST_OBJECT_UNLOCK (src);

  if (clock != NULL) {
    /* running time = clock time - base time */
    GST_BUFFER_PTS (outbuf) = gst_clock_get_time (clock) - base_time;
    gst_object_unref (clock);
  } else {
    GST_BUFFER_PTS (outbuf) = GST_CLOCK_TIME_NONE;
  }

  gst_buffer_unmap (outbuf, &info);

  return GST_FLOW_OK;
}
/* Push a text buffer describing a detected code into the appsrc, stamped
 * with the pipeline running time so it can be synchronized against other
 * live sources. Does nothing (with an error log) when no clock is set. */
static void
kms_send_data_new_code (KmsSendData * self, guint64 ts, gchar * type,
    gchar * symbol)
{
  GstClockTime running_time, base_time, now;
  GstClock *clock;
  GstBuffer *buffer;
  gchar *buffer_data;
  GstFlowReturn ret;

  /* GST_ELEMENT_CLOCK() returns a borrowed pointer; take the object lock and
   * a ref so another thread cannot drop the clock under us, and read
   * base_time in the same critical section. */
  GST_OBJECT_LOCK (self->priv->appsrc);
  clock = GST_ELEMENT_CLOCK (self->priv->appsrc);
  if (clock != NULL)
    gst_object_ref (clock);
  base_time = GST_ELEMENT_CAST (self->priv->appsrc)->base_time;
  GST_OBJECT_UNLOCK (self->priv->appsrc);

  if (clock == NULL) {
    GST_ERROR_OBJECT (GST_ELEMENT (self), "no clock, we can't sync");
    return;
  }

  now = gst_clock_get_time (clock);
  gst_object_unref (clock);
  running_time = now - base_time;

  /* %lu is wrong for guint64 on 32-bit 'long' platforms; use the portable
   * GLib format macro. */
  buffer_data =
      g_strdup_printf ("Code detected in time %" G_GUINT64_FORMAT
      ", type %s, symbol %s", ts, type, symbol);
  buffer = gst_buffer_new_wrapped (buffer_data, strlen (buffer_data));

  /* Live sources always timestamp their buffers with the running_time of the */
  /* pipeline. This is needed to be able to match the timestamps of different */
  /* live sources in order to synchronize them. */
  GST_BUFFER_PTS (buffer) = running_time;

  g_signal_emit_by_name (self->priv->appsrc, "push-buffer", buffer, &ret);

  if (ret != GST_FLOW_OK) {
    /* something wrong */
    GST_WARNING ("Could not send buffer");
  }

  gst_buffer_unref (buffer);
}
/* appsrc "need-data" callback: push a small numbered test buffer, stamped
 * with the pipeline running time so it can be synchronized against other
 * live sources. Does nothing (with an error log) when no clock is set. */
static void
kms_dummy_src_feed_data_channel (GstElement * appsrc, guint unused_size,
    gpointer data)
{
  KmsDummySrc *self = KMS_DUMMY_SRC (data);
  GstClockTime running_time, base_time, now;
  GstClock *clock;
  GstBuffer *buffer;
  gchar *buffer_data;
  GstFlowReturn ret;

  /* GST_ELEMENT_CLOCK() returns a borrowed pointer; take the object lock and
   * a ref so another thread cannot drop the clock under us, and read
   * base_time in the same critical section. */
  GST_OBJECT_LOCK (appsrc);
  clock = GST_ELEMENT_CLOCK (appsrc);
  if (clock != NULL)
    gst_object_ref (clock);
  base_time = GST_ELEMENT_CAST (appsrc)->base_time;
  GST_OBJECT_UNLOCK (appsrc);

  if (clock == NULL) {
    GST_ERROR_OBJECT (GST_ELEMENT (data), "no clock, we can't sync");
    return;
  }

  now = gst_clock_get_time (clock);
  gst_object_unref (clock);
  running_time = now - base_time;

  buffer_data = g_strdup_printf ("Test buffer %d",
      g_atomic_int_add (&self->priv->data_index, 1));
  buffer = gst_buffer_new_wrapped (buffer_data, strlen (buffer_data));

  /* Live sources always timestamp their buffers with the running_time of the */
  /* pipeline. This is needed to be able to match the timestamps of different */
  /* live sources in order to synchronize them. */
  GST_BUFFER_PTS (buffer) = running_time;

  g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);

  if (ret != GST_FLOW_OK) {
    /* something wrong */
    GST_WARNING ("Could not send buffer");
  }

  gst_buffer_unref (buffer);
}
/* Start the DeckLink capture streams once both the video side (and, if an
 * audio src shares this input, the audio side) are enabled and the element
 * is going to PLAYING. Called with self->input->lock held. */
static void
gst_decklink_video_src_start_streams (GstElement * element)
{
  GstDecklinkVideoSrc *self = GST_DECKLINK_VIDEO_SRC_CAST (element);
  HRESULT res;

  if (self->input->video_enabled && (!self->input->audiosrc
          || self->input->audio_enabled)
      && (GST_STATE (self) == GST_STATE_PLAYING
          || GST_STATE_PENDING (self) == GST_STATE_PLAYING)) {
    GST_DEBUG_OBJECT (self, "Starting streams");

    res = self->input->input->StartStreams ();
    if (res != S_OK) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED,
          (NULL), ("Failed to start streams: 0x%08x", res));
      return;
    }

    self->input->started = TRUE;
    self->input->clock_restart = TRUE;

    // Need to unlock to get the clock time
    // (dropping input->lock here avoids a lock-order inversion with the
    // clock's own locking; callers must expect this window)
    g_mutex_unlock (&self->input->lock);

    // Current times of internal and external clock when we go to
    // playing. We need this to convert the pipeline running time
    // to the running time of the hardware
    //
    // We can't use the normal base time for the external clock
    // because we might go to PLAYING later than the pipeline
    self->internal_base_time =
        gst_clock_get_internal_time (self->input->clock);
    self->external_base_time =
        gst_clock_get_internal_time (GST_ELEMENT_CLOCK (self));

    g_mutex_lock (&self->input->lock);
  } else {
    GST_DEBUG_OBJECT (self, "Not starting streams yet");
  }
}
/* Produce one frame from the V4L2 device.
 *
 * Allocates a buffer via the base class, lets the buffer pool fill it
 * (retrying on corrupted frames), then rewrites the driver timestamp into
 * a pipeline running-time timestamp adjusted for the device capture delay.
 * Also advances the controller time used by gst_object_sync_values(). */
static GstFlowReturn
gst_v4l2src_create (GstPushSrc * src, GstBuffer ** buf)
{
  GstV4l2Src *v4l2src = GST_V4L2SRC (src);
  GstV4l2Object *obj = v4l2src->v4l2object;
  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL_CAST (obj->pool);
  GstFlowReturn ret;
  GstClock *clock;
  GstClockTime abs_time, base_time, timestamp, duration;
  GstClockTime delay;

  do {
    ret = GST_BASE_SRC_CLASS (parent_class)->alloc (GST_BASE_SRC (src), 0,
        obj->info.size, buf);

    if (G_UNLIKELY (ret != GST_FLOW_OK))
      goto alloc_failed;

    /* corrupted frames are dropped and re-captured */
    ret = gst_v4l2_buffer_pool_process (pool, buf);

  } while (ret == GST_V4L2_FLOW_CORRUPTED_BUFFER);

  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto error;

  /* driver-provided capture timestamp (monotonic or system domain) */
  timestamp = GST_BUFFER_TIMESTAMP (*buf);
  duration = obj->duration;

  /* timestamps, LOCK to get clock and base time. */
  /* FIXME: element clock and base_time is rarely changing */
  GST_OBJECT_LOCK (v4l2src);
  if ((clock = GST_ELEMENT_CLOCK (v4l2src))) {
    /* we have a clock, get base time and ref clock */
    base_time = GST_ELEMENT (v4l2src)->base_time;
    gst_object_ref (clock);
  } else {
    /* no clock, can't set timestamps */
    base_time = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (v4l2src);

  /* sample pipeline clock */
  if (clock) {
    abs_time = gst_clock_get_time (clock);
    gst_object_unref (clock);
  } else {
    abs_time = GST_CLOCK_TIME_NONE;
  }

  if (timestamp != GST_CLOCK_TIME_NONE) {
    struct timespec now;
    GstClockTime gstnow;

    /* v4l2 specs say to use the system time although many drivers switched to
     * the more desirable monotonic time. We first try to use the monotonic time
     * and see how that goes */
    clock_gettime (CLOCK_MONOTONIC, &now);
    gstnow = GST_TIMESPEC_TO_TIME (now);

    if (gstnow < timestamp && (timestamp - gstnow) > (10 * GST_SECOND)) {
      GTimeVal now;

      /* very large diff, fall back to system time */
      g_get_current_time (&now);
      gstnow = GST_TIMEVAL_TO_TIME (now);
    }

    /* delay = how long ago the frame was actually captured */
    if (gstnow > timestamp) {
      delay = gstnow - timestamp;
    } else {
      delay = 0;
    }

    GST_DEBUG_OBJECT (v4l2src, "ts: %" GST_TIME_FORMAT " now %" GST_TIME_FORMAT
        " delay %" GST_TIME_FORMAT, GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (gstnow), GST_TIME_ARGS (delay));
  } else {
    /* we assume 1 frame latency otherwise */
    if (GST_CLOCK_TIME_IS_VALID (duration))
      delay = duration;
    else
      delay = 0;
  }

  /* set buffer metadata */
  GST_BUFFER_OFFSET (*buf) = v4l2src->offset++;
  GST_BUFFER_OFFSET_END (*buf) = v4l2src->offset;

  if (G_LIKELY (abs_time != GST_CLOCK_TIME_NONE)) {
    /* the time now is the time of the clock minus the base time */
    timestamp = abs_time - base_time;

    /* adjust for delay in the device */
    if (timestamp > delay)
      timestamp -= delay;
    else
      timestamp = 0;
  } else {
    timestamp = GST_CLOCK_TIME_NONE;
  }

  /* activate settings for next frame */
  if (GST_CLOCK_TIME_IS_VALID (duration)) {
    v4l2src->ctrl_time += duration;
  } else {
    /* this is not very good (as it should be the next timestamp),
     * still good enough for linear fades (as long as it is not -1) */
    v4l2src->ctrl_time = timestamp;
  }
  gst_object_sync_values (GST_OBJECT (src), v4l2src->ctrl_time);

  GST_INFO_OBJECT (src, "sync to %" GST_TIME_FORMAT " out ts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (v4l2src->ctrl_time), GST_TIME_ARGS (timestamp));

  GST_BUFFER_TIMESTAMP (*buf) = timestamp;
  GST_BUFFER_DURATION (*buf) = duration;

  return ret;

  /* ERROR */
alloc_failed:
  {
    if (ret != GST_FLOW_FLUSHING)
      GST_ELEMENT_ERROR (src, RESOURCE, NO_SPACE_LEFT,
          ("Failed to allocate a buffer"), (NULL));
    return ret;
  }
error:
  {
    if (ret == GST_V4L2_FLOW_LAST_BUFFER) {
      GST_ELEMENT_ERROR (src, RESOURCE, FAILED,
          ("Driver returned a buffer with no payload, this most likely "
              "indicate a bug in the driver."), (NULL));
      ret = GST_FLOW_ERROR;
    } else {
      GST_DEBUG_OBJECT (src, "error processing buffer %d (%s)", ret,
          gst_flow_get_name (ret));
    }
    return ret;
  }
}
/* GObject property setter for the voice-activity detector.
 *
 * Threshold/silence related properties are mutated under the recursive
 * mutex because they interact with the streaming thread; forcing silence
 * also emits synthetic silence/speech transition events. */
static void
gst_vader_set_property(GObject * object, guint prop_id,
                       const GValue * value, GParamSpec * pspec)
{
    GstVader *filter;

    g_return_if_fail(GST_IS_VADER(object));
    filter = GST_VADER(object);

    switch (prop_id) {
    case PROP_THRESHOLD:
        /* property is a [0,1] double; stored as a 16-bit sample level */
        filter->threshold_level = (gint)(g_value_get_double(value) * 32768.0);
        break;
    case PROP_AUTO_THRESHOLD:
        /* We are going to muck around with things... */
        g_static_rec_mutex_lock(&filter->mtx);
        filter->auto_threshold = g_value_get_boolean(value);
        /* Setting this to TRUE re-initializes auto calibration. */
        if (filter->auto_threshold) {
            /* We have to be in silence mode to calibrate. */
            filter->silent_prev = filter->silent;
            filter->silent = TRUE;
            /* Do "artificial" sil-speech or speech-sil transitions. */
            /* NOTE(review): GST_ELEMENT_CLOCK() may be NULL before the
             * element is playing — presumably properties are only toggled
             * on a running pipeline; confirm. */
            if (filter->silent != filter->silent_prev) {
                gst_vader_transition(filter,
                                     gst_clock_get_time(GST_ELEMENT_CLOCK(filter)));
            }
            /* Reset counters and such. */
            filter->threshold_level = -1;
            memset(filter->window, 0, sizeof(*filter->window) * VADER_WINDOW);
            filter->silence_mean = 0;
            filter->silence_stddev = 0;
            filter->silence_frames = 0;
        }
        g_static_rec_mutex_unlock(&filter->mtx);
        break;
    case PROP_SILENT:
        /* We are going to muck around with things... */
        g_static_rec_mutex_lock(&filter->mtx);
        filter->silent_prev = filter->silent;
        filter->silent = g_value_get_boolean(value);
        /* Do "artificial" sil-speech or speech-sil transitions. */
        if (filter->silent != filter->silent_prev) {
            gst_vader_transition(filter,
                                 gst_clock_get_time(GST_ELEMENT_CLOCK(filter)));
            /* Also flush the voting window so we don't go right back into speech. */
            memset(filter->window, 0, sizeof(*filter->window) * VADER_WINDOW);
        }
        g_static_rec_mutex_unlock(&filter->mtx);
        break;
    case PROP_RUN_LENGTH:
        filter->threshold_length = g_value_get_uint64(value);
        break;
    case PROP_PRE_LENGTH:
        filter->pre_length = g_value_get_uint64(value);
        break;
    case PROP_DUMPDIR:
        g_free(filter->dumpdir);
        filter->dumpdir = g_strdup(g_value_get_string(value));
        break;
    default:
        G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
        break;
    }
}
/* Source-pad streaming task for the live adder.
 *
 * Loop body: wait (under the object lock) until a mixed buffer is queued,
 * then sleep on the pipeline clock until that buffer's sync time (timestamp
 * + base time + configured/peer latency), then push it downstream. Handles
 * flush/EOS/clock errors by pausing the task. The object lock protects the
 * queue, srcresult, clock_id and next_timestamp; it is released around
 * clock waits and pad pushes. */
static void
gst_live_adder_loop (gpointer data)
{
  GstLiveAdder *adder = GST_LIVE_ADDER (data);
  GstClockTime buffer_timestamp = 0;
  GstClockTime sync_time = 0;
  GstClock *clock = NULL;
  GstClockID id = NULL;
  GstClockReturn ret;
  GstBuffer *buffer = NULL;
  GstFlowReturn result;
  GstEvent *newseg_event = NULL;

  GST_OBJECT_LOCK (adder);

again:
  /* wait for a buffer to show up, or for shutdown/EOS */
  for (;;) {
    if (adder->srcresult != GST_FLOW_OK)
      goto flushing;
    if (!g_queue_is_empty (adder->buffers))
      break;
    if (check_eos_locked (adder))
      goto eos;
    g_cond_wait (adder->not_empty_cond, GST_OBJECT_GET_LOCK (adder));
  }

  buffer_timestamp = GST_BUFFER_TIMESTAMP (g_queue_peek_head (adder->buffers));

  clock = GST_ELEMENT_CLOCK (adder);

  /* If we have no clock, then we can't do anything.. error */
  if (!clock) {
    if (adder->playing)
      goto no_clock;
    else
      goto push_buffer;
  }

  GST_DEBUG_OBJECT (adder, "sync to timestamp %" GST_TIME_FORMAT,
      GST_TIME_ARGS (buffer_timestamp));

  /* absolute clock time at which this buffer should be pushed */
  sync_time = buffer_timestamp + GST_ELEMENT_CAST (adder)->base_time;
  /* add latency, this includes our own latency and the peer latency. */
  sync_time += adder->latency_ms * GST_MSECOND;
  sync_time += adder->peer_latency;

  /* create an entry for the clock */
  id = adder->clock_id = gst_clock_new_single_shot_id (clock, sync_time);
  GST_OBJECT_UNLOCK (adder);

  ret = gst_clock_id_wait (id, NULL);

  GST_OBJECT_LOCK (adder);
  /* and free the entry */
  gst_clock_id_unref (id);
  adder->clock_id = NULL;

  /* at this point, the clock could have been unlocked by a timeout, a new
   * head element was added to the queue or because we are shutting down. Check
   * for shutdown first. */
  if (adder->srcresult != GST_FLOW_OK)
    goto flushing;

  if (ret == GST_CLOCK_UNSCHEDULED) {
    GST_DEBUG_OBJECT (adder,
        "Wait got unscheduled, will retry to push with new buffer");
    goto again;
  }

  if (ret != GST_CLOCK_OK && ret != GST_CLOCK_EARLY)
    goto clock_error;

push_buffer:
  buffer = g_queue_pop_head (adder->buffers);

  if (!buffer)
    goto again;

  /*
   * We make sure the timestamps are exactly contiguous
   * If its only small skew (due to rounding errors), we correct it
   * silently. Otherwise we put the discont flag
   */
  if (GST_CLOCK_TIME_IS_VALID (adder->next_timestamp) &&
      GST_BUFFER_TIMESTAMP (buffer) != adder->next_timestamp) {
    GstClockTimeDiff diff = GST_CLOCK_DIFF (GST_BUFFER_TIMESTAMP (buffer),
        adder->next_timestamp);
    if (diff < 0)
      diff = -diff;

    /* skew below one sample period is silently corrected */
    if (diff < GST_SECOND / adder->rate) {
      GST_BUFFER_TIMESTAMP (buffer) = adder->next_timestamp;
      GST_DEBUG_OBJECT (adder, "Correcting slight skew");
      GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DISCONT);
    } else {
      GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
      GST_DEBUG_OBJECT (adder, "Expected buffer at %" GST_TIME_FORMAT
          ", but is at %" GST_TIME_FORMAT ", setting discont",
          GST_TIME_ARGS (adder->next_timestamp),
          GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)));
    }
  } else {
    GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DISCONT);
  }

  GST_BUFFER_OFFSET (buffer) = GST_BUFFER_OFFSET_NONE;
  GST_BUFFER_OFFSET_END (buffer) = GST_BUFFER_OFFSET_NONE;

  if (GST_BUFFER_DURATION_IS_VALID (buffer))
    adder->next_timestamp = GST_BUFFER_TIMESTAMP (buffer) +
        GST_BUFFER_DURATION (buffer);
  else
    adder->next_timestamp = GST_CLOCK_TIME_NONE;

  if (adder->segment_pending) {
    /*
     * We set the start at 0, because we re-timestamps to the running time
     */
    newseg_event = gst_event_new_new_segment_full (FALSE, 1.0, 1.0,
        GST_FORMAT_TIME, 0, -1, 0);

    adder->segment_pending = FALSE;
  }

  GST_OBJECT_UNLOCK (adder);

  if (newseg_event)
    gst_pad_push_event (adder->srcpad, newseg_event);

  GST_LOG_OBJECT (adder, "About to push buffer time:%" GST_TIME_FORMAT
      " duration:%" GST_TIME_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)));

  result = gst_pad_push (adder->srcpad, buffer);
  if (result != GST_FLOW_OK)
    goto pause;

  return;

flushing:
  {
    GST_DEBUG_OBJECT (adder, "we are flushing");
    gst_pad_pause_task (adder->srcpad);
    GST_OBJECT_UNLOCK (adder);
    return;
  }

clock_error:
  {
    gst_pad_pause_task (adder->srcpad);
    GST_OBJECT_UNLOCK (adder);
    GST_ELEMENT_ERROR (adder, STREAM, MUX, ("Error with the clock"),
        ("Error with the clock: %d", ret));
    GST_ERROR_OBJECT (adder, "Error with the clock: %d", ret);
    return;
  }

no_clock:
  {
    gst_pad_pause_task (adder->srcpad);
    GST_OBJECT_UNLOCK (adder);
    GST_ELEMENT_ERROR (adder, STREAM, MUX, ("No available clock"),
        ("No available clock"));
    GST_ERROR_OBJECT (adder, "No available clock");
    return;
  }

pause:
  {
    GST_DEBUG_OBJECT (adder, "pausing task, reason %s",
        gst_flow_get_name (result));

    GST_OBJECT_LOCK (adder);

    /* store result */
    adder->srcresult = result;
    /* we don't post errors or anything because upstream will do that for us
     * when we pass the return value upstream. */
    gst_pad_pause_task (adder->srcpad);
    GST_OBJECT_UNLOCK (adder);
    return;
  }

eos:
  {
    /* store result, we are flushing now */
    GST_DEBUG_OBJECT (adder, "We are EOS, pushing EOS downstream");
    adder->srcresult = GST_FLOW_UNEXPECTED;
    gst_pad_pause_task (adder->srcpad);
    GST_OBJECT_UNLOCK (adder);
    gst_pad_push_event (adder->srcpad, gst_event_new_eos ());
    return;
  }
}
/* messages are treated as warnings bcz those code should not be checked in.
 * and no error handling will supported for same manner.
 *
 * Debug pad probe: dumps, depending on the MM_PROBE_* bits in u_data, the
 * buffer timestamp, size, duration, caps and/or the element clock time, and
 * optionally drops the buffer (returns FALSE). */
gboolean
__util_gst_pad_probe(GstPad *pad, GstBuffer *buffer, gpointer u_data)
{
	/* was a raw (gint) cast of the pointer, which truncates and warns on
	 * LP64 platforms; GPOINTER_TO_INT is the portable way back */
	gint flag = GPOINTER_TO_INT(u_data);
	GstElement* parent = NULL;
	gboolean ret = TRUE;

	/* show name as default */
	parent = (GstElement*)gst_object_get_parent(GST_OBJECT(pad));
	debug_warning("PAD PROBE : %s:%s\n", GST_ELEMENT_NAME(parent), GST_PAD_NAME(pad));

	/* show time stamp */
	if ( flag & MM_PROBE_TIMESTAMP )
	{
		debug_warning("ts : %u:%02u:%02u.%09u\n", GST_TIME_ARGS(GST_BUFFER_TIMESTAMP(buffer)));
	}

	/* show buffer size */
	if ( flag & MM_PROBE_BUFFERSIZE )
	{
		/* was "%ud", which printed a spurious literal 'd' after the size */
		debug_warning("buffer size : %u\n", GST_BUFFER_SIZE(buffer));
	}

	/* show buffer duration */
	if ( flag & MM_PROBE_BUFFER_DURATION )
	{
		debug_warning("dur : %lld\n", GST_BUFFER_DURATION(buffer));
	}

	/* show buffer caps */
	if ( flag & MM_PROBE_CAPS )
	{
		/* gst_caps_to_string() returns a newly-allocated string; the old
		 * code leaked it on every probed buffer */
		gchar *caps_str = gst_caps_to_string(GST_BUFFER_CAPS(buffer));
		debug_warning("caps : %s\n", caps_str);
		g_free(caps_str);
	}

	/* drop buffer if flag is on */
	if ( flag & MM_PROBE_DROP_BUFFER )
	{
		debug_warning("dropping\n");
		ret = FALSE;
	}

	/* show clock time */
	if ( flag & MM_PROBE_CLOCK_TIME )
	{
		GstClock* clock = NULL;
		GstClockTime now = GST_CLOCK_TIME_NONE;

		clock = GST_ELEMENT_CLOCK ( parent );

		if ( clock )
		{
			now = gst_clock_get_time( clock );
			debug_warning("clock time : %" GST_TIME_FORMAT "\n", GST_TIME_ARGS( now ));
		}
	}

	if ( parent )
		gst_object_unref(parent);

	return ret;
}
/* Produce one frame from the V4L2 device (newer variant with buggy-driver
 * timestamp detection).
 *
 * Allocates and fills a buffer as in the older version, but additionally
 * validates the driver timestamp (future, backwards, or uncorrelated
 * timestamps mark the driver as bad and fall back to 1-frame latency),
 * handles driver-supplied buffer offsets/sequence numbers, and posts a QoS
 * message when frame loss is detected. */
static GstFlowReturn
gst_v4l2src_create (GstPushSrc * src, GstBuffer ** buf)
{
  GstV4l2Src *v4l2src = GST_V4L2SRC (src);
  GstV4l2Object *obj = v4l2src->v4l2object;
  GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL_CAST (obj->pool);
  GstFlowReturn ret;
  GstClock *clock;
  GstClockTime abs_time, base_time, timestamp, duration;
  GstClockTime delay;
  GstMessage *qos_msg;

  do {
    ret = GST_BASE_SRC_CLASS (parent_class)->alloc (GST_BASE_SRC (src), 0,
        obj->info.size, buf);

    if (G_UNLIKELY (ret != GST_FLOW_OK))
      goto alloc_failed;

    /* corrupted frames are dropped and re-captured */
    ret = gst_v4l2_buffer_pool_process (pool, buf);

  } while (ret == GST_V4L2_FLOW_CORRUPTED_BUFFER);

  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto error;

  /* driver-provided capture timestamp (monotonic or system domain) */
  timestamp = GST_BUFFER_TIMESTAMP (*buf);
  duration = obj->duration;

  /* timestamps, LOCK to get clock and base time. */
  /* FIXME: element clock and base_time is rarely changing */
  GST_OBJECT_LOCK (v4l2src);
  if ((clock = GST_ELEMENT_CLOCK (v4l2src))) {
    /* we have a clock, get base time and ref clock */
    base_time = GST_ELEMENT (v4l2src)->base_time;
    gst_object_ref (clock);
  } else {
    /* no clock, can't set timestamps */
    base_time = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (v4l2src);

  /* sample pipeline clock */
  if (clock) {
    abs_time = gst_clock_get_time (clock);
    gst_object_unref (clock);
  } else {
    abs_time = GST_CLOCK_TIME_NONE;
  }

retry:
  if (!v4l2src->has_bad_timestamp && timestamp != GST_CLOCK_TIME_NONE) {
    struct timespec now;
    GstClockTime gstnow;

    /* v4l2 specs say to use the system time although many drivers switched to
     * the more desirable monotonic time. We first try to use the monotonic time
     * and see how that goes */
    clock_gettime (CLOCK_MONOTONIC, &now);
    gstnow = GST_TIMESPEC_TO_TIME (now);

    if (timestamp > gstnow || (gstnow - timestamp) > (10 * GST_SECOND)) {
      GTimeVal now;

      /* very large diff, fall back to system time */
      g_get_current_time (&now);
      gstnow = GST_TIMEVAL_TO_TIME (now);
    }

    /* Detect buggy drivers here, and stop using their timestamp. Failing any
     * of these condition would imply a very buggy driver:
     *   - Timestamp in the future
     *   - Timestamp is going backward compare to last seen timestamp
     *   - Timestamp is jumping forward for less then a frame duration
     *   - Delay is bigger then the actual timestamp
     * */
    if (timestamp > gstnow) {
      GST_WARNING_OBJECT (v4l2src,
          "Timestamp in the future detected, ignoring driver timestamps");
      v4l2src->has_bad_timestamp = TRUE;
      goto retry;
    }

    if (v4l2src->last_timestamp > timestamp) {
      GST_WARNING_OBJECT (v4l2src,
          "Timestamp going backward, ignoring driver timestamps");
      v4l2src->has_bad_timestamp = TRUE;
      goto retry;
    }

    delay = gstnow - timestamp;

    if (delay > timestamp) {
      GST_WARNING_OBJECT (v4l2src,
          "Timestamp does not correlate with any clock, ignoring driver timestamps");
      v4l2src->has_bad_timestamp = TRUE;
      goto retry;
    }

    /* Save last timestamp for sanity checks */
    v4l2src->last_timestamp = timestamp;

    GST_DEBUG_OBJECT (v4l2src, "ts: %" GST_TIME_FORMAT " now %" GST_TIME_FORMAT
        " delay %" GST_TIME_FORMAT, GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (gstnow), GST_TIME_ARGS (delay));
  } else {
    /* we assume 1 frame latency otherwise */
    if (GST_CLOCK_TIME_IS_VALID (duration))
      delay = duration;
    else
      delay = 0;
  }

  /* set buffer metadata */

  if (G_LIKELY (abs_time != GST_CLOCK_TIME_NONE)) {
    /* the time now is the time of the clock minus the base time */
    timestamp = abs_time - base_time;

    /* adjust for delay in the device */
    if (timestamp > delay)
      timestamp -= delay;
    else
      timestamp = 0;
  } else {
    timestamp = GST_CLOCK_TIME_NONE;
  }

  /* activate settings for next frame */
  if (GST_CLOCK_TIME_IS_VALID (duration)) {
    v4l2src->ctrl_time += duration;
  } else {
    /* this is not very good (as it should be the next timestamp),
     * still good enough for linear fades (as long as it is not -1) */
    v4l2src->ctrl_time = timestamp;
  }
  gst_object_sync_values (GST_OBJECT (src), v4l2src->ctrl_time);

  GST_INFO_OBJECT (src, "sync to %" GST_TIME_FORMAT " out ts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (v4l2src->ctrl_time), GST_TIME_ARGS (timestamp));

  /* use generated offset values only if there are not already valid ones
   * set by the v4l2 device */
  if (!GST_BUFFER_OFFSET_IS_VALID (*buf)
      || !GST_BUFFER_OFFSET_END_IS_VALID (*buf)) {
    GST_BUFFER_OFFSET (*buf) = v4l2src->offset++;
    GST_BUFFER_OFFSET_END (*buf) = v4l2src->offset;
  } else {
    /* adjust raw v4l2 device sequence, will restart at null in case of renegotiation
     * (streamoff/streamon) */
    GST_BUFFER_OFFSET (*buf) += v4l2src->renegotiation_adjust;
    GST_BUFFER_OFFSET_END (*buf) += v4l2src->renegotiation_adjust;
    /* check for frame loss with given (from v4l2 device) buffer offset */
    if ((v4l2src->offset != 0)
        && (GST_BUFFER_OFFSET (*buf) != (v4l2src->offset + 1))) {
      guint64 lost_frame_count = GST_BUFFER_OFFSET (*buf) - v4l2src->offset - 1;
      GST_WARNING_OBJECT (v4l2src,
          "lost frames detected: count = %" G_GUINT64_FORMAT " - ts: %"
          GST_TIME_FORMAT, lost_frame_count, GST_TIME_ARGS (timestamp));

      /* tell the pipeline about the dropped frames */
      qos_msg = gst_message_new_qos (GST_OBJECT_CAST (v4l2src), TRUE,
          GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, timestamp,
          GST_CLOCK_TIME_IS_VALID (duration) ? lost_frame_count *
          duration : GST_CLOCK_TIME_NONE);
      gst_element_post_message (GST_ELEMENT_CAST (v4l2src), qos_msg);

    }
    v4l2src->offset = GST_BUFFER_OFFSET (*buf);
  }

  GST_BUFFER_TIMESTAMP (*buf) = timestamp;
  GST_BUFFER_DURATION (*buf) = duration;

  return ret;

  /* ERROR */
alloc_failed:
  {
    if (ret != GST_FLOW_FLUSHING)
      GST_ELEMENT_ERROR (src, RESOURCE, NO_SPACE_LEFT,
          ("Failed to allocate a buffer"), (NULL));
    return ret;
  }
error:
  {
    if (ret == GST_V4L2_FLOW_LAST_BUFFER) {
      GST_ELEMENT_ERROR (src, RESOURCE, FAILED,
          ("Driver returned a buffer with no payload, this most likely "
              "indicate a bug in the driver."), (NULL));
      ret = GST_FLOW_ERROR;
    } else {
      GST_DEBUG_OBJECT (src, "error processing buffer %d (%s)", ret,
          gst_flow_get_name (ret));
    }
    return ret;
  }
}
/* GstElement state-change handler for the Mimic encoder.
 *
 * Downward/upward specifics: segment and last-buffer time are reset going
 * to READY/PAUSED; the paused-mode task is stopped (and any pending clock
 * wait unscheduled) when leaving PLAYING, and started when entering PLAYING
 * with paused-mode enabled (which requires a clock). */
static GstStateChangeReturn
gst_mim_enc_change_state (GstElement * element, GstStateChange transition)
{
  GstMimEnc *mimenc = GST_MIM_ENC (element);
  GstStateChangeReturn ret;
  gboolean paused_mode;

  switch (transition) {
    case GST_STATE_CHANGE_NULL_TO_READY:
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      GST_OBJECT_LOCK (mimenc);
      gst_segment_init (&mimenc->segment, GST_FORMAT_UNDEFINED);
      mimenc->last_buffer = GST_CLOCK_TIME_NONE;
      GST_OBJECT_UNLOCK (mimenc);
      break;
    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
      GST_OBJECT_LOCK (mimenc);
      /* wake up the paused-mode task if it is waiting on the clock */
      if (mimenc->clock_id)
        gst_clock_id_unschedule (mimenc->clock_id);
      mimenc->stop_paused_mode = TRUE;
      GST_OBJECT_UNLOCK (mimenc);
      gst_pad_pause_task (mimenc->srcpad);
      break;
    default:
      break;
  }

  ret = GST_ELEMENT_CLASS (gst_mim_enc_parent_class)->change_state (element,
      transition);
  if (ret == GST_STATE_CHANGE_FAILURE)
    return ret;

  switch (transition) {
    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
      GST_OBJECT_LOCK (mimenc);
      mimenc->stop_paused_mode = FALSE;
      /* snapshot the flag under the lock; the task is started after unlock */
      paused_mode = mimenc->paused_mode;
      if (paused_mode) {
        /* paused-mode needs a clock to schedule keepalive frames */
        if (!GST_ELEMENT_CLOCK (mimenc)) {
          GST_OBJECT_UNLOCK (mimenc);
          GST_ELEMENT_ERROR (mimenc, RESOURCE, FAILED,
              ("Using paused-mode requires a clock, but no clock was provided"
                  " to the element"), (NULL));
          return GST_STATE_CHANGE_FAILURE;
        }
        /* initialize last_buffer to the current running time */
        if (mimenc->last_buffer == GST_CLOCK_TIME_NONE)
          mimenc->last_buffer = gst_clock_get_time (GST_ELEMENT_CLOCK (mimenc))
              - GST_ELEMENT_CAST (mimenc)->base_time;
      }
      GST_OBJECT_UNLOCK (mimenc);
      if (paused_mode) {
        if (!gst_pad_start_task (mimenc->srcpad, paused_mode_task, mimenc,
                NULL)) {
          ret = GST_STATE_CHANGE_FAILURE;
          GST_ERROR_OBJECT (mimenc, "Can not start task");
        }
      }
      break;
    case GST_STATE_CHANGE_READY_TO_NULL:
      gst_mim_enc_reset (mimenc);
      break;
    default:
      break;
  }

  return ret;
}
/* Timestamp a captured buffer against the pipeline clock.
 *
 * Computes running time (clock time minus base time), subtracts one frame
 * duration of assumed capture latency, and snaps the result to a multiple of
 * the frame duration relative to the previous buffer so that offsets stay
 * frame-accurate. Returns FALSE when the buffer should be dropped (clock went
 * backwards, or two buffers landed in the same frame period).
 *
 * NOTE(review): presentation_time is assumed to be on the same clock/base as
 * the element clock — the diff below is only logged, not used; confirm. */
static gboolean
gst_ks_video_src_timestamp_buffer (GstKsVideoSrc * self, GstBuffer * buf,
    GstClockTime presentation_time)
{
  GstKsVideoSrcPrivate *priv = GST_KS_VIDEO_SRC_GET_PRIVATE (self);
  GstClockTime duration;
  GstClock *clock;
  GstClockTime timestamp;

  duration = gst_ks_video_device_get_duration (priv->device);

  /* grab clock + base_time atomically under the object lock; keep a ref so
   * the clock survives after unlocking */
  GST_OBJECT_LOCK (self);
  clock = GST_ELEMENT_CLOCK (self);
  if (clock != NULL) {
    gst_object_ref (clock);
    /* temporarily stash base_time in 'timestamp' */
    timestamp = GST_ELEMENT (self)->base_time;

    /* convert driver presentation time to (clamped) running time */
    if (GST_CLOCK_TIME_IS_VALID (presentation_time)) {
      if (presentation_time > GST_ELEMENT (self)->base_time)
        presentation_time -= GST_ELEMENT (self)->base_time;
      else
        presentation_time = 0;
    }
  } else {
    /* no clock: buffer goes out without a usable timestamp */
    timestamp = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (self);

  if (clock != NULL) {
    /* The time according to the current clock */
    timestamp = gst_clock_get_time (clock) - timestamp;
    /* subtract one frame of capture latency, clamped at zero */
    if (timestamp > duration)
      timestamp -= duration;
    else
      timestamp = 0;

    if (GST_CLOCK_TIME_IS_VALID (presentation_time)) {
      /*
       * We don't use this for anything yet, need to ponder how to deal
       * with pins that use an internal clock and timestamp from 0.
       */
      GstClockTimeDiff diff = GST_CLOCK_DIFF (presentation_time, timestamp);
      GST_DEBUG_OBJECT (self, "diff between gst and driver timestamp: %"
          G_GINT64_FORMAT, diff);
    }

    gst_object_unref (clock);
    clock = NULL;

    /* Unless it's the first frame, align the current timestamp on a multiple
     * of duration since the previous */
    if (GST_CLOCK_TIME_IS_VALID (priv->prev_ts)) {
      GstClockTime delta;
      guint delta_remainder, delta_offset;

      /* REVISIT: I've seen this happen with the GstSystemClock on Windows,
       * scary... */
      if (timestamp < priv->prev_ts) {
        GST_INFO_OBJECT (self, "clock is ticking backwards");
        return FALSE;
      }

      /* Round to a duration boundary: snap down if within the first third of
       * a frame period, otherwise snap up to the next boundary */
      delta = timestamp - priv->prev_ts;
      delta_remainder = delta % duration;
      if (delta_remainder < duration / 3)
        timestamp -= delta_remainder;
      else
        timestamp += duration - delta_remainder;

      /* How many frames are we off then? */
      delta = timestamp - priv->prev_ts;
      delta_offset = delta / duration;

      if (delta_offset == 1)    /* perfect */
        GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
      else if (delta_offset > 1) {
        guint lost = delta_offset - 1;
        GST_INFO_OBJECT (self, "lost %d frame%s, setting discont flag",
            lost, (lost > 1) ? "s" : "");
        GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
      } else if (delta_offset == 0) {
        /* overproduction, skip this frame */
        GST_INFO_OBJECT (self, "skipping frame");
        return FALSE;
      }

      /* advance the frame counter by however many periods elapsed */
      priv->offset += delta_offset;
    }

    priv->prev_ts = timestamp;
  }

  GST_BUFFER_OFFSET (buf) = priv->offset;
  GST_BUFFER_OFFSET_END (buf) = GST_BUFFER_OFFSET (buf) + 1;
  GST_BUFFER_TIMESTAMP (buf) = timestamp;
  GST_BUFFER_DURATION (buf) = duration;

  return TRUE;
}
/* State-change handler for the DeckLink video sink.
 *
 * Before chaining up: resets/advertises the hardware clock on
 * READY->PAUSED, and slaves the hardware clock to the pipeline clock on
 * PAUSED->PLAYING (unless the pipeline clock already is the hardware or
 * audio clock). After chaining up: tears the clock down on PAUSED->READY,
 * stops scheduled playback on PLAYING->PAUSED, and (re)starts scheduled
 * playback on PAUSED->PLAYING.
 *
 * NOTE(review): the PLAYING_TO_PAUSED branch calls
 * gst_clock_get_time (GST_ELEMENT_CLOCK (self)) without a NULL check;
 * presumably a clock is guaranteed in PLAYING — confirm. */
static GstStateChangeReturn
gst_decklink_video_sink_change_state (GstElement * element,
    GstStateChange transition)
{
  GstDecklinkVideoSink *self = GST_DECKLINK_VIDEO_SINK_CAST (element);
  GstStateChangeReturn ret;

  switch (transition) {
    case GST_STATE_CHANGE_READY_TO_PAUSED:
      /* reset the hardware clock bookkeeping and announce that this element
       * can provide a clock */
      g_mutex_lock (&self->output->lock);
      self->output->clock_start_time = GST_CLOCK_TIME_NONE;
      self->output->clock_last_time = 0;
      self->output->clock_offset = 0;
      g_mutex_unlock (&self->output->lock);
      gst_element_post_message (element,
          gst_message_new_clock_provide (GST_OBJECT_CAST (element),
              self->output->clock, TRUE));
      self->last_render_time = GST_CLOCK_TIME_NONE;
      break;
    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:{
      GstClock *clock, *audio_clock;

      /* slave our hardware clock to the pipeline clock, unless the pipeline
       * clock is already one of ours */
      clock = gst_element_get_clock (GST_ELEMENT_CAST (self));
      audio_clock = gst_decklink_output_get_audio_clock (self->output);
      if (clock && clock != self->output->clock && clock != audio_clock) {
        gst_clock_set_master (self->output->clock, clock);
      }
      if (clock)
        gst_object_unref (clock);
      if (audio_clock)
        gst_object_unref (audio_clock);
      break;
    }
    default:
      break;
  }

  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
  if (ret == GST_STATE_CHANGE_FAILURE)
    return ret;

  switch (transition) {
    case GST_STATE_CHANGE_PAUSED_TO_READY:
      /* we can no longer provide a clock; unslave and reset it */
      gst_element_post_message (element,
          gst_message_new_clock_lost (GST_OBJECT_CAST (element),
              self->output->clock));
      gst_clock_set_master (self->output->clock, NULL);

      g_mutex_lock (&self->output->lock);
      self->output->clock_start_time = GST_CLOCK_TIME_NONE;
      self->output->clock_last_time = 0;
      self->output->clock_offset = 0;
      g_mutex_unlock (&self->output->lock);
      break;
    case GST_STATE_CHANGE_PLAYING_TO_PAUSED:{
      GstClockTime start_time;
      HRESULT res;

      // FIXME: start time is the same for the complete pipeline,
      // but what we need here is the start time of this element!
      start_time = gst_element_get_base_time (element);
      if (start_time != GST_CLOCK_TIME_NONE)
        start_time = gst_clock_get_time (GST_ELEMENT_CLOCK (self)) - start_time;

      // FIXME: This will probably not work
      if (start_time == GST_CLOCK_TIME_NONE)
        start_time = 0;

      convert_to_internal_clock (self, &start_time, NULL);

      // The start time is now the running time when we stopped
      // playback
      GST_DEBUG_OBJECT (self,
          "Stopping scheduled playback at %" GST_TIME_FORMAT,
          GST_TIME_ARGS (start_time));

      g_mutex_lock (&self->output->lock);
      self->output->started = FALSE;
      g_mutex_unlock (&self->output->lock);
      res = self->output->output->StopScheduledPlayback (start_time, 0,
          GST_SECOND);
      if (res != S_OK) {
        GST_ELEMENT_ERROR (self, STREAM, FAILED,
            (NULL), ("Failed to stop scheduled playback: 0x%08x", res));
        ret = GST_STATE_CHANGE_FAILURE;
      }
      self->internal_base_time = GST_CLOCK_TIME_NONE;
      self->external_base_time = GST_CLOCK_TIME_NONE;
      break;
    }
    case GST_STATE_CHANGE_PAUSED_TO_PLAYING:{
      /* delegate to the shared start routine; it expects the output lock
       * to be held */
      g_mutex_lock (&self->output->lock);
      if (self->output->start_scheduled_playback)
        self->output->start_scheduled_playback (self->output->videosink);
      g_mutex_unlock (&self->output->lock);
      break;
    }
    default:
      break;
  }

  return ret;
}
/* Pad task that emits keepalive "pause" frames while no real input arrives.
 *
 * Runs on the srcpad. If more than ~4 seconds have passed since the last
 * buffer (tracked in mimenc->last_buffer, in running time), it pushes a
 * TCP-header-only pause frame; otherwise it sleeps on a single-shot clock id
 * until the next interval boundary. The clock id is published in
 * mimenc->clock_id so the state-change handler can unschedule it.
 * Pauses itself (stop_task) on push error, on stop_paused_mode, or when the
 * element has no clock. */
static void
paused_mode_task (gpointer data)
{
  GstMimEnc *mimenc = GST_MIM_ENC (data);
  GstClockTime now;
  GstClockTimeDiff diff;
  GstFlowReturn ret;

  GST_OBJECT_LOCK (mimenc);

  /* clock is read under the object lock, consistent with how the core sets
   * it during state changes */
  if (!GST_ELEMENT_CLOCK (mimenc)) {
    GST_OBJECT_UNLOCK (mimenc);
    GST_ERROR_OBJECT (mimenc, "Element has no clock");
    gst_pad_pause_task (mimenc->srcpad);
    return;
  }

  if (mimenc->stop_paused_mode) {
    GST_OBJECT_UNLOCK (mimenc);
    goto stop_task;
  }

  /* elapsed running time since the last pushed buffer, clamped at 0 */
  now = gst_clock_get_time (GST_ELEMENT_CLOCK (mimenc));
  diff = now - GST_ELEMENT_CAST (mimenc)->base_time - mimenc->last_buffer;
  if (diff < 0)
    diff = 0;

  /* 3.95s rather than 4s to avoid skipping an interval on scheduling jitter */
  if (diff > 3.95 * GST_SECOND) {
    GstBuffer *buffer;
    GstMapInfo out_map;

    buffer = gst_buffer_new_and_alloc (TCP_HEADER_SIZE);
    gst_buffer_map (buffer, &out_map, GST_MAP_WRITE);
    GST_BUFFER_TIMESTAMP (buffer) = mimenc->last_buffer + PAUSED_MODE_INTERVAL;
    /* payload size 0: a pure keepalive header */
    gst_mim_enc_create_tcp_header (mimenc, out_map.data, 0,
        GST_BUFFER_TIMESTAMP (buffer), FALSE, TRUE);
    gst_buffer_unmap (buffer, &out_map);

    mimenc->last_buffer += PAUSED_MODE_INTERVAL;

    /* push outside the object lock */
    GST_OBJECT_UNLOCK (mimenc);
    GST_LOG_OBJECT (mimenc, "Haven't had an incoming buffer in 4 seconds,"
        " sending out a pause frame");

    ret = gst_pad_push (mimenc->srcpad, buffer);
    if (ret < 0) {
      GST_WARNING_OBJECT (mimenc, "Error pushing paused header: %s",
          gst_flow_get_name (ret));
      goto stop_task;
    }
  } else {
    GstClockTime next_stop;
    GstClockID id;

    /* sleep until the next interval boundary */
    next_stop = now + (PAUSED_MODE_INTERVAL - MIN (diff, PAUSED_MODE_INTERVAL));

    id = gst_clock_new_single_shot_id (GST_ELEMENT_CLOCK (mimenc), next_stop);

    /* re-check the stop flag before publishing the clock id */
    if (mimenc->stop_paused_mode) {
      GST_OBJECT_UNLOCK (mimenc);
      goto stop_task;
    }

    /* expose the id so change_state can unschedule the wait */
    mimenc->clock_id = id;
    GST_OBJECT_UNLOCK (mimenc);

    gst_clock_id_wait (id, NULL);

    GST_OBJECT_LOCK (mimenc);
    mimenc->clock_id = NULL;
    GST_OBJECT_UNLOCK (mimenc);

    gst_clock_id_unref (id);
  }

  return;

stop_task:

  gst_pad_pause_task (mimenc->srcpad);
}
/* Pad task that emits keepalive "pause" frames while no real input arrives
 * (GStreamer 0.10 variant).
 *
 * If more than ~4 seconds of running time have passed since the last buffer,
 * pushes a TCP-header-only pause frame (preceded by a NEWSEGMENT if one is
 * still owed); otherwise sleeps on a single-shot clock id until the next
 * interval boundary. Pauses itself on push error, on stop_paused_mode, or
 * when the element has no clock. */
static void
paused_mode_task (gpointer data)
{
  GstMimEnc *mimenc = GST_MIMENC (data);
  GstClockTime now;
  GstClockTimeDiff diff;
  GstFlowReturn ret;

  GST_OBJECT_LOCK (mimenc);

  /* BUGFIX: the element clock was previously read without the object lock,
   * racing with the core clearing/setting the clock during state changes.
   * Take the lock first, as the rest of this function already does. */
  if (!GST_ELEMENT_CLOCK (mimenc)) {
    GST_OBJECT_UNLOCK (mimenc);
    GST_ERROR_OBJECT (mimenc, "Element has no clock");
    gst_pad_pause_task (mimenc->srcpad);
    return;
  }

  if (mimenc->stop_paused_mode) {
    GST_OBJECT_UNLOCK (mimenc);
    goto stop_task;
  }

  /* elapsed running time since the last pushed buffer, clamped at 0 */
  now = gst_clock_get_time (GST_ELEMENT_CLOCK (mimenc));
  diff = now - GST_ELEMENT_CAST (mimenc)->base_time - mimenc->last_buffer;
  if (diff < 0)
    diff = 0;

  /* 3.95s rather than 4s to avoid skipping an interval on jitter */
  if (diff > 3.95 * GST_SECOND) {
    GstBuffer *buffer = gst_mimenc_create_tcp_header (mimenc, 0,
        mimenc->last_buffer + 4 * GST_SECOND, FALSE, TRUE);
    GstEvent *event = NULL;

    mimenc->last_buffer += 4 * GST_SECOND;

    /* send the pending NEWSEGMENT before the first pushed frame */
    if (mimenc->need_newsegment) {
      event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, -1, 0);
      mimenc->need_newsegment = FALSE;
    }

    /* push outside the object lock */
    GST_OBJECT_UNLOCK (mimenc);

    GST_LOG_OBJECT (mimenc, "Haven't had an incoming buffer in 4 seconds,"
        " sending out a pause frame");

    if (event) {
      if (!gst_pad_push_event (mimenc->srcpad, event))
        GST_WARNING_OBJECT (mimenc, "Failed to push NEWSEGMENT event");
    }
    ret = gst_pad_push (mimenc->srcpad, buffer);
    if (ret < 0) {
      GST_WARNING_OBJECT (mimenc, "Error pushing paused header: %s",
          gst_flow_get_name (ret));
      goto stop_task;
    }
  } else {
    GstClockTime next_stop;
    GstClockID id;

    /* sleep until the next interval boundary */
    next_stop = now + (4 * GST_SECOND - MIN (diff, 4 * GST_SECOND));

    id = gst_clock_new_single_shot_id (GST_ELEMENT_CLOCK (mimenc), next_stop);

    /* re-check the stop flag before publishing the clock id */
    if (mimenc->stop_paused_mode) {
      GST_OBJECT_UNLOCK (mimenc);
      goto stop_task;
    }

    /* expose the id so the state-change handler can unschedule the wait */
    mimenc->clock_id = id;
    GST_OBJECT_UNLOCK (mimenc);

    gst_clock_id_wait (id, NULL);

    GST_OBJECT_LOCK (mimenc);
    mimenc->clock_id = NULL;
    GST_OBJECT_UNLOCK (mimenc);

    gst_clock_id_unref (id);
  }

  return;

stop_task:

  gst_pad_pause_task (mimenc->srcpad);
}
/* Streaming task for the framestore srcpad.
 *
 * Blocks (on fs->cond) until a frame is available for fs->frame_number, then
 * optionally emits a NEWSEGMENT, strips timestamps while stepping, advances
 * the frame/range bookkeeping, and pushes the buffer downstream. Lock fs->lock
 * guards all fs state; the push happens outside the lock. */
static void
gst_frame_store_task (GstPad *pad)
{
  GstFrameStore *fs;
  /* BUGFIX: 'buffer' was uninitialized; when stepping with no new frame
   * requested the first loop iteration tested an indeterminate pointer
   * (undefined behavior). NULL makes the wait loop well-defined. */
  GstBuffer *buffer = NULL;
  GstEvent *event = NULL;

  fs = GST_FRAME_STORE (gst_pad_get_parent (pad));

  GST_DEBUG("task");

  g_mutex_lock (fs->lock);
  /* wait until the requested frame is available (or, while stepping, until a
   * new frame number is requested) */
  while(1) {
    if (fs->stepping == FALSE || (fs->frame_number != fs->pushed_frame_number)) {
      buffer = gst_frame_store_get_frame (fs, fs->frame_number);
    }
    if (buffer) break;
    g_cond_wait (fs->cond, fs->lock);
  }

  if (fs->need_newsegment) {
    GstClock *clock;
    GstClockTime now;
    GstClockTime stream_time;

    clock = GST_ELEMENT_CLOCK (fs);
    if (clock == NULL) {
      now = 0;
      stream_time = 0;
    } else {
      now = gst_clock_get_time (GST_ELEMENT_CLOCK (fs));
      stream_time = now - GST_ELEMENT(fs)->base_time;
    }
    GST_ERROR("now %lld buffer %lld stream_time %lld",
        now, GST_BUFFER_TIMESTAMP(buffer), stream_time);
    /* NOTE(review): stream_time is overridden with a hard-coded 10s here —
     * looks like a leftover debugging hack; confirm before removing. */
    stream_time = GST_SECOND*10;
    event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME,
        GST_BUFFER_TIMESTAMP(buffer), -1, stream_time);
    fs->need_newsegment = FALSE;
  }

  if (fs->stepping) {
    /* stepping: push the frame without timestamps so the sink shows it now */
    buffer = gst_buffer_make_metadata_writable (buffer);
    GST_BUFFER_TIMESTAMP(buffer) = -1;
    GST_BUFFER_DURATION(buffer) = -1;
  }
  fs->pushed_frame_number = fs->frame_number;
  if (!fs->stepping) {
    fs->frame_number++;
  }
  /* slide the stored range forward when we approach its end */
  if (fs->frame_number + 1 >= fs->range_offset + fs->range_size) {
    gst_frame_store_advance (fs);
  }
  g_mutex_unlock (fs->lock);

  /* events and buffers are pushed without holding the lock */
  if (event) {
    gst_pad_push_event (fs->srcpad, event);
  }
  gst_pad_push (fs->srcpad, buffer);
  GST_DEBUG("task done");

  gst_object_unref (fs);
}
/* Read audio from the ringbuffer and produce a timestamped buffer.
 *
 * Reads 'length' bytes (default: one segment) starting at the requested or
 * sequentially-next sample, looping while the ringbuffer is interrupted.
 * Flags a DISCONT when samples were dropped, then timestamps the buffer:
 * either against our own clock (subtract base_time), or — when slaved to
 * another clock — via the configured slave method (skew resync,
 * re-timestamping, or none).
 *
 * Returns GST_FLOW_OK, GST_FLOW_WRONG_STATE when the ringbuffer is not
 * acquired/stopped, or GST_FLOW_ERROR for a non-sequential offset. */
static GstFlowReturn
gst_base_audio_src_create (GstBaseSrc * bsrc, guint64 offset, guint length,
    GstBuffer ** outbuf)
{
  GstBaseAudioSrc *src = GST_BASE_AUDIO_SRC (bsrc);
  GstBuffer *buf;
  guchar *data;
  guint samples, total_samples;
  guint64 sample;
  gint bps;
  GstRingBuffer *ringbuffer;
  GstRingBufferSpec *spec;
  guint read;
  GstClockTime timestamp, duration;
  GstClock *clock;

  ringbuffer = src->ringbuffer;
  spec = &ringbuffer->spec;

  if (G_UNLIKELY (!gst_ring_buffer_is_acquired (ringbuffer)))
    goto wrong_state;

  bps = spec->bytes_per_sample;

  if ((length == 0 && bsrc->blocksize == 0) || length == -1)
    /* no length given, use the default segment size */
    length = spec->segsize;
  else
    /* make sure we round down to an integral number of samples */
    length -= length % bps;

  /* figure out the offset in the ringbuffer */
  if (G_UNLIKELY (offset != -1)) {
    sample = offset / bps;
    /* if a specific offset was given it must be the next sequential
     * offset we expect or we fail for now. */
    if (src->next_sample != -1 && sample != src->next_sample)
      goto wrong_offset;
  } else {
    /* calculate the sequentially next sample we need to read. This can jump and
     * create a DISCONT. */
    sample = gst_base_audio_src_get_offset (src);
  }

  GST_DEBUG_OBJECT (src, "reading from sample %" G_GUINT64_FORMAT, sample);

  /* get the number of samples to read */
  total_samples = samples = length / bps;

  /* FIXME, using a bufferpool would be nice here */
  buf = gst_buffer_new_and_alloc (length);
  data = GST_BUFFER_DATA (buf);

  do {
    read = gst_ring_buffer_read (ringbuffer, sample, data, samples);
    GST_DEBUG_OBJECT (src, "read %u of %u", read, samples);
    /* if we read all, we're done */
    if (read == samples)
      break;

    /* else something interrupted us and we wait for playing again. */
    GST_DEBUG_OBJECT (src, "wait playing");
    if (gst_base_src_wait_playing (bsrc) != GST_FLOW_OK)
      goto stopped;

    GST_DEBUG_OBJECT (src, "continue playing");

    /* read next samples */
    sample += read;
    samples -= read;
    data += read * bps;
  } while (TRUE);

  /* mark discontinuity if needed */
  if (G_UNLIKELY (sample != src->next_sample) && src->next_sample != -1) {
    GST_WARNING_OBJECT (src,
        "create DISCONT of %" G_GUINT64_FORMAT " samples at sample %"
        G_GUINT64_FORMAT, sample - src->next_sample, sample);
    GST_ELEMENT_WARNING (src, CORE, CLOCK,
        (_("Can't record audio fast enough")),
        ("dropped %" G_GUINT64_FORMAT " samples", sample - src->next_sample));
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
  }

  src->next_sample = sample + samples;

  /* get the normal timestamp to get the duration. */
  timestamp = gst_util_uint64_scale_int (sample, GST_SECOND, spec->rate);
  duration = gst_util_uint64_scale_int (src->next_sample, GST_SECOND,
      spec->rate) - timestamp;

  GST_OBJECT_LOCK (src);
  if (!(clock = GST_ELEMENT_CLOCK (src)))
    goto no_sync;

  if (clock != src->clock) {
    /* we are slaved, check how to handle this */
    switch (src->priv->slave_method) {
      case GST_BASE_AUDIO_SRC_SLAVE_RESAMPLE:
        /* not implemented, use skew algorithm. This algorithm should
         * work on the readout pointer and produces more or less samples based
         * on the clock drift */
        /* fallthrough to SKEW */
      case GST_BASE_AUDIO_SRC_SLAVE_SKEW:
      {
        GstClockTime running_time;
        GstClockTime base_time;
        GstClockTime current_time;
        guint64 running_time_sample;
        gint running_time_segment;
        gint current_segment;
        gint segment_skew;
        gint sps;

        /* samples per segment */
        sps = ringbuffer->samples_per_seg;

        /* get the current time */
        current_time = gst_clock_get_time (clock);

        /* get the basetime */
        base_time = GST_ELEMENT_CAST (src)->base_time;

        /* get the running_time */
        running_time = current_time - base_time;

        /* the running_time converted to a sample (relative to the ringbuffer) */
        running_time_sample =
            gst_util_uint64_scale_int (running_time, spec->rate, GST_SECOND);

        /* the segmentnr corrensponding to running_time, round down */
        running_time_segment = running_time_sample / sps;

        /* the segment currently read from the ringbuffer */
        current_segment = sample / sps;

        /* the skew we have between running_time and the ringbuffertime */
        segment_skew = running_time_segment - current_segment;

        GST_DEBUG_OBJECT (bsrc, "\n running_time = %" GST_TIME_FORMAT
            "\n timestamp = %" GST_TIME_FORMAT
            "\n running_time_segment = %d"
            "\n current_segment = %d"
            "\n segment_skew = %d",
            GST_TIME_ARGS (running_time),
            GST_TIME_ARGS (timestamp),
            running_time_segment, current_segment, segment_skew);

        /* Resync the ringbuffer if:
         * 1. We get one segment into the future.
         * This is clearly a lie, because we can't
         * possibly have a buffer with timestamp 1 at
         * time 0. (unless it has time-travelled...)
         *
         * 2. We are more than the length of the ringbuffer behind.
         * The length of the ringbuffer then gets to dictate
         * the threshold for what is concidered "too late"
         *
         * 3. If this is our first buffer.
         * We know that we should catch up to running_time
         * the first time we are ran.
         */
        if ((segment_skew < 0) ||
            (segment_skew >= ringbuffer->spec.segtotal) ||
            (current_segment == 0)) {
          gint segments_written;
          gint first_segment;
          gint last_segment;
          gint new_last_segment;
          gint segment_diff;
          gint new_first_segment;
          guint64 new_sample;

          /* we are going to say that the last segment was captured at the
             current time (running_time), minus one segment of
             creation-latency in the ringbuffer. This can be thought of as:
             The segment arrived in the ringbuffer at time X, and that means
             it was created at time X - (one segment). */
          new_last_segment = running_time_segment - 1;

          /* for better readablity */
          first_segment = current_segment;

          /* get the amount of segments written from the device by now */
          segments_written = g_atomic_int_get (&ringbuffer->segdone);

          /* subtract the base to segments_written to get the number of the
             last written segment in the ringbuffer
             (one segment written = segment 0) */
          last_segment = segments_written - ringbuffer->segbase - 1;

          /* we see how many segments the ringbuffer was timeshifted */
          segment_diff = new_last_segment - last_segment;

          /* we move the first segment an equal amount */
          new_first_segment = first_segment + segment_diff;

          /* and we also move the segmentbase the same amount */
          ringbuffer->segbase -= segment_diff;

          /* we calculate the new sample value */
          new_sample = ((guint64) new_first_segment) * sps;

          /* and get the relative time to this -> our new timestamp */
          timestamp = gst_util_uint64_scale_int (new_sample, GST_SECOND,
              spec->rate);

          /* we update the next sample accordingly */
          src->next_sample = new_sample + samples;

          GST_DEBUG_OBJECT (bsrc,
              "Timeshifted the ringbuffer with %d segments: "
              "Updating the timestamp to %" GST_TIME_FORMAT ", "
              "and src->next_sample to %" G_GUINT64_FORMAT,
              segment_diff, GST_TIME_ARGS (timestamp), src->next_sample);
        }
        break;
      }
      case GST_BASE_AUDIO_SRC_SLAVE_RETIMESTAMP:
      {
        GstClockTime base_time, latency;

        /* We are slaved to another clock, take running time of the pipeline
         * clock and timestamp against it. Somebody else in the pipeline
         * should figure out the clock drift. We keep the duration we
         * calculated above. */
        timestamp = gst_clock_get_time (clock);
        base_time = GST_ELEMENT_CAST (src)->base_time;

        if (timestamp > base_time)
          timestamp -= base_time;
        else
          timestamp = 0;

        /* subtract latency */
        latency = gst_util_uint64_scale_int (total_samples, GST_SECOND,
            spec->rate);
        if (timestamp > latency)
          timestamp -= latency;
        else
          timestamp = 0;
      }
        /* NOTE(review): no break — RETIMESTAMP falls through into NONE's
         * break; harmless as written, but confirm it is intentional. */
      case GST_BASE_AUDIO_SRC_SLAVE_NONE:
        break;
    }
  } else {
    GstClockTime base_time;

    /* we are not slaved, subtract base_time */
    base_time = GST_ELEMENT_CAST (src)->base_time;
    if (timestamp > base_time)
      timestamp -= base_time;
    else
      timestamp = 0;
  }

no_sync:
  GST_OBJECT_UNLOCK (src);

  GST_BUFFER_TIMESTAMP (buf) = timestamp;
  GST_BUFFER_DURATION (buf) = duration;
  GST_BUFFER_OFFSET (buf) = sample;
  GST_BUFFER_OFFSET_END (buf) = sample + samples;

  *outbuf = buf;

  return GST_FLOW_OK;

  /* ERRORS */
wrong_state:
  {
    GST_DEBUG_OBJECT (src, "ringbuffer in wrong state");
    return GST_FLOW_WRONG_STATE;
  }
wrong_offset:
  {
    GST_ELEMENT_ERROR (src, RESOURCE, SEEK,
        (NULL), ("resource can only be operated on sequentially but offset %"
            G_GUINT64_FORMAT " was given", offset));
    return GST_FLOW_ERROR;
  }
stopped:
  {
    gst_buffer_unref (buf);
    GST_DEBUG_OBJECT (src, "ringbuffer stopped");
    return GST_FLOW_WRONG_STATE;
  }
}
/* Create one captured video buffer for the TI DMAI capture source.
 *
 * Lazily creates the capture device on first call (setting framerate and a
 * forced 720x576 geometry), pulls a frame from the driver, wraps it in a
 * DMAI transport buffer, timestamps it against the element clock, and
 * attaches caps matching the configured colorspace. */
static GstFlowReturn create(GstPushSrc *base, GstBuffer **buf)
{
    GstTICaptureSrc *src = (GstTICaptureSrc *)base;
    Buffer_Handle hDstBuf;
    GstBuffer *outBuf;
    gint ret = GST_FLOW_OK;
    BufferGfx_Attrs gfxAttrs = BufferGfx_Attrs_DEFAULT;
    Int32 width, height;

    GST_LOG("create begin");

    /* create capture device */
    if (src->hCapture == NULL) {

        /* set framerate based on video standard */
        switch(dmai_video_std(src->video_standard)) {
            case VideoStd_D1_NTSC:
                gst_value_set_fraction(&src->framerate,30000,1001);
                break;
            case VideoStd_D1_PAL:
                gst_value_set_fraction(&src->framerate,25,1);
                break;
            default:
                gst_value_set_fraction(&src->framerate,30,1);
                break;
        }

        /* set width & height based on video standard */
        src->cAttrs.videoStd = dmai_video_std(src->video_standard);

        VideoStd_getResolution(src->cAttrs.videoStd, &width, &height);

        /* NOTE(review): geometry from VideoStd_getResolution is deliberately
         * overridden with a hard-coded D1-PAL size here; confirm. */
        width = 720;
        height = 576;

        src->width = width;
        src->height = height;

        /* BUGFIX: log after assigning so the message reports the size that
         * was actually forced, not the stale previous values. */
        GST_WARNING("force video size to %dx%d", src->width, src->height);

        gfxAttrs.dim.height = src->height;
        gfxAttrs.dim.width = src->width;
        src->cAttrs.captureDimension = &gfxAttrs.dim;

        if (!capture_create(src))
            return GST_FLOW_ERROR;
    }

    /* Get buffer from driver */
    if (Capture_get(src->hCapture, &hDstBuf)) {
        GST_ELEMENT_ERROR(src, RESOURCE, FAILED,
        ("Failed to allocate buffer\n"), (NULL));
        return GST_FLOW_ERROR;
    }

    /* Create a DMAI transport buffer object to carry a DMAI buffer to
     * the source pad. The transport buffer knows how to release the
     * buffer for re-use in this element when the source pad calls
     * gst_buffer_unref().
     */
    outBuf = gst_tidmaibuffertransport_new(hDstBuf, src->hBufTab,
        capture_buffer_finalize, (void*)src);
    gst_buffer_set_data(outBuf, GST_BUFFER_DATA(outBuf),
        Buffer_getSize(hDstBuf));

    *buf = outBuf;

    /* set buffer metadata */
    if (G_LIKELY (ret == GST_FLOW_OK && *buf)) {
        GstClock *clock;
        GstClockTime timestamp;

        GST_BUFFER_OFFSET (*buf) = src->offset++;
        GST_BUFFER_OFFSET_END (*buf) = src->offset;

        /* timestamps, LOCK to get clock and base time. */
        GST_OBJECT_LOCK (src);
        if ((clock = GST_ELEMENT_CLOCK (src))) {
            /* we have a clock, get base time and ref clock */
            timestamp = GST_ELEMENT (src)->base_time;
            gst_object_ref (clock);
        } else {
            /* no clock, can't set timestamps */
            timestamp = GST_CLOCK_TIME_NONE;
        }
        GST_OBJECT_UNLOCK (src);

        if (G_LIKELY (clock)) {
            /* the time now is the time of the clock minus the base time */
            timestamp = gst_clock_get_time (clock) - timestamp;
            gst_object_unref (clock);

            /* if we have a framerate adjust timestamp for frame latency */
            if (GST_CLOCK_TIME_IS_VALID (src->duration)) {
                if (timestamp > src->duration)
                    timestamp -= src->duration;
                else
                    timestamp = 0;
            }
        }

        /* FIXME: use the timestamp from the buffer itself! */
        GST_BUFFER_TIMESTAMP (*buf) = timestamp;
        GST_BUFFER_DURATION (*buf) = src->duration;
    }

    /* Create caps for buffer */
    GstCaps *mycaps;
    GstStructure *structure;

    mycaps = gst_caps_new_empty();

    if (src->cAttrs.colorSpace == ColorSpace_UYVY) {
        structure = gst_structure_new( "video/x-raw-yuv",
            "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC('U', 'Y', 'V', 'Y'),
            "framerate", GST_TYPE_FRACTION,
                gst_value_get_fraction_numerator(&src->framerate),
                gst_value_get_fraction_denominator(&src->framerate),
            "width", G_TYPE_INT, src->width,
            "height", G_TYPE_INT, src->height,
            (gchar*) NULL);
    }
    else if(src->cAttrs.colorSpace == ColorSpace_YUV420PSEMI) {
        structure = gst_structure_new( "video/x-raw-yuv",
            "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC('N', 'V', '1', '2'),
            "framerate", GST_TYPE_FRACTION,
                gst_value_get_fraction_numerator(&src->framerate),
                gst_value_get_fraction_denominator(&src->framerate),
            "width", G_TYPE_INT, src->width,
            "height", G_TYPE_INT, src->height,
            (gchar*) NULL);
    }
    else {
        GST_ERROR("unsupported fourcc\n");
        /* BUGFIX: was 'return FALSE', which is 0 == GST_FLOW_OK, so callers
         * treated an unsupported colorspace as success; it also leaked the
         * empty caps. Report a real error and release the caps. */
        gst_caps_unref(mycaps);
        return GST_FLOW_ERROR;
    }

    gst_caps_append_structure(mycaps, gst_structure_copy (structure));
    gst_structure_free(structure);

    gst_buffer_set_caps(*buf, mycaps);
    gst_caps_unref(mycaps);

    {
        static int fn;
        fn++;
        GST_INFO("capture frame %d", fn);
    }

    GST_LOG("create end");
    return GST_FLOW_OK;
}
/* Fill a buffer with the next frame from the Euresys MultiCam channel.
 *
 * Starts acquisition on first call, blocks (up to 5s per wait) for a
 * surface-processing signal, copies the surface into 'buf', accounts dropped
 * frames via the surface time code, timestamps the buffer with the element
 * clock's running time, and releases the surface back to the driver.
 *
 * NOTE(review): GST_ELEMENT_CLOCK (src) is dereferenced without a NULL check
 * for the timestamp below — presumably a clock is guaranteed while the
 * source is streaming; confirm. */
GstFlowReturn
gst_euresys_fill (GstPushSrc * src, GstBuffer * buf)
{
  GstEuresys *euresys = GST_EURESYS (src);
  MCSTATUS status = 0;
  MCSIGNALINFO siginfo;
  MCHANDLE hSurface;
  int *pImage;
  INT32 timeCode;
  INT64 timeStamp;
  int newsize;
  int dropped_frame_count;
  GstMapInfo minfo;

  /* Start acquisition */
  if (!euresys->acq_started) {
    status = McSetParamInt (euresys->hChannel,
        MC_ChannelState, MC_ChannelState_ACTIVE);
    if (status != MC_OK) {
      GST_ELEMENT_ERROR (euresys, RESOURCE, FAILED,
          (("Failed to set channel state to ACTIVE.")), (NULL));
      return GST_FLOW_ERROR;
    }
    euresys->acq_started = TRUE;
  }

  /* Wait for next surface (frame) */
  while (TRUE) {
    /* Wait up to 5000 msecs for a signal */
    status = McWaitSignal (euresys->hChannel, MC_SIG_ANY, 5000, &siginfo);
    if (status == MC_TIMEOUT) {
      GST_ELEMENT_ERROR (src, RESOURCE, FAILED,
          (("Timeout waiting for signal.")),
          (("Timeout waiting for signal.")));
      return GST_FLOW_ERROR;
    } else if (siginfo.Signal == MC_SIG_ACQUISITION_FAILURE) {
      GST_ELEMENT_ERROR (src, RESOURCE, FAILED,
          (("Acquisition failure due to timeout.")), (NULL));
      return GST_FLOW_ERROR;
    } else if (siginfo.Signal == MC_SIG_SURFACE_PROCESSING) {
      /* a filled surface is ready for us */
      break;
    } else {
      /* ignore any other signal and keep waiting */
      continue;
    }
  }

  /* Get pointer to image data and other info */
  hSurface = (MCHANDLE) siginfo.SignalInfo;
  /* "number of bytes actually written into the surface" */
  status = McGetParamInt (hSurface, MC_FillCount, &newsize);
  /* "Internal numbering of surface during acquisition sequence" (zero-based) */
  status |= McGetParamInt (hSurface, MC_TimeCode, &timeCode);
  /* "number of microseconds elapsed since midnight (00:00:00),
   * January 1, 1970, coordinated universal time (UTC), according
   * to the system clock when the surface is filled" */
  status |= McGetParamInt64 (hSurface, MC_TimeStamp_us, &timeStamp);
  status |= McGetParamPtr (hSurface, MC_SurfaceAddr, (PVOID *) & pImage);
  if (G_UNLIKELY (status != MC_OK)) {
    GST_ELEMENT_ERROR (euresys, RESOURCE, FAILED,
        (("Failed to read surface parameter.")), (NULL));
    return GST_FLOW_ERROR;
  }

  GST_INFO ("Got surface #%05d", timeCode);

  /* a gap in the sequential time codes means the driver dropped frames */
  dropped_frame_count = timeCode - (euresys->last_time_code + 1);
  if (dropped_frame_count != 0) {
    euresys->dropped_frame_count += dropped_frame_count;
    GST_WARNING ("Dropped %d frames (%d total)", dropped_frame_count,
        euresys->dropped_frame_count);
    /* TODO: emit message here about dropped frames */
  }
  euresys->last_time_code = timeCode;

  /* Copy image to buffer from surface */
  gst_buffer_map (buf, &minfo, GST_MAP_WRITE);
  /* TODO: fix strides? */
  g_assert (minfo.size == newsize);
  memcpy (minfo.data, pImage, newsize);
  gst_buffer_unmap (buf, &minfo);

  /* TODO: set buffer timestamp based on MC_TimeStamp_us */
  GST_BUFFER_TIMESTAMP (buf) =
      gst_clock_get_time (GST_ELEMENT_CLOCK (src)) -
      GST_ELEMENT_CAST (src)->base_time;

  /* Done processing surface, release control */
  McSetParamInt (hSurface, MC_SurfaceState, MC_SurfaceState_FREE);

  return GST_FLOW_OK;
}
/* Enumerate kernel-streaming video devices and open the matching one.
 *
 * Selection order: device-path match, else device-name match, else
 * device-index match, else the first (camera-sorted) entry. For the chosen
 * entry a KsClock is created/opened (optionally slaved to the element clock)
 * and the device is opened into priv->device.
 *
 * Returns TRUE on success; posts an element error and returns FALSE when no
 * device exists, none matches, or the device cannot be opened. */
static gboolean
gst_ks_video_src_open_device (GstKsVideoSrc * self)
{
  GstKsVideoSrcPrivate *priv = GST_KS_VIDEO_SRC_GET_PRIVATE (self);
  GstKsVideoDevice *device = NULL;
  GList *devices, *cur;

  g_assert (priv->device == NULL);

  devices = ks_enumerate_devices (&KSCATEGORY_VIDEO);
  if (devices == NULL)
    goto error_no_devices;

  devices = ks_video_device_list_sort_cameras_first (devices);

  for (cur = devices; cur != NULL; cur = cur->next) {
    KsDeviceEntry *entry = cur->data;

    GST_DEBUG_OBJECT (self, "device %d: name='%s' path='%s'",
        entry->index, entry->name, entry->path);
  }

  for (cur = devices; cur != NULL && device == NULL; cur = cur->next) {
    KsDeviceEntry *entry = cur->data;
    gboolean match;

    if (priv->device_path != NULL) {
      match = g_strcasecmp (entry->path, priv->device_path) == 0;
    } else if (priv->device_name != NULL) {
      match = g_strcasecmp (entry->name, priv->device_name) == 0;
    } else if (priv->device_index >= 0) {
      match = entry->index == priv->device_index;
    } else {
      match = TRUE;             /* pick the first entry */
    }

    if (match) {
      priv->ksclock = g_object_new (GST_TYPE_KS_CLOCK, NULL);
      if (priv->ksclock != NULL && gst_ks_clock_open (priv->ksclock)) {
        GstClock *clock = GST_ELEMENT_CLOCK (self);
        if (clock != NULL)
          gst_ks_clock_provide_master_clock (priv->ksclock, clock);
      } else {
        GST_WARNING_OBJECT (self, "failed to create/open KsClock");
        /* BUGFIX: only unref when the clock object was actually created;
         * g_object_unref (NULL) is invalid and triggers a GLib critical
         * when g_object_new failed. */
        if (priv->ksclock != NULL)
          g_object_unref (priv->ksclock);
        priv->ksclock = NULL;
      }

      /* the device keeps its own reference handling of ksclock (may be NULL,
       * in which case the device runs without a master clock) */
      device = gst_ks_video_device_new (entry->path, priv->ksclock,
          gst_ks_video_src_alloc_buffer, self);
    }

    ks_device_entry_free (entry);
  }

  g_list_free (devices);

  if (device == NULL)
    goto error_no_match;

  if (!gst_ks_video_device_open (device))
    goto error_open;

  priv->device = device;

  return TRUE;

  /* ERRORS */
error_no_devices:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, NOT_FOUND,
        ("No video capture devices found"), (NULL));
    return FALSE;
  }
error_no_match:
  {
    if (priv->device_path != NULL) {
      GST_ELEMENT_ERROR (self, RESOURCE, NOT_FOUND,
          ("Specified video capture device with path '%s' not found",
              priv->device_path), (NULL));
    } else if (priv->device_name != NULL) {
      GST_ELEMENT_ERROR (self, RESOURCE, NOT_FOUND,
          ("Specified video capture device with name '%s' not found",
              priv->device_name), (NULL));
    } else {
      GST_ELEMENT_ERROR (self, RESOURCE, NOT_FOUND,
          ("Specified video capture device with index %d not found",
              priv->device_index), (NULL));
    }
    return FALSE;
  }
error_open:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, OPEN_READ,
        ("Failed to open device"), (NULL));
    g_object_unref (device);
    return FALSE;
  }
}
/* Produce one audio buffer from the WASAPI capture client.
 *
 * Optionally waits on the pipeline clock until self->next_time, spins until
 * the capture client has data, clips overly large reads to
 * samples_per_buffer, timestamps either from the device position (own clock)
 * or from pipeline running time minus one period (slaved), and copies every
 * second 16-bit sample into the output buffer.
 *
 * NOTE(review): the copy loop advances 'samples' by 2 per output sample —
 * presumably extracting one channel of interleaved stereo; confirm. */
static GstFlowReturn
gst_wasapi_src_create (GstPushSrc * src, GstBuffer ** buf)
{
  GstWasapiSrc *self = GST_WASAPI_SRC (src);
  GstFlowReturn ret = GST_FLOW_OK;
  GstClock *clock;
  GstClockTime timestamp, duration = self->period_time;
  HRESULT hr;
  gint16 *samples = NULL;
  guint32 nsamples_read = 0, nsamples;
  DWORD flags = 0;
  guint64 devpos;

  /* take a ref to the clock under the object lock so we can use it after
   * unlocking */
  GST_OBJECT_LOCK (self);
  clock = GST_ELEMENT_CLOCK (self);
  if (clock != NULL)
    gst_object_ref (clock);
  GST_OBJECT_UNLOCK (self);

  /* pace ourselves: sleep until the scheduled capture time */
  if (clock != NULL && GST_CLOCK_TIME_IS_VALID (self->next_time)) {
    GstClockID id;

    id = gst_clock_new_single_shot_id (clock, self->next_time);
    gst_clock_id_wait (id, NULL);
    gst_clock_id_unref (id);
  }

  /* busy-wait until the device has data for us */
  do {
    hr = IAudioCaptureClient_GetBuffer (self->capture_client,
        (BYTE **) & samples, &nsamples_read, &flags, &devpos, NULL);
  } while (hr == AUDCLNT_S_BUFFER_EMPTY);

  if (hr != S_OK) {
    GST_ERROR_OBJECT (self, "IAudioCaptureClient::GetBuffer () failed: %s",
        gst_wasapi_util_hresult_to_string (hr));
    ret = GST_FLOW_ERROR;
    goto beach;
  }

  if (flags != 0) {
    GST_WARNING_OBJECT (self, "devpos %" G_GUINT64_FORMAT ": flags=0x%08x",
        devpos, flags);
  }

  /* FIXME: Why do we get 1024 sometimes and not a multiple of
   * samples_per_buffer? Shouldn't WASAPI provide a DISCONT
   * flag if we read too slow? */
  nsamples = nsamples_read;
  g_assert (nsamples >= self->samples_per_buffer);
  if (nsamples > self->samples_per_buffer) {
    GST_WARNING_OBJECT (self,
        "devpos %" G_GUINT64_FORMAT ": got %d samples, expected %d, clipping!",
        devpos, nsamples, self->samples_per_buffer);

    nsamples = self->samples_per_buffer;
  }

  if (clock == NULL || clock == self->clock) {
    /* our own clock (or none): timestamp straight from the device position */
    timestamp =
        gst_util_uint64_scale (devpos, GST_SECOND, self->client_clock_freq);
  } else {
    GstClockTime base_time;

    /* slaved to another clock: running time minus one period of latency,
     * each subtraction clamped at zero */
    timestamp = gst_clock_get_time (clock);

    base_time = GST_ELEMENT_CAST (self)->base_time;
    if (timestamp > base_time)
      timestamp -= base_time;
    else
      timestamp = 0;

    if (timestamp > duration)
      timestamp -= duration;
    else
      timestamp = 0;
  }

  ret = gst_pad_alloc_buffer_and_set_caps (GST_BASE_SRC_PAD (self),
      devpos, nsamples * sizeof (gint16),
      GST_PAD_CAPS (GST_BASE_SRC_PAD (self)), buf);

  if (ret == GST_FLOW_OK) {
    guint i;
    gint16 *dst;

    GST_BUFFER_OFFSET_END (*buf) = devpos + self->samples_per_buffer;
    GST_BUFFER_TIMESTAMP (*buf) = timestamp;
    GST_BUFFER_DURATION (*buf) = duration;

    /* copy every second 16-bit sample from the device buffer */
    dst = (gint16 *) GST_BUFFER_DATA (*buf);
    for (i = 0; i < nsamples; i++) {
      *dst = *samples;

      samples += 2;
      dst++;
    }
  }

  /* release the full read, including any clipped samples */
  hr = IAudioCaptureClient_ReleaseBuffer (self->capture_client, nsamples_read);
  if (hr != S_OK) {
    GST_ERROR_OBJECT (self,
        "IAudioCaptureClient::ReleaseBuffer () failed: %s",
        gst_wasapi_util_hresult_to_string (hr));
    ret = GST_FLOW_ERROR;
    goto beach;
  }

beach:
  if (clock != NULL)
    gst_object_unref (clock);

  return ret;
}
/* Start DeckLink scheduled playback once both the video (and, if present,
 * audio) side of the shared output are enabled and the element is going to
 * PLAYING.
 *
 * NOTE(review): appears to be called with self->output->lock held — it
 * unlocks first thing and re-acquires the lock before touching
 * self->output->started; callers presumably release the lock afterwards.
 * Confirm at the call sites. Early returns below therefore leave the lock
 * held, matching that contract. */
static void
gst_decklink_video_sink_start_scheduled_playback (GstElement * element)
{
  GstDecklinkVideoSink *self = GST_DECKLINK_VIDEO_SINK_CAST (element);
  GstClockTime start_time;
  HRESULT res;
  bool active;

  /* Only start when video is enabled, audio (if an audiosink shares this
   * output) is enabled too, and we are in or heading to PLAYING. */
  if (self->output->video_enabled && (!self->output->audiosink
          || self->output->audio_enabled)
      && (GST_STATE (self) == GST_STATE_PLAYING
          || GST_STATE_PENDING (self) == GST_STATE_PLAYING)) {
    // Need to unlock to get the clock time
    g_mutex_unlock (&self->output->lock);

    // FIXME: start time is the same for the complete pipeline,
    // but what we need here is the start time of this element!
    start_time = gst_element_get_base_time (element);
    if (start_time != GST_CLOCK_TIME_NONE)
      /* running time = clock time - base time */
      start_time = gst_clock_get_time (GST_ELEMENT_CLOCK (self)) - start_time;

    // FIXME: This will probably not work
    if (start_time == GST_CLOCK_TIME_NONE)
      start_time = 0;

    // Current times of internal and external clock when we go to
    // playing. We need this to convert the pipeline running time
    // to the running time of the hardware
    //
    // We can't use the normal base time for the external clock
    // because we might go to PLAYING later than the pipeline
    self->internal_base_time =
        gst_clock_get_internal_time (self->output->clock);
    self->external_base_time =
        gst_clock_get_internal_time (GST_ELEMENT_CLOCK (self));

    /* Map the pipeline running time onto the hardware clock's timeline. */
    convert_to_internal_clock (self, &start_time, NULL);

    g_mutex_lock (&self->output->lock);
    // Check if someone else started in the meantime
    if (self->output->started)
      return;

    /* If the card is already scheduling (e.g. from a previous run), stop it
     * before restarting at the new start time. */
    active = false;
    self->output->output->IsScheduledPlaybackRunning (&active);
    if (active) {
      GST_DEBUG_OBJECT (self, "Stopping scheduled playback");

      self->output->started = FALSE;

      res = self->output->output->StopScheduledPlayback (0, 0, 0);
      if (res != S_OK) {
        GST_ELEMENT_ERROR (self, STREAM, FAILED,
            (NULL), ("Failed to stop scheduled playback: 0x%08x", res));
        return;
      }
    }

    GST_DEBUG_OBJECT (self,
        "Starting scheduled playback at %" GST_TIME_FORMAT,
        GST_TIME_ARGS (start_time));

    /* Timescale GST_SECOND: start_time is expressed in nanoseconds. */
    res = self->output->output->StartScheduledPlayback
        (start_time, GST_SECOND, 1.0);
    if (res != S_OK) {
      GST_ELEMENT_ERROR (self, STREAM, FAILED,
          (NULL), ("Failed to start scheduled playback: 0x%08x", res));
      return;
    }

    self->output->started = TRUE;
    self->output->clock_restart = TRUE;
  } else {
    /* Preconditions not met yet; a later caller will try again. */
    GST_DEBUG_OBJECT (self, "Not starting scheduled playback yet");
  }
}
/**
 * This function will push out buffers on the source pad.
 *
 * For each pushed buffer, the seqnum is recorded, if the next buffer B has a
 * different seqnum (missing packets before B), this function will wait for the
 * missing packet to arrive up to the timestamp of buffer B.
 *
 * Runs as the srcpad streaming task; entered and left with the JBUF lock
 * released, but holds it for most of the body except around the clock wait
 * and the final gst_pad_push().
 */
static void
gst_rtp_jitter_buffer_loop (GstRtpJitterBuffer * jitterbuffer)
{
  GstRtpJitterBufferPrivate *priv;
  GstBuffer *outbuf;
  GstFlowReturn result;
  guint16 seqnum;
  guint32 next_seqnum;
  GstClockTime timestamp, out_time;
  gboolean discont = FALSE;
  gint gap;

  priv = jitterbuffer->priv;

  JBUF_LOCK_CHECK (priv, flushing);
again:
  GST_DEBUG_OBJECT (jitterbuffer, "Peeking item");
  while (TRUE) {
    /* always wait if we are blocked */
    if (!priv->blocked) {
      /* if we have a packet, we can exit the loop and grab it */
      if (rtp_jitter_buffer_num_packets (priv->jbuf) > 0)
        break;
      /* no packets but we are EOS, do eos logic */
      if (priv->eos)
        goto do_eos;
    }
    /* underrun, wait for packets or flushing now */
    priv->waiting = TRUE;
    JBUF_WAIT_CHECK (priv, flushing);
    priv->waiting = FALSE;
  }

  /* peek a buffer, we're just looking at the timestamp and the sequence number.
   * If all is fine, we'll pop and push it. If the sequence number is wrong we
   * wait on the timestamp. In the chain function we will unlock the wait when a
   * new buffer is available. The peeked buffer is valid for as long as we hold
   * the jitterbuffer lock. */
  outbuf = rtp_jitter_buffer_peek (priv->jbuf);

  /* get the seqnum and the next expected seqnum */
  seqnum = gst_rtp_buffer_get_seq (outbuf);
  next_seqnum = priv->next_seqnum;

  /* get the timestamp, this is already corrected for clock skew by the
   * jitterbuffer */
  timestamp = GST_BUFFER_TIMESTAMP (outbuf);

  GST_DEBUG_OBJECT (jitterbuffer,
      "Peeked buffer #%d, expect #%d, timestamp %" GST_TIME_FORMAT
      ", now %d left", seqnum, next_seqnum, GST_TIME_ARGS (timestamp),
      rtp_jitter_buffer_num_packets (priv->jbuf));

  /* apply our timestamp offset to the incoming buffer, this will be our output
   * timestamp.
   */
  out_time = apply_offset (jitterbuffer, timestamp);

  /* get the gap between this and the previous packet. If we don't know the
   * previous packet seqnum assume no gap.
   * NOTE(review): next_seqnum is guint32 compared against -1; this relies on
   * the usual conversion making -1 the 0xffffffff "unknown" sentinel. */
  if (next_seqnum != -1) {
    gap = gst_rtp_buffer_compare_seqnum (next_seqnum, seqnum);

    /* if we have a packet that we already pushed or considered dropped, pop it
     * off and get the next packet */
    if (gap < 0) {
      GST_DEBUG_OBJECT (jitterbuffer, "Old packet #%d, next #%d dropping",
          seqnum, next_seqnum);
      outbuf = rtp_jitter_buffer_pop (priv->jbuf);
      gst_buffer_unref (outbuf);
      goto again;
    }
  } else {
    GST_DEBUG_OBJECT (jitterbuffer, "no next seqnum known, first packet");
    gap = -1;
  }

  /* If we don't know what the next seqnum should be (== -1) we have to wait
   * because it might be possible that we are not receiving this buffer in-order,
   * a buffer with a lower seqnum could arrive later and we want to push that
   * earlier buffer before this buffer then.
   * If we know the expected seqnum, we can compare it to the current seqnum to
   * determine if we have missing a packet. If we have a missing packet (which
   * must be before this packet) we can wait for it until the deadline for this
   * packet expires. */
  if (gap != 0 && out_time != -1) {
    GstClockID id;
    GstClockTime sync_time;
    GstClockReturn ret;
    GstClock *clock;
    GstClockTime duration = GST_CLOCK_TIME_NONE;

    if (gap > 0) {
      /* we have a gap */
      GST_WARNING_OBJECT (jitterbuffer,
          "Sequence number GAP detected: expected %d instead of %d (%d missing)",
          next_seqnum, seqnum, gap);

      if (priv->last_out_time != -1) {
        GST_DEBUG_OBJECT (jitterbuffer,
            "out_time %" GST_TIME_FORMAT ", last %" GST_TIME_FORMAT,
            GST_TIME_ARGS (out_time), GST_TIME_ARGS (priv->last_out_time));
        /* interpolate between the current time and the last time based on
         * number of packets we are missing, this is the estimated duration
         * for the missing packet based on equidistant packet spacing. Also make
         * sure we never go negative.
         */
        if (out_time > priv->last_out_time)
          duration = (out_time - priv->last_out_time) / (gap + 1);
        else
          /* non-increasing timestamps: skip the wait and report the packet
           * lost right away */
          goto lost;

        GST_DEBUG_OBJECT (jitterbuffer, "duration %" GST_TIME_FORMAT,
            GST_TIME_ARGS (duration));

        /* add this duration to the timestamp of the last packet we pushed */
        out_time = (priv->last_out_time + duration);
      }
    } else {
      /* we don't know what the next_seqnum should be, wait for the last
       * possible moment to push this buffer, maybe we get an earlier seqnum
       * while we wait */
      GST_DEBUG_OBJECT (jitterbuffer, "First buffer %d, do sync", seqnum);
    }

    GST_OBJECT_LOCK (jitterbuffer);
    clock = GST_ELEMENT_CLOCK (jitterbuffer);
    if (!clock) {
      GST_OBJECT_UNLOCK (jitterbuffer);
      /* let's just push if there is no clock */
      goto push_buffer;
    }

    GST_DEBUG_OBJECT (jitterbuffer, "sync to timestamp %" GST_TIME_FORMAT,
        GST_TIME_ARGS (out_time));

    /* prepare for sync against clock */
    sync_time = out_time + GST_ELEMENT_CAST (jitterbuffer)->base_time;
    /* add latency, this includes our own latency and the peer latency. */
    sync_time += (priv->latency_ms * GST_MSECOND);
    sync_time += priv->peer_latency;

    /* create an entry for the clock; stored in priv->clock_id so the chain
     * function can unschedule it when a new packet arrives */
    id = priv->clock_id = gst_clock_new_single_shot_id (clock, sync_time);
    GST_OBJECT_UNLOCK (jitterbuffer);

    /* release the lock so that the other end can push stuff or unlock */
    JBUF_UNLOCK (priv);

    ret = gst_clock_id_wait (id, NULL);

    JBUF_LOCK (priv);
    /* and free the entry */
    gst_clock_id_unref (id);
    priv->clock_id = NULL;

    /* at this point, the clock could have been unlocked by a timeout, a new
     * tail element was added to the queue or because we are shutting down. Check
     * for shutdown first. */
    if (priv->srcresult != GST_FLOW_OK)
      goto flushing;

    /* if we got unscheduled and we are not flushing, it's because a new tail
     * element became available in the queue. Grab it and try to push or sync.
     */
    if (ret == GST_CLOCK_UNSCHEDULED) {
      GST_DEBUG_OBJECT (jitterbuffer,
          "Wait got unscheduled, will retry to push with new buffer");
      goto again;
    }

  lost:
    /* we now timed out, this means we lost a packet or finished synchronizing
     * on the first buffer. */
    if (gap > 0) {
      GstEvent *event;

      /* we had a gap and thus we lost a packet. Create an event for this. */
      GST_DEBUG_OBJECT (jitterbuffer, "Packet #%d lost", next_seqnum);
      priv->num_late++;
      discont = TRUE;

      if (priv->do_lost) {
        /* create packet lost event */
        event = gst_event_new_custom (GST_EVENT_CUSTOM_DOWNSTREAM,
            gst_structure_new ("GstRTPPacketLost",
                "seqnum", G_TYPE_UINT, (guint) next_seqnum,
                "timestamp", G_TYPE_UINT64, out_time,
                "duration", G_TYPE_UINT64, duration, NULL));
        gst_pad_push_event (priv->srcpad, event);
      }
      /* update our expected next packet */
      priv->last_popped_seqnum = next_seqnum;
      priv->last_out_time = out_time;
      priv->next_seqnum = (next_seqnum + 1) & 0xffff;
      /* look for next packet */
      goto again;
    }

    /* there was no known gap, just the first packet, exit the loop and push */
    GST_DEBUG_OBJECT (jitterbuffer, "First packet #%d synced", seqnum);

    /* get new timestamp, latency might have changed */
    out_time = apply_offset (jitterbuffer, timestamp);
  }
push_buffer:
  /* when we get here we are ready to pop and push the buffer */
  outbuf = rtp_jitter_buffer_pop (priv->jbuf);

  if (discont || priv->discont) {
    /* set DISCONT flag when we missed a packet. */
    outbuf = gst_buffer_make_metadata_writable (outbuf);
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
    priv->discont = FALSE;
  }

  /* apply timestamp with offset to buffer now */
  GST_BUFFER_TIMESTAMP (outbuf) = out_time;

  /* now we are ready to push the buffer. Save the seqnum and release the lock
   * so the other end can push stuff in the queue again.
   */
  priv->last_popped_seqnum = seqnum;
  priv->last_out_time = out_time;
  priv->next_seqnum = (seqnum + 1) & 0xffff;
  JBUF_UNLOCK (priv);

  /* push buffer */
  GST_DEBUG_OBJECT (jitterbuffer,
      "Pushing buffer %d, timestamp %" GST_TIME_FORMAT,
      seqnum, GST_TIME_ARGS (out_time));
  result = gst_pad_push (priv->srcpad, outbuf);
  if (result != GST_FLOW_OK)
    goto pause;

  return;

  /* ERRORS */
do_eos:
  {
    /* store result, we are flushing now */
    GST_DEBUG_OBJECT (jitterbuffer, "We are EOS, pushing EOS downstream");
    priv->srcresult = GST_FLOW_UNEXPECTED;
    gst_pad_pause_task (priv->srcpad);
    gst_pad_push_event (priv->srcpad, gst_event_new_eos ());
    JBUF_UNLOCK (priv);
    return;
  }
flushing:
  {
    GST_DEBUG_OBJECT (jitterbuffer, "we are flushing");
    gst_pad_pause_task (priv->srcpad);
    JBUF_UNLOCK (priv);
    return;
  }
pause:
  {
    const gchar *reason = gst_flow_get_name (result);

    GST_DEBUG_OBJECT (jitterbuffer, "pausing task, reason %s", reason);
    JBUF_LOCK (priv);
    /* store result */
    priv->srcresult = result;
    /* we don't post errors or anything because upstream will do that for us
     * when we pass the return value upstream. */
    gst_pad_pause_task (priv->srcpad);
    JBUF_UNLOCK (priv);
    return;
  }
}