static GstFlowReturn
gst_decklink_audio_sink_render (GstBaseSink * bsink, GstBuffer * buffer)
{
  GstDecklinkAudioSink *self = GST_DECKLINK_AUDIO_SINK_CAST (bsink);
  GstDecklinkVideoSink *video_sink;
  GstFlowReturn flow_ret;
  HRESULT ret;
  GstClockTime timestamp, duration;
  GstClockTime running_time, running_time_duration;
  GstClockTime schedule_time, schedule_time_duration;
  GstClockTime latency, render_delay;
  GstClockTimeDiff ts_offset;
  GstMapInfo map_info;
  const guint8 *data;
  gsize len, written_all;
  gboolean discont;

  GST_DEBUG_OBJECT (self, "Rendering buffer %p", buffer);

  // FIXME: Handle no timestamps
  if (!GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) {
    return GST_FLOW_ERROR;
  }

  if (GST_BASE_SINK_CAST (self)->flushing) {
    return GST_FLOW_FLUSHING;
  }
  // If we're called before output is actually started, start pre-rolling
  if (!self->output->started) {
    self->output->output->BeginAudioPreroll ();
  }

  video_sink =
      GST_DECKLINK_VIDEO_SINK (gst_object_ref (self->output->videosink));

  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);
  discont = gst_audio_stream_align_process (self->stream_align,
      GST_BUFFER_IS_DISCONT (buffer), timestamp,
      gst_buffer_get_size (buffer) / self->info.bpf, &timestamp, &duration,
      NULL);

  if (discont && self->resampler)
    gst_audio_resampler_reset (self->resampler);

  if (GST_BASE_SINK_CAST (self)->segment.rate < 0.0) {
    GstMapInfo out_map;
    gint out_frames = gst_buffer_get_size (buffer) / self->info.bpf;

    buffer = gst_buffer_make_writable (gst_buffer_ref (buffer));

    gst_buffer_map (buffer, &out_map, GST_MAP_READWRITE);
    if (self->info.finfo->format == GST_AUDIO_FORMAT_S16) {
      gint16 *swap_data = (gint16 *) out_map.data;
      gint16 *swap_data_end =
          swap_data + (out_frames - 1) * self->info.channels;
      gint16 swap_tmp[16];

      while (out_frames > 0) {
        memcpy (&swap_tmp, swap_data, self->info.bpf);
        memcpy (swap_data, swap_data_end, self->info.bpf);
        memcpy (swap_data_end, &swap_tmp, self->info.bpf);

        swap_data += self->info.channels;
        swap_data_end -= self->info.channels;

        out_frames -= 2;
      }
    } else {
      gint32 *swap_data = (gint32 *) out_map.data;
      gint32 *swap_data_end =
          swap_data + (out_frames - 1) * self->info.channels;
      gint32 swap_tmp[16];

      while (out_frames > 0) {
        memcpy (&swap_tmp, swap_data, self->info.bpf);
        memcpy (swap_data, swap_data_end, self->info.bpf);
        memcpy (swap_data_end, &swap_tmp, self->info.bpf);

        swap_data += self->info.channels;
        swap_data_end -= self->info.channels;

        out_frames -= 2;
      }
    }
    gst_buffer_unmap (buffer, &out_map);
  } else {
    gst_buffer_ref (buffer);
  }

  if (self->resampler) {
    gint in_frames = gst_buffer_get_size (buffer) / self->info.bpf;
    gint out_frames =
        gst_audio_resampler_get_out_frames (self->resampler, in_frames);
    GstBuffer *out_buf =
        gst_buffer_new_and_alloc (out_frames * self->info.bpf);
    GstMapInfo out_map;

    gst_buffer_map (buffer, &map_info, GST_MAP_READ);
    gst_buffer_map (out_buf, &out_map, GST_MAP_READWRITE);

    gst_audio_resampler_resample (self->resampler,
        (gpointer *) & map_info.data, in_frames,
        (gpointer *) & out_map.data, out_frames);

    gst_buffer_unmap (out_buf, &out_map);
    gst_buffer_unmap (buffer, &map_info);
    buffer = out_buf;
  }

  gst_buffer_map (buffer, &map_info, GST_MAP_READ);
  data = map_info.data;
  len = map_info.size / self->info.bpf;
  written_all = 0;

  do {
    GstClockTime timestamp_now =
        timestamp + gst_util_uint64_scale (written_all, GST_SECOND,
        self->info.rate);
    guint32 buffered_samples;
    GstClockTime buffered_time;
    guint32 written = 0;
    GstClock *clock;
    GstClockTime clock_ahead;

    if (GST_BASE_SINK_CAST (self)->flushing) {
      flow_ret = GST_FLOW_FLUSHING;
      break;
    }

    running_time =
        gst_segment_to_running_time (&GST_BASE_SINK_CAST (self)->segment,
        GST_FORMAT_TIME, timestamp_now);
    running_time_duration =
        gst_segment_to_running_time (&GST_BASE_SINK_CAST (self)->segment,
        GST_FORMAT_TIME, timestamp_now + duration) - running_time;

    /* See gst_base_sink_adjust_time() */
    latency = gst_base_sink_get_latency (bsink);
    render_delay = gst_base_sink_get_render_delay (bsink);
    ts_offset = gst_base_sink_get_ts_offset (bsink);
    running_time += latency;

    if (ts_offset < 0) {
      ts_offset = -ts_offset;
      if ((GstClockTime) ts_offset < running_time)
        running_time -= ts_offset;
      else
        running_time = 0;
    } else {
      running_time += ts_offset;
    }

    if (running_time > render_delay)
      running_time -= render_delay;
    else
      running_time = 0;

    clock = gst_element_get_clock (GST_ELEMENT_CAST (self));
    clock_ahead = 0;
    if (clock) {
      GstClockTime clock_now = gst_clock_get_time (clock);
      GstClockTime base_time =
          gst_element_get_base_time (GST_ELEMENT_CAST (self));
      gst_object_unref (clock);
      clock = NULL;

      if (clock_now != GST_CLOCK_TIME_NONE
          && base_time != GST_CLOCK_TIME_NONE) {
        GST_DEBUG_OBJECT (self,
            "Clock time %" GST_TIME_FORMAT ", base time %" GST_TIME_FORMAT
            ", target running time %" GST_TIME_FORMAT,
            GST_TIME_ARGS (clock_now), GST_TIME_ARGS (base_time),
            GST_TIME_ARGS (running_time));
        if (clock_now > base_time)
          clock_now -= base_time;
        else
          clock_now = 0;

        if (clock_now < running_time)
          clock_ahead = running_time - clock_now;
      }
    }

    GST_DEBUG_OBJECT (self,
        "Ahead %" GST_TIME_FORMAT " of the clock running time",
        GST_TIME_ARGS (clock_ahead));

    if (self->output->output->GetBufferedAudioSampleFrameCount
        (&buffered_samples) != S_OK)
      buffered_samples = 0;

    buffered_time =
        gst_util_uint64_scale (buffered_samples, GST_SECOND, self->info.rate);
    buffered_time /= ABS (GST_BASE_SINK_CAST (self)->segment.rate);
    GST_DEBUG_OBJECT (self,
        "Buffered %" GST_TIME_FORMAT " in the driver (%u samples)",
        GST_TIME_ARGS (buffered_time), buffered_samples);

    // We start waiting once we have more than buffer-time buffered
    if (buffered_time > self->buffer_time || clock_ahead > self->buffer_time) {
      GstClockReturn clock_ret;
      GstClockTime wait_time = running_time;

      GST_DEBUG_OBJECT (self,
          "Buffered enough, wait for preroll or the clock or flushing");

      if (wait_time < self->buffer_time)
        wait_time = 0;
      else
        wait_time -= self->buffer_time;

      flow_ret =
          gst_base_sink_do_preroll (GST_BASE_SINK_CAST (self),
          GST_MINI_OBJECT_CAST (buffer));
      if (flow_ret != GST_FLOW_OK)
        break;

      clock_ret =
          gst_base_sink_wait_clock (GST_BASE_SINK_CAST (self), wait_time,
          NULL);
      if (GST_BASE_SINK_CAST (self)->flushing) {
        flow_ret = GST_FLOW_FLUSHING;
        break;
      }
      // Rerun the whole loop again
      if (clock_ret == GST_CLOCK_UNSCHEDULED)
        continue;
    }

    schedule_time = running_time;
    schedule_time_duration = running_time_duration;

    gst_decklink_video_sink_convert_to_internal_clock (video_sink,
        &schedule_time, &schedule_time_duration);

    GST_LOG_OBJECT (self, "Scheduling audio samples at %" GST_TIME_FORMAT
        " with duration %" GST_TIME_FORMAT, GST_TIME_ARGS (schedule_time),
        GST_TIME_ARGS (schedule_time_duration));

    ret = self->output->output->ScheduleAudioSamples ((void *) data, len,
        schedule_time, GST_SECOND, &written);
    if (ret != S_OK) {
      bool is_running = true;

      self->output->output->IsScheduledPlaybackRunning (&is_running);

      if (is_running && !GST_BASE_SINK_CAST (self)->flushing
          && self->output->started) {
        GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL),
            ("Failed to schedule frame: 0x%08lx", (unsigned long) ret));
        flow_ret = GST_FLOW_ERROR;
        break;
      } else {
        // Ignore the error and go out of the loop here, we're shutting down
        // or are not started yet and there's nothing we can do at this point
        GST_INFO_OBJECT (self,
            "Ignoring scheduling error 0x%08x because we're not started yet"
            " or not anymore", (guint) ret);
        flow_ret = GST_FLOW_OK;
        break;
      }
    }

    len -= written;
    data += written * self->info.bpf;
    if (self->resampler)
      written_all += written * ABS (GST_BASE_SINK_CAST (self)->segment.rate);
    else
      written_all += written;

    flow_ret = GST_FLOW_OK;
  } while (len > 0);

  gst_buffer_unmap (buffer, &map_info);
  gst_buffer_unref (buffer);
  gst_object_unref (video_sink);

  GST_DEBUG_OBJECT (self, "Returning %s", gst_flow_get_name (flow_ret));

  return flow_ret;
}
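/* A minimal sketch of the stream-alignment step used above (the
 * GstAudioStreamAlign helper from the gst-plugins-base audio library): feed
 * each buffer's timestamp and sample count through the aligner so that small
 * timestamp jitter is smoothed out and real jumps are flagged as
 * discontinuities. The helper would typically be created with something like
 * gst_audio_stream_align_new (rate, alignment_threshold, discont_wait);
 * the function name and bpf parameter here are illustrative, not part of
 * the element above. */
static void
sketch_align_buffer (GstAudioStreamAlign * align, GstBuffer * buf, guint bpf)
{
  GstClockTime ts = GST_BUFFER_PTS (buf);
  GstClockTime dur = GST_BUFFER_DURATION (buf);
  guint n_samples = gst_buffer_get_size (buf) / bpf;
  gboolean discont;

  discont = gst_audio_stream_align_process (align,
      GST_BUFFER_IS_DISCONT (buf), ts, n_samples, &ts, &dur, NULL);
  if (discont) {
    /* a real gap in the stream: reset any downstream state here,
     * e.g. a resampler, as the render function above does */
  }
}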
static GstFlowReturn
gst_v4l2src_create (GstPushSrc * src, GstBuffer ** buf)
{
  GstV4l2Src *v4l2src = GST_V4L2SRC (src);
  GstV4l2Object *obj = v4l2src->v4l2object;
  GstFlowReturn ret;
  GstClock *clock;
  GstClockTime abs_time, base_time, timestamp, duration;
  GstClockTime delay;

  ret =
      GST_BASE_SRC_CLASS (parent_class)->alloc (GST_BASE_SRC (src), 0,
      obj->info.size, buf);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto alloc_failed;

  ret =
      gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL_CAST (obj->pool),
      buf);
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto error;

  timestamp = GST_BUFFER_TIMESTAMP (*buf);
  duration = obj->duration;

  /* timestamps, LOCK to get clock and base time. */
  /* FIXME: element clock and base_time is rarely changing */
  GST_OBJECT_LOCK (v4l2src);
  if ((clock = GST_ELEMENT_CLOCK (v4l2src))) {
    /* we have a clock, get base time and ref clock */
    base_time = GST_ELEMENT (v4l2src)->base_time;
    gst_object_ref (clock);
  } else {
    /* no clock, can't set timestamps */
    base_time = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (v4l2src);

  /* sample pipeline clock */
  if (clock) {
    abs_time = gst_clock_get_time (clock);
    gst_object_unref (clock);
  } else {
    abs_time = GST_CLOCK_TIME_NONE;
  }

  if (timestamp != GST_CLOCK_TIME_NONE) {
    struct timespec now;
    GstClockTime gstnow;

    /* v4l2 specs say to use the system time although many drivers switched to
     * the more desirable monotonic time. We first try to use the monotonic
     * time and see how that goes */
    clock_gettime (CLOCK_MONOTONIC, &now);
    gstnow = GST_TIMESPEC_TO_TIME (now);

    if (gstnow < timestamp && (timestamp - gstnow) > (10 * GST_SECOND)) {
      GTimeVal now;

      /* very large diff, fall back to system time */
      g_get_current_time (&now);
      gstnow = GST_TIMEVAL_TO_TIME (now);
    }

    if (gstnow > timestamp) {
      delay = gstnow - timestamp;
    } else {
      delay = 0;
    }

    GST_DEBUG_OBJECT (v4l2src, "ts: %" GST_TIME_FORMAT " now %"
        GST_TIME_FORMAT " delay %" GST_TIME_FORMAT,
        GST_TIME_ARGS (timestamp), GST_TIME_ARGS (gstnow),
        GST_TIME_ARGS (delay));
  } else {
    /* we assume 1 frame latency otherwise */
    if (GST_CLOCK_TIME_IS_VALID (duration))
      delay = duration;
    else
      delay = 0;
  }

  /* set buffer metadata */
  GST_BUFFER_OFFSET (*buf) = v4l2src->offset++;
  GST_BUFFER_OFFSET_END (*buf) = v4l2src->offset;

  if (G_LIKELY (abs_time != GST_CLOCK_TIME_NONE)) {
    /* the time now is the time of the clock minus the base time */
    timestamp = abs_time - base_time;

    /* adjust for delay in the device */
    if (timestamp > delay)
      timestamp -= delay;
    else
      timestamp = 0;
  } else {
    timestamp = GST_CLOCK_TIME_NONE;
  }

  /* activate settings for next frame */
  if (GST_CLOCK_TIME_IS_VALID (duration)) {
    v4l2src->ctrl_time += duration;
  } else {
    /* this is not very good (as it should be the next timestamp),
     * still good enough for linear fades (as long as it is not -1) */
    v4l2src->ctrl_time = timestamp;
  }
  gst_object_sync_values (GST_OBJECT (src), v4l2src->ctrl_time);

  GST_INFO_OBJECT (src, "sync to %" GST_TIME_FORMAT " out ts %"
      GST_TIME_FORMAT, GST_TIME_ARGS (v4l2src->ctrl_time),
      GST_TIME_ARGS (timestamp));

  GST_BUFFER_TIMESTAMP (*buf) = timestamp;
  GST_BUFFER_DURATION (*buf) = duration;

  return ret;

  /* ERROR */
alloc_failed:
  {
    if (ret != GST_FLOW_FLUSHING)
      GST_ELEMENT_ERROR (src, RESOURCE, NO_SPACE_LEFT,
          ("Failed to allocate a buffer"), (NULL));
    return ret;
  }
error:
  {
    GST_DEBUG_OBJECT (src, "error processing buffer %d (%s)", ret,
        gst_flow_get_name (ret));
    return ret;
  }
}
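/* The function above follows the canonical live-source timestamping pattern:
 * sample the pipeline clock, subtract the element's base time to get the
 * running time, then subtract the capture delay reported by the driver. A
 * minimal stand-alone sketch of that pattern (hypothetical helper, not part
 * of gstv4l2src; "elem" is assumed to be a live source element): */
static GstClockTime
sketch_live_timestamp (GstElement * elem, GstClockTime capture_delay)
{
  GstClock *clock;
  GstClockTime now, base_time, timestamp;

  clock = gst_element_get_clock (elem);
  if (clock == NULL)
    return GST_CLOCK_TIME_NONE;       /* no clock, can't timestamp */

  now = gst_clock_get_time (clock);   /* absolute clock time */
  gst_object_unref (clock);

  base_time = gst_element_get_base_time (elem);
  timestamp = now > base_time ? now - base_time : 0;  /* running time */

  /* compensate for the time the frame spent in the device */
  if (GST_CLOCK_TIME_IS_VALID (capture_delay) && timestamp > capture_delay)
    timestamp -= capture_delay;

  return timestamp;
}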
/**
 * gst_clock_id_wait:
 * @id: The #GstClockID to wait on
 * @jitter: A pointer that will contain the jitter, can be %NULL.
 *
 * Perform a blocking wait on @id.
 * @id should have been created with gst_clock_new_single_shot_id()
 * or gst_clock_new_periodic_id() and should not have been unscheduled
 * with a call to gst_clock_id_unschedule().
 *
 * If the @jitter argument is not %NULL and this function returns #GST_CLOCK_OK
 * or #GST_CLOCK_EARLY, it will contain the difference
 * against the clock and the time of @id when this method was
 * called.
 * Positive values indicate how late @id was relative to the clock
 * (in which case this function will return #GST_CLOCK_EARLY).
 * Negative values indicate how much time was spent waiting on the clock
 * before this function returned.
 *
 * Returns: the result of the blocking wait. #GST_CLOCK_EARLY will be returned
 * if the current clock time is past the time of @id, #GST_CLOCK_OK if
 * @id was scheduled in time. #GST_CLOCK_UNSCHEDULED if @id was
 * unscheduled with gst_clock_id_unschedule().
 *
 * MT safe.
 */
GstClockReturn
gst_clock_id_wait (GstClockID id, GstClockTimeDiff * jitter)
{
  GstClockEntry *entry;
  GstClock *clock;
  GstClockReturn res;
  GstClockTime requested;
  GstClockClass *cclass;

  g_return_val_if_fail (id != NULL, GST_CLOCK_ERROR);

  entry = (GstClockEntry *) id;
  requested = GST_CLOCK_ENTRY_TIME (entry);

  clock = GST_CLOCK_ENTRY_CLOCK (entry);

  /* can't sync on invalid times */
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (requested)))
    goto invalid_time;

  cclass = GST_CLOCK_GET_CLASS (clock);

  GST_CAT_DEBUG_OBJECT (GST_CAT_CLOCK, clock, "waiting on clock entry %p", id);

  /* if we have a wait_jitter function, use that */
  if (G_LIKELY (cclass->wait_jitter)) {
    res = cclass->wait_jitter (clock, entry, jitter);
  } else {
    /* check if we have a simple _wait function otherwise. The function without
     * the jitter arg is less optimal as we need to do an additional
     * _get_time() which is not atomic with the _wait() and a typical _wait()
     * function does yet another _get_time() anyway. */
    if (G_UNLIKELY (cclass->wait == NULL))
      goto not_supported;

    if (jitter) {
      GstClockTime now = gst_clock_get_time (clock);

      /* jitter is the diff against the clock when this entry is scheduled.
       * Negative values mean that the entry was in time, a positive value
       * means that the entry was too late. */
      *jitter = GST_CLOCK_DIFF (requested, now);
    }
    res = cclass->wait (clock, entry);
  }

  GST_CAT_DEBUG_OBJECT (GST_CAT_CLOCK, clock, "done waiting entry %p, res: %d",
      id, res);

  if (entry->type == GST_CLOCK_ENTRY_PERIODIC)
    entry->time = requested + entry->interval;

  if (G_UNLIKELY (clock->stats))
    gst_clock_update_stats (clock);

  return res;

  /* ERRORS */
invalid_time:
  {
    GST_CAT_DEBUG_OBJECT (GST_CAT_CLOCK, clock,
        "invalid time requested, returning _BADTIME");
    return GST_CLOCK_BADTIME;
  }
not_supported:
  {
    GST_CAT_DEBUG_OBJECT (GST_CAT_CLOCK, clock, "clock wait is not supported");
    return GST_CLOCK_UNSUPPORTED;
  }
}
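/* A minimal caller-side sketch of the API documented above: schedule a
 * single-shot entry 100ms ahead on the system clock and block on it,
 * inspecting the reported jitter. Illustrative only; error handling
 * trimmed. */
static void
sketch_single_shot_wait (void)
{
  GstClock *clock = gst_system_clock_obtain ();
  GstClockTime target = gst_clock_get_time (clock) + 100 * GST_MSECOND;
  GstClockID id = gst_clock_new_single_shot_id (clock, target);
  GstClockTimeDiff jitter = 0;
  GstClockReturn res;

  res = gst_clock_id_wait (id, &jitter);
  /* res is GST_CLOCK_OK when we woke in time, GST_CLOCK_EARLY when the clock
   * had already passed "target"; jitter > 0 means we were late */
  g_print ("wait result %d, jitter %" G_GINT64_FORMAT " ns\n", res, jitter);

  gst_clock_id_unref (id);
  gst_object_unref (clock);
}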
static GstFlowReturn
gst_dasf_src_create (GstAudioSrc *audiosrc, guint64 offset, guint length,
    GstBuffer **buffer)
{
  GstDasfSrc* self = GST_DASF_SRC (audiosrc);
  GstBaseAudioSrc *baseaudiosrc = GST_BASE_AUDIO_SRC (self);
  GstBuffer* gst_buffer = NULL;
  OMX_BUFFERHEADERTYPE* omx_buffer = NULL;
  GstDasfSrcPrivate* priv = GST_DASF_SRC_GET_PRIVATE (self);
  GstGooAudioFilter* me = self->peer_element;

  GST_DEBUG ("");

  if (me->component->cur_state != OMX_StateExecuting) {
    return GST_FLOW_UNEXPECTED;
  }

  GST_DEBUG ("goo stuff");
  {
    omx_buffer = goo_port_grab_buffer (me->outport);

    if (gst_pad_alloc_buffer (GST_BASE_SRC_PAD (self), priv->outcount,
            omx_buffer->nFilledLen, GST_PAD_CAPS (GST_BASE_SRC_PAD (self)),
            &gst_buffer) == GST_FLOW_OK) {
      if (GST_IS_GOO_BUFFER (gst_buffer)) {
        memcpy (GST_BUFFER_DATA (gst_buffer), omx_buffer->pBuffer,
            omx_buffer->nFilledLen);
        goo_component_release_buffer (me->component, omx_buffer);
      } else {
        gst_buffer_unref (gst_buffer);
        gst_buffer = (GstBuffer*) gst_goo_buffer_new ();
        gst_goo_buffer_set_data (gst_buffer, me->component, omx_buffer);
      }
    } else {
      goto fail;
    }
  }

  GST_DEBUG ("gst stuff");
  {
    GstClock* clock = NULL;
    GstClockTime timestamp, duration;

    clock = gst_element_get_clock (GST_ELEMENT (self));
    timestamp = gst_clock_get_time (clock);
    timestamp -= gst_element_get_base_time (GST_ELEMENT (self));
    gst_object_unref (clock);

    GST_BUFFER_TIMESTAMP (gst_buffer) =
        gst_util_uint64_scale_int (GST_SECOND, priv->outcount, 50);

    /* Set 20 millisecond duration */
    duration = gst_util_uint64_scale_int (GST_SECOND, 1, 50);
    GST_BUFFER_DURATION (gst_buffer) = duration;
    GST_BUFFER_OFFSET (gst_buffer) = priv->outcount++;
    GST_BUFFER_OFFSET_END (gst_buffer) = priv->outcount;

    gst_buffer_set_caps (gst_buffer, GST_PAD_CAPS (GST_BASE_SRC_PAD (self)));
  }

beach:
  *buffer = gst_buffer;
  return GST_FLOW_OK;

fail:
  if (G_LIKELY (*buffer)) {
    gst_buffer_unref (*buffer);
  }
  return GST_FLOW_ERROR;
}
/**
 * httpstreaming_dispatcher:
 * @data: RequestData type pointer
 * @user_data: httpstreaming type pointer
 *
 * Process an http request.
 *
 * Returns: a positive value if processing has not completed, for example
 * live streaming. 0 if processing has completed.
 */
static GstClockTime
httpstreaming_dispatcher (gpointer data, gpointer user_data)
{
  RequestData *request_data = data;
  HTTPStreaming *httpstreaming = (HTTPStreaming *)user_data;
  gchar *buf;
  int i = 0, j, ret;
  Encoder *encoder;
  EncoderOutput *encoder_output;
  Channel *channel;
  RequestDataUserData *request_user_data;
  GstClockTime ret_clock_time;

  channel = get_channel (httpstreaming, request_data);
  switch (request_data->status) {
  case HTTP_REQUEST:
    GST_DEBUG ("new request arrived, socket is %d, uri is %s",
        request_data->sock, request_data->uri);
    encoder_output = get_encoder_output (httpstreaming, request_data);
    if (encoder_output == NULL) {
      buf = g_strdup_printf (http_404, PACKAGE_NAME, PACKAGE_VERSION);
      httpserver_write (request_data->sock, buf, strlen (buf));
      g_free (buf);
      return 0;
    } else if ((request_data->parameters[0] == '\0') ||
               (request_data->parameters[0] == 'b')) {
      /* default operator is play, ?bitrate= */
      GST_ERROR ("Play %s.", request_data->uri);
      request_user_data =
          (RequestDataUserData *)g_malloc (sizeof (RequestDataUserData)); //FIXME
      if (request_user_data == NULL) {
        GST_ERROR ("Internal Server Error, g_malloc for request_user_data failure.");
        buf = g_strdup_printf (http_500, PACKAGE_NAME, PACKAGE_VERSION);
        httpserver_write (request_data->sock, buf, strlen (buf));
        g_free (buf);
        return 0;
      }
      if (*(encoder_output->head_addr) == *(encoder_output->tail_addr)) {
        GST_DEBUG ("%s unready.", request_data->uri);
        buf = g_strdup_printf (http_404, PACKAGE_NAME, PACKAGE_VERSION);
        httpserver_write (request_data->sock, buf, strlen (buf));
        g_free (buf);
        return 0;
      }
      /* let send_chunk send new chunk. */
      encoder = get_encoder (request_data->uri,
          httpstreaming->itvencoder->channel_array);
      request_user_data->encoder = encoder;
      request_user_data->chunk_size = 0;
      request_user_data->send_count = 2;
      request_user_data->chunk_size_str = g_strdup ("");
      request_user_data->chunk_size_str_len = 0;
      request_user_data->encoder_output = encoder_output;
      request_user_data->current_rap_addr = *(encoder_output->last_rap_addr);
      request_user_data->current_send_position =
          *(encoder_output->last_rap_addr) + 12;
      request_user_data->channel_age = channel->age;
      request_data->user_data = request_user_data;
      request_data->bytes_send = 0;
      buf = g_strdup_printf (http_chunked, PACKAGE_NAME, PACKAGE_VERSION);
      httpserver_write (request_data->sock, buf, strlen (buf));
      g_free (buf);
      return gst_clock_get_time (httpstreaming->httpserver->system_clock)
          + GST_MSECOND;
    } else {
      buf = g_strdup_printf (http_404, PACKAGE_NAME, PACKAGE_VERSION);
      httpserver_write (request_data->sock, buf, strlen (buf));
      g_free (buf);
      return 0;
    }
    break;
  case HTTP_CONTINUE:
    request_user_data = request_data->user_data;
    if ((request_user_data->channel_age != channel->age) ||
        (*(channel->output->state) != GST_STATE_PLAYING)) {
      g_free (request_data->user_data);
      request_data->user_data = NULL;
      return 0;
    }
    encoder_output = request_user_data->encoder_output;
    if (request_user_data->current_send_position ==
        *(encoder_output->tail_addr)) {
      /* no more stream yet, wait a while before rechecking */
      GST_DEBUG ("current:%llu == tail:%llu",
          request_user_data->current_send_position,
          *(encoder_output->tail_addr));
      return gst_clock_get_time (httpstreaming->httpserver->system_clock)
          + 500 * GST_MSECOND + g_random_int_range (1, 1000000);
    }
    ret_clock_time = send_chunk (encoder_output, request_data);
    if (ret_clock_time != GST_CLOCK_TIME_NONE) {
      return ret_clock_time +
          gst_clock_get_time (httpstreaming->httpserver->system_clock);
    } else {
      return GST_CLOCK_TIME_NONE;
    }
  case HTTP_FINISH:
    g_free (request_data->user_data);
    request_data->user_data = NULL;
    return 0;
  default:
    GST_ERROR ("Unknown status %d", request_data->status);
    buf = g_strdup_printf (http_400, PACKAGE_NAME, PACKAGE_VERSION);
    httpserver_write (request_data->sock, buf, strlen (buf));
    g_free (buf);
    return 0;
  }
}
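/* Hypothetical caller sketch for the return-value protocol above: a return
 * of 0 means the request is finished, any other value is an absolute time on
 * the server's system clock at which the dispatcher wants to be invoked
 * again. "reschedule_at" is an illustrative name, not part of gstreamill. */
static void
sketch_dispatch_one (HTTPStreaming *httpstreaming, RequestData *request_data)
{
  GstClockTime wakeup;

  wakeup = httpstreaming_dispatcher (request_data, httpstreaming);
  if (wakeup == 0) {
    /* finished: the dispatcher already wrote the response */
    return;
  }
  /* otherwise re-queue the request until the clock reaches "wakeup" */
  reschedule_at (request_data, wakeup);
}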
static gboolean
gst_dshowvideosrc_push_buffer (guint8 * buffer, guint size,
    gpointer src_object, GstClockTime duration)
{
  GstDshowVideoSrc *src = GST_DSHOWVIDEOSRC (src_object);
  GstBuffer *buf = NULL;
  GstMapInfo map;
  IPin *pPin = NULL;
  HRESULT hres = S_FALSE;
  AM_MEDIA_TYPE *pMediaType = NULL;

  if (!buffer || size == 0 || !src) {
    return FALSE;
  }

  /* create a new buffer and assign it the clock time as timestamp */
  buf = gst_buffer_new_and_alloc (size);
  gst_buffer_set_size (buf, size);

  GstClock *clock = gst_element_get_clock (GST_ELEMENT (src));
  GST_BUFFER_PTS (buf) =
      GST_CLOCK_DIFF (gst_element_get_base_time (GST_ELEMENT (src)),
      gst_clock_get_time (clock));

  //GST_BUFFER_DTS(buf) = GST_BUFFER_PTS (buf);
  GST_BUFFER_DTS (buf) = GST_CLOCK_TIME_NONE;
  GST_BUFFER_OFFSET (buf) = src->offset++;
  GST_BUFFER_OFFSET_END (buf) = src->offset;
  GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_LIVE);

  gst_object_unref (clock);

  GST_BUFFER_DURATION (buf) = duration;

  gst_buffer_map (buf, &map, GST_MAP_WRITE);
  if (src->is_rgb) {
    /* For RGB, the directshow decoder will return a bottom-up BITMAP.
     * There is probably a way to get top-bottom video frames from
     * the decoder... */
    gint line = 0;
    gint stride = size / src->height;

    for (; line < src->height; line++) {
      memcpy (map.data + (line * stride),
          buffer + (size - ((line + 1) * (stride))), stride);
    }
  } else {
    memcpy (map.data, buffer, size);
  }
  gst_buffer_unmap (buf, &map);

  src->time += duration;
  gst_object_sync_values (GST_OBJECT (src), src->time);

  GST_DEBUG ("push_buffer => pts %" GST_TIME_FORMAT " duration %"
      GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
      GST_TIME_ARGS (duration));

  g_mutex_lock (&src->buffer_mutex);
  if (src->buffer != NULL)
    gst_buffer_unref (src->buffer);
  src->buffer = buf;
  g_cond_signal (&src->buffer_cond);
  g_mutex_unlock (&src->buffer_mutex);

  return TRUE;
}
/**
 * gst_rtsp_sdp_from_media:
 * @sdp: a #GstSDPMessage
 * @info: (transfer none): a #GstSDPInfo
 * @media: (transfer none): a #GstRTSPMedia
 *
 * Add @media specific info to @sdp. @info is used to configure the connection
 * information in the SDP.
 *
 * Returns: TRUE on success.
 */
gboolean
gst_rtsp_sdp_from_media (GstSDPMessage * sdp, GstSDPInfo * info,
    GstRTSPMedia * media)
{
  guint i, n_streams;
  gchar *rangestr;

  n_streams = gst_rtsp_media_n_streams (media);

  rangestr = gst_rtsp_media_get_range_string (media, FALSE,
      GST_RTSP_RANGE_NPT);
  if (rangestr == NULL)
    goto not_prepared;

  gst_sdp_message_add_attribute (sdp, "range", rangestr);
  g_free (rangestr);

  for (i = 0; i < n_streams; i++) {
    GstRTSPStream *stream;
    GstCaps *caps;
    GstStructure *s;
    GstRTSPProfile profiles;
    guint mask;

    stream = gst_rtsp_media_get_stream (media, i);
    caps = gst_rtsp_stream_get_caps (stream);

    if (caps == NULL) {
      g_warning ("ignoring stream %d without media type", i);
      continue;
    }

    s = gst_caps_get_structure (caps, 0);
    if (s == NULL) {
      gst_caps_unref (caps);
      g_warning ("ignoring stream %d without media type", i);
      continue;
    }

    /* make a new media for each profile */
    profiles = gst_rtsp_stream_get_profiles (stream);
    mask = 1;
    while (profiles >= mask) {
      GstRTSPProfile prof = profiles & mask;

      if (prof)
        make_media (sdp, info, media, stream, s, prof);

      mask <<= 1;
    }
    gst_caps_unref (caps);
  }

  {
    GstNetTimeProvider *provider;

    if ((provider =
            gst_rtsp_media_get_time_provider (media, info->server_ip, 0))) {
      GstClock *clock;
      gchar *address, *str;
      gint port;

      g_object_get (provider, "clock", &clock, "address", &address, "port",
          &port, NULL);

      str = g_strdup_printf ("GstNetTimeProvider %s %s:%d %" G_GUINT64_FORMAT,
          g_type_name (G_TYPE_FROM_INSTANCE (clock)), address, port,
          gst_clock_get_time (clock));

      gst_sdp_message_add_attribute (sdp, "x-gst-clock", str);
      g_free (str);
      gst_object_unref (clock);
      g_free (address);
      gst_object_unref (provider);
    }
  }

  return TRUE;

  /* ERRORS */
not_prepared:
  {
    GST_ERROR ("media %p is not prepared", media);
    return FALSE;
  }
}
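/* A minimal caller-side sketch for the function above, as used when
 * answering an RTSP DESCRIBE: create an empty SDP message, fill in the
 * server address, and let gst_rtsp_sdp_from_media() append the media
 * sections. Assumes "media" is an already prepared GstRTSPMedia; a real
 * server would also set the SDP origin and session name. */
static GstSDPMessage *
sketch_build_sdp (GstRTSPMedia * media, const gchar * server_ip)
{
  GstSDPMessage *sdp;
  GstSDPInfo info = { FALSE, server_ip };   /* is_ipv6, server_ip */

  gst_sdp_message_new (&sdp);
  if (!gst_rtsp_sdp_from_media (sdp, &info, media)) {
    gst_sdp_message_free (sdp);
    return NULL;
  }
  return sdp;
}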
ClockTime Clock::clockTime() const
{
    GstClockTime t = gst_clock_get_time(object<GstClock>());
    return t;
}
static OSErr
data_proc (SGChannel c, Ptr p, long len, long *offset, long chRefCon,
    TimeValue time, short writeType, long refCon)
{
  GstOSXVideoSrc *self;
  gint fps_n, fps_d;
  GstClockTime duration, timestamp, latency;
  CodecFlags flags;
  ComponentResult err;
  PixMapHandle hPixMap;
  Rect portRect;
  int pix_rowBytes;
  void *pix_ptr;
  int pix_height;
  int pix_size;

  self = GST_OSX_VIDEO_SRC (refCon);

  if (self->buffer != NULL) {
    gst_buffer_unref (self->buffer);
    self->buffer = NULL;
  }

  err = DecompressSequenceFrameS (self->dec_seq, p, len, 0, &flags, NULL);
  if (err != noErr) {
    GST_ERROR_OBJECT (self, "DecompressSequenceFrameS returned %d", (int) err);
    return err;
  }

  hPixMap = GetGWorldPixMap (self->world);
  LockPixels (hPixMap);
  GetPortBounds (self->world, &portRect);
  pix_rowBytes = (int) GetPixRowBytes (hPixMap);
  pix_ptr = GetPixBaseAddr (hPixMap);
  pix_height = (portRect.bottom - portRect.top);
  pix_size = pix_rowBytes * pix_height;

  GST_DEBUG_OBJECT (self, "num=%5d, height=%d, rowBytes=%d, size=%d",
      self->seq_num, pix_height, pix_rowBytes, pix_size);

  fps_n = FRAMERATE;
  fps_d = 1;

  duration = gst_util_uint64_scale_int (GST_SECOND, fps_d, fps_n);
  latency = duration;

  timestamp = gst_clock_get_time (GST_ELEMENT_CAST (self)->clock);
  timestamp -= gst_element_get_base_time (GST_ELEMENT_CAST (self));
  if (timestamp > latency)
    timestamp -= latency;
  else
    timestamp = 0;

  self->buffer = gst_buffer_new_and_alloc (pix_size);
  GST_BUFFER_OFFSET (self->buffer) = self->seq_num;
  GST_BUFFER_TIMESTAMP (self->buffer) = timestamp;
  memcpy (GST_BUFFER_DATA (self->buffer), pix_ptr, pix_size);

  self->seq_num++;

  UnlockPixels (hPixMap);

  return noErr;
}
static GstFlowReturn
gst_dx9screencapsrc_create (GstPushSrc * push_src, GstBuffer ** buf)
{
  GstDX9ScreenCapSrc *src = GST_DX9SCREENCAPSRC (push_src);
  GstBuffer *new_buf;
  gint new_buf_size, i;
  gint width, height, stride;
  GstClock *clock;
  GstClockTime buf_time, buf_dur;
  D3DLOCKED_RECT locked_rect;
  LPBYTE p_dst, p_src;
  HRESULT hres;
  GstMapInfo map;
  guint64 frame_number;

  if (G_UNLIKELY (!src->d3d9_device)) {
    GST_ELEMENT_ERROR (src, CORE, NEGOTIATION, (NULL),
        ("format wasn't negotiated before create function"));
    return GST_FLOW_NOT_NEGOTIATED;
  }

  clock = gst_element_get_clock (GST_ELEMENT (src));
  if (clock != NULL) {
    GstClockTime time, base_time;

    /* Calculate sync time. */
    time = gst_clock_get_time (clock);
    base_time = gst_element_get_base_time (GST_ELEMENT (src));
    buf_time = time - base_time;

    if (src->rate_numerator) {
      frame_number = gst_util_uint64_scale (buf_time,
          src->rate_numerator, GST_SECOND * src->rate_denominator);
    } else {
      frame_number = -1;
    }
  } else {
    buf_time = GST_CLOCK_TIME_NONE;
    frame_number = -1;
  }

  if (frame_number != -1 && frame_number == src->frame_number) {
    GstClockID id;
    GstClockReturn ret;

    /* Need to wait for the next frame */
    frame_number += 1;

    /* Figure out what the next frame time is */
    buf_time = gst_util_uint64_scale (frame_number,
        src->rate_denominator * GST_SECOND, src->rate_numerator);

    id = gst_clock_new_single_shot_id (clock,
        buf_time + gst_element_get_base_time (GST_ELEMENT (src)));
    GST_OBJECT_LOCK (src);
    src->clock_id = id;
    GST_OBJECT_UNLOCK (src);

    GST_DEBUG_OBJECT (src, "Waiting for next frame time %" G_GUINT64_FORMAT,
        buf_time);
    ret = gst_clock_id_wait (id, NULL);
    GST_OBJECT_LOCK (src);

    gst_clock_id_unref (id);
    src->clock_id = NULL;
    if (ret == GST_CLOCK_UNSCHEDULED) {
      /* Got woken up by the unlock function */
      GST_OBJECT_UNLOCK (src);
      return GST_FLOW_FLUSHING;
    }
    GST_OBJECT_UNLOCK (src);

    /* Duration is a complete 1/fps frame duration */
    buf_dur = gst_util_uint64_scale_int (GST_SECOND,
        src->rate_denominator, src->rate_numerator);
  } else if (frame_number != -1) {
    GstClockTime next_buf_time;

    GST_DEBUG_OBJECT (src, "No need to wait for next frame time %"
        G_GUINT64_FORMAT " next frame = %" G_GINT64_FORMAT " prev = %"
        G_GINT64_FORMAT, buf_time, frame_number, src->frame_number);
    next_buf_time = gst_util_uint64_scale (frame_number + 1,
        src->rate_denominator * GST_SECOND, src->rate_numerator);
    /* Frame duration is from now until the next expected capture time */
    buf_dur = next_buf_time - buf_time;
  } else {
    buf_dur = GST_CLOCK_TIME_NONE;
  }
  src->frame_number = frame_number;

  height = (src->src_rect.bottom - src->src_rect.top);
  width = (src->src_rect.right - src->src_rect.left);
  new_buf_size = width * 4 * height;

  GST_LOG_OBJECT (src, "creating buffer of %d bytes with %dx%d image",
      new_buf_size, width, height);

  /* Do screen capture and put it into buffer...
   * Acquire front buffer, and lock it */
  hres =
      IDirect3DDevice9_GetFrontBufferData (src->d3d9_device, 0, src->surface);
  if (FAILED (hres)) {
    GST_DEBUG_OBJECT (src, "DirectX::GetFrontBufferData failed.");
    return GST_FLOW_ERROR;
  }

  if (src->show_cursor) {
    CURSORINFO ci;

    ci.cbSize = sizeof (CURSORINFO);
    GetCursorInfo (&ci);

    if (ci.flags & CURSOR_SHOWING) {
      ICONINFO ii;
      HDC memDC;

      GetIconInfo (ci.hCursor, &ii);

      if (SUCCEEDED (IDirect3DSurface9_GetDC (src->surface, &memDC))) {
        HCURSOR cursor = CopyImage (ci.hCursor, IMAGE_CURSOR, 0, 0,
            LR_MONOCHROME | LR_DEFAULTSIZE);

        DrawIcon (memDC,
            ci.ptScreenPos.x - ii.xHotspot - src->monitor_info.rcMonitor.left,
            ci.ptScreenPos.y - ii.yHotspot - src->monitor_info.rcMonitor.top,
            cursor);
        DestroyCursor (cursor);
        IDirect3DSurface9_ReleaseDC (src->surface, memDC);
      }

      DeleteObject (ii.hbmColor);
      DeleteObject (ii.hbmMask);
    }
  }

  hres = IDirect3DSurface9_LockRect (src->surface, &locked_rect,
      &(src->src_rect),
      D3DLOCK_NO_DIRTY_UPDATE | D3DLOCK_NOSYSLOCK | D3DLOCK_READONLY);
  if (FAILED (hres)) {
    GST_DEBUG_OBJECT (src, "DirectX::LockRect failed.");
    return GST_FLOW_ERROR;
  }

  new_buf = gst_buffer_new_and_alloc (new_buf_size);
  gst_buffer_map (new_buf, &map, GST_MAP_WRITE);
  p_dst = (LPBYTE) map.data;
  p_src = (LPBYTE) locked_rect.pBits;
  stride = width * 4;
  for (i = 0; i < height; ++i) {
    memcpy (p_dst, p_src, stride);
    p_dst += stride;
    p_src += locked_rect.Pitch;
  }
  gst_buffer_unmap (new_buf, &map);

  /* Unlock copy of front buffer */
  IDirect3DSurface9_UnlockRect (src->surface);

  GST_BUFFER_TIMESTAMP (new_buf) = buf_time;
  GST_BUFFER_DURATION (new_buf) = buf_dur;

  if (clock != NULL)
    gst_object_unref (clock);

  *buf = new_buf;
  return GST_FLOW_OK;
}
static GstFlowReturn
gst_fake_src_create (GstBaseSrc * basesrc, guint64 offset, guint length,
    GstBuffer ** ret)
{
  GstFakeSrc *src;
  GstBuffer *buf;
  GstClockTime time;
  gsize size;

  src = GST_FAKE_SRC (basesrc);

  buf = gst_fake_src_create_buffer (src, &size);
  GST_BUFFER_OFFSET (buf) = offset;

  if (src->datarate > 0) {
    time = (src->bytes_sent * GST_SECOND) / src->datarate;

    GST_BUFFER_DURATION (buf) = size * GST_SECOND / src->datarate;
  } else if (gst_base_src_is_live (basesrc)) {
    GstClock *clock;

    clock = gst_element_get_clock (GST_ELEMENT (src));

    if (clock) {
      time = gst_clock_get_time (clock);
      time -= gst_element_get_base_time (GST_ELEMENT (src));
      gst_object_unref (clock);
    } else {
      /* not an error not to have a clock */
      time = GST_CLOCK_TIME_NONE;
    }
  } else {
    time = GST_CLOCK_TIME_NONE;
  }

  GST_BUFFER_DTS (buf) = time;
  GST_BUFFER_PTS (buf) = time;

  if (!src->silent) {
    gchar dts_str[64], pts_str[64], dur_str[64];
    gchar flag_str[100];

    GST_OBJECT_LOCK (src);
    g_free (src->last_message);

    if (GST_BUFFER_DTS (buf) != GST_CLOCK_TIME_NONE) {
      g_snprintf (dts_str, sizeof (dts_str), "%" GST_TIME_FORMAT,
          GST_TIME_ARGS (GST_BUFFER_DTS (buf)));
    } else {
      g_strlcpy (dts_str, "none", sizeof (dts_str));
    }
    if (GST_BUFFER_PTS (buf) != GST_CLOCK_TIME_NONE) {
      g_snprintf (pts_str, sizeof (pts_str), "%" GST_TIME_FORMAT,
          GST_TIME_ARGS (GST_BUFFER_PTS (buf)));
    } else {
      g_strlcpy (pts_str, "none", sizeof (pts_str));
    }
    if (GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
      g_snprintf (dur_str, sizeof (dur_str), "%" GST_TIME_FORMAT,
          GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
    } else {
      g_strlcpy (dur_str, "none", sizeof (dur_str));
    }

    {
      const char *flag_list[15] = {
        "", "", "", "", "live", "decode-only", "discont", "resync",
        "corrupted", "marker", "header", "gap", "droppable", "delta-unit",
        "in-caps"
      };
      int i;
      char *end = flag_str;

      end[0] = '\0';
      for (i = 0; i < G_N_ELEMENTS (flag_list); i++) {
        if (GST_MINI_OBJECT_CAST (buf)->flags & (1 << i)) {
          strcpy (end, flag_list[i]);
          end += strlen (end);
          end[0] = ' ';
          end[1] = '\0';
          end++;
        }
      }
    }

    src->last_message =
        g_strdup_printf ("create ******* (%s:%s) (%u bytes, dts: %s, pts:%s"
        ", duration: %s, offset: %" G_GINT64_FORMAT ", offset_end: %"
        G_GINT64_FORMAT ", flags: %08x %s) %p",
        GST_DEBUG_PAD_NAME (GST_BASE_SRC_CAST (src)->srcpad), (guint) size,
        dts_str, pts_str, dur_str, GST_BUFFER_OFFSET (buf),
        GST_BUFFER_OFFSET_END (buf), GST_MINI_OBJECT_CAST (buf)->flags,
        flag_str, buf);
    GST_OBJECT_UNLOCK (src);

    g_object_notify_by_pspec ((GObject *) src, pspec_last_message);
  }

  if (src->signal_handoffs) {
    GST_LOG_OBJECT (src, "pre handoff emit");
    g_signal_emit (src, gst_fake_src_signals[SIGNAL_HANDOFF], 0, buf,
        basesrc->srcpad);
    GST_LOG_OBJECT (src, "post handoff emit");
  }

  src->bytes_sent += size;

  *ret = buf;
  return GST_FLOW_OK;
}
static GstFlowReturn
gst_base_audio_src_create (GstBaseSrc * bsrc, guint64 offset, guint length,
    GstBuffer ** outbuf)
{
  GstBaseAudioSrc *src = GST_BASE_AUDIO_SRC (bsrc);
  GstBuffer *buf;
  guchar *data;
  guint samples, total_samples;
  guint64 sample;
  gint bps;
  GstRingBuffer *ringbuffer;
  GstRingBufferSpec *spec;
  guint read;
  GstClockTime timestamp, duration;
  GstClock *clock;

  ringbuffer = src->ringbuffer;
  spec = &ringbuffer->spec;

  if (G_UNLIKELY (!gst_ring_buffer_is_acquired (ringbuffer)))
    goto wrong_state;

  bps = spec->bytes_per_sample;

  if ((length == 0 && bsrc->blocksize == 0) || length == -1)
    /* no length given, use the default segment size */
    length = spec->segsize;
  else
    /* make sure we round down to an integral number of samples */
    length -= length % bps;

  /* figure out the offset in the ringbuffer */
  if (G_UNLIKELY (offset != -1)) {
    sample = offset / bps;
    /* if a specific offset was given it must be the next sequential
     * offset we expect or we fail for now. */
    if (src->next_sample != -1 && sample != src->next_sample)
      goto wrong_offset;
  } else {
    /* calculate the sequentially next sample we need to read. This can jump
     * and create a DISCONT. */
    sample = gst_base_audio_src_get_offset (src);
  }

  GST_DEBUG_OBJECT (src, "reading from sample %" G_GUINT64_FORMAT, sample);

  /* get the number of samples to read */
  total_samples = samples = length / bps;

  /* FIXME, using a bufferpool would be nice here */
  buf = gst_buffer_new_and_alloc (length);
  data = GST_BUFFER_DATA (buf);

  do {
    read = gst_ring_buffer_read (ringbuffer, sample, data, samples);
    GST_DEBUG_OBJECT (src, "read %u of %u", read, samples);
    /* if we read all, we're done */
    if (read == samples)
      break;

    /* else something interrupted us and we wait for playing again. */
    GST_DEBUG_OBJECT (src, "wait playing");
    if (gst_base_src_wait_playing (bsrc) != GST_FLOW_OK)
      goto stopped;

    GST_DEBUG_OBJECT (src, "continue playing");

    /* read next samples */
    sample += read;
    samples -= read;
    data += read * bps;
  } while (TRUE);

  /* mark discontinuity if needed */
  if (G_UNLIKELY (sample != src->next_sample) && src->next_sample != -1) {
    GST_WARNING_OBJECT (src,
        "create DISCONT of %" G_GUINT64_FORMAT " samples at sample %"
        G_GUINT64_FORMAT, sample - src->next_sample, sample);
    GST_ELEMENT_WARNING (src, CORE, CLOCK,
        (_("Can't record audio fast enough")),
        ("dropped %" G_GUINT64_FORMAT " samples", sample - src->next_sample));
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
  }

  src->next_sample = sample + samples;

  /* get the normal timestamp to get the duration. */
  timestamp = gst_util_uint64_scale_int (sample, GST_SECOND, spec->rate);
  duration = gst_util_uint64_scale_int (src->next_sample, GST_SECOND,
      spec->rate) - timestamp;

  GST_OBJECT_LOCK (src);
  if (!(clock = GST_ELEMENT_CLOCK (src)))
    goto no_sync;

  if (clock != src->clock) {
    /* we are slaved, check how to handle this */
    switch (src->priv->slave_method) {
      case GST_BASE_AUDIO_SRC_SLAVE_RESAMPLE:
        /* not implemented, use skew algorithm. This algorithm should
         * work on the readout pointer and produce more or less samples based
         * on the clock drift */
      case GST_BASE_AUDIO_SRC_SLAVE_SKEW:
      {
        GstClockTime running_time;
        GstClockTime base_time;
        GstClockTime current_time;
        guint64 running_time_sample;
        gint running_time_segment;
        gint current_segment;
        gint segment_skew;
        gint sps;

        /* samples per segment */
        sps = ringbuffer->samples_per_seg;

        /* get the current time */
        current_time = gst_clock_get_time (clock);

        /* get the basetime */
        base_time = GST_ELEMENT_CAST (src)->base_time;

        /* get the running_time */
        running_time = current_time - base_time;

        /* the running_time converted to a sample
         * (relative to the ringbuffer) */
        running_time_sample =
            gst_util_uint64_scale_int (running_time, spec->rate, GST_SECOND);

        /* the segment number corresponding to running_time, rounded down */
        running_time_segment = running_time_sample / sps;

        /* the segment currently read from the ringbuffer */
        current_segment = sample / sps;

        /* the skew we have between running_time and the ringbuffertime */
        segment_skew = running_time_segment - current_segment;

        GST_DEBUG_OBJECT (bsrc, "\n running_time = %" GST_TIME_FORMAT
            "\n timestamp = %" GST_TIME_FORMAT
            "\n running_time_segment = %d"
            "\n current_segment = %d"
            "\n segment_skew = %d",
            GST_TIME_ARGS (running_time), GST_TIME_ARGS (timestamp),
            running_time_segment, current_segment, segment_skew);

        /* Resync the ringbuffer if:
         * 1. We get one segment into the future.
         *    This is clearly a lie, because we can't
         *    possibly have a buffer with timestamp 1 at
         *    time 0. (unless it has time-travelled...)
         *
         * 2. We are more than the length of the ringbuffer behind.
         *    The length of the ringbuffer then gets to dictate
         *    the threshold for what is considered "too late"
         *
         * 3. If this is our first buffer.
         *    We know that we should catch up to running_time
         *    the first time we are run.
         */
        if ((segment_skew < 0) ||
            (segment_skew >= ringbuffer->spec.segtotal) ||
            (current_segment == 0)) {
          gint segments_written;
          gint first_segment;
          gint last_segment;
          gint new_last_segment;
          gint segment_diff;
          gint new_first_segment;
          guint64 new_sample;

          /* we are going to say that the last segment was captured at the
           * current time (running_time), minus one segment of
           * creation-latency in the ringbuffer. This can be thought of as:
           * The segment arrived in the ringbuffer at time X, and that means
           * it was created at time X - (one segment). */
          new_last_segment = running_time_segment - 1;

          /* for better readability */
          first_segment = current_segment;

          /* get the amount of segments written from the device by now */
          segments_written = g_atomic_int_get (&ringbuffer->segdone);

          /* subtract the base to segments_written to get the number of the
           * last written segment in the ringbuffer
           * (one segment written = segment 0) */
          last_segment = segments_written - ringbuffer->segbase - 1;

          /* we see how many segments the ringbuffer was timeshifted */
          segment_diff = new_last_segment - last_segment;

          /* we move the first segment an equal amount */
          new_first_segment = first_segment + segment_diff;

          /* and we also move the segmentbase the same amount */
          ringbuffer->segbase -= segment_diff;

          /* we calculate the new sample value */
          new_sample = ((guint64) new_first_segment) * sps;

          /* and get the relative time to this -> our new timestamp */
          timestamp =
              gst_util_uint64_scale_int (new_sample, GST_SECOND, spec->rate);

          /* we update the next sample accordingly */
          src->next_sample = new_sample + samples;

          GST_DEBUG_OBJECT (bsrc,
              "Timeshifted the ringbuffer with %d segments: "
              "Updating the timestamp to %" GST_TIME_FORMAT ", "
              "and src->next_sample to %" G_GUINT64_FORMAT,
              segment_diff, GST_TIME_ARGS (timestamp), src->next_sample);
        }
        break;
      }
      case GST_BASE_AUDIO_SRC_SLAVE_RETIMESTAMP:
      {
        GstClockTime base_time, latency;

        /* We are slaved to another clock, take running time of the pipeline
         * clock and timestamp against it. Somebody else in the pipeline
         * should figure out the clock drift. We keep the duration we
         * calculated above. */
        timestamp = gst_clock_get_time (clock);
        base_time = GST_ELEMENT_CAST (src)->base_time;

        if (timestamp > base_time)
          timestamp -= base_time;
        else
          timestamp = 0;

        /* subtract latency */
        latency =
            gst_util_uint64_scale_int (total_samples, GST_SECOND, spec->rate);
        if (timestamp > latency)
          timestamp -= latency;
        else
          timestamp = 0;
      }
      case GST_BASE_AUDIO_SRC_SLAVE_NONE:
        break;
    }
  } else {
    GstClockTime base_time;

    /* we are not slaved, subtract base_time */
    base_time = GST_ELEMENT_CAST (src)->base_time;

    if (timestamp > base_time)
      timestamp -= base_time;
    else
      timestamp = 0;
  }

no_sync:
  GST_OBJECT_UNLOCK (src);

  GST_BUFFER_TIMESTAMP (buf) = timestamp;
  GST_BUFFER_DURATION (buf) = duration;
  GST_BUFFER_OFFSET (buf) = sample;
  GST_BUFFER_OFFSET_END (buf) = sample + samples;

  *outbuf = buf;

  return GST_FLOW_OK;

  /* ERRORS */
wrong_state:
  {
    GST_DEBUG_OBJECT (src, "ringbuffer in wrong state");
    return GST_FLOW_WRONG_STATE;
  }
wrong_offset:
  {
    GST_ELEMENT_ERROR (src, RESOURCE, SEEK,
        (NULL), ("resource can only be operated on sequentially but offset %"
            G_GUINT64_FORMAT " was given", offset));
    return GST_FLOW_ERROR;
  }
stopped:
  {
    gst_buffer_unref (buf);
    GST_DEBUG_OBJECT (src, "ringbuffer stopped");
    return GST_FLOW_WRONG_STATE;
  }
}
static GstFlowReturn
_chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
{
  GstBuffer *actual_buf = buffer;
  GstAggregator *self = GST_AGGREGATOR (object);
  GstAggregatorPrivate *priv = self->priv;
  GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
  GstAggregatorClass *aggclass = GST_AGGREGATOR_GET_CLASS (object);
  GstClockTime timeout = gst_aggregator_get_timeout (self);
  GstClockTime now;

  GST_DEBUG_OBJECT (aggpad, "Start chaining a buffer %" GST_PTR_FORMAT,
      buffer);

  if (aggpad->priv->timeout_id) {
    gst_clock_id_unschedule (aggpad->priv->timeout_id);
    gst_clock_id_unref (aggpad->priv->timeout_id);
    aggpad->priv->timeout_id = NULL;
  }
  g_atomic_int_set (&aggpad->unresponsive, FALSE);

  PAD_STREAM_LOCK (aggpad);

  if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE)
    goto flushing;

  if (g_atomic_int_get (&aggpad->priv->pending_eos) == TRUE)
    goto eos;

  PAD_LOCK_EVENT (aggpad);

  if (aggpad->buffer) {
    GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
    PAD_WAIT_EVENT (aggpad);
  }
  PAD_UNLOCK_EVENT (aggpad);

  if (g_atomic_int_get (&aggpad->priv->flushing) == TRUE)
    goto flushing;

  if (aggclass->clip) {
    aggclass->clip (self, aggpad, buffer, &actual_buf);
  }

  PAD_LOCK_EVENT (aggpad);
  if (aggpad->buffer)
    gst_buffer_unref (aggpad->buffer);
  aggpad->buffer = actual_buf;
  PAD_UNLOCK_EVENT (aggpad);
  PAD_STREAM_UNLOCK (aggpad);

  QUEUE_PUSH (self);

  if (GST_CLOCK_TIME_IS_VALID (timeout)) {
    now = gst_clock_get_time (self->clock);
    aggpad->priv->timeout_id =
        gst_clock_new_single_shot_id (self->clock, now + timeout);
    gst_clock_id_wait_async (aggpad->priv->timeout_id, _unresponsive_timeout,
        gst_object_ref (aggpad), gst_object_unref);
  }

  GST_DEBUG_OBJECT (aggpad, "Done chaining");

  return priv->flow_return;

flushing:
  PAD_STREAM_UNLOCK (aggpad);

  gst_buffer_unref (buffer);
  GST_DEBUG_OBJECT (aggpad, "We are flushing");

  return GST_FLOW_FLUSHING;

eos:
  PAD_STREAM_UNLOCK (aggpad);

  gst_buffer_unref (buffer);
  GST_DEBUG_OBJECT (pad, "We are EOS already...");

  return GST_FLOW_EOS;
}
/**
 * job_initialize:
 * @job: (in): the job to be initialized.
 * @daemon: (in): whether gstreamill runs in the background.
 *
 * Initialize the output of the job. The output of the job includes the
 * status of the source and encoders as well as the output stream.
 *
 * Returns: 0 on success.
 */
gint
job_initialize (Job *job, gboolean daemon)
{
  gint i, fd;
  JobOutput *output;
  gchar *name, *p, *name_hexstr, *semaphore_name;
  struct timespec ts;
  sem_t *semaphore;

  job->output_size = status_output_size (job->description);
  name_hexstr = unicode_file_name_2_shm_name (job->name);
  semaphore_name = g_strdup_printf ("/%s", name_hexstr);
  semaphore = sem_open (semaphore_name, O_CREAT, 0644, 1);
  if (semaphore == SEM_FAILED) {
    GST_ERROR ("open semaphore failed: %s", g_strerror (errno));
    g_free (semaphore_name);
    return 1;
  }
  if (clock_gettime (CLOCK_REALTIME, &ts) == -1) {
    GST_ERROR ("clock_gettime error: %s", g_strerror (errno));
    g_free (semaphore_name);
    return 1;
  }
  ts.tv_sec += 2;
  while (sem_timedwait (semaphore, &ts) == -1) {
    if (errno == EINTR) {
      continue;
    }
    GST_ERROR ("sem_timedwait failure: %s", g_strerror (errno));
    g_free (semaphore_name);
    return 1;
  }
  if (daemon) {
    /* daemon, use shared memory */
    fd = shm_open (name_hexstr, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
    if (fd == -1) {
      GST_ERROR ("shm_open %s failure: %s", name_hexstr, g_strerror (errno));
      job->output = NULL;
      g_free (name_hexstr);
      sem_post (semaphore);
      return 1;
    }
    g_free (name_hexstr);
    if (ftruncate (fd, job->output_size) == -1) {
      GST_ERROR ("ftruncate error: %s", g_strerror (errno));
      job->output = NULL;
      sem_post (semaphore);
      return 1;
    }
    p = mmap (NULL, job->output_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    job->output_fd = fd;
  } else {
    g_free (name_hexstr);
    p = g_malloc (job->output_size);
    job->output_fd = -1;
  }
  output = (JobOutput *)g_malloc (sizeof (JobOutput));
  output->job_description = (gchar *)p;
  output->semaphore = semaphore;
  output->semaphore_name = semaphore_name;
  g_stpcpy (output->job_description, job->description);
  p += (strlen (job->description) / 8 + 1) * 8;
  output->state = (guint64 *)p;
  p += sizeof (guint64); /* state */
  output->source.duration = (gint64 *)p;
  p += sizeof (gint64); /* duration for transcode */
  output->source.sync_error_times = 0;
  output->source.stream_count =
      jobdesc_streams_count (job->description, "source");
  output->source.streams = (struct _SourceStreamState *)p;
  for (i = 0; i < output->source.stream_count; i++) {
    output->source.streams[i].last_heartbeat =
        gst_clock_get_time (job->system_clock);
  }
  p += output->source.stream_count * sizeof (struct _SourceStreamState);
  output->encoder_count = jobdesc_encoders_count (job->description);
  if (output->encoder_count == 0) {
    GST_ERROR ("Invalid job without encoders, initialize job failure");
    sem_post (semaphore);
    return 1;
  }
  output->encoders = (struct _EncoderOutput *)
      g_malloc (output->encoder_count * sizeof (struct _EncoderOutput));
  for (i = 0; i < output->encoder_count; i++) {
    name = g_strdup_printf ("%s.encoder.%d", job->name, i);
    g_strlcpy (output->encoders[i].name, name, STREAM_NAME_LEN);
    g_free (name);
    name = g_strdup_printf ("encoder.%d", i);
    output->encoders[i].stream_count =
        jobdesc_streams_count (job->description, name);
    g_free (name);
    output->encoders[i].semaphore = output->semaphore;
    output->encoders[i].heartbeat = (GstClockTime *)p;
    p += sizeof (GstClockTime); /* encoder heartbeat */
    output->encoders[i].eos = (gboolean *)p;
    p += sizeof (gboolean);
    output->encoders[i].streams = (struct _EncoderStreamState *)p;
    p += output->encoders[i].stream_count *
        sizeof (struct _EncoderStreamState); /* encoder state */
    output->encoders[i].total_count = (guint64 *)p;
    p += sizeof (guint64); /* total count size */
    /* a non-live job has no output */
    if (!job->is_live) {
      continue;
    }
    output->encoders[i].is_first_buffer = TRUE;
    output->encoders[i].cache_addr = p;
    p += SHM_SIZE;
    output->encoders[i].cache_size = SHM_SIZE;
    output->encoders[i].head_addr = (guint64 *)p;
    p += sizeof (guint64); /* cache head */
    output->encoders[i].tail_addr = (guint64 *)p;
    p += sizeof (guint64); /* cache tail */
    output->encoders[i].last_rap_addr = (guint64 *)p;
    p += sizeof (guint64); /* last rap addr */
  }
  job->output = output;
  sem_post (semaphore);

  return 0;
}
static GstFlowReturn
gst_gdiscreencapsrc_create (GstPushSrc * push_src, GstBuffer ** buf)
{
  GstGDIScreenCapSrc *src = GST_GDISCREENCAPSRC (push_src);
  GstBuffer *new_buf;
  GstFlowReturn res;
  gint new_buf_size;
  GstClock *clock;
  GstClockTime time;
  GstClockTime base_time;

  if (G_UNLIKELY (!src->info.bmiHeader.biWidth ||
          !src->info.bmiHeader.biHeight)) {
    GST_ELEMENT_ERROR (src, CORE, NEGOTIATION, (NULL),
        ("format wasn't negotiated before create function"));
    return GST_FLOW_NOT_NEGOTIATED;
  } else if (G_UNLIKELY (src->rate_numerator == 0 && src->frames == 1)) {
    GST_DEBUG_OBJECT (src, "eos: 0 framerate, frame %d", (gint) src->frames);
    return GST_FLOW_UNEXPECTED;
  }

  new_buf_size = GST_ROUND_UP_4 (src->info.bmiHeader.biWidth * 3) *
      (-src->info.bmiHeader.biHeight);

  GST_LOG_OBJECT (src,
      "creating buffer of %d bytes with %dx%d image for frame %d",
      new_buf_size, src->info.bmiHeader.biWidth,
      -src->info.bmiHeader.biHeight, (gint) src->frames);

  res = gst_pad_alloc_buffer_and_set_caps (GST_BASE_SRC_PAD (src),
      GST_BUFFER_OFFSET_NONE, new_buf_size,
      GST_PAD_CAPS (GST_BASE_SRC_PAD (push_src)), &new_buf);
  if (res != GST_FLOW_OK) {
    GST_DEBUG_OBJECT (src, "could not allocate buffer, reason %s",
        gst_flow_get_name (res));
    return res;
  }

  clock = gst_element_get_clock (GST_ELEMENT (src));
  if (clock) {
    /* Calculate sync time. */
    GstClockTime frame_time =
        gst_util_uint64_scale_int (src->frames * GST_SECOND,
        src->rate_denominator, src->rate_numerator);

    time = gst_clock_get_time (clock);
    base_time = gst_element_get_base_time (GST_ELEMENT (src));
    GST_BUFFER_TIMESTAMP (new_buf) = MAX (time - base_time, frame_time);
  } else {
    GST_BUFFER_TIMESTAMP (new_buf) = GST_CLOCK_TIME_NONE;
  }

  /* Do screen capture and put it into buffer... */
  gst_gdiscreencapsrc_screen_capture (src, new_buf);

  if (src->rate_numerator) {
    GST_BUFFER_DURATION (new_buf) =
        gst_util_uint64_scale_int (GST_SECOND,
        src->rate_denominator, src->rate_numerator);
    if (clock) {
      GST_BUFFER_DURATION (new_buf) =
          MAX (GST_BUFFER_DURATION (new_buf),
          gst_clock_get_time (clock) - time);
    }
  } else {
    /* NONE means forever */
    GST_BUFFER_DURATION (new_buf) = GST_CLOCK_TIME_NONE;
  }

  GST_BUFFER_OFFSET (new_buf) = src->frames;
  src->frames++;
  GST_BUFFER_OFFSET_END (new_buf) = src->frames;

  if (clock)
    gst_object_unref (clock);

  *buf = new_buf;
  return GST_FLOW_OK;
}
static void
gst_frame_store_task (GstPad *pad)
{
  GstFrameStore *fs;
  GstBuffer *buffer = NULL;
  GstEvent *event = NULL;

  fs = GST_FRAME_STORE (gst_pad_get_parent (pad));

  GST_DEBUG ("task");

  g_mutex_lock (fs->lock);
  while (1) {
    if (fs->stepping == FALSE ||
        (fs->frame_number != fs->pushed_frame_number)) {
      buffer = gst_frame_store_get_frame (fs, fs->frame_number);
    }
    if (buffer)
      break;
    g_cond_wait (fs->cond, fs->lock);
  }

  if (fs->need_newsegment) {
    GstClock *clock;
    GstClockTime now;
    GstClockTime stream_time;

    clock = GST_ELEMENT_CLOCK (fs);
    if (clock == NULL) {
      now = 0;
      stream_time = 0;
    } else {
      now = gst_clock_get_time (GST_ELEMENT_CLOCK (fs));
      stream_time = now - GST_ELEMENT (fs)->base_time;
    }
    GST_ERROR ("now %" G_GUINT64_FORMAT " buffer %" G_GUINT64_FORMAT
        " stream_time %" G_GUINT64_FORMAT,
        now, GST_BUFFER_TIMESTAMP (buffer), stream_time);
    stream_time = GST_SECOND * 10;
    event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME,
        GST_BUFFER_TIMESTAMP (buffer), -1, stream_time);
    fs->need_newsegment = FALSE;
  }
  if (fs->stepping) {
    buffer = gst_buffer_make_metadata_writable (buffer);
    GST_BUFFER_TIMESTAMP (buffer) = -1;
    GST_BUFFER_DURATION (buffer) = -1;
  }
  fs->pushed_frame_number = fs->frame_number;
  if (!fs->stepping) {
    fs->frame_number++;
  }
  if (fs->frame_number + 1 >= fs->range_offset + fs->range_size) {
    gst_frame_store_advance (fs);
  }
  g_mutex_unlock (fs->lock);

  if (event) {
    gst_pad_push_event (fs->srcpad, event);
  }
  gst_pad_push (fs->srcpad, buffer);

  GST_DEBUG ("task done");

  gst_object_unref (fs);
}
static void
gst_imx_v4l2src_af_check_status (GstImxV4l2VideoSrc *v4l2src)
{
  int status;
  gboolean send_message;
  GstPhotographyFocusStatus message_status;
  gboolean schedule_recheck;

  if (v4l2_g_ctrl (v4l2src, V4L2_CID_AUTO_FOCUS_STATUS, &status) < 0)
    goto none;

  switch (status) {
    case V4L2_AUTO_FOCUS_STATUS_IDLE:
    default:
    none:
      send_message = TRUE;
      message_status = GST_PHOTOGRAPHY_FOCUS_STATUS_NONE;
      schedule_recheck = FALSE;
      break;

    case V4L2_AUTO_FOCUS_STATUS_BUSY:
      send_message = FALSE;
      schedule_recheck = TRUE;
      break;

    case V4L2_AUTO_FOCUS_STATUS_REACHED:
      send_message = TRUE;
      message_status = GST_PHOTOGRAPHY_FOCUS_STATUS_SUCCESS;
      schedule_recheck = FALSE;
      break;

    case V4L2_AUTO_FOCUS_STATUS_FAILED:
      send_message = TRUE;
      message_status = GST_PHOTOGRAPHY_FOCUS_STATUS_FAIL;
      schedule_recheck = FALSE;
      break;
  }

  if (send_message) {
    GstStructure *s;
    GstMessage *m;

    s = gst_structure_new (GST_PHOTOGRAPHY_AUTOFOCUS_DONE,
        "status", G_TYPE_INT, message_status, NULL);
    m = gst_message_new_custom (GST_MESSAGE_ELEMENT, GST_OBJECT (v4l2src), s);
    if (!gst_element_post_message (GST_ELEMENT (v4l2src), m))
      GST_ERROR_OBJECT (v4l2src, "failed to post message");
  }

  if (schedule_recheck) {
    GstClock *c;
    GstClockTime t;

    c = gst_system_clock_obtain ();
    t = gst_clock_get_time (c) + 50 * GST_MSECOND;
    v4l2src->af_clock_id = gst_clock_new_single_shot_id (c, t);
    gst_object_unref (c);

    if (gst_clock_id_wait_async (v4l2src->af_clock_id,
            gst_imx_v4l2src_af_status_cb, v4l2src, NULL) != GST_CLOCK_OK)
      GST_ERROR_OBJECT (v4l2src, "failed to schedule recheck");
  }
}
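/* A minimal sketch of the async clock-wait pattern used above: register a
 * callback on a single-shot id and let the clock thread invoke it once the
 * target time is reached. The function names here are illustrative, not part
 * of gst-imx. */
static gboolean
sketch_af_status_cb (GstClock *clock, GstClockTime time, GstClockID id,
    gpointer user_data)
{
  /* runs on the clock's thread once "time" is reached (or the id is
   * unscheduled); keep the work short and defer to the element if needed */
  GST_DEBUG ("recheck fired at %" GST_TIME_FORMAT, GST_TIME_ARGS (time));
  return TRUE;
}

static GstClockID
sketch_schedule_recheck (void)
{
  GstClock *clock = gst_system_clock_obtain ();
  GstClockID id = gst_clock_new_single_shot_id (clock,
      gst_clock_get_time (clock) + 50 * GST_MSECOND);

  /* keep the returned id around, as the element above does, so the wait can
   * later be cancelled with gst_clock_id_unschedule() and released with
   * gst_clock_id_unref() */
  gst_clock_id_wait_async (id, sketch_af_status_cb, NULL, NULL);
  gst_object_unref (clock);
  return id;
}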
static GstFlowReturn
gst_gdiscreencapsrc_create (GstPushSrc * push_src, GstBuffer ** buf)
{
  GstGDIScreenCapSrc *src = GST_GDISCREENCAPSRC (push_src);
  GstBuffer *new_buf;
  gint new_buf_size;
  GstClock *clock;
  GstClockTime buf_time, buf_dur;
  guint64 frame_number;

  if (G_UNLIKELY (!src->info.bmiHeader.biWidth ||
          !src->info.bmiHeader.biHeight)) {
    GST_ELEMENT_ERROR (src, CORE, NEGOTIATION, (NULL),
        ("format wasn't negotiated before create function"));
    return GST_FLOW_NOT_NEGOTIATED;
  }

  new_buf_size = GST_ROUND_UP_4 (src->info.bmiHeader.biWidth * 3) *
      (-src->info.bmiHeader.biHeight);

  GST_LOG_OBJECT (src, "creating buffer of %d bytes with %dx%d image",
      new_buf_size, (gint) src->info.bmiHeader.biWidth,
      (gint) (-src->info.bmiHeader.biHeight));

  new_buf = gst_buffer_new_and_alloc (new_buf_size);

  clock = gst_element_get_clock (GST_ELEMENT (src));
  if (clock != NULL) {
    GstClockTime time, base_time;

    /* Calculate sync time. */
    time = gst_clock_get_time (clock);
    base_time = gst_element_get_base_time (GST_ELEMENT (src));
    buf_time = time - base_time;

    if (src->rate_numerator) {
      frame_number = gst_util_uint64_scale (buf_time,
          src->rate_numerator, GST_SECOND * src->rate_denominator);
    } else {
      frame_number = -1;
    }
  } else {
    buf_time = GST_CLOCK_TIME_NONE;
    frame_number = -1;
  }

  if (frame_number != -1 && frame_number == src->frame_number) {
    GstClockID id;
    GstClockReturn ret;

    /* Need to wait for the next frame */
    frame_number += 1;

    /* Figure out what the next frame time is */
    buf_time = gst_util_uint64_scale (frame_number,
        src->rate_denominator * GST_SECOND, src->rate_numerator);

    id = gst_clock_new_single_shot_id (clock,
        buf_time + gst_element_get_base_time (GST_ELEMENT (src)));
    GST_OBJECT_LOCK (src);
    src->clock_id = id;
    GST_OBJECT_UNLOCK (src);

    GST_DEBUG_OBJECT (src, "Waiting for next frame time %" G_GUINT64_FORMAT,
        buf_time);
    ret = gst_clock_id_wait (id, NULL);
    GST_OBJECT_LOCK (src);

    gst_clock_id_unref (id);
    src->clock_id = NULL;
    if (ret == GST_CLOCK_UNSCHEDULED) {
      /* Got woken up by the unlock function */
      GST_OBJECT_UNLOCK (src);
      return GST_FLOW_FLUSHING;
    }
    GST_OBJECT_UNLOCK (src);

    /* Duration is a complete 1/fps frame duration */
    buf_dur = gst_util_uint64_scale_int (GST_SECOND,
        src->rate_denominator, src->rate_numerator);
  } else if (frame_number != -1) {
    GstClockTime next_buf_time;

    GST_DEBUG_OBJECT (src, "No need to wait for next frame time %"
        G_GUINT64_FORMAT " next frame = %" G_GINT64_FORMAT " prev = %"
        G_GINT64_FORMAT, buf_time, frame_number, src->frame_number);
    next_buf_time = gst_util_uint64_scale (frame_number + 1,
        src->rate_denominator * GST_SECOND, src->rate_numerator);
    /* Frame duration is from now until the next expected capture time */
    buf_dur = next_buf_time - buf_time;
  } else {
    buf_dur = GST_CLOCK_TIME_NONE;
  }
  src->frame_number = frame_number;

  GST_BUFFER_TIMESTAMP (new_buf) = buf_time;
  GST_BUFFER_DURATION (new_buf) = buf_dur;

  /* Do screen capture and put it into buffer... */
  gst_gdiscreencapsrc_screen_capture (src, new_buf);

  if (clock != NULL)
    gst_object_unref (clock);

  *buf = new_buf;
  return GST_FLOW_OK;
}
void test_functioning()
{
  GstNetTimeProvider *ntp;
  GstNetTimePacket *packet;
  GstClock *clock;
  GstClockTime local;
  struct sockaddr_in servaddr;
  gint port = -1, sockfd, ret;
  socklen_t len;

  xmlfile = "test_functioning";
  std_log(LOG_FILENAME_LINE, "Test Started test_functioning");

  clock = gst_system_clock_obtain ();
  fail_unless (clock != NULL, "failed to get system clock");

  ntp = gst_net_time_provider_new (clock, "127.0.0.1", 0);
  fail_unless (ntp != NULL, "failed to create net time provider");

  g_object_get (ntp, "port", &port, NULL);
  fail_unless (port > 0);

  sockfd = socket (AF_INET, SOCK_DGRAM, 0);
  fail_if (sockfd < 0, "socket failed");

  memset (&servaddr, 0, sizeof (servaddr));
  servaddr.sin_family = AF_INET;
  servaddr.sin_port = htons (port);
  inet_aton ("127.0.0.1", &servaddr.sin_addr);

  packet = gst_net_time_packet_new (NULL);
  fail_unless (packet != NULL, "failed to create packet");

  packet->local_time = local = gst_clock_get_time (clock);

  len = sizeof (servaddr);
  ret = gst_net_time_packet_send (packet, sockfd,
      (struct sockaddr *) &servaddr, len);
  fail_unless (ret == GST_NET_TIME_PACKET_SIZE, "failed to send packet");

  g_free (packet);

  packet = gst_net_time_packet_receive (sockfd,
      (struct sockaddr *) &servaddr, &len);
  fail_unless (packet != NULL, "failed to receive packet");
  // fail_unless (packet->local_time == local, "local time is not the same"); //local time has changed
  fail_unless (packet->remote_time > local,
      "remote time not after local time");
  fail_unless (packet->remote_time < gst_clock_get_time (clock),
      "remote time in the future");

  g_free (packet);
  close (sockfd);
  //gst_object_unref (ntp); //thread is blocking
  gst_object_unref (clock);

  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
static GstFlowReturn gst_decklink_video_sink_prepare (GstBaseSink * bsink, GstBuffer * buffer) { GstDecklinkVideoSink *self = GST_DECKLINK_VIDEO_SINK_CAST (bsink); GstVideoFrame vframe; IDeckLinkMutableVideoFrame *frame; guint8 *outdata, *indata; GstFlowReturn flow_ret; HRESULT ret; GstClockTime timestamp, duration; GstClockTime running_time, running_time_duration; gint i; GstClock *clock; GST_DEBUG_OBJECT (self, "Preparing buffer %p", buffer); // FIXME: Handle no timestamps if (!GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) { return GST_FLOW_ERROR; } timestamp = GST_BUFFER_TIMESTAMP (buffer); duration = GST_BUFFER_DURATION (buffer); if (duration == GST_CLOCK_TIME_NONE) { duration = gst_util_uint64_scale_int (GST_SECOND, self->info.fps_d, self->info.fps_n); } running_time = gst_segment_to_running_time (&GST_BASE_SINK_CAST (self)->segment, GST_FORMAT_TIME, timestamp); running_time_duration = gst_segment_to_running_time (&GST_BASE_SINK_CAST (self)->segment, GST_FORMAT_TIME, timestamp + duration) - running_time; // FIXME: https://bugzilla.gnome.org/show_bug.cgi?id=742916 // We need to drop late buffers here immediately instead of // potentially overflowing the internal queue of the hardware clock = gst_element_get_clock (GST_ELEMENT_CAST (self)); if (clock) { GstClockTime clock_running_time, base_time, clock_time, latency, max_lateness; base_time = gst_element_get_base_time (GST_ELEMENT_CAST (self)); clock_time = gst_clock_get_time (clock); if (base_time != GST_CLOCK_TIME_NONE && clock_time != GST_CLOCK_TIME_NONE) { clock_running_time = clock_time - base_time; latency = gst_base_sink_get_latency (GST_BASE_SINK_CAST (self)); max_lateness = gst_base_sink_get_max_lateness (GST_BASE_SINK_CAST (self)); if (clock_running_time > running_time + running_time_duration + latency + max_lateness) { GST_DEBUG_OBJECT (self, "Late buffer: %" GST_TIME_FORMAT " > %" GST_TIME_FORMAT, GST_TIME_ARGS (clock_running_time), GST_TIME_ARGS (running_time + running_time_duration)); if (self->last_render_time == GST_CLOCK_TIME_NONE || (self->last_render_time < clock_running_time && clock_running_time - self->last_render_time >= GST_SECOND)) { GST_DEBUG_OBJECT (self, "Rendering frame nonetheless because we had none for more than 1s"); running_time = clock_running_time; running_time_duration = 0; } else { GST_WARNING_OBJECT (self, "Dropping frame"); gst_object_unref (clock); return GST_FLOW_OK; } } } gst_object_unref (clock); } self->last_render_time = running_time; ret = self->output->output->CreateVideoFrame (self->info.width, self->info.height, self->info.stride[0], bmdFormat8BitYUV, bmdFrameFlagDefault, &frame); if (ret != S_OK) { GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to create video frame: 0x%08x", ret)); return GST_FLOW_ERROR; } if (!gst_video_frame_map (&vframe, &self->info, buffer, GST_MAP_READ)) { GST_ERROR_OBJECT (self, "Failed to map video frame"); flow_ret = GST_FLOW_ERROR; goto out; } frame->GetBytes ((void **) &outdata); indata = (guint8 *) GST_VIDEO_FRAME_PLANE_DATA (&vframe, 0); for (i = 0; i < self->info.height; i++) { memcpy (outdata, indata, GST_VIDEO_FRAME_WIDTH (&vframe) * 2); indata += GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, 0); outdata += frame->GetRowBytes (); } gst_video_frame_unmap (&vframe); convert_to_internal_clock (self, &running_time, &running_time_duration); GST_LOG_OBJECT (self, "Scheduling video frame %p at %" GST_TIME_FORMAT " with duration %" GST_TIME_FORMAT, frame, GST_TIME_ARGS (running_time), GST_TIME_ARGS (running_time_duration)); ret = 
self->output->output->ScheduleVideoFrame (frame, running_time, running_time_duration, GST_SECOND); if (ret != S_OK) { GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to schedule frame: 0x%08x", ret)); flow_ret = GST_FLOW_ERROR; goto out; } flow_ret = GST_FLOW_OK; out: frame->Release (); return flow_ret; }
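The lateness test in gst_decklink_video_sink_prepare() reduces to one comparison against the element's running time; a sketch of that check in isolation (frame_is_late() is a hypothetical helper):

#include <gst/gst.h>

static gboolean
frame_is_late (GstElement * elem, GstClockTime running_time,
    GstClockTime duration, GstClockTime latency, GstClockTime max_lateness)
{
  GstClock *clock = gst_element_get_clock (elem);
  GstClockTime now, base_time;

  if (clock == NULL)
    return FALSE;               /* no clock: nothing to compare against */

  now = gst_clock_get_time (clock);
  base_time = gst_element_get_base_time (elem);
  gst_object_unref (clock);

  if (now == GST_CLOCK_TIME_NONE || base_time == GST_CLOCK_TIME_NONE)
    return FALSE;
  if (now <= base_time)
    return FALSE;

  /* the frame is late once the clock has passed its end time plus slack */
  return now - base_time > running_time + duration + latency + max_lateness;
}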
static GstFlowReturn new_sample_cb (GstElement * appsink, gpointer user_data) { GstElement *appsrc = GST_ELEMENT (user_data); GstFlowReturn ret; GstSample *sample; GstBuffer *buffer; GstClockTime *base_time; GstPad *src, *sink; g_signal_emit_by_name (appsink, "pull-sample", &sample); if (sample == NULL) return GST_FLOW_OK; buffer = gst_sample_get_buffer (sample); if (buffer == NULL) { ret = GST_FLOW_OK; goto end; } gst_buffer_ref (buffer); buffer = gst_buffer_make_writable (buffer); KMS_ELEMENT_LOCK (GST_OBJECT_PARENT (appsrc)); base_time = g_object_get_data (G_OBJECT (GST_OBJECT_PARENT (appsrc)), BASE_TIME_DATA); if (base_time == NULL) { GstClock *clock; clock = gst_element_get_clock (appsrc); base_time = g_slice_new0 (GstClockTime); g_object_set_data_full (G_OBJECT (GST_OBJECT_PARENT (appsrc)), BASE_TIME_DATA, base_time, release_gst_clock); *base_time = gst_clock_get_time (clock) - gst_element_get_base_time (appsrc); g_object_unref (clock); GST_DEBUG ("Setting base time to: %" G_GUINT64_FORMAT, *base_time); } src = gst_element_get_static_pad (appsrc, "src"); sink = gst_pad_get_peer (src); if (sink != NULL) { if (GST_OBJECT_FLAG_IS_SET (sink, GST_PAD_FLAG_EOS)) { GST_INFO_OBJECT (sink, "Sending flush events"); gst_pad_send_event (sink, gst_event_new_flush_start ()); gst_pad_send_event (sink, gst_event_new_flush_stop (FALSE)); } g_object_unref (sink); } g_object_unref (src); if (GST_BUFFER_PTS_IS_VALID (buffer)) buffer->pts += *base_time; if (GST_BUFFER_DTS_IS_VALID (buffer)) buffer->dts += *base_time; KMS_ELEMENT_UNLOCK (GST_OBJECT_PARENT (appsrc)); // TODO: Do something to fix a possible previous EOS event g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret); gst_buffer_unref (buffer); if (ret != GST_FLOW_OK) { /* something wrong */ GST_ERROR ("Could not send buffer to appsrc %s. Cause: %s", GST_ELEMENT_NAME (appsrc), gst_flow_get_name (ret)); } end: if (sample != NULL) gst_sample_unref (sample); return ret; }
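The retimestamping in new_sample_cb() boils down to shifting buffer timestamps by the consuming pipeline's current running time; a standalone sketch, assuming the appsrc already has a clock (shift_to_running_time() is a hypothetical helper):

#include <gst/gst.h>

static void
shift_to_running_time (GstElement * appsrc, GstBuffer * buffer)
{
  GstClock *clock = gst_element_get_clock (appsrc);
  GstClockTime offset;

  if (clock == NULL)
    return;

  /* running time of the consuming pipeline at this instant */
  offset = gst_clock_get_time (clock) - gst_element_get_base_time (appsrc);
  gst_object_unref (clock);

  if (GST_BUFFER_PTS_IS_VALID (buffer))
    GST_BUFFER_PTS (buffer) += offset;
  if (GST_BUFFER_DTS_IS_VALID (buffer))
    GST_BUFFER_DTS (buffer) += offset;
}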
static void gst_decklink_video_sink_start_scheduled_playback (GstElement * element) { GstDecklinkVideoSink *self = GST_DECKLINK_VIDEO_SINK_CAST (element); GstClockTime start_time; HRESULT res; bool active; if (self->output->video_enabled && (!self->output->audiosink || self->output->audio_enabled) && (GST_STATE (self) == GST_STATE_PLAYING || GST_STATE_PENDING (self) == GST_STATE_PLAYING)) { // Need to unlock to get the clock time g_mutex_unlock (&self->output->lock); // FIXME: start time is the same for the complete pipeline, // but what we need here is the start time of this element! start_time = gst_element_get_base_time (element); if (start_time != GST_CLOCK_TIME_NONE) start_time = gst_clock_get_time (GST_ELEMENT_CLOCK (self)) - start_time; // FIXME: This will probably not work if (start_time == GST_CLOCK_TIME_NONE) start_time = 0; // Current times of internal and external clock when we go to // playing. We need this to convert the pipeline running time // to the running time of the hardware // // We can't use the normal base time for the external clock // because we might go to PLAYING later than the pipeline self->internal_base_time = gst_clock_get_internal_time (self->output->clock); self->external_base_time = gst_clock_get_internal_time (GST_ELEMENT_CLOCK (self)); convert_to_internal_clock (self, &start_time, NULL); g_mutex_lock (&self->output->lock); // Check if someone else started in the meantime if (self->output->started) return; active = false; self->output->output->IsScheduledPlaybackRunning (&active); if (active) { GST_DEBUG_OBJECT (self, "Stopping scheduled playback"); self->output->started = FALSE; res = self->output->output->StopScheduledPlayback (0, 0, 0); if (res != S_OK) { GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to stop scheduled playback: 0x%08x", res)); return; } } GST_DEBUG_OBJECT (self, "Starting scheduled playback at %" GST_TIME_FORMAT, GST_TIME_ARGS (start_time)); res = self->output->output->StartScheduledPlayback (start_time, GST_SECOND, 1.0); if (res != S_OK) { GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to start scheduled playback: 0x%08x", res)); return; } self->output->started = TRUE; self->output->clock_restart = TRUE; } else { GST_DEBUG_OBJECT (self, "Not starting scheduled playback yet"); } }
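start_scheduled_playback() captures a base time on both the hardware clock and the pipeline clock at (nearly) the same instant, so that later running times can be translated between the two timelines. A sketch of that mapping with hypothetical names; this is not the element's actual convert_to_internal_clock():

#include <gst/gst.h>

typedef struct
{
  GstClockTime internal_base;   /* hardware clock when playback started */
  GstClockTime external_base;   /* pipeline clock when playback started */
} ClockMapping;

static void
clock_mapping_init (ClockMapping * map, GstClock * internal,
    GstClock * external)
{
  /* sample both clocks back to back; any skew between the two reads
   * becomes a fixed error in the mapping */
  map->internal_base = gst_clock_get_internal_time (internal);
  map->external_base = gst_clock_get_internal_time (external);
}

static GstClockTime
clock_mapping_to_internal (const ClockMapping * map, GstClockTime external)
{
  /* shift an external (pipeline) time onto the hardware clock's timeline */
  if (external >= map->external_base)
    return map->internal_base + (external - map->external_base);
  else if (map->internal_base >= map->external_base - external)
    return map->internal_base - (map->external_base - external);
  else
    return 0;                   /* clamp instead of underflowing */
}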
static GstFlowReturn gst_wasapi_src_create (GstPushSrc * src, GstBuffer ** buf) { GstWasapiSrc *self = GST_WASAPI_SRC (src); GstFlowReturn ret = GST_FLOW_OK; GstClock *clock; GstClockTime timestamp, duration = self->period_time; HRESULT hr; gint16 *samples = NULL; guint32 nsamples_read = 0, nsamples; DWORD flags = 0; guint64 devpos; GST_OBJECT_LOCK (self); clock = GST_ELEMENT_CLOCK (self); if (clock != NULL) gst_object_ref (clock); GST_OBJECT_UNLOCK (self); if (clock != NULL && GST_CLOCK_TIME_IS_VALID (self->next_time)) { GstClockID id; id = gst_clock_new_single_shot_id (clock, self->next_time); gst_clock_id_wait (id, NULL); gst_clock_id_unref (id); } do { hr = IAudioCaptureClient_GetBuffer (self->capture_client, (BYTE **) & samples, &nsamples_read, &flags, &devpos, NULL); } while (hr == AUDCLNT_S_BUFFER_EMPTY); if (hr != S_OK) { GST_ERROR_OBJECT (self, "IAudioCaptureClient::GetBuffer () failed: %s", gst_wasapi_util_hresult_to_string (hr)); ret = GST_FLOW_ERROR; goto beach; } if (flags != 0) { GST_WARNING_OBJECT (self, "devpos %" G_GUINT64_FORMAT ": flags=0x%08x", devpos, flags); } /* FIXME: Why do we get 1024 sometimes and not a multiple of * samples_per_buffer? Shouldn't WASAPI provide a DISCONT * flag if we read too slow? */ nsamples = nsamples_read; g_assert (nsamples >= self->samples_per_buffer); if (nsamples > self->samples_per_buffer) { GST_WARNING_OBJECT (self, "devpos %" G_GUINT64_FORMAT ": got %d samples, expected %d, clipping!", devpos, nsamples, self->samples_per_buffer); nsamples = self->samples_per_buffer; } if (clock == NULL || clock == self->clock) { timestamp = gst_util_uint64_scale (devpos, GST_SECOND, self->client_clock_freq); } else { GstClockTime base_time; timestamp = gst_clock_get_time (clock); base_time = GST_ELEMENT_CAST (self)->base_time; if (timestamp > base_time) timestamp -= base_time; else timestamp = 0; if (timestamp > duration) timestamp -= duration; else timestamp = 0; } ret = gst_pad_alloc_buffer_and_set_caps (GST_BASE_SRC_PAD (self), devpos, nsamples * sizeof (gint16), GST_PAD_CAPS (GST_BASE_SRC_PAD (self)), buf); if (ret == GST_FLOW_OK) { guint i; gint16 *dst; GST_BUFFER_OFFSET_END (*buf) = devpos + self->samples_per_buffer; GST_BUFFER_TIMESTAMP (*buf) = timestamp; GST_BUFFER_DURATION (*buf) = duration; dst = (gint16 *) GST_BUFFER_DATA (*buf); for (i = 0; i < nsamples; i++) { *dst = *samples; samples += 2; dst++; } } hr = IAudioCaptureClient_ReleaseBuffer (self->capture_client, nsamples_read); if (hr != S_OK) { GST_ERROR_OBJECT (self, "IAudioCaptureClient::ReleaseBuffer () failed: %s", gst_wasapi_util_hresult_to_string (hr)); ret = GST_FLOW_ERROR; goto beach; } beach: if (clock != NULL) gst_object_unref (clock); return ret; }
static GstStateChangeReturn gst_decklink_video_sink_change_state (GstElement * element, GstStateChange transition) { GstDecklinkVideoSink *self = GST_DECKLINK_VIDEO_SINK_CAST (element); GstStateChangeReturn ret; switch (transition) { case GST_STATE_CHANGE_READY_TO_PAUSED: g_mutex_lock (&self->output->lock); self->output->clock_start_time = GST_CLOCK_TIME_NONE; self->output->clock_last_time = 0; self->output->clock_offset = 0; g_mutex_unlock (&self->output->lock); gst_element_post_message (element, gst_message_new_clock_provide (GST_OBJECT_CAST (element), self->output->clock, TRUE)); self->last_render_time = GST_CLOCK_TIME_NONE; break; case GST_STATE_CHANGE_PAUSED_TO_PLAYING:{ GstClock *clock, *audio_clock; clock = gst_element_get_clock (GST_ELEMENT_CAST (self)); audio_clock = gst_decklink_output_get_audio_clock (self->output); if (clock && clock != self->output->clock && clock != audio_clock) { gst_clock_set_master (self->output->clock, clock); } if (clock) gst_object_unref (clock); if (audio_clock) gst_object_unref (audio_clock); break; } default: break; } ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); if (ret == GST_STATE_CHANGE_FAILURE) return ret; switch (transition) { case GST_STATE_CHANGE_PAUSED_TO_READY: gst_element_post_message (element, gst_message_new_clock_lost (GST_OBJECT_CAST (element), self->output->clock)); gst_clock_set_master (self->output->clock, NULL); g_mutex_lock (&self->output->lock); self->output->clock_start_time = GST_CLOCK_TIME_NONE; self->output->clock_last_time = 0; self->output->clock_offset = 0; g_mutex_unlock (&self->output->lock); break; case GST_STATE_CHANGE_PLAYING_TO_PAUSED:{ GstClockTime start_time; HRESULT res; // FIXME: start time is the same for the complete pipeline, // but what we need here is the start time of this element! start_time = gst_element_get_base_time (element); if (start_time != GST_CLOCK_TIME_NONE) start_time = gst_clock_get_time (GST_ELEMENT_CLOCK (self)) - start_time; // FIXME: This will probably not work if (start_time == GST_CLOCK_TIME_NONE) start_time = 0; convert_to_internal_clock (self, &start_time, NULL); // The start time is now the running time when we stopped // playback GST_DEBUG_OBJECT (self, "Stopping scheduled playback at %" GST_TIME_FORMAT, GST_TIME_ARGS (start_time)); g_mutex_lock (&self->output->lock); self->output->started = FALSE; g_mutex_unlock (&self->output->lock); res = self->output->output->StopScheduledPlayback (start_time, 0, GST_SECOND); if (res != S_OK) { GST_ELEMENT_ERROR (self, STREAM, FAILED, (NULL), ("Failed to stop scheduled playback: 0x%08x", res)); ret = GST_STATE_CHANGE_FAILURE; } self->internal_base_time = GST_CLOCK_TIME_NONE; self->external_base_time = GST_CLOCK_TIME_NONE; break; } case GST_STATE_CHANGE_PAUSED_TO_PLAYING:{ g_mutex_lock (&self->output->lock); if (self->output->start_scheduled_playback) self->output->start_scheduled_playback (self->output->videosink); g_mutex_unlock (&self->output->lock); break; } default: break; } return ret; }
static GstFlowReturn gst_timecodestamper_transform_ip (GstBaseTransform * vfilter, GstBuffer * buffer) { GstTimeCodeStamper *timecodestamper = GST_TIME_CODE_STAMPER (vfilter); GstClockTime ref_time; GST_OBJECT_LOCK (timecodestamper); if (gst_buffer_get_video_time_code_meta (buffer) && !timecodestamper->override_existing) { GST_OBJECT_UNLOCK (timecodestamper); return GST_FLOW_OK; } else if (timecodestamper->override_existing) { gst_buffer_foreach_meta (buffer, remove_timecode_meta, NULL); } if (timecodestamper->source_clock != NULL) { if (timecodestamper->current_tc->hours == 0 && timecodestamper->current_tc->minutes == 0 && timecodestamper->current_tc->seconds == 0 && timecodestamper->current_tc->frames == 0) { guint64 hours, minutes, seconds, frames; /* Daily jam time */ ref_time = gst_clock_get_time (timecodestamper->source_clock); ref_time = ref_time % (24 * 60 * 60 * GST_SECOND); hours = ref_time / (GST_SECOND * 60 * 60); ref_time -= hours * GST_SECOND * 60 * 60; minutes = ref_time / (GST_SECOND * 60); ref_time -= minutes * GST_SECOND * 60; seconds = ref_time / GST_SECOND; ref_time -= seconds * GST_SECOND; /* Converting to frames for the whole ref_time might be inaccurate in case * we have a drop frame timecode */ frames = gst_util_uint64_scale (ref_time, timecodestamper->vinfo.fps_n, timecodestamper->vinfo.fps_d * GST_SECOND); GST_DEBUG_OBJECT (timecodestamper, "Initializing with %" G_GUINT64_FORMAT ":%" G_GUINT64_FORMAT ":%" G_GUINT64_FORMAT ":%" G_GUINT64_FORMAT "", hours, minutes, seconds, frames); gst_video_time_code_init (timecodestamper->current_tc, timecodestamper->vinfo.fps_n, timecodestamper->vinfo.fps_d, NULL, timecodestamper->vinfo.interlace_mode == GST_VIDEO_INTERLACE_MODE_PROGRESSIVE ? 0 : GST_VIDEO_TIME_CODE_FLAGS_INTERLACED, hours, minutes, seconds, 0, 0); gst_timecodestamper_set_drop_frame (timecodestamper); /* Do not use frames when initializing because maybe we have drop frame */ gst_video_time_code_add_frames (timecodestamper->current_tc, frames); } } else if (timecodestamper->source_clock == NULL) { GstClockTime timecode_time; timecode_time = gst_video_time_code_nsec_since_daily_jam (timecodestamper->current_tc); ref_time = gst_segment_to_stream_time (&vfilter->segment, GST_FORMAT_TIME, buffer->pts); if (timecode_time != GST_CLOCK_TIME_NONE && ref_time != GST_CLOCK_TIME_NONE && ((timecode_time > ref_time && timecode_time - ref_time > GST_SECOND) || (ref_time > timecode_time && ref_time - timecode_time > GST_SECOND))) { gchar *tc_str = gst_video_time_code_to_string (timecodestamper->current_tc); GST_WARNING_OBJECT (timecodestamper, "Time code %s (stream time %" GST_TIME_FORMAT ") has drifted more than one second from stream time %" GST_TIME_FORMAT, tc_str, GST_TIME_ARGS (timecode_time), GST_TIME_ARGS (ref_time)); g_free (tc_str); } } gst_buffer_add_video_time_code_meta (buffer, timecodestamper->current_tc); gst_video_time_code_increment_frame (timecodestamper->current_tc); GST_OBJECT_UNLOCK (timecodestamper); return GST_FLOW_OK; }
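A worked version of the daily-jam arithmetic in the branch above: fold the source clock time onto a 24-hour range and split it into hours, minutes, seconds and a sub-second frame count (split_daily_time() is a hypothetical helper):

#include <gst/gst.h>

static void
split_daily_time (GstClockTime t, gint fps_n, gint fps_d,
    guint64 * hours, guint64 * minutes, guint64 * seconds, guint64 * frames)
{
  t %= 24 * 60 * 60 * GST_SECOND;       /* time of day since the daily jam */
  *hours = t / (GST_SECOND * 60 * 60);
  t -= *hours * GST_SECOND * 60 * 60;
  *minutes = t / (GST_SECOND * 60);
  t -= *minutes * GST_SECOND * 60;
  *seconds = t / GST_SECOND;
  t -= *seconds * GST_SECOND;
  /* the remainder (< 1 s) expressed in frames; with drop-frame timecode the
   * caller should add these via gst_video_time_code_add_frames() instead of
   * baking them into the initial timecode, as the element above does */
  *frames = gst_util_uint64_scale (t, fps_n, (guint64) fps_d * GST_SECOND);
}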
static GstFlowReturn gst_genicamsrc_create (GstPushSrc * psrc, GstBuffer ** buf) { GstGenicamSrc *src = GST_GENICAM_SRC (psrc); gint32 dropped_frames = 0; GstClock *clock; GstClockTime clock_time; GST_LOG_OBJECT (src, "create"); *buf = gst_genicamsrc_get_buffer (src); if (!*buf) { return GST_FLOW_ERROR; } clock = gst_element_get_clock (GST_ELEMENT (src)); if (clock != NULL) { clock_time = gst_clock_get_time (clock); gst_object_unref (clock); } else { clock_time = GST_CLOCK_TIME_NONE; } /* check for dropped frames and disrupted signal */ //dropped_frames = (circ_handle.FrameCount - src->last_frame_count) - 1; if (dropped_frames > 0) { src->total_dropped_frames += dropped_frames; GST_WARNING_OBJECT (src, "Dropped %d frames (%d total)", dropped_frames, src->total_dropped_frames); } else if (dropped_frames < 0) { GST_WARNING_OBJECT (src, "Frame count non-monotonic, signal disrupted?"); } //src->last_frame_count = circ_handle.FrameCount; /* create GstBuffer then release circ buffer back to acquisition */ //*buf = gst_genicamsrc_create_buffer_from_circ_handle (src, &circ_handle); //ret = // BiCirStatusSet (src->board, &src->buffer_array, circ_handle, BIAVAILABLE); //if (ret != BI_OK) { // GST_ELEMENT_ERROR (src, RESOURCE, FAILED, // ("Failed to release buffer: %s", gst_genicamsrc_get_error_string (src, // ret)), (NULL)); // return GST_FLOW_ERROR; //} /* TODO: understand why timestamps for circ_handle are sometimes 0 */ //GST_BUFFER_TIMESTAMP (*buf) = // GST_CLOCK_DIFF (gst_element_get_base_time (GST_ELEMENT (src)), // src->acq_start_time + circ_handle.HiResTimeStamp.totalSec * GST_SECOND); if (clock_time != GST_CLOCK_TIME_NONE) { GST_BUFFER_TIMESTAMP (*buf) = GST_CLOCK_DIFF (gst_element_get_base_time (GST_ELEMENT (src)), clock_time); } //GST_BUFFER_OFFSET (*buf) = circ_handle.FrameCount - 1; if (src->stop_requested) { if (*buf != NULL) { gst_buffer_unref (*buf); *buf = NULL; } return GST_FLOW_FLUSHING; } return GST_FLOW_OK; }
static GstFlowReturn gst_v4l2src_create (GstPushSrc * src, GstBuffer ** buf) { GstV4l2Src *v4l2src = GST_V4L2SRC (src); GstV4l2Object *obj = v4l2src->v4l2object; GstV4l2BufferPool *pool = GST_V4L2_BUFFER_POOL_CAST (obj->pool); GstFlowReturn ret; GstClock *clock; GstClockTime abs_time, base_time, timestamp, duration; GstClockTime delay; GstMessage *qos_msg; do { ret = GST_BASE_SRC_CLASS (parent_class)->alloc (GST_BASE_SRC (src), 0, obj->info.size, buf); if (G_UNLIKELY (ret != GST_FLOW_OK)) goto alloc_failed; ret = gst_v4l2_buffer_pool_process (pool, buf); } while (ret == GST_V4L2_FLOW_CORRUPTED_BUFFER); if (G_UNLIKELY (ret != GST_FLOW_OK)) goto error; timestamp = GST_BUFFER_TIMESTAMP (*buf); duration = obj->duration; /* timestamps, LOCK to get clock and base time. */ /* FIXME: element clock and base_time is rarely changing */ GST_OBJECT_LOCK (v4l2src); if ((clock = GST_ELEMENT_CLOCK (v4l2src))) { /* we have a clock, get base time and ref clock */ base_time = GST_ELEMENT (v4l2src)->base_time; gst_object_ref (clock); } else { /* no clock, can't set timestamps */ base_time = GST_CLOCK_TIME_NONE; } GST_OBJECT_UNLOCK (v4l2src); /* sample pipeline clock */ if (clock) { abs_time = gst_clock_get_time (clock); gst_object_unref (clock); } else { abs_time = GST_CLOCK_TIME_NONE; } retry: if (!v4l2src->has_bad_timestamp && timestamp != GST_CLOCK_TIME_NONE) { struct timespec now; GstClockTime gstnow; /* v4l2 specs say to use the system time although many drivers switched to * the more desirable monotonic time. We first try to use the monotonic time * and see how that goes */ clock_gettime (CLOCK_MONOTONIC, &now); gstnow = GST_TIMESPEC_TO_TIME (now); if (timestamp > gstnow || (gstnow - timestamp) > (10 * GST_SECOND)) { GTimeVal now; /* very large diff, fall back to system time */ g_get_current_time (&now); gstnow = GST_TIMEVAL_TO_TIME (now); } /* Detect buggy drivers here, and stop using their timestamp. 
Failing any * of these conditions would imply a very buggy driver: * - Timestamp in the future * - Timestamp going backward compared to the last seen timestamp * - Timestamp jumping forward by less than a frame duration * - Delay bigger than the actual timestamp * */ if (timestamp > gstnow) { GST_WARNING_OBJECT (v4l2src, "Timestamp in the future detected, ignoring driver timestamps"); v4l2src->has_bad_timestamp = TRUE; goto retry; } if (v4l2src->last_timestamp > timestamp) { GST_WARNING_OBJECT (v4l2src, "Timestamp going backward, ignoring driver timestamps"); v4l2src->has_bad_timestamp = TRUE; goto retry; } delay = gstnow - timestamp; if (delay > timestamp) { GST_WARNING_OBJECT (v4l2src, "Timestamp does not correlate with any clock, ignoring driver timestamps"); v4l2src->has_bad_timestamp = TRUE; goto retry; } /* Save last timestamp for sanity checks */ v4l2src->last_timestamp = timestamp; GST_DEBUG_OBJECT (v4l2src, "ts: %" GST_TIME_FORMAT " now %" GST_TIME_FORMAT " delay %" GST_TIME_FORMAT, GST_TIME_ARGS (timestamp), GST_TIME_ARGS (gstnow), GST_TIME_ARGS (delay)); } else { /* we assume 1 frame latency otherwise */ if (GST_CLOCK_TIME_IS_VALID (duration)) delay = duration; else delay = 0; } /* set buffer metadata */ if (G_LIKELY (abs_time != GST_CLOCK_TIME_NONE)) { /* the time now is the time of the clock minus the base time */ timestamp = abs_time - base_time; /* adjust for delay in the device */ if (timestamp > delay) timestamp -= delay; else timestamp = 0; } else { timestamp = GST_CLOCK_TIME_NONE; } /* activate settings for next frame */ if (GST_CLOCK_TIME_IS_VALID (duration)) { v4l2src->ctrl_time += duration; } else { /* this is not very good (as it should be the next timestamp), * still good enough for linear fades (as long as it is not -1) */ v4l2src->ctrl_time = timestamp; } gst_object_sync_values (GST_OBJECT (src), v4l2src->ctrl_time); GST_INFO_OBJECT (src, "sync to %" GST_TIME_FORMAT " out ts %" GST_TIME_FORMAT, GST_TIME_ARGS (v4l2src->ctrl_time), GST_TIME_ARGS (timestamp)); /* use generated offset values only if there are not already valid ones * set by the v4l2 device */ if (!GST_BUFFER_OFFSET_IS_VALID (*buf) || !GST_BUFFER_OFFSET_END_IS_VALID (*buf)) { GST_BUFFER_OFFSET (*buf) = v4l2src->offset++; GST_BUFFER_OFFSET_END (*buf) = v4l2src->offset; } else { /* adjust raw v4l2 device sequence, will restart at zero in case of renegotiation * (streamoff/streamon) */ GST_BUFFER_OFFSET (*buf) += v4l2src->renegotiation_adjust; GST_BUFFER_OFFSET_END (*buf) += v4l2src->renegotiation_adjust; /* check for frame loss with given (from v4l2 device) buffer offset */ if ((v4l2src->offset != 0) && (GST_BUFFER_OFFSET (*buf) != (v4l2src->offset + 1))) { guint64 lost_frame_count = GST_BUFFER_OFFSET (*buf) - v4l2src->offset - 1; GST_WARNING_OBJECT (v4l2src, "lost frames detected: count = %" G_GUINT64_FORMAT " - ts: %" GST_TIME_FORMAT, lost_frame_count, GST_TIME_ARGS (timestamp)); qos_msg = gst_message_new_qos (GST_OBJECT_CAST (v4l2src), TRUE, GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, timestamp, GST_CLOCK_TIME_IS_VALID (duration) ?
lost_frame_count * duration : GST_CLOCK_TIME_NONE); gst_element_post_message (GST_ELEMENT_CAST (v4l2src), qos_msg); } v4l2src->offset = GST_BUFFER_OFFSET (*buf); } GST_BUFFER_TIMESTAMP (*buf) = timestamp; GST_BUFFER_DURATION (*buf) = duration; return ret; /* ERROR */ alloc_failed: { if (ret != GST_FLOW_FLUSHING) GST_ELEMENT_ERROR (src, RESOURCE, NO_SPACE_LEFT, ("Failed to allocate a buffer"), (NULL)); return ret; } error: { if (ret == GST_V4L2_FLOW_LAST_BUFFER) { GST_ELEMENT_ERROR (src, RESOURCE, FAILED, ("Driver returned a buffer with no payload, this most likely " "indicates a bug in the driver."), (NULL)); ret = GST_FLOW_ERROR; } else { GST_DEBUG_OBJECT (src, "error processing buffer %d (%s)", ret, gst_flow_get_name (ret)); } return ret; } }
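The three driver-timestamp checks from gst_v4l2src_create() collected into one predicate, for reference; driver_ts_is_sane() is a hypothetical helper, and any failing check means the driver clock should no longer be trusted:

#include <gst/gst.h>

static gboolean
driver_ts_is_sane (GstClockTime ts, GstClockTime now, GstClockTime last_ts)
{
  if (ts > now)
    return FALSE;               /* timestamp in the future */
  if (last_ts != GST_CLOCK_TIME_NONE && ts < last_ts)
    return FALSE;               /* timestamp going backward */
  if (now - ts > ts)
    return FALSE;               /* delay does not correlate with any clock */
  return TRUE;
}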
static gboolean gst_genicamsrc_start (GstBaseSrc * bsrc) { GstGenicamSrc *src = GST_GENICAM_SRC (bsrc); GC_ERROR ret; uint32_t i, num_ifaces, num_devs; guint32 width, height, bpp; GstVideoInfo vinfo; GST_DEBUG_OBJECT (src, "start"); /* bind functions from CTI */ if (!gst_genicamsrc_bind_functions (src)) { GST_ELEMENT_ERROR (src, LIBRARY, INIT, ("GenTL CTI could not be opened: %s", g_module_error ()), (NULL)); return FALSE; } /* initialize library and print info */ ret = GTL_GCInitLib (); HANDLE_GTL_ERROR ("GenTL Producer library could not be initialized"); gst_genicam_print_gentl_impl_info (src); /* open GenTL, print info, and update interface list */ ret = GTL_TLOpen (&src->hTL); HANDLE_GTL_ERROR ("System module failed to open"); gst_genicam_print_system_info (src); ret = GTL_TLUpdateInterfaceList (src->hTL, NULL, src->timeout); HANDLE_GTL_ERROR ("Failed to update interface list within timeout"); /* print info for all interfaces and open specified interface */ ret = GTL_TLGetNumInterfaces (src->hTL, &num_ifaces); HANDLE_GTL_ERROR ("Failed to get number of interfaces"); if (num_ifaces > 0) { GST_DEBUG_OBJECT (src, "Found %d GenTL interfaces", num_ifaces); for (i = 0; i < num_ifaces; ++i) { gst_genicam_print_interface_info (src, i); } } else { GST_ELEMENT_ERROR (src, LIBRARY, FAILED, ("No interfaces found"), (NULL)); goto error; } if (!src->interface_id || src->interface_id[0] == 0) { size_t id_size; GST_DEBUG_OBJECT (src, "Trying to find interface ID at index %d", src->interface_index); ret = GTL_TLGetInterfaceID (src->hTL, src->interface_index, NULL, &id_size); HANDLE_GTL_ERROR ("Failed to get interface ID at specified index"); if (src->interface_id) { g_free (src->interface_id); } src->interface_id = (gchar *) g_malloc (id_size); ret = GTL_TLGetInterfaceID (src->hTL, src->interface_index, src->interface_id, &id_size); HANDLE_GTL_ERROR ("Failed to get interface ID at specified index"); } GST_DEBUG_OBJECT (src, "Trying to open interface '%s'", src->interface_id); ret = GTL_TLOpenInterface (src->hTL, src->interface_id, &src->hIF); HANDLE_GTL_ERROR ("Interface module failed to open"); ret = GTL_IFUpdateDeviceList (src->hIF, NULL, src->timeout); HANDLE_GTL_ERROR ("Failed to update device list within timeout"); /* print info for all devices and open specified device */ ret = GTL_IFGetNumDevices (src->hIF, &num_devs); HANDLE_GTL_ERROR ("Failed to get number of devices"); if (num_devs > 0) { for (i = 0; i < num_devs; ++i) { gst_genicam_print_device_info (src, i); } } else { GST_ELEMENT_ERROR (src, LIBRARY, FAILED, ("No devices found on interface"), (NULL)); goto error; } if (!src->device_id || src->device_id[0] == 0) { size_t id_size; GST_DEBUG_OBJECT (src, "Trying to find device ID at index %d", src->device_index); ret = GTL_IFGetDeviceID (src->hIF, src->device_index, NULL, &id_size); HANDLE_GTL_ERROR ("Failed to get device ID at specified index"); if (src->device_id) { g_free (src->device_id); } src->device_id = (gchar *) g_malloc (id_size); ret = GTL_IFGetDeviceID (src->hIF, src->device_index, src->device_id, &id_size); HANDLE_GTL_ERROR ("Failed to get device ID at specified index"); } GST_DEBUG_OBJECT (src, "Trying to open device '%s'", src->device_id); ret = GTL_IFOpenDevice (src->hIF, src->device_id, DEVICE_ACCESS_CONTROL, &src->hDEV); HANDLE_GTL_ERROR ("Failed to open device"); /* find and open specified data stream id */ if (!src->stream_id || src->stream_id[0] == 0) { size_t id_size; GST_DEBUG_OBJECT (src, "Trying to find stream ID at index %d", src->stream_index); ret = GTL_DevGetDataStreamID
(src->hDEV, src->stream_index, NULL, &id_size); HANDLE_GTL_ERROR ("Failed to get stream ID at specified index"); if (src->stream_id) { g_free (src->stream_id); } src->stream_id = (gchar *) g_malloc (id_size); ret = GTL_DevGetDataStreamID (src->hDEV, src->stream_index, src->stream_id, &id_size); HANDLE_GTL_ERROR ("Failed to get stream ID at specified index"); } GST_DEBUG_OBJECT (src, "Trying to open data stream '%s'", src->stream_id); ret = GTL_DevOpenDataStream (src->hDEV, src->stream_id, &src->hDS); HANDLE_GTL_ERROR ("Failed to open data stream"); { uint32_t num_urls = 0; char url[2048]; size_t url_len = sizeof (url); INFO_DATATYPE datatype; const uint32_t url_index = 0; ret = GTL_DevGetPort (src->hDEV, &src->hDevPort); HANDLE_GTL_ERROR ("Failed to get port on device"); ret = GTL_GCGetNumPortURLs (src->hDevPort, &num_urls); HANDLE_GTL_ERROR ("Failed to get number of port URLs"); GST_DEBUG_OBJECT (src, "Found %d port URLs", num_urls); GST_DEBUG_OBJECT (src, "Trying to get URL index %d", url_index); ret = GTL_GCGetPortURLInfo (src->hDevPort, url_index, URL_INFO_URL, &datatype, url, &url_len); HANDLE_GTL_ERROR ("Failed to get URL"); GST_DEBUG_OBJECT (src, "Found URL '%s'", url); g_assert (url_len > 6); if (g_str_has_prefix (url, "file")) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("file url not supported yet"), (NULL)); goto error; } else if (g_str_has_prefix (url, "local")) { GError *err = NULL; GMatchInfo *matchInfo; GRegex *regex; gchar *filename, *addr_str, *len_str; uint64_t addr; size_t len; gchar *buf; regex = g_regex_new ("local:(?:///)?(?<filename>[^;]+);(?<address>[^;]+);(?<length>[^?]+)(?:[?]SchemaVersion=([^&]+))?", (GRegexCompileFlags) 0, (GRegexMatchFlags) 0, &err); if (!regex) { goto error; } g_regex_match (regex, url, (GRegexMatchFlags) 0, &matchInfo); filename = g_match_info_fetch_named (matchInfo, "filename"); addr_str = g_match_info_fetch_named (matchInfo, "address"); len_str = g_match_info_fetch_named (matchInfo, "length"); if (!filename || !addr_str || !len_str) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("Failed to parse local URL"), (NULL)); goto error; } addr = g_ascii_strtoull (addr_str, NULL, 16); len = g_ascii_strtoull (len_str, NULL, 16); buf = (gchar *) g_malloc (len); ret = GTL_GCReadPort (src->hDevPort, addr, buf, &len); HANDLE_GTL_ERROR ("Failed to read XML from port"); if (g_str_has_suffix (filename, "zip")) { gchar *zipfilepath; unzFile uf; unz_file_info64 fileinfo; gchar xmlfilename[2048]; gchar *xml; zipfilepath = g_build_filename (g_get_tmp_dir (), filename, NULL); if (!g_file_set_contents (zipfilepath, buf, len, &err)) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("Failed to write zipped XML to %s", zipfilepath), (NULL)); goto error; } uf = unzOpen64 (zipfilepath); if (!uf) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("Failed to open zipped XML %s", zipfilepath), (NULL)); goto error; } //ret = unzGetGlobalInfo64(uf, &gi); ret = unzGetCurrentFileInfo64 (uf, &fileinfo, xmlfilename, sizeof (xmlfilename), NULL, 0, NULL, 0); if (ret != UNZ_OK) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("Failed to query zip file %s", zipfilepath), (NULL)); goto error; } ret = unzOpenCurrentFile (uf); if (ret != UNZ_OK) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("Failed to extract file %s", xmlfilename), (NULL)); goto error; } xml = (gchar *) g_malloc (fileinfo.uncompressed_size); if (!xml) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("Failed to allocate memory to extract XML file"), (NULL)); goto error; } ret = unzReadCurrentFile (uf, xml, fileinfo.uncompressed_size); if (ret
!= fileinfo.uncompressed_size) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("Failed to extract XML file %s", xmlfilename), (NULL)); goto error; } unzClose (uf); g_free (zipfilepath); zipfilepath = g_build_filename (g_get_tmp_dir (), xmlfilename, NULL); g_file_set_contents (zipfilepath, xml, fileinfo.uncompressed_size, &err); g_free (zipfilepath); g_free (xml); //GZlibDecompressor *decompress; //char *unzipped; //gsize outbuf_size, bytes_read, bytes_written; //GInputStream *zippedstream, *unzippedstream; //decompress = g_zlib_decompressor_new (G_ZLIB_COMPRESSOR_FORMAT_ZLIB); ////zippedstream = g_memory_input_stream_new_from_data(buf, len, g_free); ////unzippedstream = g_converter_input_stream_new (zippedstream, G_CONVERTER(decompress)); ////g_input_stream_read_all (G_INPUT_STREAM(unzippedstream), //// g_converter_output_stream //outbuf_size = 10000000; //unzipped = (gchar*) g_malloc(outbuf_size); //g_converter_convert (G_CONVERTER (decompress), buf, len, unzipped, outbuf_size, G_CONVERTER_NO_FLAGS, &bytes_read, &bytes_written, &err); //GST_DEBUG_OBJECT (src, unzipped); } g_free (filename); g_free (addr_str); g_free (len_str); g_free (buf); } else if (g_str_has_prefix (url, "http")) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("file url not supported yet"), (NULL)); goto error; } } { // TODO: use Genicam node map for this guint32 val = 0; size_t datasize = 4; ret = GTL_GCReadPort (src->hDevPort, 0x30204, &val, &datasize); HANDLE_GTL_ERROR ("Failed to get width"); width = GUINT32_FROM_BE (val); ret = GTL_GCReadPort (src->hDevPort, 0x30224, &val, &datasize); HANDLE_GTL_ERROR ("Failed to get height"); height = GUINT32_FROM_BE (val); bpp = 8; } if (!gst_genicamsrc_prepare_buffers (src)) { GST_ELEMENT_ERROR (src, RESOURCE, TOO_LAZY, ("Failed to prepare buffers"), (NULL)); goto error; } { ret = GTL_GCRegisterEvent (src->hDS, EVENT_NEW_BUFFER, &src->hNewBufferEvent); HANDLE_GTL_ERROR ("Failed to register New Buffer event"); } ret = GTL_DSStartAcquisition (src->hDS, ACQ_START_FLAGS_DEFAULT, GENTL_INFINITE); HANDLE_GTL_ERROR ("Failed to start stream acquisition"); { // TODO: use Genicam node map for this guint32 val; size_t datasize; /* set AcquisitionMode to Continuous */ val = GUINT32_TO_BE (2); datasize = sizeof (val); ret = GTL_GCWritePort (src->hDevPort, 0x40004, &val, &datasize); HANDLE_GTL_ERROR ("Failed to start device acquisition"); /* send AcquisitionStart command */ val = GUINT32_TO_BE (1); datasize = sizeof (val); ret = GTL_GCWritePort (src->hDevPort, 0x40024, &val, &datasize); HANDLE_GTL_ERROR ("Failed to start device acquisition"); } /* create caps */ if (src->caps) { gst_caps_unref (src->caps); src->caps = NULL; } gst_video_info_init (&vinfo); if (bpp <= 8) { gst_video_info_set_format (&vinfo, GST_VIDEO_FORMAT_GRAY8, width, height); src->caps = gst_video_info_to_caps (&vinfo); } else if (bpp > 8 && bpp <= 16) { GValue val = G_VALUE_INIT; GstStructure *s; if (G_BYTE_ORDER == G_LITTLE_ENDIAN) { gst_video_info_set_format (&vinfo, GST_VIDEO_FORMAT_GRAY16_LE, width, height); } else if (G_BYTE_ORDER == G_BIG_ENDIAN) { gst_video_info_set_format (&vinfo, GST_VIDEO_FORMAT_GRAY16_BE, width, height); } src->caps = gst_video_info_to_caps (&vinfo); /* set bpp, extra info for GRAY16 so elements can scale properly */ s = gst_caps_get_structure (src->caps, 0); g_value_init (&val, G_TYPE_INT); g_value_set_int (&val, bpp); gst_structure_set_value (s, "bpp", &val); g_value_unset (&val); } else { GST_ELEMENT_ERROR (src, STREAM, WRONG_TYPE, ("Unknown or unsupported bit depth (%d).", bpp), (NULL)); 
return FALSE; } src->height = vinfo.height; src->gst_stride = GST_VIDEO_INFO_COMP_STRIDE (&vinfo, 0); GST_DEBUG_OBJECT (src, "starting acquisition"); //TODO: start acquisition engine /* TODO: check timestamps on buffers vs start time */ { GstClock *clock = gst_element_get_clock (GST_ELEMENT (src)); if (clock != NULL) { src->acq_start_time = gst_clock_get_time (clock); gst_object_unref (clock); } else { src->acq_start_time = GST_CLOCK_TIME_NONE; } } return TRUE; error: if (src->hDS) { GTL_DSClose (src->hDS); src->hDS = NULL; } if (src->hDEV) { GTL_DevClose (src->hDEV); src->hDEV = NULL; } if (src->hIF) { GTL_IFClose (src->hIF); src->hIF = NULL; } if (src->hTL) { GTL_TLClose (src->hTL); src->hTL = NULL; } GTL_GCCloseLib (); return FALSE; }
/* MT safe */ static GstStateChangeReturn gst_pipeline_change_state (GstElement * element, GstStateChange transition) { GstStateChangeReturn result = GST_STATE_CHANGE_SUCCESS; GstPipeline *pipeline = GST_PIPELINE_CAST (element); GstClock *clock; switch (transition) { case GST_STATE_CHANGE_NULL_TO_READY: GST_OBJECT_LOCK (element); if (element->bus) gst_bus_set_flushing (element->bus, FALSE); GST_OBJECT_UNLOCK (element); break; case GST_STATE_CHANGE_READY_TO_PAUSED: GST_OBJECT_LOCK (element); pipeline->priv->update_clock = TRUE; GST_OBJECT_UNLOCK (element); /* READY to PAUSED starts running_time from 0 */ reset_start_time (pipeline, 0); break; case GST_STATE_CHANGE_PAUSED_TO_PLAYING: { GstClockTime now, start_time, last_start_time, delay; gboolean update_clock; GstClock *cur_clock; GST_DEBUG_OBJECT (element, "selecting clock and base_time"); GST_OBJECT_LOCK (element); cur_clock = element->clock; if (cur_clock) gst_object_ref (cur_clock); /* get the desired running_time of the first buffer aka the start_time */ start_time = GST_ELEMENT_START_TIME (pipeline); last_start_time = pipeline->priv->last_start_time; pipeline->priv->last_start_time = start_time; /* see if we need to update the clock */ update_clock = pipeline->priv->update_clock; pipeline->priv->update_clock = FALSE; delay = pipeline->delay; GST_OBJECT_UNLOCK (element); /* running time changed, either with a PAUSED or a flush, we need to check * if there is a new clock & update the base time */ /* only do this for top-level, however */ if (GST_OBJECT_PARENT (element) == NULL && (update_clock || last_start_time != start_time)) { GST_DEBUG_OBJECT (pipeline, "Need to update start_time"); /* when going to PLAYING, select a clock when needed. If we just got * flushed, we don't reselect the clock. */ if (update_clock) { GST_DEBUG_OBJECT (pipeline, "Need to update clock."); clock = gst_element_provide_clock (element); } else { GST_DEBUG_OBJECT (pipeline, "Don't need to update clock, using old clock."); /* only try to ref if cur_clock is not NULL */ if (cur_clock) gst_object_ref (cur_clock); clock = cur_clock; } if (clock) { now = gst_clock_get_time (clock); } else { GST_DEBUG_OBJECT (pipeline, "no clock, using base time of NONE"); now = GST_CLOCK_TIME_NONE; } if (clock != cur_clock) { /* now distribute the clock (which could be NULL). If some * element refuses the clock, this will return FALSE and * we effectively fail the state change. */ if (!gst_element_set_clock (element, clock)) goto invalid_clock; /* if we selected and distributed a new clock, let the app * know about it */ gst_element_post_message (element, gst_message_new_new_clock (GST_OBJECT_CAST (element), clock)); } if (clock) gst_object_unref (clock); if (start_time != GST_CLOCK_TIME_NONE && now != GST_CLOCK_TIME_NONE) { GstClockTime new_base_time = now - start_time + delay; GST_DEBUG_OBJECT (element, "start_time=%" GST_TIME_FORMAT ", now=%" GST_TIME_FORMAT ", base_time %" GST_TIME_FORMAT, GST_TIME_ARGS (start_time), GST_TIME_ARGS (now), GST_TIME_ARGS (new_base_time)); gst_element_set_base_time (element, new_base_time); } else { GST_DEBUG_OBJECT (pipeline, "NOT adjusting base_time because start_time is NONE"); } } else { GST_DEBUG_OBJECT (pipeline, "NOT adjusting base_time because we selected one before"); } if (cur_clock) gst_object_unref (cur_clock); break; } case GST_STATE_CHANGE_PLAYING_TO_PAUSED: { /* we take a start_time snapshot before calling the children state changes * so that they know about when the pipeline PAUSED. 
*/ pipeline_update_start_time (element); break; } case GST_STATE_CHANGE_PAUSED_TO_READY: reset_start_time (pipeline, 0); break; case GST_STATE_CHANGE_READY_TO_NULL: break; } result = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); switch (transition) { case GST_STATE_CHANGE_NULL_TO_READY: break; case GST_STATE_CHANGE_READY_TO_PAUSED: break; case GST_STATE_CHANGE_PAUSED_TO_PLAYING: break; case GST_STATE_CHANGE_PLAYING_TO_PAUSED: { /* Take a new snapshot of the start_time after calling the state change on * all children. This will be the running_time of the pipeline when we go * back to PLAYING */ pipeline_update_start_time (element); break; } case GST_STATE_CHANGE_PAUSED_TO_READY: break; case GST_STATE_CHANGE_READY_TO_NULL: { GstBus *bus; gboolean auto_flush; /* grab some stuff before we release the lock to flush out the bus */ GST_OBJECT_LOCK (element); if ((bus = element->bus)) gst_object_ref (bus); auto_flush = pipeline->priv->auto_flush_bus; GST_OBJECT_UNLOCK (element); if (bus) { if (auto_flush) { gst_bus_set_flushing (bus, TRUE); } else { GST_INFO_OBJECT (element, "not flushing bus, auto-flushing disabled"); } gst_object_unref (bus); } break; } } return result; /* ERRORS */ invalid_clock: { /* we generate this error when the selected clock was not * accepted by some element */ GST_ELEMENT_ERROR (pipeline, CORE, CLOCK, (_("Selected clock cannot be used in pipeline.")), ("Pipeline cannot operate with selected clock")); GST_DEBUG_OBJECT (pipeline, "Pipeline cannot operate with selected clock %p", clock); if (clock) gst_object_unref (clock); return GST_STATE_CHANGE_FAILURE; } }
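The base-time update in the PAUSED_TO_PLAYING branch above follows directly from running_time = clock_time - base_time; a worked standalone version (compute_base_time() is a hypothetical helper):

#include <gst/gst.h>

static GstClockTime
compute_base_time (GstClockTime now, GstClockTime start_time,
    GstClockTime delay)
{
  /* running_time = now - base_time  =>  base_time = now - start_time,
   * plus the configured pipeline delay */
  if (now == GST_CLOCK_TIME_NONE || start_time == GST_CLOCK_TIME_NONE)
    return GST_CLOCK_TIME_NONE;
  return now - start_time + delay;
}

Choosing the base time this way is what makes the first buffer after resume play at running time start_time, so playback continues where it paused instead of jumping.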
static gpointer gst_net_time_provider_thread (gpointer data) { GstNetTimeProvider *self = data; struct sockaddr_in tmpaddr; socklen_t len; GstNetTimePacket *packet; gint ret; while (TRUE) { GST_LOG_OBJECT (self, "doing select"); ret = gst_poll_wait (self->priv->fdset, GST_CLOCK_TIME_NONE); GST_LOG_OBJECT (self, "select returned %d", ret); if (ret <= 0) { if (errno == EBUSY) { GST_LOG_OBJECT (self, "stop"); goto stopped; } else if (errno != EAGAIN && errno != EINTR) goto select_error; else continue; } else { /* got data in */ len = sizeof (struct sockaddr); packet = gst_net_time_packet_receive (self->priv->sock.fd, (struct sockaddr *) &tmpaddr, &len); if (!packet) goto receive_error; if (IS_ACTIVE (self)) { /* do what we were asked to and send the packet back */ packet->remote_time = gst_clock_get_time (self->clock); /* ignore errors */ gst_net_time_packet_send (packet, self->priv->sock.fd, (struct sockaddr *) &tmpaddr, len); } g_free (packet); continue; } g_assert_not_reached (); /* log errors and keep going */ select_error: { GST_DEBUG_OBJECT (self, "select error %d: %s (%d)", ret, g_strerror (errno), errno); continue; } stopped: { GST_DEBUG_OBJECT (self, "shutting down"); /* close socket */ return NULL; } receive_error: { GST_DEBUG_OBJECT (self, "receive error"); continue; } g_assert_not_reached (); } g_assert_not_reached (); return NULL; }