/**
 * Construct and start the GStreamer playback pipeline for one TSMF stream.
 *
 * Assembles an appsrc -> decodebin -> (video conversion | audio conversion
 * with a named volume element) -> platform sink launch string, parses it,
 * looks up the named elements, installs the appsrc callbacks and moves the
 * pipeline to PLAYING.
 *
 * @param mdecoder  decoder instance to attach the pipeline to; may be NULL.
 *
 * @return TRUE on success, FALSE on failure. NOTE(review): on failure after
 *         gst_parse_launch() succeeded, mdecoder->pipe still holds the
 *         partially built pipeline — presumably the caller tears it down;
 *         verify against the free path.
 */
BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
{
	const char* sourceDesc = "appsrc name=source ! decodebin name=decoder !";
	const char* videoDesc = "autovideoconvert ! videoscale !";
	const char* audioDesc = "audioconvert ! audiorate ! audioresample ! volume name=audiovolume !";
	char launch[1024];
	BOOL isVideo;

	if (!mdecoder)
		return FALSE;

	isVideo = (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO);

	/* TODO: Construction of the pipeline from a string allows easy overwrite with arguments.
	 * The only fixed elements necessary are appsrc and the volume element for audio streams.
	 * The rest could easily be provided in gstreamer pipeline notation from command line. */
	snprintf(launch, sizeof(launch), "%s %s %s name=outsink", sourceDesc,
	         isVideo ? videoDesc : audioDesc,
	         isVideo ? tsmf_platform_get_video_sink() : tsmf_platform_get_audio_sink());

	DEBUG_TSMF("pipeline=%s", launch);
	mdecoder->pipe = gst_parse_launch(launch, NULL);

	if (!mdecoder->pipe)
	{
		WLog_ERR(TAG, "Failed to create new pipe");
		return FALSE;
	}

	/* Resolve the elements we need to talk to by the names given above. */
	mdecoder->src = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "source");

	if (!mdecoder->src)
	{
		WLog_ERR(TAG, "Failed to get appsrc");
		return FALSE;
	}

	mdecoder->outsink = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "outsink");

	if (!mdecoder->outsink)
	{
		WLog_ERR(TAG, "Failed to get sink");
		return FALSE;
	}

	if (!isVideo)
	{
		/* The volume element only exists in the audio launch string. */
		mdecoder->volume = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audiovolume");

		if (!mdecoder->volume)
		{
			WLog_ERR(TAG, "Failed to get volume");
			return FALSE;
		}
	}

	tsmf_platform_register_handler(mdecoder);

	/* AppSrc settings */
	{
		GstAppSrcCallbacks callbacks = { tsmf_gstreamer_need_data, tsmf_gstreamer_enough_data,
			                             tsmf_gstreamer_seek_data };
		g_object_set(mdecoder->src, "format", GST_FORMAT_TIME, NULL);
		g_object_set(mdecoder->src, "is-live", TRUE, NULL);
		g_object_set(mdecoder->src, "block", TRUE, NULL);
		gst_app_src_set_caps((GstAppSrc*)mdecoder->src, mdecoder->gst_caps);
		gst_app_src_set_callbacks((GstAppSrc*)mdecoder->src, &callbacks, mdecoder, NULL);
		gst_app_src_set_stream_type((GstAppSrc*)mdecoder->src, GST_APP_STREAM_TYPE_SEEKABLE);
	}

	tsmf_window_create(mdecoder);
	tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_READY);
	tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
	mdecoder->pipeline_start_time_valid = 0;
	mdecoder->shutdown = 0;
	GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(mdecoder->pipe), GST_DEBUG_GRAPH_SHOW_ALL,
	                          get_type(mdecoder));
	return TRUE;
}
/**
 * Construct and start the GStreamer playback pipeline for one TSMF stream
 * (variant with an explicit queue2 between appsrc and decodebin, and
 * per-media element names).
 *
 * @param mdecoder  decoder instance to attach the pipeline to; may be NULL.
 *
 * @return TRUE on success, FALSE on failure. NOTE(review): element refs
 *         obtained via gst_bin_get_by_name() are not released on the error
 *         paths — presumably the caller's teardown handles the pipeline;
 *         verify against the free path.
 *
 * Bug fixed: the "not an autosink" guard used !g_strcmp0() for both
 * comparisons. g_strcmp0() returns 0 on equality, so the old condition
 * required the sink type name to equal BOTH "GstAutoVideoSink" and
 * "GstAutoAudioSink" — impossible — and the sink tuning properties were
 * never applied. The negations are removed so the branch runs exactly when
 * the sink is NOT an autosink, as the comment intends.
 */
BOOL tsmf_gstreamer_pipeline_build(TSMFGstreamerDecoder* mdecoder)
{
#if GST_VERSION_MAJOR > 0
	const char* video = "appsrc name=videosource ! queue2 name=videoqueue ! decodebin name=videodecoder !";
	const char* audio = "appsrc name=audiosource ! queue2 name=audioqueue ! decodebin name=audiodecoder ! audioconvert ! audiorate ! audioresample ! volume name=audiovolume !";
#else
	/* GStreamer 0.10 names the autoplugging decoder "decodebin2". */
	const char* video = "appsrc name=videosource ! queue2 name=videoqueue ! decodebin2 name=videodecoder !";
	const char* audio = "appsrc name=audiosource ! queue2 name=audioqueue ! decodebin2 name=audiodecoder ! audioconvert ! audiorate ! audioresample ! volume name=audiovolume !";
#endif
	char pipeline[1024];

	if (!mdecoder)
		return FALSE;

	/* TODO: Construction of the pipeline from a string allows easy overwrite with arguments.
	 * The only fixed elements necessary are appsrc and the volume element for audio streams.
	 * The rest could easily be provided in gstreamer pipeline notation from command line. */
	if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
		sprintf_s(pipeline, sizeof(pipeline), "%s %s name=videosink", video,
		          tsmf_platform_get_video_sink());
	else
		sprintf_s(pipeline, sizeof(pipeline), "%s %s name=audiosink", audio,
		          tsmf_platform_get_audio_sink());

	DEBUG_TSMF("pipeline=%s", pipeline);
	mdecoder->pipe = gst_parse_launch(pipeline, NULL);

	if (!mdecoder->pipe)
	{
		WLog_ERR(TAG, "Failed to create new pipe");
		return FALSE;
	}

	if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
		mdecoder->src = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "videosource");
	else
		mdecoder->src = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audiosource");

	if (!mdecoder->src)
	{
		WLog_ERR(TAG, "Failed to get appsrc");
		return FALSE;
	}

	if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
		mdecoder->queue = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "videoqueue");
	else
		mdecoder->queue = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audioqueue");

	if (!mdecoder->queue)
	{
		WLog_ERR(TAG, "Failed to get queue");
		return FALSE;
	}

	if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
		mdecoder->outsink = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "videosink");
	else
		mdecoder->outsink = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audiosink");

	if (!mdecoder->outsink)
	{
		WLog_ERR(TAG, "Failed to get sink");
		return FALSE;
	}

	/* Autosinks create their real sink lazily; configure it as it appears. */
	g_signal_connect(mdecoder->outsink, "child-added", G_CALLBACK(cb_child_added), mdecoder);

	if (mdecoder->media_type == TSMF_MAJOR_TYPE_AUDIO)
	{
		mdecoder->volume = gst_bin_get_by_name(GST_BIN(mdecoder->pipe), "audiovolume");

		if (!mdecoder->volume)
		{
			WLog_ERR(TAG, "Failed to get volume");
			return FALSE;
		}

		/* Re-apply the last known volume/mute state to the new pipeline. */
		tsmf_gstreamer_change_volume((ITSMFDecoder*)mdecoder,
		                             mdecoder->gstVolume * ((double)10000), mdecoder->gstMuted);
	}

	tsmf_platform_register_handler(mdecoder);
	/* AppSrc settings */
	GstAppSrcCallbacks callbacks = { tsmf_gstreamer_need_data, tsmf_gstreamer_enough_data,
		                             tsmf_gstreamer_seek_data };
	g_object_set(mdecoder->src, "format", GST_FORMAT_TIME, NULL);
	g_object_set(mdecoder->src, "is-live", FALSE, NULL);
	g_object_set(mdecoder->src, "block", FALSE, NULL);
	g_object_set(mdecoder->src, "blocksize", 1024, NULL);
	gst_app_src_set_caps((GstAppSrc*)mdecoder->src, mdecoder->gst_caps);
	gst_app_src_set_callbacks((GstAppSrc*)mdecoder->src, &callbacks, mdecoder, NULL);
	gst_app_src_set_stream_type((GstAppSrc*)mdecoder->src, GST_APP_STREAM_TYPE_SEEKABLE);
	gst_app_src_set_latency((GstAppSrc*)mdecoder->src, 0, -1);
	gst_app_src_set_max_bytes((GstAppSrc*)mdecoder->src, (guint64)0); /* unlimited */
	/* Disable all queue limits/estimation; flow control is done via appsrc. */
	g_object_set(G_OBJECT(mdecoder->queue), "use-buffering", FALSE, NULL);
	g_object_set(G_OBJECT(mdecoder->queue), "use-rate-estimate", FALSE, NULL);
	g_object_set(G_OBJECT(mdecoder->queue), "max-size-buffers", 0, NULL);
	g_object_set(G_OBJECT(mdecoder->queue), "max-size-bytes", 0, NULL);
	g_object_set(G_OBJECT(mdecoder->queue), "max-size-time", (guint64)0, NULL);

	/* Only set these properties if not an autosink, otherwise we will set properties when real
	 * sinks are added. g_strcmp0() returns non-zero when the strings differ, so this branch
	 * runs only when the sink type name matches neither autosink type. */
	if (g_strcmp0(G_OBJECT_TYPE_NAME(mdecoder->outsink), "GstAutoVideoSink") != 0 &&
	    g_strcmp0(G_OBJECT_TYPE_NAME(mdecoder->outsink), "GstAutoAudioSink") != 0)
	{
		if (mdecoder->media_type == TSMF_MAJOR_TYPE_VIDEO)
		{
			gst_base_sink_set_max_lateness((GstBaseSink*)mdecoder->outsink,
			                               10000000); /* nanoseconds */
		}
		else
		{
			gst_base_sink_set_max_lateness((GstBaseSink*)mdecoder->outsink,
			                               10000000); /* nanoseconds */
			g_object_set(G_OBJECT(mdecoder->outsink), "buffer-time", (gint64)20000,
			             NULL); /* microseconds */
			g_object_set(G_OBJECT(mdecoder->outsink), "drift-tolerance", (gint64)20000,
			             NULL); /* microseconds */
			g_object_set(G_OBJECT(mdecoder->outsink), "latency-time", (gint64)10000,
			             NULL); /* microseconds */
			g_object_set(G_OBJECT(mdecoder->outsink), "slave-method", 1, NULL);
		}

		g_object_set(G_OBJECT(mdecoder->outsink), "sync", TRUE, NULL); /* synchronize on the clock */
		g_object_set(G_OBJECT(mdecoder->outsink), "async", TRUE, NULL); /* allow async state changes */
	}

	tsmf_window_create(mdecoder);
	tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_READY);
	tsmf_gstreamer_pipeline_set_state(mdecoder, GST_STATE_PLAYING);
	mdecoder->pipeline_start_time_valid = 0;
	mdecoder->shutdown = 0;
	mdecoder->paused = FALSE;
	GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(mdecoder->pipe), GST_DEBUG_GRAPH_SHOW_ALL,
	                          get_type(mdecoder));
	return TRUE;
}