// Called when decodebin exposes a new source pad: attach the chain that
// splits the planar stream into one pad per channel. The sub pipeline is
// ... decodebin2 ! audioconvert ! audioresample ! capsfilter ! deinterleave.
void AudioFileReader::plugDeinterleave(GstPad* pad)
{
    GstElement* converter = gst_element_factory_make("audioconvert", nullptr);
    GstElement* resampler = gst_element_factory_make("audioresample", nullptr);
    GstElement* filter = gst_element_factory_make("capsfilter", nullptr);
    m_deInterleave = gst_element_factory_make("deinterleave", "deinterleave");

    // keep-positions preserves the original channel ordering on the
    // per-channel output pads.
    g_object_set(m_deInterleave.get(), "keep-positions", TRUE, NULL);

    // Hook up before data can flow so no pad-added notification is missed.
    g_signal_connect(m_deInterleave.get(), "pad-added",
        G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this);
    g_signal_connect(m_deInterleave.get(), "no-more-pads",
        G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this);

    // Force stereo at the reader's sample rate ahead of the deinterleaver.
    // g_object_set takes its own reference on the boxed caps, so drop ours.
    GstCaps* caps = getGstAudioCaps(2, m_sampleRate);
    g_object_set(filter, "caps", caps, NULL);
    gst_caps_unref(caps);

    gst_bin_add_many(GST_BIN(m_pipeline), converter, resampler, filter, m_deInterleave.get(), NULL);

    // Link the freshly exposed decodebin pad into the chain. The static pad
    // is returned with a reference we must release.
    GstPad* converterSinkPad = gst_element_get_static_pad(converter, "sink");
    gst_pad_link_full(pad, converterSinkPad, GST_PAD_LINK_CHECK_NOTHING);
    gst_object_unref(GST_OBJECT(converterSinkPad));

    gst_element_link_pads_full(converter, "src", resampler, "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_link_pads_full(resampler, "src", filter, "sink", GST_PAD_LINK_CHECK_NOTHING);
    gst_element_link_pads_full(filter, "src", m_deInterleave.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);

    // Bring every new element up to the pipeline's current state.
    gst_element_sync_state_with_parent(converter);
    gst_element_sync_state_with_parent(resampler);
    gst_element_sync_state_with_parent(filter);
    gst_element_sync_state_with_parent(m_deInterleave.get());
}
//#define AUDIO_FAKE_INPUT void AudioLiveInputPipeline::buildInputPipeline() { // Sub pipeline looks like: // ... autoaudiosrc ! audioconvert ! capsfilter ! deinterleave. m_pipeline = gst_pipeline_new("live-input"); #ifndef AUDIO_FAKE_INPUT // FIXME: Use autoaudiosrc instead of pulsesrc and set properties using // gstproxychild, as is being done in AudioDestinatio::configureSinkDevice. GstElement *source = gst_element_factory_make("pulsesrc", "liveinputsrc"); g_object_set(source, "blocksize", (gint64)1024, nullptr); g_object_set(source, "buffer-time", (gint64) 1451, nullptr); g_object_set(source, "latency-time", (gint64) 1451, nullptr); #else GstElement *source = gst_element_factory_make("audiotestsrc", "fakeinput"); g_object_set(source, "is-live", TRUE, nullptr); g_object_set(source, "blocksize", 2048, nullptr); g_object_set(source, "buffer-time", (gint64) 1451, nullptr); g_object_set(source, "latency-time", (guint64) 1451, nullptr); #endif m_source = source; GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr); GstElement* capsFilter = gst_element_factory_make("capsfilter", nullptr); m_deInterleave = gst_element_factory_make("deinterleave", "deinterleave"); g_object_set(m_deInterleave, "keep-positions", TRUE, nullptr); g_signal_connect(m_deInterleave, "pad-added", G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this); g_signal_connect(m_deInterleave, "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this); GstCaps* caps = getGstAudioCaps(2, m_sampleRate); g_object_set(capsFilter, "caps", caps, nullptr); gst_bin_add_many(GST_BIN(m_pipeline), source, audioConvert, capsFilter, m_deInterleave, nullptr); gst_element_link_pads_full(source, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING); gst_element_link_pads_full(audioConvert, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING); gst_element_link_pads_full(capsFilter, "src", m_deInterleave, "sink", GST_PAD_LINK_CHECK_NOTHING); GstPad* pad = 
gst_element_get_static_pad(m_deInterleave, "sink"); gst_pad_set_caps(pad, caps); m_ready = true; gst_element_sync_state_with_parent(source); gst_element_sync_state_with_parent(audioConvert); gst_element_sync_state_with_parent(capsFilter); gst_element_sync_state_with_parent(m_deInterleave); }