GstElement *QGstreamerCaptureSession::buildEncodeBin()
{
    // Builds the encoding sub-bin:
    //
    //   [audioconvert - queue - volume - encoder]--\
    //                                               muxer --- filesink
    //   [queue - colorspace - videoscale - encoder]/
    //
    // Audio/video chains are created according to m_captureMode, and each
    // chain's head is exposed as an "audiosink"/"videosink" ghost pad.
    // Returns 0 on failure; elements already added to the bin are released
    // together with it.
    GstElement *encodeBin = gst_bin_new("encode-bin");

    GstElement *muxer = gst_element_factory_make(
            m_mediaContainerControl->formatElementName().constData(), "muxer");
    if (!muxer) {
        qWarning() << "Could not create a media muxer element:"
                   << m_mediaContainerControl->formatElementName();
        gst_object_unref(encodeBin);
        return 0;
    }

    GstElement *fileSink = gst_element_factory_make("filesink", "filesink");
    if (!fileSink) {
        // Fix: the original passed a possibly-NULL fileSink straight to
        // g_object_set()/gst_bin_add_many(), which emits GLib criticals and
        // builds a broken bin when the filesink plugin is unavailable.
        qWarning() << "Could not create a filesink element";
        gst_object_unref(muxer); // muxer is not yet owned by the bin
        gst_object_unref(encodeBin);
        return 0;
    }
    g_object_set(G_OBJECT(fileSink), "location",
                 m_sink.toString().toLocal8Bit().constData(), NULL);

    gst_bin_add_many(GST_BIN(encodeBin), muxer, fileSink, NULL);
    if (!gst_element_link(muxer, fileSink)) {
        gst_object_unref(encodeBin);
        return 0;
    }

    if (m_captureMode & Audio) {
        GstElement *audioConvert = gst_element_factory_make("audioconvert", "audioconvert");
        GstElement *audioQueue = gst_element_factory_make("queue", "audio-encode-queue");
        m_audioVolume = gst_element_factory_make("volume", "volume");
        gst_bin_add_many(GST_BIN(encodeBin), audioConvert, audioQueue, m_audioVolume, NULL);

        GstElement *audioEncoder = m_audioEncodeControl->createEncoder();
        if (!audioEncoder) {
            gst_object_unref(encodeBin);
            qWarning() << "Could not create an audio encoder element:"
                       << m_audioEncodeControl->audioSettings().codec();
            return 0;
        }
        gst_bin_add(GST_BIN(encodeBin), audioEncoder);

        if (!gst_element_link_many(audioConvert, audioQueue, m_audioVolume,
                                   audioEncoder, muxer, NULL)) {
            gst_object_unref(encodeBin);
            return 0;
        }

        // Start muted when requested; m_audioVolume is kept as a member so
        // the volume can be toggled later.
        g_object_set(G_OBJECT(m_audioVolume), "volume", (m_muted ? 0.0 : 1.0), NULL);

        // Expose the head of the audio chain on the bin.
        GstPad *pad = gst_element_get_static_pad(audioConvert, "sink");
        gst_element_add_pad(GST_ELEMENT(encodeBin), gst_ghost_pad_new("audiosink", pad));
        gst_object_unref(GST_OBJECT(pad));
    }

    if (m_captureMode & Video) {
        GstElement *videoQueue = gst_element_factory_make("queue", "video-encode-queue");
        GstElement *colorspace = gst_element_factory_make("ffmpegcolorspace", "ffmpegcolorspace-encoder");
        GstElement *videoscale = gst_element_factory_make("videoscale", "videoscale-encoder");
        gst_bin_add_many(GST_BIN(encodeBin), videoQueue, colorspace, videoscale, NULL);

        GstElement *videoEncoder = m_videoEncodeControl->createEncoder();
        if (!videoEncoder) {
            gst_object_unref(encodeBin);
            qWarning() << "Could not create a video encoder element:"
                       << m_videoEncodeControl->videoSettings().codec();
            return 0;
        }
        gst_bin_add(GST_BIN(encodeBin), videoEncoder);

        if (!gst_element_link_many(videoQueue, colorspace, videoscale,
                                   videoEncoder, muxer, NULL)) {
            gst_object_unref(encodeBin);
            return 0;
        }

        // Expose the head of the video chain on the bin.
        GstPad *pad = gst_element_get_static_pad(videoQueue, "sink");
        gst_element_add_pad(GST_ELEMENT(encodeBin), gst_ghost_pad_new("videosink", pad));
        gst_object_unref(GST_OBJECT(pad));
    }

    return encodeBin;
}
/*
 * create_post_tee_bin
 *
 * The following chain is created after the tee for each output from the
 * source:
 *
 *    +-------+   +---------------------+   +-------+
 * ---+ queue +---+ conversion elements +---+ queue +---
 *    +-------+   +---------------------+   +-------+
 *
 * The conversion elements depend on the media type: audioconvert +
 * audioresample for audio, videorate + videoscale + videoconvert for video;
 * in both cases a capsfilter sits before the trailing queue. Ghost "sink"
 * and "src" pads are added to the new bin, and the caller-supplied
 * ghostpad on source_bin is retargeted at the bin's src pad.
 *
 * Returns the new bin (already added to source_bin), or NULL on failure.
 */
static GstElement *create_post_tee_bin(OwrMediaSource *media_source, GstElement *source_bin,
    GstCaps *caps, GstPad *ghostpad, guint source_id)
{
    OwrMediaType media_type;
    GstElement *post_tee_bin, *queue_pre, *queue_post, *capsfilter;
    GstPad *bin_pad, *queue_pre_pad, *srcpad;
    GSList *list = NULL; /* elements whose state must be synced to the parent */
    gchar *bin_name;

    bin_name = g_strdup_printf("source-post-tee-bin-%u", source_id);
    post_tee_bin = gst_bin_new(bin_name);
    if (!gst_bin_add(GST_BIN(source_bin), post_tee_bin)) {
        GST_ERROR("Failed to add %s to source bin", bin_name);
        g_free(bin_name);
        g_object_unref(post_tee_bin);
        post_tee_bin = NULL;
        goto done;
    }
    g_free(bin_name);
    gst_element_sync_state_with_parent(post_tee_bin);

    /* NOTE(review): CREATE_ELEMENT_WITH_ID and LINK_ELEMENTS are project
     * macros not visible here; presumably they bail out on failure -- confirm
     * their error path before relying on the cleanup below. */
    CREATE_ELEMENT_WITH_ID(queue_pre, "queue", "source-post-tee-queue", source_id);
    CREATE_ELEMENT_WITH_ID(capsfilter, "capsfilter", "source-output-capsfilter", source_id);
    list = g_slist_append(list, capsfilter);
    CREATE_ELEMENT_WITH_ID(queue_post, "queue", "source-output-queue", source_id);
    list = g_slist_append(list, queue_post);

    g_object_get(media_source, "media-type", &media_type, NULL);
    switch (media_type) {
    case OWR_MEDIA_TYPE_AUDIO: {
        GstElement *audioresample, *audioconvert;

        g_object_set(capsfilter, "caps", caps, NULL);

        CREATE_ELEMENT_WITH_ID(audioresample, "audioresample", "source-audio-resample", source_id);
        list = g_slist_prepend(list, audioresample);
        CREATE_ELEMENT_WITH_ID(audioconvert, "audioconvert", "source-audio-convert", source_id);
        list = g_slist_prepend(list, audioconvert);
        list = g_slist_prepend(list, queue_pre);
        gst_bin_add_many(GST_BIN(post_tee_bin),
            queue_pre, audioconvert, audioresample, capsfilter, queue_post, NULL);
        /* Linked tail-to-head:
         * queue_pre -> audioconvert -> audioresample -> capsfilter -> queue_post */
        LINK_ELEMENTS(capsfilter, queue_post);
        LINK_ELEMENTS(audioresample, capsfilter);
        LINK_ELEMENTS(audioconvert, audioresample);
        LINK_ELEMENTS(queue_pre, audioconvert);
        break;
    }
    case OWR_MEDIA_TYPE_VIDEO: {
        GstElement *videorate, *videoscale, *videoconvert;
        GstCaps *source_caps;
        GstStructure *source_structure;
        gint fps_n = 0, fps_d = 1;

        /* The framerate is enforced by videorate's max-rate rather than the
         * capsfilter, so strip it from the filter caps. */
        source_caps = gst_caps_copy(caps);
        source_structure = gst_caps_get_structure(source_caps, 0);
        if (gst_structure_get_fraction(source_structure, "framerate", &fps_n, &fps_d))
            gst_structure_remove_field(source_structure, "framerate");
        g_object_set(capsfilter, "caps", source_caps, NULL);
        gst_caps_unref(source_caps);

        CREATE_ELEMENT_WITH_ID(videoconvert, VIDEO_CONVERT, "source-video-convert", source_id);
        list = g_slist_prepend(list, videoconvert);
        CREATE_ELEMENT_WITH_ID(videoscale, "videoscale", "source-video-scale", source_id);
        list = g_slist_prepend(list, videoscale);
        CREATE_ELEMENT_WITH_ID(videorate, "videorate", "source-video-rate", source_id);
        /* fps_d is initialized to 1, so the division is safe even when the
         * caps carry no framerate (fps_n stays 0 => max-rate 0). */
        g_object_set(videorate, "drop-only", TRUE, "max-rate", fps_n / fps_d, NULL);
        list = g_slist_prepend(list, videorate);
        list = g_slist_prepend(list, queue_pre);
        gst_bin_add_many(GST_BIN(post_tee_bin),
            queue_pre, videorate, videoscale, videoconvert, capsfilter, queue_post, NULL);
        /* Linked tail-to-head:
         * queue_pre -> videorate -> videoscale -> videoconvert -> capsfilter -> queue_post */
        LINK_ELEMENTS(capsfilter, queue_post);
        LINK_ELEMENTS(videoconvert, capsfilter);
        LINK_ELEMENTS(videoscale, videoconvert);
        LINK_ELEMENTS(videorate, videoscale);
        LINK_ELEMENTS(queue_pre, videorate);
        break;
    }
    case OWR_MEDIA_TYPE_UNKNOWN:
    default:
        g_assert_not_reached();
        goto done;
    }

    /* Ghost the tail (queue_post src) out of the bin and retarget the
     * caller's ghostpad on source_bin at it. */
    srcpad = gst_element_get_static_pad(queue_post, "src");
    g_assert(srcpad);
    bin_pad = gst_ghost_pad_new("src", srcpad);
    gst_pad_set_active(bin_pad, TRUE);
    gst_element_add_pad(post_tee_bin, bin_pad);
    gst_object_unref(srcpad);
    gst_ghost_pad_set_target(GST_GHOST_PAD(ghostpad), bin_pad);
    gst_pad_set_active(ghostpad, TRUE);
    gst_element_add_pad(source_bin, ghostpad);

    /* Bring every collected element up to the parent's state. */
    g_slist_foreach(list, sync_to_parent, NULL);

    /* Ghost the head (queue_pre sink) so the tee can link into the bin. */
    queue_pre_pad = gst_element_get_static_pad(queue_pre, "sink");
    g_assert(queue_pre_pad);
    bin_pad = gst_ghost_pad_new("sink", queue_pre_pad);
    gst_pad_set_active(bin_pad, TRUE);
    gst_element_add_pad(post_tee_bin, bin_pad);
    gst_object_unref(queue_pre_pad);

done:
    g_slist_free(list);
    list = NULL;
    return post_tee_bin;
}
/* Builds the transition bin:
 *
 *   iconva (ffmpegcolorspace) --\
 *                                videomixer(2) -- oconv -- src
 *   iconvb (ffmpegcolorspace) --/
 *
 * For SMPTE wipes the controlled property is the smpte element's "position";
 * for crossfades it is the second mixer pad's "alpha". A GstController with
 * linear interpolation is attached to that property so the transition can be
 * animated over time. Returns the top-level bin. */
static GstElement *
ges_track_video_transition_create_element (GESTrackObject * object)
{
  GstElement *topbin, *iconva, *iconvb, *oconv;
  GObject *target = NULL;
  const gchar *propname = NULL;
  GstElement *mixer = NULL;
  GstPad *sinka_target, *sinkb_target, *src_target, *sinka, *sinkb, *src;
  GstController *controller;
  GstInterpolationControlSource *control_source;
  GESTrackVideoTransition *self;
  GESTrackVideoTransitionPrivate *priv;

  self = GES_TRACK_VIDEO_TRANSITION (object);
  priv = self->priv;

  GST_LOG ("creating a video bin");

  topbin = gst_bin_new ("transition-bin");
  iconva = gst_element_factory_make ("ffmpegcolorspace", "tr-csp-a");
  iconvb = gst_element_factory_make ("ffmpegcolorspace", "tr-csp-b");
  oconv = gst_element_factory_make ("ffmpegcolorspace", "tr-csp-output");

  gst_bin_add_many (GST_BIN (topbin), iconva, iconvb, oconv, NULL);

  /* Prefer videomixer2 to videomixer */
  mixer = gst_element_factory_make ("videomixer2", NULL);
  if (mixer == NULL)
    mixer = gst_element_factory_make ("videomixer", NULL);
  /* NOTE(review): mixer is not checked for NULL here; if neither mixer
   * element is available the g_object_set below will emit a critical. */
  g_object_set (G_OBJECT (mixer), "background", 1, NULL);
  gst_bin_add (GST_BIN (topbin), mixer);

  if (priv->type != GES_VIDEO_STANDARD_TRANSITION_TYPE_CROSSFADE) {
    /* SMPTE wipe: animate the smpte element's "position" from 1.0 to 0.0. */
    priv->sinka =
        (GstPad *) link_element_to_mixer_with_smpte (GST_BIN (topbin), iconva,
        mixer, priv->type, NULL);
    priv->sinkb =
        (GstPad *) link_element_to_mixer_with_smpte (GST_BIN (topbin), iconvb,
        mixer, priv->type, &priv->smpte);
    target = (GObject *) priv->smpte;
    propname = "position";
    priv->start_value = 1.0;
    priv->end_value = 0.0;
  } else {
    /* Crossfade: animate the second mixer pad's "alpha" from 0.0 to 1.0. */
    priv->sinka = (GstPad *) link_element_to_mixer (iconva, mixer);
    priv->sinkb = (GstPad *) link_element_to_mixer (iconvb, mixer);
    target = (GObject *) priv->sinkb;
    propname = "alpha";
    priv->start_value = 0.0;
    priv->end_value = 1.0;
  }

  priv->mixer = gst_object_ref (mixer);

  fast_element_link (mixer, oconv);

  /* Ghost the two inputs and the output out of the bin. */
  sinka_target = gst_element_get_static_pad (iconva, "sink");
  sinkb_target = gst_element_get_static_pad (iconvb, "sink");
  src_target = gst_element_get_static_pad (oconv, "src");

  sinka = gst_ghost_pad_new ("sinka", sinka_target);
  sinkb = gst_ghost_pad_new ("sinkb", sinkb_target);
  src = gst_ghost_pad_new ("src", src_target);

  gst_element_add_pad (topbin, src);
  gst_element_add_pad (topbin, sinka);
  gst_element_add_pad (topbin, sinkb);

  gst_object_unref (sinka_target);
  gst_object_unref (sinkb_target);
  gst_object_unref (src_target);

  /* set up interpolation (GStreamer 0.10 controller API) */
  g_object_set (target, propname, (gfloat) 0.0, NULL);

  controller = gst_object_control_properties (target, propname, NULL);
  control_source = gst_interpolation_control_source_new ();
  gst_controller_set_control_source (controller,
      propname, GST_CONTROL_SOURCE (control_source));
  gst_interpolation_control_source_set_interpolation_mode (control_source,
      GST_INTERPOLATE_LINEAR);

  priv->controller = controller;
  priv->control_source = control_source;

  return topbin;
}
// Constructs the camerabin-based capture pipeline for this (N9/Harmattan
// style) camera stack: subdevsrc video source, dsph264enc/mp4mux video
// recording chain, pulsesrc + nokiaaacenc audio chain wrapped in a bin with
// a fixed 48 kHz stereo capsfilter, and an omapxvsink viewfinder.
Pipeline::Pipeline(QObject *parent)
    : QObject(parent),
      camerabin(0),
      videoSrc(0),
      viewfinder(0),
      effectBin(0),
      effectValve(0),
      effect(0),
      effectPreCS(0),
      effectPostCS(0),
      effectCapsFilter(0),
      windowId(0)
{
    // camerabin
    camerabin = gst_element_factory_make("camerabin", NULL);
    g_object_set(camerabin, "framerate-rounding", TRUE, NULL);

    // video source
    videoSrc = gst_element_factory_make("subdevsrc", NULL);
    g_object_set(videoSrc, "queue-size", 5, NULL);
    g_object_set(G_OBJECT(camerabin), "video-source", videoSrc, NULL);

    // video encoder
    GstElement *videoEnc = gst_element_factory_make("dsph264enc", NULL);
    g_object_set(camerabin, "video-encoder", videoEnc, NULL);

    // videomux
    GstElement *videoMux = gst_element_factory_make("mp4mux", NULL);
    g_object_set(camerabin, "video-muxer", videoMux, NULL);

    // audio source
    GstElement *audioSrc = gst_element_factory_make("pulsesrc", NULL);
    g_object_set(camerabin, "audio-source", audioSrc, NULL);

    // audio encoder
    // we need to set a capsfilter in order to select the audio config
    GstElement *audioEncBin = gst_bin_new(NULL);
    GstElement *audioEnc = gst_element_factory_make("nokiaaacenc", NULL);
    GstElement *audioCapsFilter = gst_element_factory_make("capsfilter", NULL);
    GstCaps *audioCaps = gst_caps_from_string("audio/x-raw-int, channels=(int)2, width=(int)16, depth=(int)16, rate=(int)48000");
    g_object_set(audioCapsFilter, "caps", audioCaps, NULL);
    // Fix: the capsfilter takes its own reference to the caps; the original
    // never dropped ours, leaking the GstCaps.
    gst_caps_unref(audioCaps);
    gst_bin_add_many(GST_BIN(audioEncBin), audioCapsFilter, audioEnc, NULL);
    gst_element_link(audioCapsFilter, audioEnc);

    // Ghost the capsfilter sink and encoder src so the bin can stand in for
    // a plain encoder element.
    GstPad *pad;
    pad = gst_element_get_static_pad(audioCapsFilter, "sink");
    gst_element_add_pad(audioEncBin, gst_ghost_pad_new("sink", pad));
    gst_object_unref(pad);
    pad = gst_element_get_static_pad(audioEnc, "src");
    gst_element_add_pad(audioEncBin, gst_ghost_pad_new("src", pad));
    gst_object_unref(pad);

    g_object_set(camerabin, "audio-encoder", audioEncBin, NULL);

    // viewfinder
    viewfinder = gst_element_factory_make("omapxvsink", NULL);
    g_object_set(viewfinder, "autopaint-colorkey", FALSE, NULL);
    g_object_set(G_OBJECT(camerabin), "viewfinder-sink", viewfinder, NULL);

    // error handler
    GstBus *bus = gst_pipeline_get_bus(GST_PIPELINE(camerabin));
    gst_bus_add_watch(bus, gStreamerMessageWatcher, this);
    gst_bus_set_sync_handler(bus, (GstBusSyncHandler) busSyncHandler, this);
    gst_object_unref(bus);

    // idle handler
    g_signal_connect(G_OBJECT(camerabin), "notify::idle",
                     G_CALLBACK(idleNotificationCallback), this);

    // set initial values
    setVideoMode();
    setupFileStorage();
    setupEffectBins();
    setResolution(VIDEO_RESOLUTION_DEFAULT);
}
// This function creates and initializes some internal variables, and returns a
// pointer to the element that should receive the data flow first.
//
// The sink is the WebKit video sink, optionally wrapped in fpsdisplaysink
// (when available and new enough) and, for native fullscreen builds, placed
// in a bin behind a tee so the stream can be distributed to several sinks.
GstElement* MediaPlayerPrivateGStreamerBase::createVideoSink(GstElement* pipeline)
{
    if (!initializeGStreamer())
        return 0;

#if USE(NATIVE_FULLSCREEN_VIDEO)
    m_gstGWorld = GStreamerGWorld::createGWorld(pipeline);
    m_webkitVideoSink = webkitVideoSinkNew(m_gstGWorld.get());
#else
    UNUSED_PARAM(pipeline);
    m_webkitVideoSink = webkitVideoSinkNew();
#endif

    m_repaintHandler = g_signal_connect(m_webkitVideoSink.get(), "repaint-requested",
        G_CALLBACK(mediaPlayerPrivateRepaintCallback), this);

#if USE(NATIVE_FULLSCREEN_VIDEO)
    // Build a new video sink consisting of a bin containing a tee
    // (meant to distribute data to multiple video sinks) and our
    // internal video sink. For fullscreen we create an autovideosink
    // and initially block the data flow towards it and configure it
    m_videoSinkBin = gst_bin_new("video-sink");
    GstElement* videoTee = gst_element_factory_make("tee", "videoTee");
    GstElement* queue = gst_element_factory_make("queue", 0);

#ifdef GST_API_VERSION_1
    // Let the tee's sink pad proxy allocation queries to downstream.
    GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(videoTee, "sink"));
    GST_OBJECT_FLAG_SET(GST_OBJECT(sinkPad.get()), GST_PAD_FLAG_PROXY_ALLOCATION);
#endif

    gst_bin_add_many(GST_BIN(m_videoSinkBin.get()), videoTee, queue, NULL);

    // Link a new src pad from tee to queue1.
    gst_element_link_pads_full(videoTee, 0, queue, "sink", GST_PAD_LINK_CHECK_NOTHING);
#endif

    GstElement* actualVideoSink = 0;
    m_fpsSink = gst_element_factory_make("fpsdisplaysink", "sink");
    if (m_fpsSink) {
        // The verbose property has been added in -bad 0.10.22. Making
        // this whole code depend on it because we don't want
        // fpsdiplaysink to spit data on stdout.
        GstElementFactory* factory = GST_ELEMENT_FACTORY(GST_ELEMENT_GET_CLASS(m_fpsSink)->elementfactory);
        if (gst_plugin_feature_check_version(GST_PLUGIN_FEATURE(factory), 0, 10, 22)) {
            g_object_set(m_fpsSink, "silent", TRUE , NULL);

            // Turn off text overlay unless logging is enabled.
#if LOG_DISABLED
            g_object_set(m_fpsSink, "text-overlay", FALSE , NULL);
#else
            WTFLogChannel* channel = logChannelByName("Media");
            if (channel->state != WTFLogChannelOn)
                g_object_set(m_fpsSink, "text-overlay", FALSE , NULL);
#endif // LOG_DISABLED

            // Only usable if fpsdisplaysink can wrap our real sink.
            if (g_object_class_find_property(G_OBJECT_GET_CLASS(m_fpsSink), "video-sink")) {
                g_object_set(m_fpsSink, "video-sink", m_webkitVideoSink.get(), NULL);
#if USE(NATIVE_FULLSCREEN_VIDEO)
                gst_bin_add(GST_BIN(m_videoSinkBin.get()), m_fpsSink);
#endif
                actualVideoSink = m_fpsSink;
            } else
                m_fpsSink = 0;
        } else
            m_fpsSink = 0;
    }

    // Fall back to the bare WebKit sink when fpsdisplaysink is unusable.
    if (!m_fpsSink) {
#if USE(NATIVE_FULLSCREEN_VIDEO)
        gst_bin_add(GST_BIN(m_videoSinkBin.get()), m_webkitVideoSink.get());
#endif
        actualVideoSink = m_webkitVideoSink.get();
    }

    ASSERT(actualVideoSink);

#if USE(NATIVE_FULLSCREEN_VIDEO)
    // Faster elements linking.
    gst_element_link_pads_full(queue, "src", actualVideoSink, "sink", GST_PAD_LINK_CHECK_NOTHING);

    // Add a ghostpad to the bin so it can proxy to tee.
    GRefPtr<GstPad> pad = adoptGRef(gst_element_get_static_pad(videoTee, "sink"));
    gst_element_add_pad(m_videoSinkBin.get(), gst_ghost_pad_new("sink", pad.get()));

    // Set the bin as video sink of playbin.
    return m_videoSinkBin.get();
#else
    return actualVideoSink;
#endif
}
/* Instance init: assembles the video source chain inside the bin --
 * v4l2src -> [videorate] -> videoconvert -> videoscale -> capsfilter ->
 * [postproc_tmpnoise] -> ghost "src" pad. Bracketed elements are optional
 * and skipped when their plugin (or required property) is unavailable. */
static void
empathy_video_src_init (EmpathyGstVideoSrc *obj)
{
  EmpathyGstVideoSrcPrivate *priv = EMPATHY_GST_VIDEO_SRC_GET_PRIVATE (obj);
  GstElement *element, *element_back;
  GstPad *ghost, *src;
  GstCaps *caps;
  gchar *str;

  /* allocate caps here, so we can update it by optional elements */
  caps = gst_caps_new_simple ("video/x-raw",
    "width", G_TYPE_INT, 320,
    "height", G_TYPE_INT, 240,
    NULL);

  /* allocate any data required by the object here */
  if ((element = empathy_gst_add_to_bin (GST_BIN (obj),
      NULL, "v4l2src")) == NULL)
    g_error ("Couldn't add \"v4l2src\" (gst-plugins-good missing?)");

  /* we need to save our source to priv->src */
  priv->src = element;

  /* Drop EOS events, so that our sinks don't get confused when we restart the
   * source (triggering an EOS) */
  src = gst_element_get_static_pad (element, "src");
  gst_pad_add_probe (src, GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM,
    empathy_video_src_drop_eos, NULL, NULL);
  gst_object_unref (src);

  /* videorate with the required properties optional as it needs a currently
   * unreleased gst-plugins-base 0.10.36 */
  element_back = element;
  element = empathy_gst_add_to_bin (GST_BIN (obj), element, "videorate");

  if (element != NULL && g_object_class_find_property (
      G_OBJECT_GET_CLASS (element), "max-rate") != NULL)
    {
      priv->videorate = element;
      g_object_set (G_OBJECT (element),
        "drop-only", TRUE,
        "average-period", GST_SECOND/2,
        NULL);
    }
  else
    {
      /* Fix: the message previously concatenated "not" "doing" into
       * "notdoing" -- missing space at the literal boundary. */
      g_message ("videorate missing or doesn't have max-rate property, not "
        "doing dynamic framerate changes (Needs gst-plugins-base >= 0.10.36)");
      /* Fix: only remove the element when it was actually created; the
       * original called gst_bin_remove() with a NULL element when the
       * videorate plugin was missing entirely, causing a GLib critical. */
      if (element != NULL)
        {
          /* Refcount owned by the bin */
          gst_bin_remove (GST_BIN (obj), element);
        }
      element = element_back;
    }

  gst_caps_set_simple (caps,
    "framerate", GST_TYPE_FRACTION_RANGE, 1, 1, 30, 1,
    NULL);

  str = gst_caps_to_string (caps);
  DEBUG ("Current video src caps are : %s", str);
  g_free (str);

  if ((element = empathy_gst_add_to_bin (GST_BIN (obj),
      element, "videoconvert")) == NULL)
    g_error ("Failed to add \"videoconvert\" (gst-plugins-base missing?)");

  if ((element = empathy_gst_add_to_bin (GST_BIN (obj),
      element, "videoscale")) == NULL)
    g_error ("Failed to add \"videoscale\", (gst-plugins-base missing?)");

  if ((element = empathy_gst_add_to_bin (GST_BIN (obj),
      element, "capsfilter")) == NULL)
    g_error (
      "Failed to add \"capsfilter\" (gstreamer core elements missing?)");

  priv->capsfilter = element;
  g_object_set (G_OBJECT (element), "caps", caps, NULL);

  /* optionally add postproc_tmpnoise to improve the performance of encoders */
  element_back = element;
  if ((element = empathy_gst_add_to_bin (GST_BIN (obj),
      element, "postproc_tmpnoise")) == NULL)
    {
      g_message ("Failed to add \"postproc_tmpnoise\" (gst-ffmpeg missing?)");
      element = element_back;
    }

  /* Expose the end of the chain as the bin's "src" ghost pad. */
  src = gst_element_get_static_pad (element, "src");
  g_assert (src != NULL);
  ghost = gst_ghost_pad_new ("src", src);
  if (ghost == NULL)
    g_error ("Unable to create ghost pad for the videosrc");

  if (!gst_element_add_pad (GST_ELEMENT (obj), ghost))
    g_error ("pad with the same name already existed or "
      "the pad already had another parent.");

  gst_object_unref (G_OBJECT (src));
}
/* Extracts the closed-caption data carried in a GstVideoCaptionMeta into a
 * new buffer (copying the input buffer's timestamps) and pushes it on the
 * caption pad, creating that pad with the appropriate caps on first use.
 * Returns the flow value combined across the element's pads. */
static GstFlowReturn
gst_cc_extractor_handle_meta (GstCCExtractor * filter, GstBuffer * buf,
    GstVideoCaptionMeta * meta)
{
  GstBuffer *outbuf = NULL;
  GstEvent *event;
  gchar *captionid;
  GstFlowReturn flow;

  GST_DEBUG_OBJECT (filter, "Handling meta");

  /* Check if the meta type matches the configured one; a change after the
   * pad exists is not supported. */
  if (filter->captionpad != NULL && meta->caption_type != filter->caption_type) {
    GST_ERROR_OBJECT (filter,
        "GstVideoCaptionMeta type changed, Not handled currently");
    flow = GST_FLOW_NOT_NEGOTIATED;
    goto out;
  }

  if (filter->captionpad == NULL) {
    GstCaps *caption_caps = NULL;
    GstEvent *stream_event;

    GST_DEBUG_OBJECT (filter, "Creating new caption pad");

    /* Map the meta's caption type to the matching pad caps. */
    switch (meta->caption_type) {
      case GST_VIDEO_CAPTION_TYPE_CEA608_RAW:
        caption_caps =
            gst_caps_from_string ("closedcaption/x-cea-608,format=(string)raw");
        break;
      case GST_VIDEO_CAPTION_TYPE_CEA608_IN_CEA708_RAW:
        caption_caps =
            gst_caps_from_string
            ("closedcaption/x-cea-608,format=(string)cc_data");
        break;
      case GST_VIDEO_CAPTION_TYPE_CEA708_RAW:
        caption_caps =
            gst_caps_from_string
            ("closedcaption/x-cea-708,format=(string)cc_data");
        break;
      case GST_VIDEO_CAPTION_TYPE_CEA708_CDP:
        caption_caps =
            gst_caps_from_string ("closedcaption/x-cea-708,format=(string)cdp");
        break;
      default:
        break;
    }
    if (caption_caps == NULL) {
      GST_ERROR_OBJECT (filter, "Unknown/invalid caption type");
      /* NOTE(review): this early return bypasses the flow-combiner update at
       * 'out:' -- presumably intentional since the pad doesn't exist yet. */
      return GST_FLOW_NOT_NEGOTIATED;
    }

    /* Create the caption pad and set the caps */
    filter->captionpad =
        gst_pad_new_from_static_template (&captiontemplate, "caption");
    gst_pad_set_iterate_internal_links_function (filter->sinkpad,
        GST_DEBUG_FUNCPTR (gst_cc_extractor_iterate_internal_links));
    gst_pad_set_active (filter->captionpad, TRUE);
    gst_element_add_pad (GST_ELEMENT (filter), filter->captionpad);
    gst_flow_combiner_add_pad (filter->combiner, filter->captionpad);

    captionid =
        gst_pad_create_stream_id (filter->captionpad, (GstElement *) filter,
        "caption");
    stream_event = gst_event_new_stream_start (captionid);
    g_free (captionid);

    /* FIXME : Create a proper stream-id */

    /* Inherit the group id from the src pad's stream-start, if any. */
    if ((event =
            gst_pad_get_sticky_event (filter->srcpad, GST_EVENT_STREAM_START,
                0))) {
      guint group_id;

      if (gst_event_parse_group_id (event, &group_id))
        gst_event_set_group_id (stream_event, group_id);
      gst_event_unref (event);
    }
    gst_pad_push_event (filter->captionpad, stream_event);
    gst_pad_set_caps (filter->captionpad, caption_caps);
    gst_caps_unref (caption_caps);

    /* Carry over sticky events */
    if ((event = gst_pad_get_sticky_event (filter->srcpad, GST_EVENT_SEGMENT, 0)))
      gst_pad_push_event (filter->captionpad, event);
    if ((event = gst_pad_get_sticky_event (filter->srcpad, GST_EVENT_TAG, 0)))
      gst_pad_push_event (filter->captionpad, event);

    filter->caption_type = meta->caption_type;
  }

  GST_DEBUG_OBJECT (filter,
      "Creating new buffer of size %" G_GSIZE_FORMAT " bytes", meta->size);
  /* Extract caption data into new buffer with identical buffer timestamps */
  outbuf = gst_buffer_new_allocate (NULL, meta->size, NULL);
  gst_buffer_fill (outbuf, 0, meta->data, meta->size);
  GST_BUFFER_PTS (outbuf) = GST_BUFFER_PTS (buf);
  GST_BUFFER_DTS (outbuf) = GST_BUFFER_DTS (buf);
  GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (buf);

  /* We don't really care about the flow return */
  flow = gst_pad_push (filter->captionpad, outbuf);

out:
  /* Set flow return on pad and return combined value */
  return gst_flow_combiner_update_pad_flow (filter->combiner,
      filter->captionpad, flow);
}
// Creates the playback pipeline for the given URI. In the non-GL build this
// is a playbin/playbin2 whose video-sink is an appsink constrained to caps
// matching internalPixelFormat (optionally behind a queue in its own bin when
// threadAppSink is set). In the GL build the work is delegated to
// ofGstVideoUtils::setPipeline with a uridecodebin description.
bool ofGstVideoPlayer::createPipeline(string name){
#ifndef OF_USE_GST_GL
#if GST_VERSION_MAJOR==0
	// GStreamer 0.10: describe the pixel format via bpp/depth/channel masks.
	GstCaps *caps;
	int bpp;
	switch(internalPixelFormat){
	case OF_PIXELS_GRAY:
		bpp = 8;
		caps = gst_caps_new_simple("video/x-raw-gray",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 8,
			NULL);
		break;
	case OF_PIXELS_RGB:
		bpp = 24;
		caps = gst_caps_new_simple("video/x-raw-rgb",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 24,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0xff0000,
			"green_mask",G_TYPE_INT,0x00ff00,
			"blue_mask",G_TYPE_INT,0x0000ff,
			NULL);
		break;
	case OF_PIXELS_RGBA:
		bpp = 32;
		caps = gst_caps_new_simple("video/x-raw-rgb",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 32,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0xff000000,
			"green_mask",G_TYPE_INT,0x00ff0000,
			"blue_mask",G_TYPE_INT,0x0000ff00,
			"alpha_mask",G_TYPE_INT,0x000000ff,
			NULL);
		break;
	case OF_PIXELS_BGRA:
		bpp = 32;
		caps = gst_caps_new_simple("video/x-raw-rgb",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 32,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0x0000ff00,
			"green_mask",G_TYPE_INT,0x00ff0000,
			"blue_mask",G_TYPE_INT,0xff000000,
			"alpha_mask",G_TYPE_INT,0x000000ff,
			NULL);
		break;
	default:
		// Fallback: treat unknown formats as 32bpp RGB.
		bpp = 32;
		caps = gst_caps_new_simple("video/x-raw-rgb",
			"bpp", G_TYPE_INT, bpp,
			"depth", G_TYPE_INT, 24,
			"endianness",G_TYPE_INT,4321,
			"red_mask",G_TYPE_INT,0xff0000,
			"green_mask",G_TYPE_INT,0x00ff00,
			"blue_mask",G_TYPE_INT,0x0000ff,
			NULL);
		break;
	}
#else
	// GStreamer 1.x: describe the pixel format via the "format" field.
	string mime="video/x-raw";
	GstCaps *caps;
	if(internalPixelFormat==OF_PIXELS_NATIVE){
		// Accept any format the utils layer can handle.
		caps = gst_caps_from_string((mime + ",format={RGBA,BGRA,RGB,BGR,RGB16,GRAY8,YV12,I420,NV12,NV21,YUY2}").c_str());
	}else{
		string format = ofGstVideoUtils::getGstFormatName(internalPixelFormat);
		caps = gst_caps_new_simple(mime.c_str(),
			"format", G_TYPE_STRING, format.c_str(),
			NULL);
	}
#endif

#if GST_VERSION_MAJOR==0
	GstElement * gstPipeline = gst_element_factory_make("playbin2","player");
#else
	GstElement * gstPipeline = gst_element_factory_make("playbin","player");
#endif
	g_object_ref_sink(gstPipeline);
	g_object_set(G_OBJECT(gstPipeline), "uri", name.c_str(), (void*)NULL);

	// create the oF appsink for video rgb without sync to clock
	GstElement * gstSink = gst_element_factory_make("appsink", "app_sink");

	gst_app_sink_set_caps(GST_APP_SINK(gstSink), caps);
	gst_caps_unref(caps);

	if(threadAppSink){
		// Decouple the appsink from the streaming thread with a queue,
		// wrapped in a bin with a ghost "app_bin_sink" pad.
		GstElement * appQueue = gst_element_factory_make("queue","appsink_queue");
		g_object_set(G_OBJECT(appQueue), "leaky", 0, "silent", 1, (void*)NULL);
		GstElement* appBin = gst_bin_new("app_bin");

		gst_bin_add(GST_BIN(appBin), appQueue);
		GstPad* appQueuePad = gst_element_get_static_pad(appQueue, "sink");
		GstPad* ghostPad = gst_ghost_pad_new("app_bin_sink", appQueuePad);
		gst_object_unref(appQueuePad);
		gst_element_add_pad(appBin, ghostPad);

		gst_bin_add(GST_BIN(appBin), gstSink);
		gst_element_link(appQueue, gstSink);

		g_object_set (G_OBJECT(gstPipeline),"video-sink",appBin,(void*)NULL);
	}else{
		g_object_set (G_OBJECT(gstPipeline),"video-sink",gstSink,(void*)NULL);
	}

#ifdef TARGET_WIN32
	GstElement *audioSink = gst_element_factory_make("directsoundsink", NULL);
	g_object_set (G_OBJECT(gstPipeline),"audio-sink",audioSink,(void*)NULL);
#endif

	return videoUtils.setPipelineWithSink(gstPipeline,gstSink,bIsStream);
#else
	/*auto gstPipeline = gst_parse_launch(("uridecodebin uri=" + name + " ! glcolorscale name=gl_filter ! appsink name=app_sink").c_str(),NULL);
	auto gstSink = gst_bin_get_by_name(GST_BIN(gstPipeline),"app_sink");
	auto glfilter = gst_bin_get_by_name(GST_BIN(gstPipeline),"gl_filter");

	gst_app_sink_set_caps(GST_APP_SINK(gstSink), caps);
	gst_caps_unref(caps);

	glXMakeCurrent (ofGetX11Display(), None, 0);
	glDisplay = (GstGLDisplay *)gst_gl_display_x11_new_with_display(ofGetX11Display());
	glContext = gst_gl_context_new_wrapped (glDisplay, (guintptr) ofGetGLXContext(),
	    		  GST_GL_PLATFORM_GLX, GST_GL_API_OPENGL);

	g_object_set (G_OBJECT (glfilter), "other-context", glContext, NULL);
	// FIXME: this seems to be the way to add the context in 1.4.5
	//
	// GstBus * bus = gst_pipeline_get_bus (GST_PIPELINE(gstPipeline));
	// gst_bus_enable_sync_message_emission (bus);
	// g_signal_connect (bus, "sync-message", G_CALLBACK (sync_bus_call), this);
	// gst_object_unref(bus);

	auto ret = videoUtils.setPipelineWithSink(gstPipeline,gstSink,bIsStream);
	glXMakeCurrent (ofGetX11Display(), ofGetX11Window(), ofGetGLXContext());
	return ret;*/
	return videoUtils.setPipeline("uridecodebin uri=" + name,internalPixelFormat,bIsStream,-1,-1);
	//return videoUtils.setPipeline("filesrc location=" + name + " ! qtdemux ",internalPixelFormat,bIsStream,-1,-1);
#endif
}
/* Activates a session for the given SSRC/payload type: asks the application
 * for a caps mapping via the request-pt-map action signal, then creates,
 * configures and adds the session's recv_rtp_src pad. Always returns TRUE. */
static gboolean
activate_session (GstRDTManager * rdtmanager, GstRDTManagerSession * session,
    guint32 ssrc, guint8 pt)
{
  GstPadTemplate *templ;
  GstElementClass *klass;
  gchar *name;
  GstCaps *caps;
  GValue ret = { 0 };
  GValue args[3] = { {0}
  , {0}
  , {0}
  };

  GST_DEBUG_OBJECT (rdtmanager, "creating stream");

  session->ssrc = ssrc;
  session->pt = pt;

  /* get pt map: emit request-pt-map (self, session id, pt) -> GstCaps */
  g_value_init (&args[0], GST_TYPE_ELEMENT);
  g_value_set_object (&args[0], rdtmanager);
  g_value_init (&args[1], G_TYPE_UINT);
  g_value_set_uint (&args[1], session->id);
  g_value_init (&args[2], G_TYPE_UINT);
  g_value_set_uint (&args[2], pt);

  g_value_init (&ret, GST_TYPE_CAPS);
  g_value_set_boxed (&ret, NULL);

  g_signal_emitv (args, gst_rdt_manager_signals[SIGNAL_REQUEST_PT_MAP], 0,
      &ret);

  g_value_unset (&args[0]);
  g_value_unset (&args[1]);
  g_value_unset (&args[2]);
  caps = (GstCaps *) g_value_dup_boxed (&ret);
  g_value_unset (&ret);

  if (caps)
    gst_rdt_manager_parse_caps (rdtmanager, session, caps);

  name = g_strdup_printf ("recv_rtp_src_%d_%u_%d", session->id, ssrc, pt);
  klass = GST_ELEMENT_GET_CLASS (rdtmanager);
  templ = gst_element_class_get_pad_template (klass, "recv_rtp_src_%d_%d_%d");
  session->recv_rtp_src = gst_pad_new_from_template (templ, name);
  g_free (name);

  /* Fix: the request-pt-map handler may return NULL caps (the guard above
   * shows this is expected); the original called gst_caps_unref (caps)
   * unconditionally, which is invalid for NULL and aborts with a critical. */
  if (caps) {
    gst_pad_set_caps (session->recv_rtp_src, caps);
    gst_caps_unref (caps);
  }

  gst_pad_set_element_private (session->recv_rtp_src, session);
  gst_pad_set_query_function (session->recv_rtp_src, gst_rdt_manager_query_src);
  gst_pad_set_activatepush_function (session->recv_rtp_src,
      gst_rdt_manager_src_activate_push);
  gst_pad_set_active (session->recv_rtp_src, TRUE);
  gst_element_add_pad (GST_ELEMENT_CAST (rdtmanager), session->recv_rtp_src);

  return TRUE;
}
/* Parse the RealAudio file header (.ra version 3 or 4) accumulated in the
 * adapter, configure the demuxer fields (rate, channels, fourcc, ...),
 * create and push the source pad with its caps/stream-start event, and
 * flush the header bytes.  Returns GST_FLOW_OK when more data is needed
 * or parsing succeeded, GST_FLOW_ERROR on a broken/unknown header. */
static GstFlowReturn
gst_real_audio_demux_parse_header (GstRealAudioDemux * demux)
{
  const guint8 *data;
  gchar *codec_name = NULL;
  GstCaps *caps = NULL;
  GstEvent *event;
  gchar *stream_id;
  guint avail;

  g_assert (demux->ra_version == 4 || demux->ra_version == 3);

  avail = gst_adapter_available (demux->adapter);
  if (avail < 16)
    return GST_FLOW_OK;

  if (!gst_real_audio_demux_get_data_offset_from_header (demux))
    return GST_FLOW_ERROR;      /* shouldn't happen */

  GST_DEBUG_OBJECT (demux, "data_offset = %u", demux->data_offset);

  /* wait until the whole header (minus the 6 bytes already consumed
   * upstream) is in the adapter */
  if (avail + 6 < demux->data_offset) {
    GST_DEBUG_OBJECT (demux, "Need %u bytes, but only %u available now",
        demux->data_offset - 6, avail);
    return GST_FLOW_OK;
  }

  data = gst_adapter_map (demux->adapter, demux->data_offset - 6);
  g_assert (data);

  switch (demux->ra_version) {
    case 3:
      /* v3 is fixed-format 8 kHz mono 14.4 */
      demux->fourcc = GST_RM_AUD_14_4;
      demux->packet_size = 20;
      demux->sample_rate = 8000;
      demux->channels = 1;
      demux->sample_width = 16;
      demux->flavour = 1;
      demux->leaf_size = 0;
      demux->height = 0;
      break;
    case 4:
      demux->flavour = GST_READ_UINT16_BE (data + 16);
      /* demux->frame_size = GST_READ_UINT32_BE (data + 36); */
      demux->leaf_size = GST_READ_UINT16_BE (data + 38);
      demux->height = GST_READ_UINT16_BE (data + 34);
      demux->packet_size = GST_READ_UINT32_BE (data + 18);
      demux->sample_rate = GST_READ_UINT16_BE (data + 42);
      demux->sample_width = GST_READ_UINT16_BE (data + 46);
      demux->channels = GST_READ_UINT16_BE (data + 48);
      demux->fourcc = GST_READ_UINT32_LE (data + 56);
      demux->pending_tags = gst_rm_utils_read_tags (data + 63,
          demux->data_offset - 63, gst_rm_utils_read_string8);
      if (demux->pending_tags)
        gst_tag_list_set_scope (demux->pending_tags, GST_TAG_SCOPE_GLOBAL);
      break;
    default:
      g_assert_not_reached ();
#if 0
    case 5:
      demux->flavour = GST_READ_UINT16_BE (data + 16);
      /* demux->frame_size = GST_READ_UINT32_BE (data + 36); */
      demux->leaf_size = GST_READ_UINT16_BE (data + 38);
      demux->height = GST_READ_UINT16_BE (data + 34);
      demux->sample_rate = GST_READ_UINT16_BE (data + 48);
      demux->sample_width = GST_READ_UINT16_BE (data + 52);
      demux->n_channels = GST_READ_UINT16_BE (data + 54);
      demux->fourcc = RMDEMUX_FOURCC_GET (data + 60);
      break;
#endif
  }

  GST_INFO_OBJECT (demux, "packet_size  = %u", demux->packet_size);
  GST_INFO_OBJECT (demux, "sample_rate  = %u", demux->sample_rate);
  GST_INFO_OBJECT (demux, "sample_width = %u", demux->sample_width);
  GST_INFO_OBJECT (demux, "channels     = %u", demux->channels);
  GST_INFO_OBJECT (demux, "fourcc       = '%" GST_FOURCC_FORMAT "' (%08X)",
      GST_FOURCC_ARGS (demux->fourcc), demux->fourcc);

  switch (demux->fourcc) {
    case GST_RM_AUD_14_4:
      caps = gst_caps_new_simple ("audio/x-pn-realaudio", "raversion",
          G_TYPE_INT, 1, NULL);
      demux->byterate_num = 1000;
      demux->byterate_denom = 1;
      break;
    case GST_RM_AUD_28_8:
      /* FIXME: needs descrambling */
      caps = gst_caps_new_simple ("audio/x-pn-realaudio", "raversion",
          G_TYPE_INT, 2, NULL);
      break;
    case GST_RM_AUD_DNET:
      caps = gst_caps_new_simple ("audio/x-ac3", "rate", G_TYPE_INT,
          demux->sample_rate, NULL);
      if (demux->packet_size == 0 || demux->sample_rate == 0)
        goto broken_file;
      demux->byterate_num = demux->packet_size * demux->sample_rate;
      demux->byterate_denom = 1536;
      break;
      /* Sipro/ACELP.NET Voice Codec (MIME unknown) */
    case GST_RM_AUD_SIPR:
      caps = gst_caps_new_empty_simple ("audio/x-sipro");
      break;
    default:
      GST_WARNING_OBJECT (demux, "unknown fourcc %08X", demux->fourcc);
      break;
  }

  if (caps == NULL)
    goto unknown_fourcc;

  gst_caps_set_simple (caps,
      "flavor", G_TYPE_INT, demux->flavour,
      "rate", G_TYPE_INT, demux->sample_rate,
      "channels", G_TYPE_INT, demux->channels,
      "width", G_TYPE_INT, demux->sample_width,
      "leaf_size", G_TYPE_INT, demux->leaf_size,
      "packet_size", G_TYPE_INT, demux->packet_size,
      "height", G_TYPE_INT, demux->height, NULL);

  GST_INFO_OBJECT (demux, "Adding source pad, caps %" GST_PTR_FORMAT, caps);
  demux->srcpad = gst_pad_new_from_static_template (&src_template, "src");
  gst_pad_set_event_function (demux->srcpad,
      GST_DEBUG_FUNCPTR (gst_real_audio_demux_src_event));
  gst_pad_set_query_function (demux->srcpad,
      GST_DEBUG_FUNCPTR (gst_real_audio_demux_src_query));
  gst_pad_set_active (demux->srcpad, TRUE);
  gst_pad_use_fixed_caps (demux->srcpad);

  /* forward (or create) the stream-start event with a group id */
  stream_id =
      gst_pad_create_stream_id (demux->srcpad, GST_ELEMENT_CAST (demux), NULL);

  event = gst_pad_get_sticky_event (demux->sinkpad, GST_EVENT_STREAM_START, 0);
  if (event) {
    if (gst_event_parse_group_id (event, &demux->group_id))
      demux->have_group_id = TRUE;
    else
      demux->have_group_id = FALSE;
    gst_event_unref (event);
  } else if (!demux->have_group_id) {
    demux->have_group_id = TRUE;
    demux->group_id = gst_util_group_id_next ();
  }

  event = gst_event_new_stream_start (stream_id);
  if (demux->have_group_id)
    gst_event_set_group_id (event, demux->group_id);

  gst_pad_push_event (demux->srcpad, event);
  g_free (stream_id);

  gst_pad_set_caps (demux->srcpad, caps);
  codec_name = gst_pb_utils_get_codec_description (caps);
  gst_caps_unref (caps);

  gst_element_add_pad (GST_ELEMENT (demux), demux->srcpad);

  /* derive the stream duration from the upstream size if possible */
  if (demux->byterate_num > 0 && demux->byterate_denom > 0) {
    GstFormat bformat = GST_FORMAT_BYTES;
    gint64 size_bytes = 0;

    GST_INFO_OBJECT (demux, "byte rate = %u/%u = %u bytes/sec",
        demux->byterate_num, demux->byterate_denom,
        demux->byterate_num / demux->byterate_denom);

    if (gst_pad_peer_query_duration (demux->sinkpad, bformat, &size_bytes)) {
      demux->duration =
          gst_real_demux_get_timestamp_from_offset (demux, size_bytes);
      demux->upstream_size = size_bytes;
      GST_INFO_OBJECT (demux, "upstream_size = %" G_GUINT64_FORMAT,
          demux->upstream_size);
      GST_INFO_OBJECT (demux, "duration      = %" GST_TIME_FORMAT,
          GST_TIME_ARGS (demux->duration));
    }
  }

  demux->need_newsegment = TRUE;

  if (codec_name) {
    if (demux->pending_tags == NULL) {
      demux->pending_tags = gst_tag_list_new_empty ();
      gst_tag_list_set_scope (demux->pending_tags, GST_TAG_SCOPE_GLOBAL);
    }

    gst_tag_list_add (demux->pending_tags, GST_TAG_MERGE_REPLACE,
        GST_TAG_AUDIO_CODEC, codec_name, NULL);
    g_free (codec_name);
  }

  gst_adapter_unmap (demux->adapter);
  gst_adapter_flush (demux->adapter, demux->data_offset - 6);

  demux->state = REAL_AUDIO_DEMUX_STATE_DATA;
  demux->need_newsegment = TRUE;

  return GST_FLOW_OK;

/* ERRORS */
unknown_fourcc:
  {
    /* FIX: unmap the adapter before bailing out; previously the error
     * paths returned with the adapter still mapped. */
    gst_adapter_unmap (demux->adapter);
    GST_ELEMENT_ERROR (GST_ELEMENT (demux), STREAM, DECODE, (NULL),
        ("Unknown fourcc '0x%" G_GINT32_MODIFIER "x'", demux->fourcc));
    return GST_FLOW_ERROR;
  }
broken_file:
  {
    gst_adapter_unmap (demux->adapter);
    GST_ELEMENT_ERROR (GST_ELEMENT (demux), STREAM, DECODE, (NULL),
        ("Broken file - invalid sample_rate or other header value"));
    return GST_FLOW_ERROR;
  }
}
// Load a movie file or network stream into a playbin-based pipeline.
// `name` may be a plain path (converted to a file:/// URI), a file://
// URI, or any other URI scheme (treated as a live stream).
// Returns true on success; for local files, also allocates the pixel
// buffers for the negotiated bpp.
bool ofGstVideoPlayer::loadMovie(string name){
	close();
	// Decide stream-vs-file from the URI scheme: no scheme means a
	// local path that must be turned into an absolute file:/// URI.
	if( name.find( "file://",0 ) != string::npos){
		bIsStream = false;
	}else if( name.find( "://",0 ) == string::npos){
		name = "file:///"+ofToDataPath(name,true);
		bIsStream = false;
	}else{
		bIsStream = true;
	}
	ofLog(OF_LOG_VERBOSE,"loading "+name);

	ofGstUtils::startGstMainLoop();

	// playbin2 was renamed to playbin in GStreamer 1.0
#if GST_VERSION_MAJOR==0
	GstElement * gstPipeline = gst_element_factory_make("playbin2","player");
#else
	GstElement * gstPipeline = gst_element_factory_make("playbin","player");
#endif
	g_object_set(G_OBJECT(gstPipeline), "uri", name.c_str(), (void*)NULL);

	// create the oF appsink for video rgb without sync to clock
	// NOTE(review): sync is actually set to TRUE below, which
	// contradicts this original comment — confirm intended behavior.
	GstElement * gstSink = gst_element_factory_make("appsink", "app_sink");

	gst_base_sink_set_sync(GST_BASE_SINK(gstSink), true);
	// keep at most 8 buffers queued and drop old ones so playback
	// doesn't stall if the app reads slowly
	gst_app_sink_set_max_buffers(GST_APP_SINK(gstSink), 8);
	gst_app_sink_set_drop (GST_APP_SINK(gstSink),true);
	gst_base_sink_set_max_lateness (GST_BASE_SINK(gstSink), -1);

	// Build the raw-video caps matching the requested oF pixel format.
#if GST_VERSION_MAJOR==0
	int bpp;
	string mime;
	switch(internalPixelFormat){
	case OF_PIXELS_MONO:
		mime = "video/x-raw-gray";
		bpp = 8;
		break;
	case OF_PIXELS_RGB:
		mime = "video/x-raw-rgb";
		bpp = 24;
		break;
	case OF_PIXELS_RGBA:
	case OF_PIXELS_BGRA:
		mime = "video/x-raw-rgb";
		bpp = 32;
		break;
	default:
		mime = "video/x-raw-rgb";
		bpp=24;
		break;
	}

	GstCaps *caps = gst_caps_new_simple(mime.c_str(),
										"bpp", G_TYPE_INT, bpp,
										"depth", G_TYPE_INT, 24,
										"endianness",G_TYPE_INT,4321,
										"red_mask",G_TYPE_INT,0xff0000,
										"green_mask",G_TYPE_INT,0x00ff00,
										"blue_mask",G_TYPE_INT,0x0000ff,
										"alpha_mask",G_TYPE_INT,0x000000ff,
										NULL);
#else
	// 1.0 uses a single video/x-raw mime with a "format" string field
	int bpp;
	string mime="video/x-raw";
	string format;
	switch(internalPixelFormat){
	case OF_PIXELS_MONO:
		format = "GRAY8";
		bpp = 8;
		break;
	case OF_PIXELS_RGB:
		format = "RGB";
		bpp = 24;
		break;
	case OF_PIXELS_RGBA:
		format = "RGBA";
		bpp = 32;
		break;
	case OF_PIXELS_BGRA:
		format = "BGRA";
		bpp = 32;
		break;
	default:
		format = "RGB";
		bpp=24;
		break;
	}

	GstCaps *caps = gst_caps_new_simple(mime.c_str(),
										"format", G_TYPE_STRING, format.c_str(),
										/*"bpp", G_TYPE_INT, bpp,
										"depth", G_TYPE_INT, 24,
										"endianness",G_TYPE_INT,4321,
										"red_mask",G_TYPE_INT,0xff0000,
										"green_mask",G_TYPE_INT,0x00ff00,
										"blue_mask",G_TYPE_INT,0x0000ff,
										"alpha_mask",G_TYPE_INT,0x000000ff,*/
										NULL);
#endif

	gst_app_sink_set_caps(GST_APP_SINK(gstSink), caps);
	gst_caps_unref(caps);

	// Optionally decouple the appsink from the pipeline with a queue
	// wrapped in a bin (ghost pad exposes the queue's sink).
	if(threadAppSink){
		GstElement * appQueue = gst_element_factory_make("queue","appsink_queue");
		g_object_set(G_OBJECT(appQueue), "leaky", 0, "silent", 1, (void*)NULL);
		GstElement* appBin = gst_bin_new("app_bin");
		gst_bin_add(GST_BIN(appBin), appQueue);
		GstPad* appQueuePad = gst_element_get_static_pad(appQueue, "sink");
		GstPad* ghostPad = gst_ghost_pad_new("app_bin_sink", appQueuePad);
		gst_object_unref(appQueuePad);
		gst_element_add_pad(appBin, ghostPad);

		gst_bin_add_many(GST_BIN(appBin), gstSink, NULL);
		gst_element_link_many(appQueue, gstSink, NULL);

		g_object_set (G_OBJECT(gstPipeline),"video-sink",appBin,(void*)NULL);
	}else{
		g_object_set (G_OBJECT(gstPipeline),"video-sink",gstSink,(void*)NULL);
	}

#ifdef TARGET_WIN32
	// default audio sink on Windows
	GstElement *audioSink = gst_element_factory_make("directsoundsink", NULL);
	g_object_set (G_OBJECT(gstPipeline),"audio-sink",audioSink,(void*)NULL);
#endif

	videoUtils.setPipelineWithSink(gstPipeline,gstSink,bIsStream);
	// streams allocate lazily once dimensions are known
	if(!bIsStream) return allocate(bpp);
	else return true;
}
/*
 * Return a source or sink element for the given media session type.
 *
 * For UNIQUE source elements (e.g. a single shared webcam/mic), the
 * element is wrapped once in a named bin containing a tee, kept in the
 * manager's pipeline, and each caller gets a fresh ghost pad off a new
 * tee request pad.  Non-unique elements are simply created per call.
 *
 * Returns NULL when video/voice support is compiled out, no element
 * info is registered for the type, or creation fails.  Ownership of
 * the returned element follows GStreamer floating-ref conventions.
 */
GstElement *
purple_media_manager_get_element(PurpleMediaManager *manager,
		PurpleMediaSessionType type, PurpleMedia *media,
		const gchar *session_id, const gchar *participant)
{
#ifdef USE_VV
	GstElement *ret = NULL;
	PurpleMediaElementInfo *info = NULL;
	PurpleMediaElementType element_type;

	/* pick the registered element info matching the session direction */
	if (type & PURPLE_MEDIA_SEND_AUDIO)
		info = manager->priv->audio_src;
	else if (type & PURPLE_MEDIA_RECV_AUDIO)
		info = manager->priv->audio_sink;
	else if (type & PURPLE_MEDIA_SEND_VIDEO)
		info = manager->priv->video_src;
	else if (type & PURPLE_MEDIA_RECV_VIDEO)
		info = manager->priv->video_sink;

	if (info == NULL)
		return NULL;

	element_type = purple_media_element_info_get_element_type(info);

	if (element_type & PURPLE_MEDIA_ELEMENT_UNIQUE &&
			element_type & PURPLE_MEDIA_ELEMENT_SRC) {
		GstElement *tee;
		GstPad *pad;
		GstPad *ghost;
		gchar *id = purple_media_element_info_get_id(info);

		/* reuse the shared bin if it already lives in the pipeline */
		ret = gst_bin_get_by_name(GST_BIN(
				purple_media_manager_get_pipeline(
				manager)), id);

		if (ret == NULL) {
			GstElement *bin, *fakesink;
			ret = purple_media_element_info_call_create(info,
					media, session_id, participant);
			bin = gst_bin_new(id);
			tee = gst_element_factory_make("tee", "tee");
			gst_bin_add_many(GST_BIN(bin), ret, tee, NULL);
			gst_element_link(ret, tee);

			/*
			 * This shouldn't be necessary, but it stops it from
			 * giving a not-linked error upon destruction
			 */
			fakesink = gst_element_factory_make("fakesink", NULL);
			g_object_set(fakesink, "sync", FALSE, NULL);
			gst_bin_add(GST_BIN(bin), fakesink);
			gst_element_link(tee, fakesink);

			ret = bin;
			gst_object_ref(ret);
			gst_bin_add(GST_BIN(purple_media_manager_get_pipeline(
					manager)), ret);
		}
		g_free(id);

		/* hand out a fresh branch off the tee via a ghost pad;
		 * "src%d" is the 0.10-style request-pad template name */
		tee = gst_bin_get_by_name(GST_BIN(ret), "tee");
		pad = gst_element_get_request_pad(tee, "src%d");
		gst_object_unref(tee);
		ghost = gst_ghost_pad_new(NULL, pad);
		gst_object_unref(pad);
		/* release the tee request pad again once nothing uses it */
		g_signal_connect(GST_PAD(ghost), "unlinked",
				G_CALLBACK(request_pad_unlinked_cb), NULL);
		gst_pad_set_active(ghost, TRUE);
		gst_element_add_pad(ret, ghost);
	} else {
		ret = purple_media_element_info_call_create(info,
				media, session_id, participant);
	}

	if (ret == NULL)
		purple_debug_error("media", "Error creating source or sink\n");

	return ret;
#else
	return NULL;
#endif
}
bool Player::prepare() { //Init Gst // QString caps_value = "audio/x-raw-int"; // On mac we bundle the gstreamer plugins with knowthelist #if defined(Q_OS_DARWIN) QString scanner_path; QString plugin_path; QString registry_filename; caps_value = "audio/x-raw-float"; QDir pd(QCoreApplication::applicationDirPath() + "/../plugins"); scanner_path = QCoreApplication::applicationDirPath() + "/../plugins/gst-plugin-scanner"; plugin_path = QCoreApplication::applicationDirPath() + "/../plugins/gstreamer"; registry_filename = QDesktopServices::storageLocation(QDesktopServices::DataLocation) + QString("/gst-registry-%1-bin").arg(QCoreApplication::applicationVersion()); if ( pd.exists()) setenv("GST_PLUGIN_SCANNER", scanner_path.toLocal8Bit().constData(), 1); if ( pd.exists()) { setenv("GST_PLUGIN_PATH", plugin_path.toLocal8Bit().constData(), 1); // Never load plugins from anywhere else. setenv("GST_PLUGIN_SYSTEM_PATH", plugin_path.toLocal8Bit().constData(), 1); } if (!registry_filename.isEmpty()) { setenv("GST_REGISTRY", registry_filename.toLocal8Bit().constData(), 1); } #endif gst_init (0, 0); //prepare GstElement *dec, *conv,*resample,*sink, *gain, *audio, *vol, *level, *equalizer; GstElement *levelout; GstPad *audiopad; GstCaps *caps; pipeline = gst_pipeline_new ("pipeline"); bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); caps = gst_caps_new_simple (caps_value.toLatin1().data(), "channels", G_TYPE_INT, 2, NULL); dec = gst_element_factory_make ("decodebin2", "decoder"); g_signal_connect (dec, "new-decoded-pad", G_CALLBACK (cb_newpad), this); gst_bin_add (GST_BIN (pipeline), dec); audio = gst_bin_new ("audiobin"); conv = gst_element_factory_make ("audioconvert", "aconv"); resample = gst_element_factory_make ("audioresample", "resample"); audiopad = gst_element_get_static_pad (conv, "sink"); gain = gst_element_factory_make ("audioamplify", "gain"); level = gst_element_factory_make ("level", "levelintern"); vol = gst_element_factory_make ("volume", "volume"); levelout = 
gst_element_factory_make ("level", "levelout"); equalizer = gst_element_factory_make ("equalizer-3bands", "equalizer"); sink = gst_element_factory_make ("autoaudiosink", "sink"); g_object_set (level, "message", TRUE, NULL); g_object_set (levelout, "message", TRUE, NULL); g_object_set (level, "peak-ttl", 300000000000, NULL); gst_bin_add_many (GST_BIN (audio), conv, resample, level, gain, equalizer, levelout, vol, sink, NULL); gst_element_link (conv,resample); gst_element_link_filtered (resample, level, caps); gst_element_link (level, gain); gst_element_link (gain, equalizer); gst_element_link (equalizer, vol); gst_element_link_filtered (vol, levelout, caps); gst_element_link (levelout,sink); gst_element_add_pad (audio, gst_ghost_pad_new ("sink", audiopad)); gst_bin_add (GST_BIN (pipeline), audio); GstElement *l_src; l_src = gst_element_factory_make ("filesrc", "localsrc"); gst_bin_add_many (GST_BIN (pipeline), l_src, NULL); gst_element_set_state (l_src, GST_STATE_NULL); gst_element_link ( l_src,dec); gst_bus_set_sync_handler (bus, bus_cb, this); gst_object_unref (audiopad); return pipeline; }
/* Return (a ref to) the RTP or RTCP source pad for the given SSRC,
 * creating both pads on first sight of the SSRC.
 *
 * Holds GST_PAD_LOCK while touching demux->srcpads.  On creation, the
 * new pads are blocked with downstream probes while the "new-ssrc-pad"
 * signal runs (so handlers can link them before data flows), then the
 * probes are removed.  Initial sticky events are forwarded exactly once
 * per pad type.  The caller owns a ref on the returned pad. */
static GstPad *
find_or_create_demux_pad_for_ssrc (GstRtpSsrcDemux * demux, guint32 ssrc,
    PadType padtype)
{
  GstPad *rtp_pad, *rtcp_pad;
  GstElementClass *klass;
  GstPadTemplate *templ;
  gchar *padname;
  GstRtpSsrcDemuxPad *demuxpad;
  GstPad *retpad;
  gulong rtp_block, rtcp_block;

  GST_PAD_LOCK (demux);

  demuxpad = find_demux_pad_for_ssrc (demux, ssrc);
  if (demuxpad != NULL) {
    gboolean forward = FALSE;

    /* existing SSRC: hand out a ref; forward initial events only the
     * first time each pad type is requested */
    switch (padtype) {
      case RTP_PAD:
        retpad = gst_object_ref (demuxpad->rtp_pad);
        if (!demuxpad->pushed_initial_rtp_events) {
          forward = TRUE;
          demuxpad->pushed_initial_rtp_events = TRUE;
        }
        break;
      case RTCP_PAD:
        retpad = gst_object_ref (demuxpad->rtcp_pad);
        if (!demuxpad->pushed_initial_rtcp_events) {
          forward = TRUE;
          demuxpad->pushed_initial_rtcp_events = TRUE;
        }
        break;
      default:
        retpad = NULL;
        g_assert_not_reached ();
    }

    GST_PAD_UNLOCK (demux);

    /* forward outside the lock */
    if (forward)
      forward_initial_events (demux, ssrc, retpad, padtype);

    return retpad;
  }

  GST_DEBUG_OBJECT (demux, "creating new pad for SSRC %08x", ssrc);

  klass = GST_ELEMENT_GET_CLASS (demux);
  templ = gst_element_class_get_pad_template (klass, "src_%u");
  padname = g_strdup_printf ("src_%u", ssrc);
  rtp_pad = gst_pad_new_from_template (templ, padname);
  g_free (padname);

  templ = gst_element_class_get_pad_template (klass, "rtcp_src_%u");
  padname = g_strdup_printf ("rtcp_src_%u", ssrc);
  rtcp_pad = gst_pad_new_from_template (templ, padname);
  g_free (padname);

  /* wrap in structure and add to list */
  demuxpad = g_new0 (GstRtpSsrcDemuxPad, 1);
  demuxpad->ssrc = ssrc;
  demuxpad->rtp_pad = rtp_pad;
  demuxpad->rtcp_pad = rtcp_pad;

  gst_pad_set_element_private (rtp_pad, demuxpad);
  gst_pad_set_element_private (rtcp_pad, demuxpad);

  demux->srcpads = g_slist_prepend (demux->srcpads, demuxpad);

  gst_pad_set_query_function (rtp_pad, gst_rtp_ssrc_demux_src_query);
  gst_pad_set_iterate_internal_links_function (rtp_pad,
      gst_rtp_ssrc_demux_iterate_internal_links_src);
  gst_pad_set_event_function (rtp_pad, gst_rtp_ssrc_demux_src_event);
  gst_pad_use_fixed_caps (rtp_pad);
  gst_pad_set_active (rtp_pad, TRUE);

  gst_pad_set_event_function (rtcp_pad, gst_rtp_ssrc_demux_src_event);
  gst_pad_set_iterate_internal_links_function (rtcp_pad,
      gst_rtp_ssrc_demux_iterate_internal_links_src);
  gst_pad_use_fixed_caps (rtcp_pad);
  gst_pad_set_active (rtcp_pad, TRUE);

  /* the requested pad type forwards its initial events right away */
  if (padtype == RTP_PAD) {
    demuxpad->pushed_initial_rtp_events = TRUE;
    forward_initial_events (demux, ssrc, rtp_pad, padtype);
  } else if (padtype == RTCP_PAD) {
    demuxpad->pushed_initial_rtcp_events = TRUE;
    forward_initial_events (demux, ssrc, rtcp_pad, padtype);
  } else {
    g_assert_not_reached ();
  }

  gst_element_add_pad (GST_ELEMENT_CAST (demux), rtp_pad);
  gst_element_add_pad (GST_ELEMENT_CAST (demux), rtcp_pad);

  switch (padtype) {
    case RTP_PAD:
      retpad = gst_object_ref (demuxpad->rtp_pad);
      break;
    case RTCP_PAD:
      retpad = gst_object_ref (demuxpad->rtcp_pad);
      break;
    default:
      retpad = NULL;
      g_assert_not_reached ();
  }

  /* keep the pads alive and blocked while the signal handlers run,
   * so downstream can be linked before any data is pushed */
  gst_object_ref (rtp_pad);
  gst_object_ref (rtcp_pad);

  rtp_block = gst_pad_add_probe (rtp_pad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
      NULL, NULL, NULL);
  rtcp_block = gst_pad_add_probe (rtcp_pad, GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM,
      NULL, NULL, NULL);

  GST_PAD_UNLOCK (demux);

  g_signal_emit (G_OBJECT (demux),
      gst_rtp_ssrc_demux_signals[SIGNAL_NEW_SSRC_PAD], 0, ssrc, rtp_pad);

  gst_pad_remove_probe (rtp_pad, rtp_block);
  gst_pad_remove_probe (rtcp_pad, rtcp_block);

  gst_object_unref (rtp_pad);
  gst_object_unref (rtcp_pad);

  return retpad;
}
/* Request-pad implementation (GStreamer 0.10 API): create a new sink
 * pad named "sink<N>" with per-pad private state, activate it, and add
 * it to the element.  The requested name is ignored; pads are numbered
 * from an atomic counter.  Returns NULL if the template is not a sink
 * template or the pad cannot be activated/added. */
static GstPad *
gst_live_adder_request_new_pad (GstElement * element, GstPadTemplate * templ,
    const gchar * unused)
{
  gchar *name;
  GstLiveAdder *adder;
  GstPad *newpad;
  gint padcount;
  GstLiveAdderPadPrivate *padprivate = NULL;

  if (templ->direction != GST_PAD_SINK)
    goto not_sink;

  adder = GST_LIVE_ADDER (element);

  /* increment pad counter */
  padcount = g_atomic_int_exchange_and_add (&adder->padcount, 1);

  name = g_strdup_printf ("sink%d", padcount);
  newpad = gst_pad_new_from_template (templ, name);
  GST_DEBUG_OBJECT (adder, "request new pad %s", name);
  g_free (name);

  gst_pad_set_getcaps_function (newpad,
      GST_DEBUG_FUNCPTR (gst_live_adder_sink_getcaps));
  gst_pad_set_setcaps_function (newpad,
      GST_DEBUG_FUNCPTR (gst_live_adder_setcaps));
  gst_pad_set_event_function (newpad,
      GST_DEBUG_FUNCPTR (gst_live_adder_sink_event));

  /* per-pad segment/EOS/timestamp bookkeeping, freed on error below */
  padprivate = g_new0 (GstLiveAdderPadPrivate, 1);

  gst_segment_init (&padprivate->segment, GST_FORMAT_UNDEFINED);
  padprivate->eos = FALSE;
  padprivate->expected_timestamp = GST_CLOCK_TIME_NONE;

  gst_pad_set_element_private (newpad, padprivate);

  gst_pad_set_chain_function (newpad, gst_live_live_adder_chain);

  if (!gst_pad_set_active (newpad, TRUE))
    goto could_not_activate;

  /* takes ownership of the pad */
  if (!gst_element_add_pad (GST_ELEMENT (adder), newpad))
    goto could_not_add;

  GST_OBJECT_LOCK (adder);
  adder->sinkpads = g_list_prepend (adder->sinkpads, newpad);
  GST_OBJECT_UNLOCK (adder);

  return newpad;

  /* errors */
not_sink:
  {
    g_warning ("gstadder: request new pad that is not a SINK pad\n");
    return NULL;
  }
could_not_add:
  {
    GST_DEBUG_OBJECT (adder, "could not add pad");
    g_free (padprivate);
    gst_object_unref (newpad);
    return NULL;
  }
could_not_activate:
  {
    GST_DEBUG_OBJECT (adder, "could not activate new pad");
    g_free (padprivate);
    gst_object_unref (newpad);
    return NULL;
  }
}
/* Build a top-level bin around @sub_element followed by the NULL-
 * terminated varargs chain of extra elements.  The chain elements are
 * linked in order; the bin's "src" ghost pad targets the last element's
 * src pad (or @sub_element's, when no extras are given).  If
 * @sub_element has no static "src" pad, linking/ghosting is deferred to
 * its "pad-added" signal.  Returns the new bin. */
GstElement *
ges_source_create_topbin (const gchar * bin_name, GstElement * sub_element,
    ...)
{
  va_list ap;
  GstElement *topbin;
  GstElement *cur;
  GstElement *chain_head = NULL;
  GstElement *chain_tail = NULL;
  GstPad *sub_src;

  topbin = gst_bin_new (bin_name);
  gst_bin_add (GST_BIN (topbin), sub_element);

  /* collect the varargs chain, linking consecutive elements */
  va_start (ap, sub_element);
  while ((cur = va_arg (ap, GstElement *)) != NULL) {
    gst_bin_add (GST_BIN (topbin), cur);
    if (chain_tail == NULL)
      chain_head = cur;
    else
      gst_element_link (chain_tail, cur);
    chain_tail = cur;
  }
  va_end (ap);

  sub_src = gst_element_get_static_pad (sub_element, "src");

  if (chain_tail != NULL) {
    /* ghost the chain's output, then attach sub_element to its input */
    GstPad *tail_src = gst_element_get_static_pad (chain_tail, "src");
    GstPad *head_sink = gst_element_get_static_pad (chain_head, "sink");
    GstPad *ghost = gst_ghost_pad_new ("src", tail_src);

    gst_pad_set_active (ghost, TRUE);
    gst_element_add_pad (topbin, ghost);

    if (sub_src)
      gst_pad_link (sub_src, head_sink);
    else
      g_signal_connect (sub_element, "pad-added",
          G_CALLBACK (_pad_added_cb), head_sink);

    gst_object_unref (tail_src);
    gst_object_unref (head_sink);
  } else if (sub_src) {
    /* no extra elements: ghost sub_element's src directly */
    GstPad *ghost = gst_ghost_pad_new ("src", sub_src);

    gst_pad_set_active (ghost, TRUE);
    gst_element_add_pad (topbin, ghost);
  } else {
    /* dynamic pads: ghost whatever pad shows up later */
    g_signal_connect (sub_element, "pad-added",
        G_CALLBACK (_ghost_pad_added_cb), topbin);
  }

  if (sub_src)
    gst_object_unref (sub_src);

  return topbin;
}
/* with PAD_LOCK */
/* Create the RTP and RTCP source pads for a newly seen SSRC
 * (GStreamer 0.10-era variant: copies caps from the sink pads via
 * GST_PAD_CAPS).  Registers the pad pair in demux->srcpads, adds both
 * pads to the element, and emits "new-ssrc-pad".  Caller must hold
 * PAD_LOCK.  Returns the new bookkeeping structure (owned by demux). */
static GstRtpSsrcDemuxPad *
create_demux_pad_for_ssrc (GstRtpSsrcDemux * demux, guint32 ssrc,
    GstClockTime timestamp)
{
  GstPad *rtp_pad, *rtcp_pad;
  GstElementClass *klass;
  GstPadTemplate *templ;
  gchar *padname;
  GstRtpSsrcDemuxPad *demuxpad;

  GST_DEBUG_OBJECT (demux, "creating pad for SSRC %08x", ssrc);

  klass = GST_ELEMENT_GET_CLASS (demux);
  templ = gst_element_class_get_pad_template (klass, "src_%d");
  padname = g_strdup_printf ("src_%d", ssrc);
  rtp_pad = gst_pad_new_from_template (templ, padname);
  g_free (padname);

  templ = gst_element_class_get_pad_template (klass, "rtcp_src_%d");
  padname = g_strdup_printf ("rtcp_src_%d", ssrc);
  rtcp_pad = gst_pad_new_from_template (templ, padname);
  g_free (padname);

  /* we use the first timestamp received to calculate the difference between
   * timestamps on all streams */
  GST_DEBUG_OBJECT (demux, "SSRC %08x, first timestamp %" GST_TIME_FORMAT,
      ssrc, GST_TIME_ARGS (timestamp));

  /* wrap in structure and add to list */
  demuxpad = g_new0 (GstRtpSsrcDemuxPad, 1);
  demuxpad->ssrc = ssrc;
  demuxpad->rtp_pad = rtp_pad;
  demuxpad->rtcp_pad = rtcp_pad;

  GST_DEBUG_OBJECT (demux, "first timestamp %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  gst_pad_set_element_private (rtp_pad, demuxpad);
  gst_pad_set_element_private (rtcp_pad, demuxpad);

  demux->srcpads = g_slist_prepend (demux->srcpads, demuxpad);

  /* copy caps from input */
  gst_pad_set_caps (rtp_pad, GST_PAD_CAPS (demux->rtp_sink));
  gst_pad_use_fixed_caps (rtp_pad);
  gst_pad_set_caps (rtcp_pad, GST_PAD_CAPS (demux->rtcp_sink));
  gst_pad_use_fixed_caps (rtcp_pad);

  gst_pad_set_event_function (rtp_pad, gst_rtp_ssrc_demux_src_event);
  gst_pad_set_query_function (rtp_pad, gst_rtp_ssrc_demux_src_query);
  gst_pad_set_iterate_internal_links_function (rtp_pad,
      gst_rtp_ssrc_demux_iterate_internal_links);
  gst_pad_set_active (rtp_pad, TRUE);

  gst_pad_set_event_function (rtcp_pad, gst_rtp_ssrc_demux_src_event);
  gst_pad_set_iterate_internal_links_function (rtcp_pad,
      gst_rtp_ssrc_demux_iterate_internal_links);
  gst_pad_set_active (rtcp_pad, TRUE);

  gst_element_add_pad (GST_ELEMENT_CAST (demux), rtp_pad);
  gst_element_add_pad (GST_ELEMENT_CAST (demux), rtcp_pad);

  g_signal_emit (G_OBJECT (demux),
      gst_rtp_ssrc_demux_signals[SIGNAL_NEW_SSRC_PAD], 0, ssrc, rtp_pad);

  return demuxpad;
}
GstElement *create_video_sink() { GstElement *bin, *decoder = NULL; GstIterator *iter; GstIteratorResult res; GError *error = NULL; GstPad *pad; gpointer element = NULL; const char* decoder_name; #ifndef DESKTOP /* create pipeline */ decoder_name = "tividdec20"; bin = gst_parse_launch_full("TIViddec2 genTimeStamps=FALSE \ engineName=decode \ codecName=h264dec numFrames=-1 \ ! videoscale method=0 \ ! video/x-raw-yuv, format=(fourcc)I420, width=320, height=240 \ ! ffmpegcolorspace \ ! video/x-raw-rgb, bpp=16 \ ! TIDmaiVideoSink displayStd=fbdev displayDevice=/dev/fb0 videoStd=QVGA \ videoOutput=LCD resizer=FALSE accelFrameCopy=TRUE", NULL, 0, &error); #else decoder_name = "decodebin"; bin = gst_parse_launch_full("decodebin \ ! videoscale method=0 \ ! video/x-raw-yuv, format=(fourcc)I420, width=320, height=240 \ ! xvimagesink", NULL, 0, &error); #endif if (!bin) { g_error("GStreamer: failed to parse video sink pipeline\n"); return NULL; } gst_object_set_name(GST_OBJECT(bin), "video-sink"); iter = gst_bin_iterate_elements(GST_BIN(bin)); res = gst_iterator_next (iter, &element); while (res == GST_ITERATOR_OK) { gchar *name; name = gst_object_get_name(GST_OBJECT (element)); if (name) { if (!strncmp(name, decoder_name, strlen(decoder_name))) { decoder = GST_ELEMENT(element); } g_printf("GS: video sink element: %s \n", name); g_free (name); } gst_object_unref (element); element = NULL; res = gst_iterator_next (iter, &element); } gst_iterator_free (iter); if (!decoder) { /* mem leak */ g_printf("decoder element not found\n"); return NULL; } /* add ghostpad */ pad = gst_element_get_static_pad (decoder, "sink"); gst_element_add_pad(bin, gst_ghost_pad_new("sink", pad)); gst_object_unref(GST_OBJECT(pad)); return bin; }
int main(int argc, char *argv[]) { GstElement *pipeline, *bin, *effect_element, *convert, *sink; GstPad *pad, *ghost_pad; char *pipeline_str; GIOChannel *io_stdin = g_io_channel_unix_new(fileno(stdin)); CustomData data; GstStateChangeReturn ret; gboolean list_effects = FALSE; gchar *effect_name = NULL; GError *error = NULL; GstPlugin *gaudiplugin; gchar *props_str = NULL; GOptionContext *context; GOptionEntry options[] = { { "list-effects", 'l', 0, G_OPTION_ARG_NONE, &list_effects, "list available effects and exits", NULL }, { "effect", 'e', 0, G_OPTION_ARG_STRING, &effect_name, "set the desired effect", NULL }, { "props", 'p', 0, G_OPTION_ARG_STRING, &props_str, "for property setting (-p \"silent,bool,true;adjustement,uint,150\")", NULL }, { NULL } }; setlocale(LC_ALL, "fr_FR.utf8"); gst_init(&argc, &argv); gaudiplugin = gst_registry_find_plugin(GET_PLUGIN_REGISTRY, "gaudieffects"); if (gaudiplugin == NULL) { g_print("Pas de plugin “gaudieffects” trouvé !! :(\n"); return -1; } context = g_option_context_new(""); g_option_context_add_main_entries(context, options, ""); if (!g_option_context_parse(context, &argc, &argv, &error)) { g_print("option parsing failed: %s\n", error->message); return -1; } g_option_context_free(context); if (list_effects == TRUE) return list_gaudieffects_features(); if (argc > 1) { if (g_str_has_prefix(argv[1], "http://") || g_str_has_prefix(argv[1], "ftp://")) pipeline_str = g_strdup_printf("%s uri=\"%s\"", PLAYBIN, argv[1]); else if (argv[1][0] == '~') pipeline_str = g_strdup_printf("%s uri=\"file://%s%s\"", PLAYBIN, g_get_home_dir(), argv[1]+1); else if (g_file_test(argv[1], G_FILE_TEST_IS_REGULAR)) pipeline_str = g_strdup_printf("playbin uri=\"file://%s\"", argv[1]); else pipeline_str = g_strdup_printf("%s uri=%s", PLAYBIN, DEFAULT_URI); } else pipeline_str = g_strdup_printf("%s uri=%s", PLAYBIN, DEFAULT_URI); g_io_add_watch(io_stdin, G_IO_IN, (GIOFunc)handle_keyboard, &data); pipeline = gst_parse_launch(pipeline_str, NULL); if 
(gst_plugin_is_loaded(gaudiplugin) == FALSE) gst_plugin_load(gaudiplugin); if (effect_name == NULL) effect_name = "solarize"; effect_element = gst_element_factory_make(effect_name, effect_name); convert = gst_element_factory_make("videoconvert", "convert"); sink = gst_element_factory_make("autovideosink", "video_sink"); if (!effect_element || !convert || !sink) { g_printerr("Not all elements could be created.\n"); return -1; } bin = gst_bin_new("video_sink_bin"); gst_bin_add_many(GST_BIN(bin), effect_element, convert, sink, NULL); gst_element_link_many(effect_element, convert, sink, NULL); pad = gst_element_get_static_pad(effect_element, "sink"); ghost_pad = gst_ghost_pad_new("sink", pad); gst_pad_set_active(ghost_pad, TRUE); gst_element_add_pad(bin, ghost_pad); gst_object_unref(pad); g_object_set(GST_OBJECT(pipeline), "video-sink", bin, NULL); if (props_str != NULL) set_props(effect_element, props_str); ret = gst_element_set_state(pipeline, GST_STATE_PLAYING); if (ret == GST_STATE_CHANGE_FAILURE) { g_printerr("Unable to set the pipeline to the playing state.\n"); gst_object_unref(pipeline); return -1; } data.loop = g_main_loop_new(NULL, FALSE); g_main_loop_run(data.loop); g_io_channel_unref(io_stdin); gst_element_set_state(pipeline, GST_STATE_NULL); gst_object_unref(pipeline); return 0; }
static GstElement * fs_rtp_dtmf_sound_source_build (FsRtpSpecialSource *source, GList *negotiated_codecs, FsCodec *selected_codec, GError **error) { FsCodec *telephony_codec = NULL; GstCaps *caps = NULL; GstPad *pad = NULL; GstElement *dtmfsrc = NULL; GstElement *capsfilter = NULL; GstPad *ghostpad = NULL; GstElement *bin = NULL; GstElement *encoder = NULL; GstElement *payloader = NULL; gchar *encoder_name = NULL; gchar *payloader_name = NULL; telephony_codec = get_pcm_law_sound_codec (negotiated_codecs, &encoder_name, &payloader_name); if (!telephony_codec) { g_set_error (error, FS_ERROR, FS_ERROR_INTERNAL, "Could not find a pcma/pcmu to send dtmf on"); return NULL; } bin = gst_bin_new (NULL); dtmfsrc = gst_element_factory_make ("dtmfsrc", NULL); if (!dtmfsrc) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not make rtpdtmfsrc"); goto error; } if (!gst_bin_add (GST_BIN (bin), dtmfsrc)) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not add rtpdtmfsrc to bin"); gst_object_unref (dtmfsrc); goto error; } encoder = gst_element_factory_make (encoder_name, NULL); if (!encoder) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not make %s", encoder_name); goto error; } if (!gst_bin_add (GST_BIN (bin), encoder)) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not add %s to bin", encoder_name); gst_object_unref (dtmfsrc); goto error; } if (!gst_element_link_pads (dtmfsrc, "src", encoder, "sink")) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not link the rtpdtmfsrc and %s", encoder_name); goto error; } payloader = gst_element_factory_make (payloader_name, NULL); if (!payloader) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not make %s", payloader_name); goto error; } if (!gst_bin_add (GST_BIN (bin), payloader)) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not add %s to bin", payloader_name); gst_object_unref (dtmfsrc); goto error; } if (!gst_element_link_pads (encoder, 
"src", payloader, "sink")) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not link the %s and %s", encoder_name, payloader_name); goto error; } capsfilter = gst_element_factory_make ("capsfilter", NULL); if (!capsfilter) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not make capsfilter"); goto error; } if (!gst_bin_add (GST_BIN (bin), capsfilter)) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not add capsfilter to bin"); gst_object_unref (capsfilter); goto error; } caps = fs_codec_to_gst_caps (telephony_codec); g_object_set (capsfilter, "caps", caps, NULL); { gchar *str = gst_caps_to_string (caps); GST_DEBUG ("Using caps %s for dtmf", str); g_free (str); } gst_caps_unref (caps); if (!gst_element_link_pads (payloader, "src", capsfilter, "sink")) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not link the %s and its capsfilter", payloader_name); goto error; } pad = gst_element_get_static_pad (capsfilter, "src"); if (!pad) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not get \"src\" pad from capsfilter"); goto error; } ghostpad = gst_ghost_pad_new ("src", pad); if (!ghostpad) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not create a ghostpad for capsfilter src pad for dtmfsrc"); goto error; } if (!gst_element_add_pad (bin, ghostpad)) { g_set_error (error, FS_ERROR, FS_ERROR_CONSTRUCTION, "Could not get \"src\" ghostpad to dtmf sound source bin"); gst_object_unref (pad); goto error; } gst_object_unref (pad); return bin; error: gst_object_unref (bin); return NULL; }
/****************************************************
 *               GstElement vmethods                *
 ****************************************************/

/* Request-pad vmethod of the smart adder.
 *
 * Requests a new sink_%u pad from the internal GstAdder, builds a per-input
 * conversion bin (audioconvert ! audioresample), ghosts its sink pad twice
 * (once on the inner bin, once on the smart adder itself), and links the
 * inner bin's src side to the requested adder pad.  The outer ghost pad is
 * what callers receive; its PadInfos bookkeeping is stored in pads_infos.
 *
 * Returns the new ghost pad, or NULL if the adder refused a pad or the
 * ghost pad could not be added to the element. */
static GstPad *
_request_new_pad (GstElement * element, GstPadTemplate * templ,
    const gchar * name, const GstCaps * caps)
{
  GstPad *audioresample_srcpad, *audioconvert_sinkpad, *tmpghost;
  GstPad *ghost;
  GstElement *audioconvert, *audioresample;
  PadInfos *infos = g_slice_new0 (PadInfos);
  GESSmartAdder *self = GES_SMART_ADDER (element);

  /* Ask the wrapped adder for a fresh sink_%u request pad. */
  infos->adder_pad = gst_element_request_pad (self->adder,
      gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (self->adder),
          "sink_%u"), NULL, caps);

  if (infos->adder_pad == NULL) {
    GST_WARNING_OBJECT (element, "Could not get any pad from GstAdder");
    g_slice_free (PadInfos, infos);
    return NULL;
  }

  infos->self = self;

  /* Per-input conversion bin: audioconvert ! audioresample. */
  infos->bin = gst_bin_new (NULL);
  audioconvert = gst_element_factory_make ("audioconvert", NULL);
  audioresample = gst_element_factory_make ("audioresample", NULL);

  gst_bin_add_many (GST_BIN (infos->bin), audioconvert, audioresample, NULL);
  gst_element_link_many (audioconvert, audioresample, NULL);

  /* Ghost the audioconvert sink pad onto the inner bin. */
  audioconvert_sinkpad = gst_element_get_static_pad (audioconvert, "sink");
  tmpghost = GST_PAD (gst_ghost_pad_new (NULL, audioconvert_sinkpad));
  gst_object_unref (audioconvert_sinkpad);
  gst_pad_set_active (tmpghost, TRUE);
  gst_element_add_pad (GST_ELEMENT (infos->bin), tmpghost);

  gst_bin_add (GST_BIN (self), infos->bin);

  /* Ghost again onto the smart adder: this is the pad handed to callers. */
  ghost = gst_ghost_pad_new (NULL, tmpghost);
  gst_pad_set_active (ghost, TRUE);
  if (!gst_element_add_pad (GST_ELEMENT (self), ghost))
    goto could_not_add;

  /* Hook the inner bin's src side up to the requested adder pad. */
  audioresample_srcpad = gst_element_get_static_pad (audioresample, "src");
  tmpghost = GST_PAD (gst_ghost_pad_new (NULL, audioresample_srcpad));
  gst_object_unref (audioresample_srcpad);
  gst_pad_set_active (tmpghost, TRUE);
  gst_element_add_pad (GST_ELEMENT (infos->bin), tmpghost);
  gst_pad_link (tmpghost, infos->adder_pad);

  /* Record the bookkeeping under the outward-facing ghost pad. */
  LOCK (self);
  g_hash_table_insert (self->pads_infos, ghost, infos);
  UNLOCK (self);

  GST_DEBUG_OBJECT (self, "Returning new pad %" GST_PTR_FORMAT, ghost);

  return ghost;

could_not_add:
  {
    GST_ERROR_OBJECT (self, "could not add pad");
    destroy_pad (infos);
    return NULL;
  }
}
/* Request-pad vmethod of the FFmpeg muxer (GStreamer 0.10 API).
 *
 * Accepts requests against the "video_%d" or "audio_%d" sink templates,
 * creates a collect-pad for the new stream, installs a wrapper event
 * function (chaining up to the collectpads one), and creates the matching
 * libav AVStream with placeholder codec parameters that are filled in
 * during caps negotiation.
 *
 * Returns the new sink pad, or NULL for an unknown template.  Requests are
 * rejected once the muxer has been opened (ffmpegmux->opened). */
static GstPad *
gst_ffmpegmux_request_new_pad (GstElement * element,
    GstPadTemplate * templ, const gchar * name)
{
  GstFFMpegMux *ffmpegmux = (GstFFMpegMux *) element;
  GstElementClass *klass = GST_ELEMENT_GET_CLASS (element);
  GstFFMpegMuxPad *collect_pad;
  gchar *padname;
  GstPad *pad;
  AVStream *st;
  enum CodecType type;
  gint bitrate = 0, framesize = 0;

  g_return_val_if_fail (templ != NULL, NULL);
  g_return_val_if_fail (templ->direction == GST_PAD_SINK, NULL);
  g_return_val_if_fail (ffmpegmux->opened == FALSE, NULL);

  /* figure out a name that *we* like */
  if (templ == gst_element_class_get_pad_template (klass, "video_%d")) {
    padname = g_strdup_printf ("video_%d", ffmpegmux->videopads++);
    type = CODEC_TYPE_VIDEO;
    bitrate = 64 * 1024;
    framesize = 1152;
  } else if (templ == gst_element_class_get_pad_template (klass, "audio_%d")) {
    padname = g_strdup_printf ("audio_%d", ffmpegmux->audiopads++);
    type = CODEC_TYPE_AUDIO;
    bitrate = 285 * 1024;
  } else {
    g_warning ("ffmux: unknown pad template!");
    return NULL;
  }

  /* create pad */
  pad = gst_pad_new_from_template (templ, padname);
  collect_pad = (GstFFMpegMuxPad *)
      gst_collect_pads_add_pad (ffmpegmux->collect, pad,
      sizeof (GstFFMpegMuxPad));
  /* stream index inside the libav muxing context */
  collect_pad->padnum = ffmpegmux->context->nb_streams;

  /* small hack to put our own event pad function and chain up to collect
   * pad */
  ffmpegmux->event_function = GST_PAD_EVENTFUNC (pad);
  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_ffmpegmux_sink_event));
  gst_pad_set_setcaps_function (pad, GST_DEBUG_FUNCPTR (gst_ffmpegmux_setcaps));
  gst_element_add_pad (element, pad);

  /* AVStream needs to be created */
  st = av_new_stream (ffmpegmux->context, collect_pad->padnum);
  /* NOTE(review): av_new_stream() can return NULL on allocation failure and
   * st is dereferenced unchecked below — confirm whether this can happen in
   * practice and add a guard if so. */
  st->codec->codec_type = type;
  st->codec->codec_id = CODEC_ID_NONE;  /* this is a check afterwards */
  st->stream_copy = 1;          /* we're not the actual encoder */
  st->codec->bit_rate = bitrate;
  st->codec->frame_size = framesize;
  /* we fill in codec during capsnego */

  /* we love debug output (c) (tm) (r) */
  GST_DEBUG ("Created %s pad for ffmux_%s element",
      padname, ((GstFFMpegMuxClass *) klass)->in_plugin->name);
  g_free (padname);

  return pad;
}
/*
 * Creates wvparse's source pad from the first usable metadata sub-block in
 * @buf: either a correction-stream pad ("wvcsrc") or the regular "src" pad,
 * whose caps (rate/channels/width) are derived by opening the frame with
 * libwavpack.  Also records rate, channels and total sample count on the
 * parser.
 *
 * Returns TRUE and adds the (activated) pad to the element on success,
 * FALSE if no caps could be derived or opening the frame failed.
 *
 * Fix: the WavpackStreamReader was leaked when WavpackOpenFileInputEx()
 * failed (early return without g_free).
 */
static gboolean
gst_wavpack_parse_create_src_pad (GstWavpackParse * wvparse, GstBuffer * buf,
    WavpackHeader * header)
{
  GstWavpackMetadata meta;
  GstCaps *caps = NULL;
  guchar *bufptr;

  g_assert (wvparse->srcpad == NULL);

  bufptr = GST_BUFFER_DATA (buf) + sizeof (WavpackHeader);

  /* Walk the metadata sub-blocks until one yields usable caps. */
  while (gst_wavpack_read_metadata (&meta, GST_BUFFER_DATA (buf), &bufptr)) {
    switch (meta.id) {
      case ID_WVC_BITSTREAM:{
        caps = gst_caps_new_simple ("audio/x-wavpack-correction",
            "framed", G_TYPE_BOOLEAN, TRUE, NULL);
        wvparse->srcpad =
            gst_pad_new_from_template (gst_element_class_get_pad_template
            (GST_ELEMENT_GET_CLASS (wvparse), "wvcsrc"), "wvcsrc");
        break;
      }
      case ID_WV_BITSTREAM:
      case ID_WVX_BITSTREAM:{
        WavpackStreamReader *stream_reader = gst_wavpack_stream_reader_new ();
        WavpackContext *wpc;
        gchar error_msg[80];
        read_id rid;
        gint channel_mask;

        rid.buffer = GST_BUFFER_DATA (buf);
        rid.length = GST_BUFFER_SIZE (buf);
        rid.position = 0;

        wpc =
            WavpackOpenFileInputEx (stream_reader, &rid, NULL, error_msg, 0, 0);
        if (!wpc) {
          /* FIX: the stream reader was leaked on this early return. */
          g_free (stream_reader);
          return FALSE;
        }

        wvparse->samplerate = WavpackGetSampleRate (wpc);
        wvparse->channels = WavpackGetNumChannels (wpc);
        /* 0xffffffff marks "unknown length" in the wavpack header */
        wvparse->total_samples = (header->total_samples == 0xffffffff) ?
            G_GINT64_CONSTANT (-1) : header->total_samples;

        caps = gst_caps_new_simple ("audio/x-wavpack",
            "width", G_TYPE_INT, WavpackGetBitsPerSample (wpc),
            "channels", G_TYPE_INT, wvparse->channels,
            "rate", G_TYPE_INT, wvparse->samplerate,
            "framed", G_TYPE_BOOLEAN, TRUE, NULL);

#ifdef WAVPACK_OLD_API
        channel_mask = wpc->config.channel_mask;
#else
        channel_mask = WavpackGetChannelMask (wpc);
#endif
        if (channel_mask == 0)
          channel_mask =
              gst_wavpack_get_default_channel_mask (wvparse->channels);

        if (channel_mask != 0) {
          if (!gst_wavpack_set_channel_layout (caps, channel_mask)) {
            GST_WARNING_OBJECT (wvparse, "Failed to set channel layout");
            gst_caps_unref (caps);
            caps = NULL;
            WavpackCloseFile (wpc);
            g_free (stream_reader);
            break;
          }
        }

        wvparse->srcpad =
            gst_pad_new_from_template (gst_element_class_get_pad_template
            (GST_ELEMENT_GET_CLASS (wvparse), "src"), "src");
        WavpackCloseFile (wpc);
        g_free (stream_reader);
        break;
      }
      default:{
        GST_LOG_OBJECT (wvparse, "unhandled ID: 0x%02x", meta.id);
        break;
      }
    }
    if (caps != NULL)
      break;
  }

  if (caps == NULL || wvparse->srcpad == NULL)
    return FALSE;

  GST_DEBUG_OBJECT (wvparse, "Added src pad with caps %" GST_PTR_FORMAT, caps);

  gst_pad_set_query_function (wvparse->srcpad,
      GST_DEBUG_FUNCPTR (gst_wavpack_parse_src_query));
  gst_pad_set_query_type_function (wvparse->srcpad,
      GST_DEBUG_FUNCPTR (gst_wavpack_parse_get_src_query_types));
  gst_pad_set_event_function (wvparse->srcpad,
      GST_DEBUG_FUNCPTR (gst_wavpack_parse_src_event));

  gst_pad_set_caps (wvparse->srcpad, caps);
  gst_caps_unref (caps);
  gst_pad_use_fixed_caps (wvparse->srcpad);

  /* keep our own ref; gst_element_add_pad takes ownership of one */
  gst_object_ref (wvparse->srcpad);
  gst_pad_set_active (wvparse->srcpad, TRUE);
  gst_element_add_pad (GST_ELEMENT (wvparse), wvparse->srcpad);
  gst_element_no_more_pads (GST_ELEMENT (wvparse));

  return TRUE;
}
/*
 * "pad-added" callback for the internal decoder.
 *
 * Connects the first video and/or audio stream to a new appsink (wired to
 * the nlesrc buffer/EOS callbacks) and, the first time a stream of that
 * kind appears, activates and exposes the matching source pad on the
 * GstNleSource.  Audio is only linked when requested and (for the current
 * queue item) when the playback rate is 1.0.
 *
 * Fix: the caps reference returned by gst_pad_get_caps_reffed() was never
 * released.
 */
static void
gst_nle_source_pad_added_cb (GstElement * element, GstPad * pad,
    GstNleSource * nlesrc)
{
  GstCaps *caps;
  const GstStructure *s;
  const gchar *mime;
  GstElement *appsink = NULL;
  GstPad *sink_pad;
  GstAppSinkCallbacks appsink_cbs;
  GstNleSrcItem *item;

  item = (GstNleSrcItem *) g_list_nth_data (nlesrc->queue, nlesrc->index);

  caps = gst_pad_get_caps_reffed (pad);
  s = gst_caps_get_structure (caps, 0);
  mime = gst_structure_get_name (s);
  GST_DEBUG_OBJECT (nlesrc, "Found mime type: %s", mime);

  if (g_strrstr (mime, "video") && !nlesrc->video_linked) {
    /* First video stream: route it into an appsink. */
    appsink = gst_element_factory_make ("appsink", NULL);
    memset (&appsink_cbs, 0, sizeof (appsink_cbs));
    appsink_cbs.eos = gst_nle_source_on_video_eos;
    appsink_cbs.new_preroll = gst_nle_source_on_preroll_buffer;
    appsink_cbs.new_buffer = gst_nle_source_on_video_buffer;
    nlesrc->video_linked = TRUE;
    if (!nlesrc->video_srcpad_added) {
      gst_pad_set_active (nlesrc->video_srcpad, TRUE);
      gst_element_add_pad (GST_ELEMENT (nlesrc),
          gst_object_ref (nlesrc->video_srcpad));
      nlesrc->video_srcpad_added = TRUE;
    }
    gst_pad_add_event_probe (GST_BASE_SINK_PAD (GST_BASE_SINK (appsink)),
        (GCallback) gst_nle_source_video_pad_probe_cb, nlesrc);
    nlesrc->video_eos = FALSE;
  } else if (g_strrstr (mime, "audio") && nlesrc->with_audio &&
      !nlesrc->audio_linked && (item ? item->rate == 1.0 : TRUE)) {
    /* First audio stream (only when audio is wanted and rate is 1.0). */
    appsink = gst_element_factory_make ("appsink", NULL);
    memset (&appsink_cbs, 0, sizeof (appsink_cbs));
    appsink_cbs.eos = gst_nle_source_on_audio_eos;
    appsink_cbs.new_preroll = gst_nle_source_on_preroll_buffer;
    appsink_cbs.new_buffer = gst_nle_source_on_audio_buffer;
    nlesrc->audio_linked = TRUE;
    if (!nlesrc->audio_srcpad_added) {
      gst_pad_set_active (nlesrc->audio_srcpad, TRUE);
      gst_element_add_pad (GST_ELEMENT (nlesrc),
          gst_object_ref (nlesrc->audio_srcpad));
      nlesrc->audio_srcpad_added = TRUE;
    }
    gst_pad_add_event_probe (GST_BASE_SINK_PAD (GST_BASE_SINK (appsink)),
        (GCallback) gst_nle_source_audio_pad_probe_cb, nlesrc);
    nlesrc->audio_eos = FALSE;
  }

  /* FIX: release the caps reference.  (s and mime borrow from it, so this
   * must come after their last use.) */
  gst_caps_unref (caps);

  if (appsink != NULL) {
    g_object_set (appsink, "sync", FALSE, NULL);
    gst_app_sink_set_callbacks (GST_APP_SINK (appsink), &appsink_cbs, nlesrc,
        NULL);
    gst_bin_add (GST_BIN (nlesrc->decoder), appsink);
    sink_pad = gst_element_get_static_pad (appsink, "sink");
    gst_pad_link (pad, sink_pad);
    gst_element_sync_state_with_parent (appsink);
    gst_object_unref (sink_pad);
  }
}
// Builds the audio half of the pipeline (GStreamer 0.10 API variant).
// Returns false if any element could not be created; true on success.
bool GstEnginePipeline::Init() {
  // Here we create all the parts of the gstreamer pipeline - from the source
  // to the sink. The parts of the pipeline are split up into bins:
  //   uri decode bin -> audio bin
  // The uri decode bin is a gstreamer builtin that automatically picks the
  // right type of source and decoder for the URI.

  // The audio bin gets created here and contains:
  //   queue ! audioconvert ! <caps32>
  //         ! ( rgvolume ! rglimiter ! audioconvert2 ) ! tee
  // rgvolume and rglimiter are only created when replaygain is enabled.

  // After the tee the pipeline splits.  One split is converted to 16-bit int
  // samples for the scope, the other is kept as float32 and sent to the
  // speaker.
  //   tee1 ! probe_queue ! probe_converter ! <caps16> ! probe_sink
  //   tee2 ! audio_queue ! equalizer_preamp ! equalizer ! volume ! audioscale
  //        ! convert ! audiosink

  // Audio bin
  audiobin_ = gst_bin_new("audiobin");
  gst_bin_add(GST_BIN(pipeline_), audiobin_);

  // Create the sink
  if (!(audiosink_ = engine_->CreateElement(sink_, audiobin_))) return false;

  if (GstEngine::
          DoesThisSinkSupportChangingTheOutputDeviceToAUserEditableString(
              sink_) &&
      !device_.isEmpty())
    g_object_set(G_OBJECT(audiosink_), "device", device_.toUtf8().constData(),
                 nullptr);

  // Create all the other elements
  GstElement* tee, *probe_queue, *probe_converter, *probe_sink, *audio_queue,
      *convert;

  queue_ = engine_->CreateElement("queue2", audiobin_);
  audioconvert_ = engine_->CreateElement("audioconvert", audiobin_);
  tee = engine_->CreateElement("tee", audiobin_);
  probe_queue = engine_->CreateElement("queue", audiobin_);
  probe_converter = engine_->CreateElement("audioconvert", audiobin_);
  probe_sink = engine_->CreateElement("fakesink", audiobin_);
  audio_queue = engine_->CreateElement("queue", audiobin_);
  equalizer_preamp_ = engine_->CreateElement("volume", audiobin_);
  equalizer_ = engine_->CreateElement("equalizer-nbands", audiobin_);
  stereo_panorama_ = engine_->CreateElement("audiopanorama", audiobin_);
  volume_ = engine_->CreateElement("volume", audiobin_);
  audioscale_ = engine_->CreateElement("audioresample", audiobin_);
  convert = engine_->CreateElement("audioconvert", audiobin_);

  if (!queue_ || !audioconvert_ || !tee || !probe_queue || !probe_converter ||
      !probe_sink || !audio_queue || !equalizer_preamp_ || !equalizer_ ||
      !stereo_panorama_ || !volume_ || !audioscale_ || !convert) {
    return false;
  }

  // Create the replaygain elements if it's enabled.  event_probe is the
  // audioconvert element we attach the probe to, which will change depending
  // on whether replaygain is enabled.  convert_sink is the element after the
  // first audioconvert, which again will change.
  GstElement* event_probe = audioconvert_;
  GstElement* convert_sink = tee;

  if (rg_enabled_) {
    rgvolume_ = engine_->CreateElement("rgvolume", audiobin_);
    rglimiter_ = engine_->CreateElement("rglimiter", audiobin_);
    audioconvert2_ = engine_->CreateElement("audioconvert", audiobin_);
    event_probe = audioconvert2_;
    convert_sink = rgvolume_;

    if (!rgvolume_ || !rglimiter_ || !audioconvert2_) {
      return false;
    }

    // Set replaygain settings
    g_object_set(G_OBJECT(rgvolume_), "album-mode", rg_mode_, nullptr);
    g_object_set(G_OBJECT(rgvolume_), "pre-amp", double(rg_preamp_), nullptr);
    g_object_set(G_OBJECT(rglimiter_), "enabled", int(rg_compression_),
                 nullptr);
  }

  // Create a pad on the outside of the audiobin and connect it to the pad of
  // the first element.
  GstPad* pad = gst_element_get_static_pad(queue_, "sink");
  gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad));
  gst_object_unref(pad);

  // Add a data probe on the src pad of the audioconvert element for our scope.
  // We do it here because we want pre-equalized and pre-volume samples
  // so that our visualization are not be affected by them.
  pad = gst_element_get_static_pad(event_probe, "src");
  gst_pad_add_event_probe(pad, G_CALLBACK(EventHandoffCallback), this);
  gst_object_unref(pad);

  // Configure the fakesink properly
  g_object_set(G_OBJECT(probe_sink), "sync", TRUE, nullptr);

  // Set the equalizer bands
  g_object_set(G_OBJECT(equalizer_), "num-bands", 10, nullptr);

  int last_band_frequency = 0;
  for (int i = 0; i < kEqBandCount; ++i) {
    GstObject* band =
        gst_child_proxy_get_child_by_index(GST_CHILD_PROXY(equalizer_), i);

    const float frequency = kEqBandFrequencies[i];
    const float bandwidth = frequency - last_band_frequency;
    last_band_frequency = frequency;

    g_object_set(G_OBJECT(band), "freq", frequency, "bandwidth", bandwidth,
                 "gain", 0.0f, nullptr);
    g_object_unref(G_OBJECT(band));
  }

  // Set the stereo balance.
  g_object_set(G_OBJECT(stereo_panorama_), "panorama", stereo_balance_,
               nullptr);

  // Set the buffer duration.  We set this on this queue instead of the
  // decode bin (in ReplaceDecodeBin()) because setting it on the decode bin
  // only affects network sources.
  // Disable the default buffer and byte limits, so we only buffer based on
  // time.
  g_object_set(G_OBJECT(queue_), "max-size-buffers", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-bytes", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-time", buffer_duration_nanosec_,
               nullptr);
  g_object_set(G_OBJECT(queue_), "low-percent", 1, nullptr);

  if (buffer_duration_nanosec_ > 0) {
    g_object_set(G_OBJECT(queue_), "use-buffering", true, nullptr);
  }

  gst_element_link(queue_, audioconvert_);

  // Create the caps to put in each path in the tee.  The scope path gets
  // 16-bit ints and the audiosink path gets float32.
  GstCaps* caps16 = gst_caps_new_simple("audio/x-raw-int", "width", G_TYPE_INT,
                                        16, "signed", G_TYPE_BOOLEAN, true,
                                        nullptr);
  GstCaps* caps32 = gst_caps_new_simple("audio/x-raw-float", "width",
                                        G_TYPE_INT, 32, nullptr);
  if (mono_playback_) {
    gst_caps_set_simple(caps32, "channels", G_TYPE_INT, 1, nullptr);
  }

  // Link the elements with special caps
  gst_element_link_filtered(probe_converter, probe_sink, caps16);
  gst_element_link_filtered(audioconvert_, convert_sink, caps32);
  gst_caps_unref(caps16);
  gst_caps_unref(caps32);

  // Link the outputs of tee to the queues on each path.
  // NOTE(review): gst_element_get_request_pad() and
  // gst_element_get_static_pad() both return references that are never
  // released here — looks like a small per-pipeline ref leak; confirm and
  // unref after linking.
  gst_pad_link(gst_element_get_request_pad(tee, "src%d"),
               gst_element_get_static_pad(probe_queue, "sink"));
  gst_pad_link(gst_element_get_request_pad(tee, "src%d"),
               gst_element_get_static_pad(audio_queue, "sink"));

  // Link replaygain elements if enabled.
  if (rg_enabled_) {
    gst_element_link_many(rgvolume_, rglimiter_, audioconvert2_, tee, nullptr);
  }

  // Link everything else.
  gst_element_link(probe_queue, probe_converter);
  gst_element_link_many(audio_queue, equalizer_preamp_, equalizer_,
                        stereo_panorama_, volume_, audioscale_, convert,
                        audiosink_, nullptr);

  // Add probes and handlers.
  // NOTE(review): the static pad and the bus references obtained below are
  // also never unreffed — same ref-leak pattern as the tee links above.
  gst_pad_add_buffer_probe(gst_element_get_static_pad(probe_converter, "src"),
                           G_CALLBACK(HandoffCallback), this);
  gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                           BusCallbackSync, this);
  bus_cb_id_ = gst_bus_add_watch(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                                 BusCallback, this);

  MaybeLinkDecodeToAudio();

  return true;
}
bool GstEnginePipeline::Init() { // Here we create all the parts of the gstreamer pipeline - from the source // to the sink. The parts of the pipeline are split up into bins: // uri decode bin -> audio bin // The uri decode bin is a gstreamer builtin that automatically picks the // right type of source and decoder for the URI. // The audio bin gets created here and contains: // queue ! audioconvert ! <caps32> // ! ( rgvolume ! rglimiter ! audioconvert2 ) ! tee // rgvolume and rglimiter are only created when replaygain is enabled. // After the tee the pipeline splits. One split is converted to 16-bit int // samples for the scope, the other is kept as float32 and sent to the // speaker. // tee1 ! probe_queue ! probe_converter ! <caps16> ! probe_sink // tee2 ! audio_queue ! equalizer_preamp ! equalizer ! volume ! audioscale // ! convert ! audiosink gst_segment_init(&last_decodebin_segment_, GST_FORMAT_TIME); // Audio bin audiobin_ = gst_bin_new("audiobin"); gst_bin_add(GST_BIN(pipeline_), audiobin_); // Create the sink if (!(audiosink_ = engine_->CreateElement(sink_, audiobin_))) return false; if (g_object_class_find_property(G_OBJECT_GET_CLASS(audiosink_), "device") && !device_.toString().isEmpty()) { switch (device_.type()) { case QVariant::Int: g_object_set(G_OBJECT(audiosink_), "device", device_.toInt(), nullptr); break; case QVariant::String: g_object_set(G_OBJECT(audiosink_), "device", device_.toString().toUtf8().constData(), nullptr); break; #ifdef Q_OS_WIN32 case QVariant::ByteArray: { GUID guid = QUuid(device_.toByteArray()); g_object_set(G_OBJECT(audiosink_), "device", &guid, nullptr); break; } #endif // Q_OS_WIN32 default: qLog(Warning) << "Unknown device type" << device_; break; } } // Create all the other elements GstElement* tee, *probe_queue, *probe_converter, *probe_sink, *audio_queue, *convert; queue_ = engine_->CreateElement("queue2", audiobin_); audioconvert_ = engine_->CreateElement("audioconvert", audiobin_); tee = engine_->CreateElement("tee", 
audiobin_); probe_queue = engine_->CreateElement("queue", audiobin_); probe_converter = engine_->CreateElement("audioconvert", audiobin_); probe_sink = engine_->CreateElement("fakesink", audiobin_); audio_queue = engine_->CreateElement("queue", audiobin_); equalizer_preamp_ = engine_->CreateElement("volume", audiobin_); equalizer_ = engine_->CreateElement("equalizer-nbands", audiobin_); stereo_panorama_ = engine_->CreateElement("audiopanorama", audiobin_); volume_ = engine_->CreateElement("volume", audiobin_); audioscale_ = engine_->CreateElement("audioresample", audiobin_); convert = engine_->CreateElement("audioconvert", audiobin_); if (!queue_ || !audioconvert_ || !tee || !probe_queue || !probe_converter || !probe_sink || !audio_queue || !equalizer_preamp_ || !equalizer_ || !stereo_panorama_ || !volume_ || !audioscale_ || !convert) { return false; } // Create the replaygain elements if it's enabled. event_probe is the // audioconvert element we attach the probe to, which will change depending // on whether replaygain is enabled. convert_sink is the element after the // first audioconvert, which again will change. GstElement* event_probe = audioconvert_; GstElement* convert_sink = tee; if (rg_enabled_) { rgvolume_ = engine_->CreateElement("rgvolume", audiobin_); rglimiter_ = engine_->CreateElement("rglimiter", audiobin_); audioconvert2_ = engine_->CreateElement("audioconvert", audiobin_); event_probe = audioconvert2_; convert_sink = rgvolume_; if (!rgvolume_ || !rglimiter_ || !audioconvert2_) { return false; } // Set replaygain settings g_object_set(G_OBJECT(rgvolume_), "album-mode", rg_mode_, nullptr); g_object_set(G_OBJECT(rgvolume_), "pre-amp", double(rg_preamp_), nullptr); g_object_set(G_OBJECT(rglimiter_), "enabled", int(rg_compression_), nullptr); } // Create a pad on the outside of the audiobin and connect it to the pad of // the first element. 
GstPad* pad = gst_element_get_static_pad(queue_, "sink"); gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad)); gst_object_unref(pad); // Add a data probe on the src pad of the audioconvert element for our scope. // We do it here because we want pre-equalized and pre-volume samples // so that our visualization are not be affected by them. pad = gst_element_get_static_pad(event_probe, "src"); gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_EVENT_UPSTREAM, &EventHandoffCallback, this, NULL); gst_object_unref(pad); // Configure the fakesink properly g_object_set(G_OBJECT(probe_sink), "sync", TRUE, nullptr); // Setting the equalizer bands: // // GStreamer's GstIirEqualizerNBands sets up shelve filters for the first and // last bands as corner cases. That was causing the "inverted slider" bug. // As a workaround, we create two dummy bands at both ends of the spectrum. // This causes the actual first and last adjustable bands to be // implemented using band-pass filters. g_object_set(G_OBJECT(equalizer_), "num-bands", 10 + 2, nullptr); // Dummy first band (bandwidth 0, cutting below 20Hz): GstObject* first_band = GST_OBJECT( gst_child_proxy_get_child_by_index(GST_CHILD_PROXY(equalizer_), 0)); g_object_set(G_OBJECT(first_band), "freq", 20.0, "bandwidth", 0, "gain", 0.0f, nullptr); g_object_unref(G_OBJECT(first_band)); // Dummy last band (bandwidth 0, cutting over 20KHz): GstObject* last_band = GST_OBJECT(gst_child_proxy_get_child_by_index( GST_CHILD_PROXY(equalizer_), kEqBandCount + 1)); g_object_set(G_OBJECT(last_band), "freq", 20000.0, "bandwidth", 0, "gain", 0.0f, nullptr); g_object_unref(G_OBJECT(last_band)); int last_band_frequency = 0; for (int i = 0; i < kEqBandCount; ++i) { const int index_in_eq = i + 1; GstObject* band = GST_OBJECT(gst_child_proxy_get_child_by_index( GST_CHILD_PROXY(equalizer_), index_in_eq)); const float frequency = kEqBandFrequencies[i]; const float bandwidth = frequency - last_band_frequency; last_band_frequency = frequency; 
g_object_set(G_OBJECT(band), "freq", frequency, "bandwidth", bandwidth, "gain", 0.0f, nullptr); g_object_unref(G_OBJECT(band)); } // Set the stereo balance. g_object_set(G_OBJECT(stereo_panorama_), "panorama", stereo_balance_, nullptr); // Set the buffer duration. We set this on this queue instead of the // decode bin (in ReplaceDecodeBin()) because setting it on the decode bin // only affects network sources. // Disable the default buffer and byte limits, so we only buffer based on // time. g_object_set(G_OBJECT(queue_), "max-size-buffers", 0, nullptr); g_object_set(G_OBJECT(queue_), "max-size-bytes", 0, nullptr); g_object_set(G_OBJECT(queue_), "max-size-time", buffer_duration_nanosec_, nullptr); g_object_set(G_OBJECT(queue_), "low-percent", buffer_min_fill_, nullptr); if (buffer_duration_nanosec_ > 0) { g_object_set(G_OBJECT(queue_), "use-buffering", true, nullptr); } gst_element_link_many(queue_, audioconvert_, convert_sink, nullptr); // Link the elements with special caps // The scope path through the tee gets 16-bit ints. GstCaps* caps16 = gst_caps_new_simple("audio/x-raw", "format", G_TYPE_STRING, "S16LE", NULL); gst_element_link_filtered(probe_converter, probe_sink, caps16); gst_caps_unref(caps16); // Link the outputs of tee to the queues on each path. gst_pad_link(gst_element_get_request_pad(tee, "src_%u"), gst_element_get_static_pad(probe_queue, "sink")); gst_pad_link(gst_element_get_request_pad(tee, "src_%u"), gst_element_get_static_pad(audio_queue, "sink")); // Link replaygain elements if enabled. if (rg_enabled_) { gst_element_link_many(rgvolume_, rglimiter_, audioconvert2_, tee, nullptr); } // Link everything else. 
gst_element_link(probe_queue, probe_converter); gst_element_link_many(audio_queue, equalizer_preamp_, equalizer_, stereo_panorama_, volume_, audioscale_, convert, nullptr); // add caps for fixed sample rate and mono, but only if requested if (sample_rate_ != GstEngine::kAutoSampleRate && sample_rate_ > 0) { GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, sample_rate_, nullptr); if (mono_playback_) { gst_caps_set_simple(caps, "channels", G_TYPE_INT, 1, nullptr); } gst_element_link_filtered(convert, audiosink_, caps); gst_caps_unref(caps); } else if (mono_playback_) { GstCaps* capsmono = gst_caps_new_simple("audio/x-raw", "channels", G_TYPE_INT, 1, nullptr); gst_element_link_filtered(convert, audiosink_, capsmono); gst_caps_unref(capsmono); } else { gst_element_link(convert, audiosink_); } // Add probes and handlers. gst_pad_add_probe(gst_element_get_static_pad(probe_converter, "src"), GST_PAD_PROBE_TYPE_BUFFER, HandoffCallback, this, nullptr); gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)), BusCallbackSync, this, nullptr); bus_cb_id_ = gst_bus_add_watch(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)), BusCallback, this); MaybeLinkDecodeToAudio(); return true; }
/* Instance initialiser: obtains the TI OMX JPEG encoder component,
 * configures its input/output ports with the default picture geometry and
 * buffer counts, seeds the private state, and creates the GStreamer
 * sink/src pads from the class pad templates. */
static void
gst_goo_encjpeg_init (GstGooEncJpeg* self, GstGooEncJpegClass* klass)
{
	OMX_PARAM_PORTDEFINITIONTYPE* port_def;
	GstGooEncJpegPrivate* priv;
	GstPadTemplate* templ;

	GST_DEBUG ("");

	/* Grab the OMX component for the JPEG encoder. */
	self->factory = goo_ti_component_factory_get_instance ();
	self->component = goo_component_factory_get_component (self->factory,
							       GOO_TI_JPEG_ENCODER);

	/* Default encoding quality. */
	GOO_TI_JPEGENC_GET_PARAM (self->component)->nQFactor = QUALITY_DEFAULT;

	/* Input port: default geometry, colour format and buffer count. */
	self->inport = goo_component_get_port (self->component, "input0");
	g_assert (self->inport != NULL);
	port_def = GOO_PORT_GET_DEFINITION (self->inport);
	port_def->format.image.nFrameWidth = WIDTH_DEFAULT;
	port_def->format.image.nFrameHeight = HEIGHT_DEFAULT;
	port_def->format.image.eColorFormat = COLOR_FORMAT_DEFAULT;
	g_object_set (self->inport, "buffercount",
		      NUM_INPUT_BUFFERS_DEFAULT, NULL);

	/* Output port: mirrors the input configuration. */
	self->outport = goo_component_get_port (self->component, "output0");
	g_assert (self->outport != NULL);
	port_def = GOO_PORT_GET_DEFINITION (self->outport);
	port_def->format.image.nFrameWidth = WIDTH_DEFAULT;
	port_def->format.image.nFrameHeight = HEIGHT_DEFAULT;
	port_def->format.image.eColorFormat = COLOR_FORMAT_DEFAULT;
	g_object_set (self->outport, "buffercount",
		      NUM_INPUT_BUFFERS_DEFAULT, NULL);

	/* Private bookkeeping defaults. */
	priv = GST_GOO_ENCJPEG_GET_PRIVATE (self);
	priv->num_input_buffers = NUM_INPUT_BUFFERS_DEFAULT;
	priv->num_output_buffers = NUM_OUTPUT_BUFFERS_DEFAULT;
	priv->incount = 0;
	priv->outcount = 0;
	priv->quality = QUALITY_DEFAULT;
	priv->colorformat = COLOR_FORMAT_DEFAULT;
	priv->width = WIDTH_DEFAULT;
	priv->height = HEIGHT_DEFAULT;
	priv->omxbufsiz = 0;

	/* GStreamer sink pad. */
	templ = gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass),
						    "sink");
	g_return_if_fail (templ != NULL);
	self->sinkpad = gst_pad_new_from_template (templ, "sink");
	gst_pad_set_chain_function (self->sinkpad,
				    GST_DEBUG_FUNCPTR (gst_goo_encjpeg_chain));
	gst_pad_set_setcaps_function (self->sinkpad,
				      GST_DEBUG_FUNCPTR (gst_goo_encjpeg_setcaps));
	/* gst_pad_set_bufferalloc_function (self->sinkpad,
	   GST_DEBUG_FUNCPTR (gst_goo_encjpeg_buffer_alloc)); */
	gst_element_add_pad (GST_ELEMENT (self), self->sinkpad);

	/* GStreamer src pad. */
	templ = gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass),
						    "src");
	g_return_if_fail (templ != NULL);
	self->srcpad = gst_pad_new_from_template (templ, "src");
	gst_element_add_pad (GST_ELEMENT (self), self->srcpad);

	self->adapter = gst_goo_adapter_new ();

	/* Cross-reference the GStreamer element and the OMX component. */
	g_object_set_data (G_OBJECT (self->component), "gst", self);
	g_object_set_data (G_OBJECT (self), "goo", self->component);

	return;
}
void EntradaFitxer::crea(int k, GstElement *pipeline, QString nom_fitxer) { //Elements de font d'entrada de fitxer QString sbin("bin_font%1"), ssource("source_%1"), sdec("decoder%1"), svolumen_m("volumen_mix%1"), squeue("audio_queue%1"); QString saconv("audio_conv_%1"), sabin("bin_audio_%1"), sconv("video_conv_%1"), ssink("video_sink_%1"); //Creem entrada de fitxer i el decodebin, els afegim al pipeline i els linkem. bin_font = gst_bin_new ((char*)sbin.arg(k).toStdString().c_str()); source = gst_element_factory_make ("filesrc", (char*)ssource.arg(k).toStdString().c_str()); dec = gst_element_factory_make ("decodebin2", (char*)sdec.arg(k).toStdString().c_str()); //Comprovem que s'han pogut crear tots els elements d'entrada if(!bin_font || !source || !dec){ g_printerr ("Un dels elements de l'entrada de fitxer no s'ha pogut crear. Sortint.\n"); } g_signal_connect (dec, "new-decoded-pad", G_CALLBACK (cb_newpad_audio), this); g_signal_connect (dec, "new-decoded-pad", G_CALLBACK (cb_newpad_video), this); gst_bin_add_many (GST_BIN (bin_font), source, dec, NULL); gst_element_link (source, dec); //Creem l'entrada d'àudio a.bin = gst_bin_new ((char*)sabin.arg(k).toStdString().c_str()); conv_audio = gst_element_factory_make("audioconvert", (char*)saconv.arg(k).toStdString().c_str()); audiopad = gst_element_get_static_pad (conv_audio, "sink"); a.queue_mix= gst_element_factory_make("queue2", (char*)squeue.arg(k).toStdString().c_str()); a.volume_mix = gst_element_factory_make("volume", (char*)svolumen_m.arg(k).toStdString().c_str()); //Comprovem que s'han pogut crear tots els elements d'entrada if(!a.bin || !conv_audio || !audiopad || !a.queue_mix || !a.volume_mix){ g_printerr ("Un dels elements de l'entrada de fitxer d'àudio no s'ha pogut crear. 
Sortint.\n"); } gst_bin_add_many (GST_BIN (a.bin), conv_audio, a.queue_mix, a.volume_mix, NULL); gst_element_link_many (conv_audio, a.queue_mix, a.volume_mix, NULL); gst_element_add_pad (a.bin, gst_ghost_pad_new ("sink", audiopad)); gst_object_unref (audiopad); gst_bin_add (GST_BIN (bin_font), a.bin); //Creem l'entrada de vídeo v.creacomuns(k,"video_fitxer"); v.creatransformadors(k); conv_video = gst_element_factory_make ("ffmpegcolorspace", (char*)sconv.arg(k).toStdString().c_str()); videopad = gst_element_get_static_pad (conv_video, "sink"); v.sink = gst_element_factory_make ("xvimagesink", (char*)ssink.arg(k).toStdString().c_str()); //Comprovem que s'han pogut crear tots els elements d'entrada if( !videopad || !conv_video || !v.sink){ g_printerr ("Un dels elements de l'entrada de fitxer de vídeo no s'ha pogut crear. Sortint.\n"); } gst_bin_add_many (GST_BIN (v.bin), conv_video, v.tee, v.queue, v.scale, v.sink, v.queue_mix, v.color_conv, v.scale_mix, NULL); gst_element_link_many (conv_video, v.tee, v.queue, v.scale, v.sink, NULL); gst_element_add_pad (v.bin, gst_ghost_pad_new ("sink", videopad)); gst_object_unref (videopad); gst_bin_add (GST_BIN (bin_font), v.bin); //Seleccionem el fitxer d'entrada const char *c_nom_fitxer = nom_fitxer.toStdString().c_str(); g_object_set (G_OBJECT (source), "location", c_nom_fitxer, NULL); gst_element_set_state(v.sink, GST_STATE_READY); //Afegim el bin_video_pgm al pipeline gst_bin_add (GST_BIN (pipeline),bin_font); }
static GstMultipartPad * gst_multipart_find_pad_by_mime (GstMultipartDemux * demux, gchar * mime, gboolean * created) { GSList *walk; walk = demux->srcpads; while (walk) { GstMultipartPad *pad = (GstMultipartPad *) walk->data; if (!strcmp (pad->mime, mime)) { if (created) { *created = FALSE; } return pad; } walk = walk->next; } /* pad not found, create it */ { GstPad *pad; GstMultipartPad *mppad; gchar *name; const gchar *capsname; GstCaps *caps; mppad = g_new0 (GstMultipartPad, 1); GST_DEBUG_OBJECT (demux, "creating pad with mime: %s", mime); name = g_strdup_printf ("src_%d", demux->numpads); pad = gst_pad_new_from_static_template (&multipart_demux_src_template_factory, name); g_free (name); /* take the mime type, convert it to the caps name */ capsname = gst_multipart_demux_get_gstname (demux, mime); caps = gst_caps_from_string (capsname); GST_DEBUG_OBJECT (demux, "caps for pad: %s", capsname); gst_pad_use_fixed_caps (pad); gst_pad_set_caps (pad, caps); gst_caps_unref (caps); mppad->pad = pad; mppad->mime = g_strdup (mime); mppad->last_ret = GST_FLOW_OK; demux->srcpads = g_slist_prepend (demux->srcpads, mppad); demux->numpads++; gst_pad_set_active (pad, TRUE); gst_element_add_pad (GST_ELEMENT_CAST (demux), pad); if (created) { *created = TRUE; } return mppad; } }
static void gst_decklink_sink_init (GstDecklinkSink * decklinksink) { GstDecklinkSinkClass *decklinksink_class; decklinksink_class = GST_DECKLINK_SINK_GET_CLASS (decklinksink); decklinksink->videosinkpad = gst_pad_new_from_template (gst_element_class_get_pad_template (GST_ELEMENT_CLASS (decklinksink_class), "videosink"), "videosink"); gst_pad_set_chain_function (decklinksink->videosinkpad, GST_DEBUG_FUNCPTR (gst_decklink_sink_videosink_chain)); gst_pad_set_event_function (decklinksink->videosinkpad, GST_DEBUG_FUNCPTR (gst_decklink_sink_videosink_event)); gst_pad_set_query_function (decklinksink->videosinkpad, GST_DEBUG_FUNCPTR (gst_decklink_sink_videosink_query)); gst_element_add_pad (GST_ELEMENT (decklinksink), decklinksink->videosinkpad); decklinksink->audiosinkpad = gst_pad_new_from_static_template (&gst_decklink_sink_audiosink_template, "audiosink"); gst_pad_set_chain_function (decklinksink->audiosinkpad, GST_DEBUG_FUNCPTR (gst_decklink_sink_audiosink_chain)); gst_pad_set_event_function (decklinksink->audiosinkpad, GST_DEBUG_FUNCPTR (gst_decklink_sink_audiosink_event)); gst_pad_set_query_function (decklinksink->audiosinkpad, GST_DEBUG_FUNCPTR (gst_decklink_sink_audiosink_query)); gst_element_add_pad (GST_ELEMENT (decklinksink), decklinksink->audiosinkpad); GST_OBJECT_FLAG_SET (decklinksink, GST_ELEMENT_FLAG_SINK); g_cond_init (&decklinksink->cond); g_mutex_init (&decklinksink->mutex); g_mutex_init (&decklinksink->audio_mutex); g_cond_init (&decklinksink->audio_cond); decklinksink->mode = GST_DECKLINK_MODE_NTSC; decklinksink->device_number = 0; decklinksink->callback = new Output; decklinksink->callback->decklinksink = decklinksink; #ifdef _MSC_VER g_mutex_init (&decklinksink->com_init_lock); g_mutex_init (&decklinksink->com_deinit_lock); g_cond_init (&decklinksink->com_initialized); g_cond_init (&decklinksink->com_uninitialize); g_cond_init (&decklinksink->com_uninitialized); g_mutex_lock (&decklinksink->com_init_lock); /* create the COM initialization thread 
*/ g_thread_create ((GThreadFunc) gst_decklink_sink_com_thread, decklinksink, FALSE, NULL); /* wait until the COM thread signals that COM has been initialized */ g_cond_wait (&decklinksink->com_initialized, &decklinksink->com_init_lock); g_mutex_unlock (&decklinksink->com_init_lock); #endif /* _MSC_VER */ }