Example #1
static void
gst_nle_source_setup (GstNleSource * nlesrc)
{
  GstElement *rotate, *videorate, *videoscale, *colorspace, *vident, *cairooverlay, *colorspace2;
  GstElement *audiorate, *audioconvert, *audioresample, *aident;
  GstElement *a_capsfilter, *v_capsfilter, *last;
  GstPad *v_pad, *a_pad;
  GstCaps *v_caps, *a_caps;

  rotate = gst_element_factory_make ("flurotate", NULL);
  videorate = gst_element_factory_make ("videorate", NULL);
  nlesrc->videocrop = gst_element_factory_make ("videocrop", NULL);
  videoscale = gst_element_factory_make ("videoscale", NULL);
  colorspace = gst_element_factory_make ("ffmpegcolorspace", NULL);
  v_capsfilter = gst_element_factory_make ("capsfilter", "video_capsfilter");
  nlesrc->textoverlay = gst_element_factory_make ("textoverlay", NULL);
  cairooverlay = gst_element_factory_make ("cairooverlay", "overlay");
  colorspace2 = gst_element_factory_make ("ffmpegcolorspace", NULL);

  vident = gst_element_factory_make ("identity", NULL);

  v_caps = gst_caps_new_simple ("video/x-raw-yuv",
      "format", GST_TYPE_FOURCC, GST_STR_FOURCC ("I420"),
      "width", G_TYPE_INT, (gint) nlesrc->width,
      "height", G_TYPE_INT, (gint) nlesrc->height,
      "pixel-aspect-ratio", GST_TYPE_FRACTION, 1, 1,
      "framerate", GST_TYPE_FRACTION,
      (gint) nlesrc->fps_n, (gint) nlesrc->fps_d, NULL);

  if (rotate) {
    gst_caps_set_simple (v_caps, "rotation", G_TYPE_INT, (gint) 0, NULL);
  } else {
    rotate = gst_element_factory_make ("identity", NULL); 
  }

  gst_pad_set_caps (nlesrc->video_srcpad, v_caps);

  g_object_set (videoscale, "add-borders", TRUE, NULL);
  g_object_set (vident, "single-segment", TRUE, NULL);
  g_object_set (v_capsfilter, "caps", v_caps, NULL);
  g_object_set (nlesrc->textoverlay, "valignment", 2, "halignment", 0,
      "auto-resize", TRUE, "wrap-mode", 0, "silent", !nlesrc->overlay_title,
      NULL);

  g_signal_connect (cairooverlay, "draw",
      G_CALLBACK (gst_nle_source_draw_overlay), nlesrc);

  /* As videorate can duplicate a lot of buffers we want to put it last in this
     transformation bin */
  gst_bin_add_many (GST_BIN (nlesrc), rotate, nlesrc->videocrop,
      videoscale, colorspace, nlesrc->textoverlay, videorate, v_capsfilter,
      vident, NULL);
  /* cairooverlay forces a colorspace conversion to RGB that we want to avoid
   * when we are not rendering the watermark */
  if (nlesrc->watermark != NULL) {
    gst_bin_add_many (GST_BIN (nlesrc), cairooverlay, colorspace2, NULL);
  }

  gst_element_link_many (rotate, nlesrc->videocrop, videoscale, colorspace,
      nlesrc->textoverlay, NULL);

  if (nlesrc->watermark != NULL) {
    gst_element_link_many (nlesrc->textoverlay, cairooverlay, colorspace2, NULL);
    last = colorspace2;
  } else {
    last = nlesrc->textoverlay;
  }

  gst_element_link_many (last, videorate, v_capsfilter, vident, NULL);

  /* Ghost source and sink pads */
  v_pad = gst_element_get_pad (vident, "src");
  gst_ghost_pad_set_target (GST_GHOST_PAD (nlesrc->video_srcpad), v_pad);
  gst_object_unref (v_pad);

  v_pad = gst_element_get_pad (rotate, "sink");
  gst_ghost_pad_set_target (GST_GHOST_PAD (nlesrc->video_sinkpad), v_pad);
  gst_object_unref (v_pad);

  if (nlesrc->with_audio) {
    audiorate = gst_element_factory_make ("audiorate", NULL);
    audioconvert = gst_element_factory_make ("audioconvert", NULL);
    audioresample = gst_element_factory_make ("audioresample", NULL);
    a_capsfilter = gst_element_factory_make ("capsfilter", NULL);
    aident = gst_element_factory_make ("identity", NULL);

    gst_bin_add_many (GST_BIN (nlesrc), audioresample, audioconvert,
        audiorate, a_capsfilter, aident, NULL);
    gst_element_link_many (audioconvert, audioresample,
        audiorate, a_capsfilter, aident, NULL);

    a_caps = gst_nle_source_get_audio_caps (nlesrc);
    gst_pad_set_caps (nlesrc->audio_srcpad, a_caps);
    g_object_set (a_capsfilter, "caps", a_caps, NULL);

    g_object_set (aident, "single-segment", TRUE, NULL);

    /* Ghost sink and source pads */
    a_pad = gst_element_get_pad (aident, "src");
    gst_ghost_pad_set_target (GST_GHOST_PAD (nlesrc->audio_srcpad), a_pad);
    gst_object_unref (a_pad);

    a_pad = gst_element_get_pad (audioconvert, "sink");
    gst_ghost_pad_set_target (GST_GHOST_PAD (nlesrc->audio_sinkpad), a_pad);
    gst_object_unref (a_pad);
  }
  nlesrc->index = -1;
  nlesrc->accu_time = 0;
  nlesrc->video_srcpad_added = FALSE;
  nlesrc->audio_srcpad_added = FALSE;
}
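The "draw" callback connected above is not part of the listing. A minimal sketch of a cairooverlay draw handler, assuming the real gst_nle_source_draw_overlay renders the watermark here (the rectangle below is only a stand-in):

/* Sketch of the cairooverlay "draw" handler; the signal passes the cairo
 * context plus the running time and duration of the current buffer. */
static void
gst_nle_source_draw_overlay (GstElement * overlay, cairo_t * cr,
    guint64 timestamp, guint64 duration, gpointer user_data)
{
  GstNleSource *nlesrc = (GstNleSource *) user_data;

  if (nlesrc->watermark == NULL)
    return;

  /* stand-in drawing: a translucent box where the watermark would go */
  cairo_set_source_rgba (cr, 1.0, 1.0, 1.0, 0.5);
  cairo_rectangle (cr, 10, 10, 100, 30);
  cairo_fill (cr);
}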
Example #2
int
main (int argc, char *argv[])
{
  GstElement *bin;
  GstElement *src, *capsfilter, *equalizer, *spectrum, *audioconvert, *sink;
  GstCaps *caps;
  GstBus *bus;
  GtkWidget *appwindow, *vbox, *hbox, *widget;
  int i;

  gst_init (&argc, &argv);
  gtk_init (&argc, &argv);

  bin = gst_pipeline_new ("bin");

  /* White noise */
  src = gst_element_factory_make ("audiotestsrc", "src");
  g_object_set (G_OBJECT (src), "wave", 5, "volume", 0.8, NULL);

  /* Force float32 samples */
  capsfilter = gst_element_factory_make ("capsfilter", "capsfilter");
  caps =
      gst_caps_new_simple ("audio/x-raw-float", "width", G_TYPE_INT, 32, NULL);
  g_object_set (capsfilter, "caps", caps, NULL);

  equalizer = gst_element_factory_make ("equalizer-nbands", "equalizer");
  g_object_set (G_OBJECT (equalizer), "num-bands", NBANDS, NULL);

  spectrum = gst_element_factory_make ("spectrum", "spectrum");
  g_object_set (G_OBJECT (spectrum), "bands", spect_bands, "threshold", -80,
      "message", TRUE, "interval", 500 * GST_MSECOND, NULL);

  audioconvert = gst_element_factory_make ("audioconvert", "audioconvert");

  sink = gst_element_factory_make ("autoaudiosink", "sink");

  gst_bin_add_many (GST_BIN (bin), src, capsfilter, equalizer, spectrum,
      audioconvert, sink, NULL);
  if (!gst_element_link_many (src, capsfilter, equalizer, spectrum,
          audioconvert, sink, NULL)) {
    fprintf (stderr, "can't link elements\n");
    exit (1);
  }

  bus = gst_element_get_bus (bin);
  gst_bus_add_watch (bus, message_handler, NULL);
  gst_object_unref (bus);

  appwindow = gtk_window_new (GTK_WINDOW_TOPLEVEL);
  g_signal_connect (G_OBJECT (appwindow), "destroy",
      G_CALLBACK (on_window_destroy), NULL);
  vbox = gtk_vbox_new (FALSE, 6);

  drawingarea = gtk_drawing_area_new ();
  gtk_widget_set_size_request (drawingarea, spect_bands, spect_height);
  g_signal_connect (G_OBJECT (drawingarea), "configure-event",
      G_CALLBACK (on_configure_event), (gpointer) spectrum);
  gtk_box_pack_start (GTK_BOX (vbox), drawingarea, TRUE, TRUE, 0);

  hbox = gtk_hbox_new (FALSE, 20);

  for (i = 0; i < NBANDS; i++) {
    GObject *band;
    gdouble freq;
    gdouble bw;
    gdouble gain;
    gchar *label;
    GtkWidget *frame, *scales_hbox;

    band = gst_child_proxy_get_child_by_index (GST_CHILD_PROXY (equalizer), i);
    g_assert (band != NULL);
    g_object_get (band, "freq", &freq, NULL);
    g_object_get (band, "bandwidth", &bw, NULL);
    g_object_get (band, "gain", &gain, NULL);

    label = g_strdup_printf ("%d Hz", (int) (freq + 0.5));
    frame = gtk_frame_new (label);
    g_free (label);

    scales_hbox = gtk_hbox_new (FALSE, 6);

    widget = gtk_vscale_new_with_range (-24.0, 12.0, 0.5);
    gtk_scale_set_draw_value (GTK_SCALE (widget), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (widget), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (widget), gain);
    gtk_widget_set_size_request (widget, 25, 150);
    g_signal_connect (G_OBJECT (widget), "value-changed",
        G_CALLBACK (on_gain_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), widget, FALSE, FALSE, 0);

    widget = gtk_vscale_new_with_range (0.0, 20000.0, 5.0);
    gtk_scale_set_draw_value (GTK_SCALE (widget), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (widget), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (widget), bw);
    gtk_widget_set_size_request (widget, 25, 150);
    g_signal_connect (G_OBJECT (widget), "value-changed",
        G_CALLBACK (on_bandwidth_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), widget, TRUE, TRUE, 0);

    widget = gtk_vscale_new_with_range (20.0, 20000.0, 5.0);
    gtk_scale_set_draw_value (GTK_SCALE (widget), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (widget), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (widget), freq);
    gtk_widget_set_size_request (widget, 25, 150);
    g_signal_connect (G_OBJECT (widget), "value-changed",
        G_CALLBACK (on_freq_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), widget, TRUE, TRUE, 0);

    gtk_container_add (GTK_CONTAINER (frame), scales_hbox);

    gtk_box_pack_start (GTK_BOX (hbox), frame, TRUE, TRUE, 0);
  }

  gtk_box_pack_start (GTK_BOX (vbox), hbox, TRUE, TRUE, 0);

  gtk_container_add (GTK_CONTAINER (appwindow), vbox);
  gtk_widget_show_all (appwindow);

  gst_element_set_state (bin, GST_STATE_PLAYING);
  gtk_main ();
  gst_element_set_state (bin, GST_STATE_NULL);

  gst_object_unref (bin);

  return 0;
}
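The bus watch installed above (message_handler) is not shown. A minimal sketch, assuming it only reads the per-band magnitudes that the spectrum element posts when "message" is enabled, and leaving the actual drawing aside:

/* Sketch: pick up spectrum element messages and read the magnitudes. */
static gboolean
message_handler (GstBus * bus, GstMessage * message, gpointer data)
{
  if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ELEMENT) {
    const GstStructure *s = gst_message_get_structure (message);

    if (gst_structure_has_name (s, "spectrum")) {
      const GValue *magnitudes = gst_structure_get_value (s, "magnitude");
      guint i;

      for (i = 0; i < spect_bands; i++) {
        gfloat mag =
            g_value_get_float (gst_value_list_get_value (magnitudes, i));
        /* ... draw band i into drawingarea using mag ... */
      }
    }
  }
  return TRUE;                  /* keep the watch installed */
}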
Example #3
bool MediaImpl::loadMovie(QString filename)
{
  _uri = filename;

  qDebug() << "Opening movie: " << filename << ".";
  this->_frame = NULL;

  // Free previously allocated structures
  unloadMovie();

  //_firstFrameTime=_formatContext->start_time;

  // Initialize GStreamer.
  gst_init (NULL, NULL);
  GstElement *capsFilter = NULL;
  GstElement *videoScale = NULL;

  // Create the elements.
  _source =          gst_element_factory_make ("uridecodebin", "source");

//  _audioQueue =      gst_element_factory_make ("queue", "aqueue");
//  _audioConvert =    gst_element_factory_make ("audioconvert", "aconvert");
//  _audioResample =   gst_element_factory_make ("audioresample", "aresample");
//  _audioSink =       gst_element_factory_make ("appsink", "asink");
//
  _videoQueue =      gst_element_factory_make ("queue", "vqueue");
  _videoColorSpace = gst_element_factory_make ("videoconvert", "vcolorspace");
  videoScale = gst_element_factory_make ("videoscale", "videoscale0");
  capsFilter = gst_element_factory_make ("capsfilter", "capsfilter0");
  _videoSink =       gst_element_factory_make ("appsink", "vsink");

  // Prepare handler data.
//  _padHandlerData.audioToConnect   = _audioQueue;
  _padHandlerData.videoToConnect   = _videoQueue;
  _padHandlerData.videoSink        = _videoSink;
  //_padHandlerData.audioIsConnected = false;
  _padHandlerData.videoIsConnected = false;

//  _newAudioBufferHandlerData.audioSink          = _audioSink;
//  _newAudioBufferHandlerData.audioBufferAdapter = _audioBufferAdapter;

  // Create the empty pipeline.
  _pipeline = gst_pipeline_new ( "video-source-pipeline" );

  if (!_pipeline || !_source ||
//      !_audioQueue || !_audioConvert || !_audioResample || !_audioSink ||
      !_videoQueue || !_videoColorSpace || !videoScale || !capsFilter || !_videoSink)
  {
    g_printerr ("Not all elements could be created.\n");
    unloadMovie();
    return false;  // the function returns bool, not an int error code
  }

  // Build the pipeline. Note that we are NOT linking the source at this
  // point. We will do it later.
  gst_bin_add_many (GST_BIN (_pipeline), _source,
//                    _audioQueue, _audioConvert, _audioResample, _audioSink,
                    _videoQueue, _videoColorSpace, videoScale, capsFilter, _videoSink, NULL);

//  if (!gst_element_link_many(_audioQueue, _audioConvert, _audioResample, _audioSink, NULL)) {
//    g_printerr ("Audio elements could not be linked.\n");
//    unloadMovie();
//    return false;
//  }

  if (!gst_element_link_many (_videoQueue, _videoColorSpace, capsFilter, videoScale, _videoSink, NULL)) {
    g_printerr ("Video elements could not be linked.\n");
    unloadMovie();
    return false;
  }

  // Process URI.
  QByteArray uriBytes = filename.toUtf8(); // keep the byte array alive while uri points into it
  gchar* uri = (gchar*) uriBytes.constData();
  if (!gst_uri_is_valid(uri))
  {
    // Try to convert filename to URI.
    GError* error = NULL;
    uri = gst_filename_to_uri(uri, &error);
    if (error) {
      qDebug() << "Filename to URI error: " << error->message;
      g_error_free(error);
      g_free (uri);  // uri is a string, not a GstObject; g_free(NULL) is safe
      freeResources();
      return false;
    }
  }

  // Set URI to be played.
  qDebug() << "URI for uridecodebin: " << uri;
  // FIXME: sometimes it's just the path to the directory that is given, not the file itself.
  g_object_set (_source, "uri", uri, NULL);
  // Connect to the pad-added signal
  g_signal_connect (_source, "pad-added", G_CALLBACK (MediaImpl::gstPadAddedCallback), &_padHandlerData);

  // Configure audio appsink.
  // TODO: change from mono to stereo
//  gchar* audioCapsText = g_strdup_printf ("audio/x-raw-float,channels=1,rate=%d,signed=(boolean)true,width=%d,depth=%d,endianness=BYTE_ORDER",
//                                          Engine::signalInfo().sampleRate(), (int)(sizeof(Signal_T)*8), (int)(sizeof(Signal_T)*8) );
//  GstCaps* audioCaps = gst_caps_from_string (audioCapsText);
//  g_object_set (_audioSink, "emit-signals", TRUE,
//                            "caps", audioCaps,
////                            "max-buffers", 1,     // only one buffer (the last) is maintained in the queue
////                            "drop", TRUE,         // ... other buffers are dropped
//                            NULL);
//  g_signal_connect (_audioSink, "new-buffer", G_CALLBACK (VideoImpl::gstNewAudioBufferCallback), &_newAudioBufferHandlerData);
//  gst_caps_unref (audioCaps);
//  g_free (audioCapsText);

  // Configure video appsink.
//  GstCaps *videoCaps = gst_caps_from_string ("video/x-raw-rgb");
  GstCaps *videoCaps = gst_caps_from_string ("video/x-raw,format=RGBA");
  g_object_set (capsFilter, "caps", videoCaps, NULL);
  g_object_set (_videoSink, "emit-signals", TRUE,
                            "max-buffers", 1,     // only one buffer (the last) is maintained in the queue
                            "drop", TRUE,         // ... other buffers are dropped
                            NULL);
  g_signal_connect (_videoSink, "new-sample", G_CALLBACK (MediaImpl::gstNewSampleCallback), this);
  gst_caps_unref (videoCaps);

  // Listen to the bus.
  _bus = gst_element_get_bus (_pipeline);

  // Start playing.
  if (!setPlayState(true))
    return false;

  qDebug() << "Pipeline started.";

  //_movieReady = true;
  return true;
}
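The pad-added handler registered above is not in the listing. A minimal sketch, assuming a handler-data struct (named PadHandlerData purely for illustration) carrying the videoToConnect/videoIsConnected fields filled in earlier:

/* Sketch: link uridecodebin's new video pad to the video queue, once. */
void MediaImpl::gstPadAddedCallback(GstElement *src, GstPad *newPad, gpointer data)
{
  PadHandlerData *handler = (PadHandlerData *) data;  // hypothetical struct name
  GstCaps *caps = gst_pad_get_current_caps (newPad);
  GstStructure *str = gst_caps_get_structure (caps, 0);

  if (g_str_has_prefix (gst_structure_get_name (str), "video/") &&
      !handler->videoIsConnected) {
    GstPad *sinkPad = gst_element_get_static_pad (handler->videoToConnect, "sink");
    if (GST_PAD_LINK_SUCCESSFUL (gst_pad_link (newPad, sinkPad)))
      handler->videoIsConnected = true;
    gst_object_unref (sinkPad);
  }
  gst_caps_unref (caps);
}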
Example #4
int
main (int argc, char *argv[])
{
  GstPipeline *pipeline;
  GstBus *bus;

  GstElement *srcbin;
  GstElement *tee;
  GstElement *queue[N_ACTORS], *sink[N_ACTORS];
  GstElement *upload[N_ACTORS];
/*
  GstElement *effect[N_ACTORS];
*/
  ClutterActor *stage;
  GstGLClutterActor *actor[N_ACTORS];
  Display *disp;
  Window stage_win;
  gchar *desc;
  gint i;
  gint ok = FALSE;
  ClutterInitError clutter_err = CLUTTER_INIT_ERROR_UNKNOWN;

  clutter_err = clutter_init (&argc, &argv);
  if (clutter_err != CLUTTER_INIT_SUCCESS)
    g_warning ("Failed to initialize clutter: %d\n", clutter_err);

  gst_init (&argc, &argv);

  disp = clutter_x11_get_default_display ();
  if (!clutter_x11_has_composite_extension ()) {
    g_error ("XComposite extension missing");
  }

  stage = clutter_stage_get_default ();
  clutter_actor_set_size (CLUTTER_ACTOR (stage),
      W * COLS + (COLS - 1), H * ROWS + (ROWS - 1));

  stage_win = clutter_x11_get_stage_window (CLUTTER_STAGE (stage));
  XCompositeRedirectSubwindows (disp, stage_win, CompositeRedirectManual);

  for (i = 0; i < N_ACTORS; i++) {
    actor[i] = g_new0 (GstGLClutterActor, 1);
    actor[i]->stage = stage;
    actor[i]->win = XCreateSimpleWindow (disp, stage_win, 0, 0, W, H, 0, 0, 0);
    XMapRaised (disp, actor[i]->win);
    XSync (disp, FALSE);
  }
/*
  desc = g_strdup_printf ("v4l2src ! "
                          "video/x-raw, width=640, height=480, framerate=30/1 ! "
                          "videoscale !"
                          "video/x-raw, width=%d, height=%d ! "
                          "identity", W, H);
*/
  desc = g_strdup_printf ("videotestsrc ! "
      "video/x-raw, format=RGB, width=%d, height=%d !" "identity", W, H);
  pipeline = GST_PIPELINE (gst_pipeline_new (NULL));

  srcbin = gst_parse_bin_from_description (desc, TRUE, NULL);
  if (!srcbin)
    g_error ("Source bin creation failed");

  tee = gst_element_factory_make ("tee", NULL);

  gst_bin_add_many (GST_BIN (pipeline), srcbin, tee, NULL);

  for (i = 0; i < N_ACTORS; i++) {
    queue[i] = gst_element_factory_make ("queue", NULL);
    upload[i] = gst_element_factory_make ("glupload", NULL);
/*      effect[i] = gst_element_factory_make ("gleffects", NULL); */
    sink[i] = gst_element_factory_make ("glimagesink", NULL);
/*    gst_bin_add_many (GST_BIN (pipeline),
        queue[i], upload[i], effect[i], sink[i], NULL); */
    gst_bin_add_many (GST_BIN (pipeline), queue[i], upload[i], sink[i], NULL);
  }

  gst_element_link_many (srcbin, tee, NULL);

  for (i = 0; i < N_ACTORS; i++) {
    ok |=
//        gst_element_link_many (tee, queue[i], upload[i], effect[i], sink[i],
        gst_element_link_many (tee, queue[i], upload[i], sink[i], NULL);
  }

  if (!ok)
    g_error ("Failed to link one or more elements");

/*
  for (i = 0; i < N_ACTORS; i++) {
    g_message ("setting effect %d on %s", i + 1,
        gst_element_get_name (effect[i]));
    g_object_set (G_OBJECT (effect[i]), "effect", i + 1, NULL);
  }
*/

  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));

  gst_bus_set_sync_handler (bus, (GstBusSyncHandler) create_window, actor,
      NULL);

  gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_PLAYING);

  clutter_actor_show_all (stage);

  clutter_main ();

  gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_NULL);
  gst_object_unref (pipeline);

  return 0;
}
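The create_window bus sync handler registered above is not in the listing. A minimal sketch, assuming it hands the pre-created X windows to the glimagesinks round-robin as each one posts prepare-window-handle:

/* Sketch: answer prepare-window-handle by assigning one actor's X window. */
static GstBusSyncReply
create_window (GstBus * bus, GstMessage * message, gpointer data)
{
  static gint n = 0;
  GstGLClutterActor **actor = (GstGLClutterActor **) data;

  if (!gst_is_video_overlay_prepare_window_handle_message (message))
    return GST_BUS_PASS;

  if (n < N_ACTORS)
    gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY
        (GST_MESSAGE_SRC (message)), actor[n++]->win);

  gst_message_unref (message);
  return GST_BUS_DROP;
}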
Example #5
int
main (int argc, char **argv)
{
  GstElement *source, *filter, *encoder, *conv, *resampler, *sink, *oggmux;
  GstCaps *caps;
  GstBus *bus;
  guint bus_watch_id;
  struct AudioMessage audio_message;
  int abort_send = 0;

  typedef void (*SignalHandlerPointer) (int);

  SignalHandlerPointer inthandler, termhandler;
  inthandler = signal (SIGINT, signalhandler);
  termhandler = signal (SIGTERM, signalhandler);

#ifdef DEBUG_RECORD_PURE_OGG
  dump_pure_ogg = getenv ("GNUNET_RECORD_PURE_OGG") ? 1 : 0;
#endif

#ifdef WINDOWS
  setmode (1, _O_BINARY);
#endif

  /* Initialisation */
  gst_init (&argc, &argv);

  GNUNET_assert (GNUNET_OK ==
		 GNUNET_log_setup ("gnunet-helper-audio-record",
				   "WARNING",
				   NULL));

  GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
	      "Audio source starts\n");

  audio_message.header.type = htons (GNUNET_MESSAGE_TYPE_CONVERSATION_AUDIO);

  /* Create gstreamer elements */
  pipeline = gst_pipeline_new ("audio-recorder");
  source   = gst_element_factory_make ("autoaudiosrc",  "audiosource");
  filter   = gst_element_factory_make ("capsfilter",    "filter");
  conv     = gst_element_factory_make ("audioconvert",  "converter");
  resampler= gst_element_factory_make ("audioresample", "resampler");
  encoder  = gst_element_factory_make ("opusenc",       "opus-encoder");
  oggmux   = gst_element_factory_make ("oggmux",        "ogg-muxer");
  sink     = gst_element_factory_make ("appsink",       "audio-output");

  if (!pipeline || !filter || !source || !conv || !resampler || !encoder || !oggmux || !sink)
  {
    GNUNET_log (GNUNET_ERROR_TYPE_ERROR,
        "One element could not be created. Exiting.\n");
    return -1;
  }

  g_signal_connect (source, "child-added", G_CALLBACK (source_child_added), NULL);

  /* Set up the pipeline */

  caps = gst_caps_new_simple ("audio/x-raw",
    "format", G_TYPE_STRING, "S16LE",
/*    "rate", G_TYPE_INT, SAMPLING_RATE,*/
    "channels", G_TYPE_INT, OPUS_CHANNELS,
/*    "layout", G_TYPE_STRING, "interleaved",*/
     NULL);
  g_object_set (G_OBJECT (filter),
      "caps", caps,
      NULL);
  gst_caps_unref (caps);

  g_object_set (G_OBJECT (encoder),
/*      "bitrate", 64000, */
/*      "bandwidth", OPUS_BANDWIDTH_FULLBAND, */
      "inband-fec", INBAND_FEC_MODE,
      "packet-loss-percentage", PACKET_LOSS_PERCENTAGE,
      "max-payload-size", MAX_PAYLOAD_SIZE,
      "audio", FALSE, /* VoIP, not audio */
      "frame-size", OPUS_FRAME_SIZE,
      NULL);

  g_object_set (G_OBJECT (oggmux),
      "max-delay", OGG_MAX_DELAY,
      "max-page-delay", OGG_MAX_PAGE_DELAY,
      NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, pipeline);
  gst_object_unref (bus);

  /* we add all elements into the pipeline */
  /* audiosource | converter | resampler | opus-encoder | audio-output */
  gst_bin_add_many (GST_BIN (pipeline), source, filter, conv, resampler, encoder,
      oggmux, sink, NULL);

  /* we link the elements together */
  gst_element_link_many (source, filter, conv, resampler, encoder, oggmux, sink, NULL);

  /* Set the pipeline to "playing" state*/
  GNUNET_log (GNUNET_ERROR_TYPE_INFO, "Now playing\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);


  GNUNET_log (GNUNET_ERROR_TYPE_INFO, "Running...\n");
  /* Iterate */
  while (!abort_send)
  {
    GstSample *s;
    GstBuffer *b;
    GstMapInfo m;
    size_t len, msg_size;
    const char *ptr;
    int phase;

    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "pulling...\n");
    s = gst_app_sink_pull_sample (GST_APP_SINK (sink));
    if (NULL == s)
    {
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "pulled NULL\n");
      break;
    }
    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "...pulled!\n");
    {
      const GstStructure *si;
      char *si_str;
      GstCaps *s_caps;
      char *caps_str;
      si = gst_sample_get_info (s);
      if (si)
      {
        si_str = gst_structure_to_string (si);
        if (si_str)
        {
          GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Got sample %s\n", si_str);
          g_free (si_str);
        }
      }
      else
        GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Got sample with no info\n");
      s_caps = gst_sample_get_caps (s);
      if (s_caps)
      {
        caps_str = gst_caps_to_string (s_caps);
        if (caps_str)
        {
          GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Got sample with caps %s\n", caps_str);
          g_free (caps_str);
        }
      }
      else
        GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "Got sample with no caps\n");
    }
    b = gst_sample_get_buffer (s);
    if (NULL == b || !gst_buffer_map (b, &m, GST_MAP_READ))
    {
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG, "got NULL buffer %p or failed to map the buffer\n", b);
      gst_sample_unref (s);
      continue;
    }

    len = m.size;
    if (len > UINT16_MAX - sizeof (struct AudioMessage))
    {
      GNUNET_break (0);
      len = UINT16_MAX - sizeof (struct AudioMessage);
    }
    msg_size = sizeof (struct AudioMessage) + len;
    audio_message.header.size = htons ((uint16_t) msg_size);

    GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
        "Sending %u bytes of audio data\n", (unsigned int) msg_size);
    for (phase = 0; phase < 2; phase++)
    {
      size_t offset;
      size_t to_send;
      ssize_t ret;
      if (0 == phase)
      {
#ifdef DEBUG_RECORD_PURE_OGG
        if (dump_pure_ogg)
          continue;
#endif
        ptr = (const char *) &audio_message;
        to_send = sizeof (audio_message);
      }
      else
      {
        ptr = (const char *) m.data;
        to_send = len;
      }
      GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
          "Sending %u bytes on phase %d\n", (unsigned int) to_send, phase);
      for (offset = 0; offset < to_send; offset += ret)
      {
        ret = write (1, &ptr[offset], to_send - offset);
        if (0 >= ret)
        {
          if (-1 == ret)
            GNUNET_log (GNUNET_ERROR_TYPE_DEBUG,
                "Failed to write %u bytes at offset %u (total %u) in phase %d: %s\n",
                (unsigned int) to_send - offset, (unsigned int) offset,
                (unsigned int) (to_send + offset), phase, strerror (errno));
          abort_send = 1;
          break;
        }
      }
      if (abort_send)
        break;
    }
    gst_buffer_unmap (b, &m);
    gst_sample_unref (s);
  }

  signal (SIGINT, inthandler);
  signal (SIGTERM, termhandler);

  GNUNET_log (GNUNET_ERROR_TYPE_INFO, "Returned, stopping playback\n");
  quit ();

  GNUNET_log (GNUNET_ERROR_TYPE_INFO, "Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  pipeline = NULL;
  g_source_remove (bus_watch_id);

  return 0;
}
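Neither bus_call nor the signal handlers appear in the listing. A minimal sketch of the bus watch, assuming it only logs EOS and errors:

/* Sketch: log EOS and error messages from the pipeline bus. */
static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      GNUNET_log (GNUNET_ERROR_TYPE_INFO, "End of stream\n");
      break;
    case GST_MESSAGE_ERROR: {
      GError *error = NULL;

      gst_message_parse_error (msg, &error, NULL);
      GNUNET_log (GNUNET_ERROR_TYPE_ERROR, "Error: %s\n", error->message);
      g_error_free (error);
      break;
    }
    default:
      break;
  }
  return TRUE;
}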
Example #6
int ogg (int   argc,
         char *argv[])
{

    GstElement *pipeline, *source, *demuxer, *decodera, *decoderv , *conv, *sinka , *sinkv , *queuea, *queuev, *volume, *gamma , *videobalance;
    GstBus *bus;

    /* Initialisation */
    gst_init (&argc, &argv);
    double vol=1;
    double gam=1.0;
    double sat=1;
    double cont=1;
    double br=0;
    bool ascii=false;

    int  i;
//handle the argv[i] arguments: initialise the option values
    for(i=1; i<argc; i++) {
        if(strstr (argv[i], "volume")!=NULL) {
            vol=(double)atof(getExt(argv[i])+1);
        }
        else if(strstr (argv[i], "gamma")!=NULL) {
            gam=(double)atof(getExt(argv[i])+1);
        }
        else if(strstr (argv[i], "saturation")!=NULL) {
            sat=(double)atof(getExt(argv[i])+1);
        }
        else if(strstr (argv[i], "contrast")!=NULL) {
            cont=(double)atof(getExt(argv[i])+1);
        }
        else if(strstr (argv[i], "brightness")!=NULL) {
            br=(double)atof(getExt(argv[i])+1);
        }
        else if(strstr (argv[i], "ascii")!=NULL) {
            ascii=true;
        }
        else if(strstr (argv[i], "start")!=NULL) {
            search=(atoi(getExt(argv[i])+1));
        }
        else if(strstr (argv[i], "info")!=NULL) {
            tag(argc,argv);
        }
        else if(strstr (argv[i], "options")!=NULL) {
            printf("volume=[0.0-10.0]\ngamma=[0.0-10.0]\nsaturation=[0.0-2.0]\ncontrast=[0.0-2.0]\nbrightness=[-1.0-1.0]\n");
        }
    }


    loop = g_main_loop_new (NULL, FALSE);

    /* Check the input arguments */
    if (argc < 2) {
        g_printerr ("Usage: %s <Ogg/Vorbis filename>\n", argv[0]);
        return -1;
    }

    /* Create the gstreamer elements */
    pipeline = gst_pipeline_new ("video-player");
    source   = gst_element_factory_make ("filesrc",       "file-source");
    demuxer  = gst_element_factory_make ("oggdemux",      "ogg-demuxer");
    queuea  = gst_element_factory_make ("queue",      "queue-audio");
    queuev  = gst_element_factory_make ("queue",      "queue-video");
    decodera  = gst_element_factory_make ("vorbisdec",     "vorbis-decoder");
    decoderv  = gst_element_factory_make ("theoradec",     "theora-decoder");
    conv     = gst_element_factory_make ("audioconvert",  "converter");
    sinka     = gst_element_factory_make ("autoaudiosink", "audio-output");
    if(!ascii) sinkv     = gst_element_factory_make ("xvimagesink", "video-output");
    else sinkv     = gst_element_factory_make ("aasink", "video-output");
    volume     = gst_element_factory_make ("volume", "option1");
    gamma     = gst_element_factory_make ("gamma", "option2");
    videobalance     = gst_element_factory_make ("videobalance", "option3");



    if (!pipeline || !source || !demuxer || !queuea || !queuev || !videobalance || !gamma || !volume || !decodera || !decoderv || !conv || !sinka || !sinkv) {
        g_printerr ("One element could not be created. Exiting.\n");
        return -1;
    }


    /* Set up the pipeline */
    /* set the file name on the source element */
//set the option elements' values
    g_object_set (G_OBJECT (source), "location", argv[1], NULL);
    g_object_set(G_OBJECT(volume), "volume",vol, NULL);
    g_object_set(G_OBJECT(gamma), "gamma",gam, NULL);
    g_object_set(G_OBJECT(videobalance), "saturation",sat, NULL);
    g_object_set(G_OBJECT(videobalance), "contrast",cont, NULL);
    g_object_set(G_OBJECT(videobalance), "brightness",br, NULL);

//printf("source: %s",argv[1]);

    /* add a message handler */
    bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
    gst_bus_add_watch (bus, bus_call, pipeline); //we pass the pipeline here rather than loop because bus_call needs to interact with it
    gst_object_unref (bus);




    /* add all the elements to the pipeline */
    gst_bin_add_many (GST_BIN (pipeline),
                      source, demuxer , queuea , decodera, conv, volume, sinka, queuev , decoderv, gamma, videobalance, sinkv , NULL);

    /* link the elements together */
    gst_element_link (source, demuxer);
    gst_element_link_many (queuev, decoderv, gamma , videobalance , /*conv,*/ sinkv, NULL);
    gst_element_link_many (queuea, decodera , conv, volume, sinka, NULL);
    g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), queuea);
    g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), queuev);

    /* set the pipeline to the "playing" state */
    g_print ("Playing: %s\n", argv[1]);

    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* Iterate */
    g_print ("Running...\n");
    g_main_loop_run (loop);



    /* Out of the main loop; clean up nicely */
    g_print ("Stopping playback\n");
    gst_element_set_state (pipeline, GST_STATE_NULL);
    g_print ("Deleting pipeline\n");
    gst_object_unref (GST_OBJECT (pipeline));
    return 0;
}
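The shared on_pad_added handler is not in the listing. Because the demuxer fires it once per new pad with each queue as user data, a sketch needs to match the pad's media type against the queue it was given; the name check below is one way to do that, assuming the GStreamer 1.0 caps API:

/* Sketch: link an oggdemux pad to the matching audio/video queue. */
static void
on_pad_added (GstElement * element, GstPad * pad, gpointer data)
{
  GstElement *queue = GST_ELEMENT (data);
  GstPad *sinkpad = gst_element_get_static_pad (queue, "sink");
  GstCaps *caps = gst_pad_get_current_caps (pad);
  const gchar *type = gst_structure_get_name (gst_caps_get_structure (caps, 0));
  gchar *name = gst_element_get_name (queue);   /* "queue-audio" or "queue-video" */

  if (!gst_pad_is_linked (sinkpad) &&
      ((g_str_has_prefix (type, "audio/") && strstr (name, "audio")) ||
       (g_str_has_prefix (type, "video/") && strstr (name, "video"))))
    gst_pad_link (pad, sinkpad);

  g_free (name);
  gst_caps_unref (caps);
  gst_object_unref (sinkpad);
}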
Example #7
static void
do_perfect_stream_test (guint rate, guint width, gdouble drop_probability,
    gdouble inject_probability)
{
  GstElement *pipe, *src, *conv, *filter, *injector, *audiorate, *sink;
  GstMessage *msg;
  GstCaps *caps;
  GstPad *srcpad;
  GList *l, *bufs = NULL;
  GstClockTime next_time = GST_CLOCK_TIME_NONE;
  guint64 next_offset = GST_BUFFER_OFFSET_NONE;

  caps = gst_caps_new_simple ("audio/x-raw-int", "rate", G_TYPE_INT,
      rate, "width", G_TYPE_INT, width, NULL);

  GST_INFO ("-------- drop=%.0f%% caps = %" GST_PTR_FORMAT " ---------- ",
      drop_probability * 100.0, caps);

  g_assert (drop_probability >= 0.0 && drop_probability <= 1.0);
  g_assert (inject_probability >= 0.0 && inject_probability <= 1.0);
  g_assert (width > 0 && (width % 8) == 0);

  pipe = gst_pipeline_new ("pipeline");
  fail_unless (pipe != NULL);

  src = gst_element_factory_make ("audiotestsrc", "audiotestsrc");
  fail_unless (src != NULL);

  g_object_set (src, "num-buffers", 100, NULL);

  conv = gst_element_factory_make ("audioconvert", "audioconvert");
  fail_unless (conv != NULL);

  filter = gst_element_factory_make ("capsfilter", "capsfilter");
  fail_unless (filter != NULL);

  g_object_set (filter, "caps", caps, NULL);

  injector_inject_probability = inject_probability;

  injector = GST_ELEMENT (g_object_new (test_injector_get_type (), NULL));

  srcpad = gst_element_get_pad (injector, "src");
  fail_unless (srcpad != NULL);
  gst_pad_add_buffer_probe (srcpad, G_CALLBACK (probe_cb), &drop_probability);
  gst_object_unref (srcpad);

  audiorate = gst_element_factory_make ("audiorate", "audiorate");
  fail_unless (audiorate != NULL);

  sink = gst_element_factory_make ("fakesink", "fakesink");
  fail_unless (sink != NULL);

  g_object_set (sink, "signal-handoffs", TRUE, NULL);

  g_signal_connect (sink, "handoff", G_CALLBACK (got_buf), &bufs);

  gst_bin_add_many (GST_BIN (pipe), src, conv, filter, injector, audiorate,
      sink, NULL);
  gst_element_link_many (src, conv, filter, injector, audiorate, sink, NULL);

  fail_unless_equals_int (gst_element_set_state (pipe, GST_STATE_PLAYING),
      GST_STATE_CHANGE_ASYNC);

  fail_unless_equals_int (gst_element_get_state (pipe, NULL, NULL, -1),
      GST_STATE_CHANGE_SUCCESS);

  msg = gst_bus_poll (GST_ELEMENT_BUS (pipe),
      GST_MESSAGE_EOS | GST_MESSAGE_ERROR, -1);
  fail_unless_equals_string (GST_MESSAGE_TYPE_NAME (msg), "eos");

  for (l = bufs; l != NULL; l = l->next) {
    GstBuffer *buf = GST_BUFFER (l->data);
    guint num_samples;

    fail_unless (GST_BUFFER_TIMESTAMP_IS_VALID (buf));
    fail_unless (GST_BUFFER_DURATION_IS_VALID (buf));
    fail_unless (GST_BUFFER_OFFSET_IS_VALID (buf));
    fail_unless (GST_BUFFER_OFFSET_END_IS_VALID (buf));

    GST_LOG ("buffer: ts=%" GST_TIME_FORMAT ", end_ts=%" GST_TIME_FORMAT
        " off=%" G_GINT64_FORMAT ", end_off=%" G_GINT64_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf)),
        GST_BUFFER_OFFSET (buf), GST_BUFFER_OFFSET_END (buf));

    if (GST_CLOCK_TIME_IS_VALID (next_time)) {
      fail_unless_equals_uint64 (next_time, GST_BUFFER_TIMESTAMP (buf));
    }
    if (next_offset != GST_BUFFER_OFFSET_NONE) {
      fail_unless_equals_uint64 (next_offset, GST_BUFFER_OFFSET (buf));
    }

    /* check buffer size for sanity */
    fail_unless_equals_int (GST_BUFFER_SIZE (buf) % (width / 8), 0);

    /* check there is actually as much data as there should be */
    num_samples = GST_BUFFER_OFFSET_END (buf) - GST_BUFFER_OFFSET (buf);
    fail_unless_equals_int (GST_BUFFER_SIZE (buf), num_samples * (width / 8));

    next_time = GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf);
    next_offset = GST_BUFFER_OFFSET_END (buf);
  }

  gst_message_unref (msg);
  gst_element_set_state (pipe, GST_STATE_NULL);
  gst_object_unref (pipe);

  g_list_foreach (bufs, (GFunc) gst_mini_object_unref, NULL);
  g_list_free (bufs);

  gst_caps_unref (caps);
}
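The probe that simulates dropped data is not in the listing. A minimal sketch, relying on the 0.10 buffer-probe convention that returning FALSE discards the buffer:

/* Sketch: drop each buffer with the probability passed as user data. */
static gboolean
probe_cb (GstPad * pad, GstBuffer * buf, gpointer user_data)
{
  gdouble drop_probability = *(gdouble *) user_data;

  /* FALSE drops the buffer; TRUE lets it pass */
  return (g_random_double () >= drop_probability);
}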
Example #8
void _receive_video_init_gstreamer(NiceAgent *magent, guint stream_id, CustomData *data)
{
  GstElement *pipeline, *source, *capsfilter, *videoconvert, *h263p, *rtph263pdepay, *sink;
  GstBus *bus;
  GstMessage *msg;
  GstStateChangeReturn ret;
  GSource *bus_source;

  GST_INFO ("Pipeline initialization");
  // TODO: figure out showing video

  source = gst_element_factory_make ("udpsrc", "source");
  //videoconvert = gst_element_factory_make ("videoconvert", "convert");
  //capsfilter = gst_element_factory_make ("capsfilter", "caps");
  rtph263pdepay = gst_element_factory_make ("rtph263pdepay", "rtph263pdepay");
  h263p = gst_element_factory_make ("avdec_h263p", "h263p");
  sink = gst_element_factory_make ("autovideosink", "sink");

  /*
  g_object_set (source, "agent", magent, NULL);
  g_object_set (source, "stream", stream_id, NULL);
  g_object_set (source, "component", 1, NULL);
  */

  g_object_set (source, "address", "127.0.0.1", NULL);
  g_object_set (source, "port", 1234, NULL);

  g_object_set (source, "caps",
    gst_caps_from_string("application/x-rtp"), NULL);
  /*
  g_object_set (source, "caps", gst_caps_from_string(
    "application/x-rtp\,\ media\=\(string\)video\,\ "
    "clock-rate\=\(int\)90000\,\ "
    "encoding-name\=\(string\)H263-1998\,\ "
    "payload\=\(int\)96"),
    NULL);
  */

  //g_object_set (sink, "sync", FALSE, NULL);

  pipeline = gst_pipeline_new ("Video receive pipeline");

  if (!pipeline || !source || //!capsfilter ||
      !h263p || !rtph263pdepay || !sink)
  {
    g_printerr ("Not all elements could be created.\n");
    return;
  }

  // Build the pipeline
  gst_bin_add_many (GST_BIN (pipeline), source, //capsfilter,
            rtph263pdepay, h263p, sink, NULL);

  if (gst_element_link_many (source, //capsfilter,
                rtph263pdepay, h263p, sink, NULL) != TRUE) {
    g_printerr ("Elements could not be linked.\n");
    gst_object_unref (pipeline);
    return;
  }

  // TODO: this is just output dump pipeline
  /*
  source = gst_element_factory_make ("nicesrc", "source");
  sink = gst_element_factory_make ("fakesink", "sink");

  g_object_set (source, "agent", magent, NULL);
  g_object_set (source, "stream", stream_id, NULL);
  g_object_set (source, "component", 1, NULL);
  g_object_set (sink, "dump", 1, NULL);


  pipeline = gst_pipeline_new ("Video send pipeline");
  if (!pipeline || !source || !sink) {
    g_printerr ("Not all elements could be created.\n");
    return;
  }

  // Build the pipeline
  gst_bin_add_many (GST_BIN (pipeline), source, sink, NULL);
  if (gst_element_link (source, sink) != TRUE) {
    g_printerr ("Elements could not be linked.\n");
    gst_object_unref (pipeline);
    return;
  }
  */

  GST_INFO ("Pipeline created, registing on bus");

  bus = gst_element_get_bus (pipeline);
  gst_bus_enable_sync_message_emission (bus);
  gst_bus_add_signal_watch (bus);

  g_signal_connect (bus, "message::error",
      (GCallback) on_error, NULL);

  GST_INFO ("Registing pipeline on bus");

  data->pipeline = pipeline;
  ret = gst_element_set_state(data->pipeline, GST_STATE_PLAYING);

  if (ret == GST_STATE_CHANGE_FAILURE) {
    g_printerr ("Unable to set the pipeline to the playing state.\n");
    gst_object_unref (pipeline);
    return;
  }
}
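The on_error callback attached to "message::error" is not in the listing; a minimal sketch that just reports the failure:

/* Sketch: print the error and debug details carried by the message. */
static void
on_error (GstBus * bus, GstMessage * msg, gpointer user_data)
{
  GError *error = NULL;
  gchar *debug = NULL;

  gst_message_parse_error (msg, &error, &debug);
  g_printerr ("Error from %s: %s\n", GST_OBJECT_NAME (msg->src),
      error->message);
  g_error_free (error);
  g_free (debug);
}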
Example #9
int
main (int argc, char *argv[])
{
  GstElement *bin;
  GstElement *decodebin, *decconvert;
  GstElement *capsfilter, *equalizer, *spectrum, *sinkconvert, *sink;
  GstCaps *caps;
  GstBus *bus;
  GtkWidget *appwindow, *vbox, *hbox, *scale;
  int i, num_bands = NBANDS;

  GOptionEntry options[] = {
    {"bands", 'b', 0, G_OPTION_ARG_INT, &num_bands,
        "Number of bands", NULL},
    {NULL}
  };
  GOptionContext *ctx;
  GError *err = NULL;

  ctx = g_option_context_new ("- demo of audio equalizer");
  g_option_context_add_main_entries (ctx, options, NULL);
  g_option_context_add_group (ctx, gst_init_get_option_group ());
  g_option_context_add_group (ctx, gtk_get_option_group (TRUE));

  if (!g_option_context_parse (ctx, &argc, &argv, &err)) {
    g_print ("Error initializing: %s\n", err->message);
    exit (1);
  }

  if (argc < 2) {
    g_print ("Usage: %s <uri to play>\n", argv[0]);
    g_print ("    For optional arguments: --help\n");
    exit (-1);
  }

  gst_init (&argc, &argv);
  gtk_init (&argc, &argv);

  bin = gst_pipeline_new ("bin");

  /* Uri decoding */
  decodebin = gst_element_factory_make ("uridecodebin", "decoder");
  g_object_set (G_OBJECT (decodebin), "uri", argv[1], NULL);

  /* Force float32 samples */
  decconvert = gst_element_factory_make ("audioconvert", "decconvert");
  capsfilter = gst_element_factory_make ("capsfilter", "capsfilter");
  caps =
      gst_caps_new_simple ("audio/x-raw", "format", G_TYPE_STRING, "F32LE",
      NULL);
  g_object_set (capsfilter, "caps", caps, NULL);

  equalizer = gst_element_factory_make ("equalizer-nbands", "equalizer");
  g_object_set (G_OBJECT (equalizer), "num-bands", num_bands, NULL);

  spectrum = gst_element_factory_make ("spectrum", "spectrum");
  g_object_set (G_OBJECT (spectrum), "bands", spect_bands, "threshold", -80,
      "post-messages", TRUE, "interval", 500 * GST_MSECOND, NULL);

  sinkconvert = gst_element_factory_make ("audioconvert", "sinkconvert");

  sink = gst_element_factory_make ("autoaudiosink", "sink");

  gst_bin_add_many (GST_BIN (bin), decodebin, decconvert, capsfilter, equalizer,
      spectrum, sinkconvert, sink, NULL);
  if (!gst_element_link_many (decconvert, capsfilter, equalizer, spectrum,
          sinkconvert, sink, NULL)) {
    fprintf (stderr, "can't link elements\n");
    exit (1);
  }

  /* Handle dynamic pads */
  g_signal_connect (G_OBJECT (decodebin), "pad-added",
      G_CALLBACK (dynamic_link), gst_element_get_static_pad (decconvert,
          "sink"));

  bus = gst_element_get_bus (bin);
  gst_bus_add_watch (bus, message_handler, NULL);
  gst_object_unref (bus);

  appwindow = gtk_window_new (GTK_WINDOW_TOPLEVEL);
  gtk_window_set_title (GTK_WINDOW (appwindow), "Equalizer Demo");
  g_signal_connect (G_OBJECT (appwindow), "destroy",
      G_CALLBACK (on_window_destroy), NULL);
  vbox = gtk_box_new (GTK_ORIENTATION_VERTICAL, 6);

  drawingarea = gtk_drawing_area_new ();
  gtk_widget_set_size_request (drawingarea, spect_bands, spect_height);
  g_signal_connect (G_OBJECT (drawingarea), "configure-event",
      G_CALLBACK (on_configure_event), (gpointer) spectrum);
  gtk_box_pack_start (GTK_BOX (vbox), drawingarea, TRUE, TRUE, 0);

  hbox = gtk_box_new (GTK_ORIENTATION_HORIZONTAL, 20);

  for (i = 0; i < num_bands; i++) {
    GObject *band;
    gdouble freq;
    gdouble bw;
    gdouble gain;
    gchar *label;
    GtkWidget *frame, *scales_hbox;

    band = gst_child_proxy_get_child_by_index (GST_CHILD_PROXY (equalizer), i);
    g_assert (band != NULL);
    g_object_get (band, "freq", &freq, NULL);
    g_object_get (band, "bandwidth", &bw, NULL);
    g_object_get (band, "gain", &gain, NULL);

    label = g_strdup_printf ("%d Hz", (int) (freq + 0.5));
    frame = gtk_frame_new (label);
    g_free (label);

    scales_hbox = gtk_box_new (GTK_ORIENTATION_HORIZONTAL, 6);

    /* Create gain scale */
    scale = gtk_scale_new_with_range (GTK_ORIENTATION_VERTICAL,
        -24.0, 12.0, 0.5);
    gtk_scale_set_draw_value (GTK_SCALE (scale), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (scale), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (scale), gain);
    gtk_widget_set_size_request (scale, 35, 150);
    g_signal_connect (G_OBJECT (scale), "value-changed",
        G_CALLBACK (on_gain_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), scale, FALSE, FALSE, 0);

    /* Create bandwidth scale */
    scale = gtk_scale_new_with_range (GTK_ORIENTATION_VERTICAL,
        0.0, 20000.0, 5.0);
    gtk_scale_set_draw_value (GTK_SCALE (scale), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (scale), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (scale), bw);
    gtk_widget_set_size_request (scale, 45, 150);
    g_signal_connect (G_OBJECT (scale), "value-changed",
        G_CALLBACK (on_bandwidth_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), scale, TRUE, TRUE, 0);

    /* Create frequency scale */
    scale = gtk_scale_new_with_range (GTK_ORIENTATION_VERTICAL,
        20.0, 20000.0, 5.0);
    gtk_scale_set_draw_value (GTK_SCALE (scale), TRUE);
    gtk_scale_set_value_pos (GTK_SCALE (scale), GTK_POS_TOP);
    gtk_range_set_value (GTK_RANGE (scale), freq);
    gtk_widget_set_size_request (scale, 45, 150);
    g_signal_connect (G_OBJECT (scale), "value-changed",
        G_CALLBACK (on_freq_changed), (gpointer) band);
    gtk_box_pack_start (GTK_BOX (scales_hbox), scale, TRUE, TRUE, 0);

    gtk_container_add (GTK_CONTAINER (frame), scales_hbox);

    gtk_box_pack_start (GTK_BOX (hbox), frame, TRUE, TRUE, 0);
  }

  gtk_box_pack_start (GTK_BOX (vbox), hbox, TRUE, TRUE, 0);

  gtk_container_add (GTK_CONTAINER (appwindow), vbox);
  gtk_widget_show_all (appwindow);

  gst_element_set_state (bin, GST_STATE_PLAYING);
  gtk_main ();
  gst_element_set_state (bin, GST_STATE_NULL);

  gst_object_unref (bin);

  return 0;
}
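The dynamic_link handler used for the decodebin pads is not in the listing; a minimal sketch, assuming it only links the new pad to the audioconvert sink pad passed as user data:

/* Sketch: link uridecodebin's new pad to the converter, if not linked yet. */
static void
dynamic_link (GstElement * element, GstPad * pad, gpointer data)
{
  GstPad *sinkpad = GST_PAD (data);

  if (!gst_pad_is_linked (sinkpad))
    gst_pad_link (pad, sinkpad);
}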
Example #10
bool GstEnginePipeline::Init() {
  // Here we create all the parts of the gstreamer pipeline - from the source
  // to the sink.  The parts of the pipeline are split up into bins:
  //   uri decode bin -> audio bin
  // The uri decode bin is a gstreamer builtin that automatically picks the
  // right type of source and decoder for the URI.

  // The audio bin gets created here and contains:
  //   queue ! audioconvert ! <caps32>
  //         ! ( rgvolume ! rglimiter ! audioconvert2 ) ! tee
  // rgvolume and rglimiter are only created when replaygain is enabled.

  // After the tee the pipeline splits.  One split is converted to 16-bit int
  // samples for the scope, the other is kept as float32 and sent to the
  // speaker.
  //   tee1 ! probe_queue ! probe_converter ! <caps16> ! probe_sink
  //   tee2 ! audio_queue ! equalizer_preamp ! equalizer ! volume ! audioscale
  //        ! convert ! audiosink

  gst_segment_init(&last_decodebin_segment_, GST_FORMAT_TIME);

  // Audio bin
  audiobin_ = gst_bin_new("audiobin");
  gst_bin_add(GST_BIN(pipeline_), audiobin_);

  // Create the sink
  if (!(audiosink_ = engine_->CreateElement(sink_, audiobin_))) return false;

  if (g_object_class_find_property(G_OBJECT_GET_CLASS(audiosink_), "device") &&
      !device_.toString().isEmpty()) {
    switch (device_.type()) {
      case QVariant::Int:
        g_object_set(G_OBJECT(audiosink_), "device", device_.toInt(), nullptr);
        break;
      case QVariant::String:
        g_object_set(G_OBJECT(audiosink_), "device",
                     device_.toString().toUtf8().constData(), nullptr);
        break;

#ifdef Q_OS_WIN32
      case QVariant::ByteArray: {
        GUID guid = QUuid(device_.toByteArray());
        g_object_set(G_OBJECT(audiosink_), "device", &guid, nullptr);
        break;
      }
#endif  // Q_OS_WIN32

      default:
        qLog(Warning) << "Unknown device type" << device_;
        break;
    }
  }

  // Create all the other elements
  GstElement* tee, *probe_queue, *probe_converter, *probe_sink, *audio_queue,
      *convert;

  queue_ = engine_->CreateElement("queue2", audiobin_);
  audioconvert_ = engine_->CreateElement("audioconvert", audiobin_);
  tee = engine_->CreateElement("tee", audiobin_);

  probe_queue = engine_->CreateElement("queue", audiobin_);
  probe_converter = engine_->CreateElement("audioconvert", audiobin_);
  probe_sink = engine_->CreateElement("fakesink", audiobin_);

  audio_queue = engine_->CreateElement("queue", audiobin_);
  equalizer_preamp_ = engine_->CreateElement("volume", audiobin_);
  equalizer_ = engine_->CreateElement("equalizer-nbands", audiobin_);
  stereo_panorama_ = engine_->CreateElement("audiopanorama", audiobin_);
  volume_ = engine_->CreateElement("volume", audiobin_);
  audioscale_ = engine_->CreateElement("audioresample", audiobin_);
  convert = engine_->CreateElement("audioconvert", audiobin_);

  if (!queue_ || !audioconvert_ || !tee || !probe_queue || !probe_converter ||
      !probe_sink || !audio_queue || !equalizer_preamp_ || !equalizer_ ||
      !stereo_panorama_ || !volume_ || !audioscale_ || !convert) {
    return false;
  }

  // Create the replaygain elements if it's enabled.  event_probe is the
  // audioconvert element we attach the probe to, which will change depending
  // on whether replaygain is enabled.  convert_sink is the element after the
  // first audioconvert, which again will change.
  GstElement* event_probe = audioconvert_;
  GstElement* convert_sink = tee;

  if (rg_enabled_) {
    rgvolume_ = engine_->CreateElement("rgvolume", audiobin_);
    rglimiter_ = engine_->CreateElement("rglimiter", audiobin_);
    audioconvert2_ = engine_->CreateElement("audioconvert", audiobin_);
    event_probe = audioconvert2_;
    convert_sink = rgvolume_;

    if (!rgvolume_ || !rglimiter_ || !audioconvert2_) {
      return false;
    }

    // Set replaygain settings
    g_object_set(G_OBJECT(rgvolume_), "album-mode", rg_mode_, nullptr);
    g_object_set(G_OBJECT(rgvolume_), "pre-amp", double(rg_preamp_), nullptr);
    g_object_set(G_OBJECT(rglimiter_), "enabled", int(rg_compression_),
                 nullptr);
  }

  // Create a pad on the outside of the audiobin and connect it to the pad of
  // the first element.
  GstPad* pad = gst_element_get_static_pad(queue_, "sink");
  gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad));
  gst_object_unref(pad);

  // Add a data probe on the src pad of the audioconvert element for our scope.
  // We do it here because we want pre-equalized and pre-volume samples
  // so that our visualizations are not affected by them.
  pad = gst_element_get_static_pad(event_probe, "src");
  gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_EVENT_UPSTREAM,
                    &EventHandoffCallback, this, NULL);
  gst_object_unref(pad);

  // Configure the fakesink properly
  g_object_set(G_OBJECT(probe_sink), "sync", TRUE, nullptr);

  // Setting the equalizer bands:
  //
  // GStreamer's GstIirEqualizerNBands sets up shelve filters for the first and
  // last bands as corner cases. That was causing the "inverted slider" bug.
  // As a workaround, we create two dummy bands at both ends of the spectrum.
  // This causes the actual first and last adjustable bands to be
  // implemented using band-pass filters.

  g_object_set(G_OBJECT(equalizer_), "num-bands", kEqBandCount + 2, nullptr);

  // Dummy first band (bandwidth 0, cutting below 20Hz):
  GstObject* first_band = GST_OBJECT(
      gst_child_proxy_get_child_by_index(GST_CHILD_PROXY(equalizer_), 0));
  // bandwidth and gain are double properties; pass doubles through varargs
  g_object_set(G_OBJECT(first_band), "freq", 20.0, "bandwidth", 0.0, "gain",
               0.0, nullptr);
  g_object_unref(G_OBJECT(first_band));

  // Dummy last band (bandwidth 0, cutting over 20KHz):
  GstObject* last_band = GST_OBJECT(gst_child_proxy_get_child_by_index(
      GST_CHILD_PROXY(equalizer_), kEqBandCount + 1));
  g_object_set(G_OBJECT(last_band), "freq", 20000.0, "bandwidth", 0.0, "gain",
               0.0, nullptr);
  g_object_unref(G_OBJECT(last_band));

  int last_band_frequency = 0;
  for (int i = 0; i < kEqBandCount; ++i) {
    const int index_in_eq = i + 1;
    GstObject* band = GST_OBJECT(gst_child_proxy_get_child_by_index(
        GST_CHILD_PROXY(equalizer_), index_in_eq));

    const float frequency = kEqBandFrequencies[i];
    const float bandwidth = frequency - last_band_frequency;
    last_band_frequency = frequency;

    g_object_set(G_OBJECT(band), "freq", frequency, "bandwidth", bandwidth,
                 "gain", 0.0f, nullptr);
    g_object_unref(G_OBJECT(band));
  }

  // Set the stereo balance.
  g_object_set(G_OBJECT(stereo_panorama_), "panorama", stereo_balance_,
               nullptr);

  // Set the buffer duration.  We set this on this queue instead of the
  // decode bin (in ReplaceDecodeBin()) because setting it on the decode bin
  // only affects network sources.
  // Disable the default buffer and byte limits, so we only buffer based on
  // time.
  g_object_set(G_OBJECT(queue_), "max-size-buffers", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-bytes", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-time", buffer_duration_nanosec_,
               nullptr);
  g_object_set(G_OBJECT(queue_), "low-percent", buffer_min_fill_, nullptr);

  if (buffer_duration_nanosec_ > 0) {
    g_object_set(G_OBJECT(queue_), "use-buffering", true, nullptr);
  }

  gst_element_link_many(queue_, audioconvert_, convert_sink, nullptr);

  // Link the elements with special caps
  // The scope path through the tee gets 16-bit ints.
  GstCaps* caps16 = gst_caps_new_simple("audio/x-raw", "format", G_TYPE_STRING,
                                        "S16LE", NULL);
  gst_element_link_filtered(probe_converter, probe_sink, caps16);
  gst_caps_unref(caps16);

  // Link the outputs of tee to the queues on each path.
  gst_pad_link(gst_element_get_request_pad(tee, "src_%u"),
               gst_element_get_static_pad(probe_queue, "sink"));
  gst_pad_link(gst_element_get_request_pad(tee, "src_%u"),
               gst_element_get_static_pad(audio_queue, "sink"));

  // Link replaygain elements if enabled.
  if (rg_enabled_) {
    gst_element_link_many(rgvolume_, rglimiter_, audioconvert2_, tee, nullptr);
  }

  // Link everything else.
  gst_element_link(probe_queue, probe_converter);
  gst_element_link_many(audio_queue, equalizer_preamp_, equalizer_,
                        stereo_panorama_, volume_, audioscale_, convert,
                        nullptr);

  // add caps for fixed sample rate and mono, but only if requested
  if (sample_rate_ != GstEngine::kAutoSampleRate && sample_rate_ > 0) {
    GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT,
                                        sample_rate_, nullptr);
    if (mono_playback_) {
      gst_caps_set_simple(caps, "channels", G_TYPE_INT, 1, nullptr);
    }

    gst_element_link_filtered(convert, audiosink_, caps);
    gst_caps_unref(caps);
  } else if (mono_playback_) {
    GstCaps* capsmono =
        gst_caps_new_simple("audio/x-raw", "channels", G_TYPE_INT, 1, nullptr);
    gst_element_link_filtered(convert, audiosink_, capsmono);
    gst_caps_unref(capsmono);
  } else {
    gst_element_link(convert, audiosink_);
  }

  // Add probes and handlers.
  gst_pad_add_probe(gst_element_get_static_pad(probe_converter, "src"),
                    GST_PAD_PROBE_TYPE_BUFFER, HandoffCallback, this, nullptr);
  gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                           BusCallbackSync, this, nullptr);
  bus_cb_id_ = gst_bus_add_watch(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                                 BusCallback, this);

  MaybeLinkDecodeToAudio();

  return true;
}
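HandoffCallback, registered as the buffer probe on probe_converter's src pad, is not in the listing. A minimal sketch, assuming it merely maps each 16-bit buffer for the scope (the real Clementine callback does more bookkeeping):

// Sketch: read each buffer headed for the fakesink on the scope branch.
GstPadProbeReturn GstEnginePipeline::HandoffCallback(GstPad* pad,
                                                     GstPadProbeInfo* info,
                                                     gpointer self) {
  GstBuffer* buffer = GST_PAD_PROBE_INFO_BUFFER(info);
  GstMapInfo map;

  if (gst_buffer_map(buffer, &map, GST_MAP_READ)) {
    // ... copy map.data / map.size into the scope ring buffer ...
    gst_buffer_unmap(buffer, &map);
  }
  return GST_PAD_PROBE_OK;
}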
Example #11
/* build a pipeline equivalent to:
 *
 * gst-launch -v rtpbin name=rtpbin                                                \
 *      udpsrc caps=$AUDIO_CAPS port=5002 ! rtpbin.recv_rtp_sink_0              \
 *        rtpbin. ! rtppcmadepay ! alawdec ! audioconvert ! audioresample ! alsasink \
 *      udpsrc port=5003 ! rtpbin.recv_rtcp_sink_0                              \
 *        rtpbin.send_rtcp_src_0 ! udpsink port=5007 host=$DEST sync=false async=false
 */
int
main (int argc, char *argv[])
{
  GstElement *rtpbin, *rtpsrc, *rtcpsrc, *rtcpsink;
  GstElement *audiodepay, *audiodec, *audiores, *audioconv, *audiosink;
  GstElement *pipeline;
  GMainLoop *loop;
  GstCaps *caps;
  gboolean res;
  GstPadLinkReturn lres;
  GstPad *srcpad, *sinkpad;

  /* always init first */
  gst_init (&argc, &argv);

  /* the pipeline to hold everything */
  pipeline = gst_pipeline_new (NULL);
  g_assert (pipeline);

  /* the udp src and source we will use for RTP and RTCP */
  rtpsrc = gst_element_factory_make ("udpsrc", "rtpsrc");
  g_assert (rtpsrc);
  g_object_set (rtpsrc, "port", 5002, NULL);
  /* we need to set caps on the udpsrc for the RTP data */
  caps = gst_caps_from_string (AUDIO_CAPS);
  g_object_set (rtpsrc, "caps", caps, NULL);
  gst_caps_unref (caps);

  rtcpsrc = gst_element_factory_make ("udpsrc", "rtcpsrc");
  g_assert (rtcpsrc);
  g_object_set (rtcpsrc, "port", 5003, NULL);

  rtcpsink = gst_element_factory_make ("udpsink", "rtcpsink");
  g_assert (rtcpsink);
  g_object_set (rtcpsink, "port", 5007, "host", DEST_HOST, NULL);
  /* no need for synchronisation or preroll on the RTCP sink */
  g_object_set (rtcpsink, "async", FALSE, "sync", FALSE, NULL);

  gst_bin_add_many (GST_BIN (pipeline), rtpsrc, rtcpsrc, rtcpsink, NULL);

  /* the depayloading and decoding */
  audiodepay = gst_element_factory_make (AUDIO_DEPAY, "audiodepay");
  g_assert (audiodepay);
  audiodec = gst_element_factory_make (AUDIO_DEC, "audiodec");
  g_assert (audiodec);
  /* the audio playback and format conversion */
  audioconv = gst_element_factory_make ("audioconvert", "audioconv");
  g_assert (audioconv);
  audiores = gst_element_factory_make ("audioresample", "audiores");
  g_assert (audiores);
  audiosink = gst_element_factory_make (AUDIO_SINK, "audiosink");
  g_assert (audiosink);

  /* add depayloading and playback to the pipeline and link */
  gst_bin_add_many (GST_BIN (pipeline), audiodepay, audiodec, audioconv,
      audiores, audiosink, NULL);

  res = gst_element_link_many (audiodepay, audiodec, audioconv, audiores,
      audiosink, NULL);
  g_assert (res == TRUE);

  /* the rtpbin element */
  rtpbin = gst_element_factory_make ("rtpbin", "rtpbin");
  g_assert (rtpbin);

  gst_bin_add (GST_BIN (pipeline), rtpbin);

  /* now link all to the rtpbin, start by getting an RTP sinkpad for session 0 */
  srcpad = gst_element_get_static_pad (rtpsrc, "src");
  sinkpad = gst_element_get_request_pad (rtpbin, "recv_rtp_sink_0");
  lres = gst_pad_link (srcpad, sinkpad);
  g_assert (lres == GST_PAD_LINK_OK);
  gst_object_unref (srcpad);

  /* get an RTCP sinkpad in session 0 */
  srcpad = gst_element_get_static_pad (rtcpsrc, "src");
  sinkpad = gst_element_get_request_pad (rtpbin, "recv_rtcp_sink_0");
  lres = gst_pad_link (srcpad, sinkpad);
  g_assert (lres == GST_PAD_LINK_OK);
  gst_object_unref (srcpad);
  gst_object_unref (sinkpad);

  /* get an RTCP srcpad for sending RTCP back to the sender */
  srcpad = gst_element_get_request_pad (rtpbin, "send_rtcp_src_0");
  sinkpad = gst_element_get_static_pad (rtcpsink, "sink");
  lres = gst_pad_link (srcpad, sinkpad);
  g_assert (lres == GST_PAD_LINK_OK);
  gst_object_unref (sinkpad);

  /* the RTP pad that we have to connect to the depayloader will be created
   * dynamically so we connect to the pad-added signal, pass the depayloader as
   * user_data so that we can link to it. */
  g_signal_connect (rtpbin, "pad-added", G_CALLBACK (pad_added_cb), audiodepay);

  /* give some stats when we receive RTCP */
  g_signal_connect (rtpbin, "on-ssrc-active", G_CALLBACK (on_ssrc_active_cb),
      audiodepay);

  /* set the pipeline to playing */
  g_print ("starting receiver pipeline\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* we need to run a GLib main loop to get the messages */
  loop = g_main_loop_new (NULL, FALSE);
  g_main_loop_run (loop);

  g_print ("stopping receiver pipeline\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);

  gst_object_unref (pipeline);

  return 0;
}
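The pad_added_cb that completes the receiver is not in the listing; a minimal sketch linking rtpbin's dynamically created RTP source pad to the depayloader passed as user data:

/* Sketch: hook rtpbin's new recv_rtp src pad up to the depayloader. */
static void
pad_added_cb (GstElement * rtpbin, GstPad * new_pad, gpointer user_data)
{
  GstElement *depay = GST_ELEMENT (user_data);
  GstPad *sinkpad;

  g_print ("new payload pad: %s\n", GST_PAD_NAME (new_pad));

  sinkpad = gst_element_get_static_pad (depay, "sink");
  if (gst_pad_link (new_pad, sinkpad) != GST_PAD_LINK_OK)
    g_warning ("failed to link rtpbin to depayloader");
  gst_object_unref (sinkpad);
}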
Example #12
gint main (gint argc, gchar *argv[])
{
    gtk_init (&argc, &argv);
    gst_init (&argc, &argv);

    GstElement* pipeline = gst_pipeline_new ("pipeline");

    //window that contains an area where the video is drawn
    GtkWidget* window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
    gtk_widget_set_size_request (window, 640, 480);
    gtk_window_move (GTK_WINDOW (window), 300, 10);
    gtk_window_set_title (GTK_WINDOW (window), "glimagesink implements the gstxoverlay interface");
    GdkGeometry geometry;
    geometry.min_width = 1;
    geometry.min_height = 1;
    geometry.max_width = -1;
    geometry.max_height = -1;
    gtk_window_set_geometry_hints (GTK_WINDOW (window), window, &geometry, GDK_HINT_MIN_SIZE);

    //window to control the states
    GtkWidget* window_control = gtk_window_new (GTK_WINDOW_TOPLEVEL);
    geometry.min_width = 1;
    geometry.min_height = 1;
    geometry.max_width = -1;
    geometry.max_height = -1;
    gtk_window_set_geometry_hints (GTK_WINDOW (window_control), window_control, &geometry, GDK_HINT_MIN_SIZE);
    gtk_window_set_resizable (GTK_WINDOW (window_control), FALSE);
    gtk_window_move (GTK_WINDOW (window_control), 10, 10);
    GtkWidget* table = gtk_table_new (2, 1, TRUE);
    gtk_container_add (GTK_CONTAINER (window_control), table);

    //control state null
    GtkWidget* button_state_null = gtk_button_new_with_label ("GST_STATE_NULL");
    g_signal_connect (G_OBJECT (button_state_null), "clicked",
        G_CALLBACK (button_state_null_cb), pipeline);
    gtk_table_attach_defaults (GTK_TABLE (table), button_state_null, 0, 1, 0, 1);
    gtk_widget_show (button_state_null);

    //control state ready
    GtkWidget* button_state_ready = gtk_button_new_with_label ("GST_STATE_READY");
    g_signal_connect (G_OBJECT (button_state_ready), "clicked",
        G_CALLBACK (button_state_ready_cb), pipeline);
    gtk_table_attach_defaults (GTK_TABLE (table), button_state_ready, 0, 1, 1, 2);
    gtk_widget_show (button_state_ready);

    //control state paused
    GtkWidget* button_state_paused = gtk_button_new_with_label ("GST_STATE_PAUSED");
    g_signal_connect (G_OBJECT (button_state_paused), "clicked",
        G_CALLBACK (button_state_paused_cb), pipeline);
    gtk_table_attach_defaults (GTK_TABLE (table), button_state_paused, 0, 1, 2, 3);
    gtk_widget_show (button_state_paused);

    //control state playing
    GtkWidget* button_state_playing = gtk_button_new_with_label ("GST_STATE_PLAYING");
    g_signal_connect (G_OBJECT (button_state_playing), "clicked",
        G_CALLBACK (button_state_playing_cb), pipeline);
    gtk_table_attach_defaults (GTK_TABLE (table), button_state_playing, 0, 1, 3, 4);
    gtk_widget_show (button_state_playing);

    //change framerate
    GtkWidget* slider_fps = gtk_vscale_new_with_range (1, 30, 2);
    g_signal_connect (G_OBJECT (slider_fps), "format-value",
        G_CALLBACK (slider_fps_cb), pipeline);
    gtk_table_attach_defaults (GTK_TABLE (table), slider_fps, 1, 2, 0, 4);
    gtk_widget_show (slider_fps);

    gtk_widget_show (table);
    gtk_widget_show (window_control);

    //configure the pipeline
    g_signal_connect(G_OBJECT(window), "delete-event", G_CALLBACK(destroy_cb), pipeline);

    GstElement* videosrc = gst_element_factory_make ("videotestsrc", "videotestsrc");
    GstElement* glupload = gst_element_factory_make ("glupload", "glupload");
    GstElement* glfiltercube = gst_element_factory_make ("glfiltercube", "glfiltercube");
    GstElement* glfilterlaplacian = gst_element_factory_make ("glfilterlaplacian", "glfilterlaplacian");
    GstElement* videosink = gst_element_factory_make ("glimagesink", "glimagesink");

    GstCaps *caps = gst_caps_new_simple("video/x-raw-yuv",
                                        "width", G_TYPE_INT, 640,
                                        "height", G_TYPE_INT, 480,
                                        "framerate", GST_TYPE_FRACTION, 25, 1,
                                        "format", GST_TYPE_FOURCC, GST_MAKE_FOURCC ('A', 'Y', 'U', 'V'),
                                        NULL) ;

    gst_bin_add_many (GST_BIN (pipeline), videosrc, glupload, glfiltercube, glfilterlaplacian, videosink, NULL);

    gboolean link_ok = gst_element_link_filtered(videosrc, glupload, caps) ;
    gst_caps_unref(caps) ;
    if(!link_ok)
    {
        g_warning("Failed to link videosrc to glupload!\n") ;
        return -1;
    }

    if(!gst_element_link_many(glupload, glfiltercube, glfilterlaplacian, videosink, NULL))
    {
        g_warning("Failed to link glupload to videosink!\n") ;
        return -1;
    }

    //area where the video is drawn
    GtkWidget* area = gtk_drawing_area_new();
    gtk_container_add (GTK_CONTAINER (window), area);

    //set window id on this event
    GstBus* bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
    gst_bus_set_sync_handler (bus, (GstBusSyncHandler) create_window, area);
    gst_bus_add_signal_watch (bus);
    g_signal_connect(bus, "message::error", G_CALLBACK(end_stream_cb), pipeline);
    g_signal_connect(bus, "message::warning", G_CALLBACK(end_stream_cb), pipeline);
    g_signal_connect(bus, "message::eos", G_CALLBACK(end_stream_cb), pipeline);
    gst_object_unref (bus);

    //needed when being in GST_STATE_READY, GST_STATE_PAUSED
    //or resizing/obscuring the window
    g_signal_connect(area, "expose-event", G_CALLBACK(expose_cb), videosink);

    g_signal_connect (area, "realize", G_CALLBACK (area_realize_cb), pipeline);

    //start
    GstStateChangeReturn ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
    if (ret == GST_STATE_CHANGE_FAILURE)
    {
        g_print ("Failed to start up pipeline!\n");
        return -1;
    }

    gtk_widget_show_all (window);

    gtk_main();

    return 0;
}
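
The create_window bus sync handler installed above is not shown; a sketch of a
typical 0.10-era implementation (assumptions: X11, and that the drawing area is
already realized when the message arrives):

static GstBusSyncReply
create_window (GstBus * bus, GstMessage * message, GtkWidget * widget)
{
    /* ignore anything but "prepare-xwindow-id" element messages */
    if (GST_MESSAGE_TYPE (message) != GST_MESSAGE_ELEMENT)
        return GST_BUS_PASS;
    if (!gst_structure_has_name (message->structure, "prepare-xwindow-id"))
        return GST_BUS_PASS;

    /* hand the drawing area's X window over to glimagesink */
    gst_x_overlay_set_xwindow_id (GST_X_OVERLAY (GST_MESSAGE_SRC (message)),
        GDK_WINDOW_XID (gtk_widget_get_window (widget)));

    gst_message_unref (message);
    return GST_BUS_DROP;
}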
Exemplo n.º 13
int main(int argc, char *argv[]) {
  GstElement *pipeline, *audio_source, *tee, *audio_queue, *audio_convert, *audio_resample, *audio_sink;
  GstElement *video_queue, *visual, *video_convert, *video_sink;
  GstBus *bus;
  GstMessage *msg;
  GstPadTemplate *tee_src_pad_template;
  GstPad *tee_audio_pad, *tee_video_pad;
  GstPad *queue_audio_pad, *queue_video_pad;

  /* Initialize GStreamer */
  gst_init (&argc, &argv);

  /* Create the elements */
  audio_source = gst_element_factory_make ("audiotestsrc", "audio_source");
  tee = gst_element_factory_make ("tee", "tee");
  audio_queue = gst_element_factory_make ("queue", "audio_queue");
  audio_convert = gst_element_factory_make ("audioconvert", "audio_convert");
  audio_resample = gst_element_factory_make ("audioresample", "audio_resample");
  audio_sink = gst_element_factory_make ("autoaudiosink", "audio_sink");
  video_queue = gst_element_factory_make ("queue", "video_queue");
  visual = gst_element_factory_make ("wavescope", "visual");
  video_convert = gst_element_factory_make ("videoconvert", "video_convert");
  video_sink = gst_element_factory_make ("autovideosink", "video_sink");

  /* Create the empty pipeline */
  pipeline = gst_pipeline_new ("test-pipeline");

  if (!pipeline || !audio_source || !tee || !audio_queue || !audio_convert || !audio_resample || !audio_sink ||
      !video_queue || !visual || !video_convert || !video_sink) {
    g_printerr ("Not all elements could be created.\n");
    return -1;
  }

  /* Configure elements */
  g_object_set (audio_source, "freq", 215.0f, NULL);
  g_object_set (visual, "shader", 0, "style", 1, NULL);

  /* Link all elements that can be automatically linked because they have "Always" pads */
  gst_bin_add_many (GST_BIN (pipeline), audio_source, tee, audio_queue, audio_convert, audio_resample, audio_sink,
      video_queue, visual, video_convert, video_sink, NULL);
  if (gst_element_link_many (audio_source, tee, NULL) != TRUE ||
      gst_element_link_many (audio_queue, audio_convert, audio_resample, audio_sink, NULL) != TRUE ||
      gst_element_link_many (video_queue, visual, video_convert, video_sink, NULL) != TRUE) {
    g_printerr ("Elements could not be linked.\n");
    gst_object_unref (pipeline);
    return -1;
  }

  /* Manually link the Tee, which has "Request" pads */
  tee_src_pad_template = gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (tee), "src_%u");
  tee_audio_pad = gst_element_request_pad (tee, tee_src_pad_template, NULL, NULL);
  g_print ("Obtained request pad %s for audio branch.\n", gst_pad_get_name (tee_audio_pad));
  queue_audio_pad = gst_element_get_static_pad (audio_queue, "sink");
  tee_video_pad = gst_element_request_pad (tee, tee_src_pad_template, NULL, NULL);
  g_print ("Obtained request pad %s for video branch.\n", gst_pad_get_name (tee_video_pad));
  queue_video_pad = gst_element_get_static_pad (video_queue, "sink");
  if (gst_pad_link (tee_audio_pad, queue_audio_pad) != GST_PAD_LINK_OK ||
      gst_pad_link (tee_video_pad, queue_video_pad) != GST_PAD_LINK_OK) {
    g_printerr ("Tee could not be linked.\n");
    gst_object_unref (pipeline);
    return -1;
  }
  gst_object_unref (queue_audio_pad);
  gst_object_unref (queue_video_pad);

  /* Start playing the pipeline */
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait until error or EOS */
  bus = gst_element_get_bus (pipeline);
  msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

  /* Release the request pads from the Tee, and unref them */
  gst_element_release_request_pad (tee, tee_audio_pad);
  gst_element_release_request_pad (tee, tee_video_pad);
  gst_object_unref (tee_audio_pad);
  gst_object_unref (tee_video_pad);

  /* Free resources */
  if (msg != NULL)
    gst_message_unref (msg);
  gst_object_unref (bus);
  gst_element_set_state (pipeline, GST_STATE_NULL);

  gst_object_unref (pipeline);
  return 0;
}
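
A side note on the request pads used above: since GStreamer 1.20 the pad
template lookup can be skipped, because gst_element_request_pad_simple()
resolves the "src_%u" template by name (it replaces the now-deprecated
gst_element_get_request_pad()). A sketch of the shorter form:

  tee_audio_pad = gst_element_request_pad_simple (tee, "src_%u");
  tee_video_pad = gst_element_request_pad_simple (tee, "src_%u");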
Exemplo n.º 14
int
main (int argc, char *argv[])
{
  GstElement *filesrc, *osssink, *queue, *parse, *decode;
  GstElement *bin;
  GstElement *thread;

  gst_init (&argc, &argv);

  if (argc != 2) {
    g_print ("usage: %s <filename>\n", argv[0]);
    exit (-1);
  }

  /* create a new thread to hold the elements */
  thread = gst_thread_new ("thread");
  g_assert (thread != NULL);

  /* create a new bin to hold the elements */
  bin = gst_bin_new ("bin");
  g_assert (bin != NULL);

  /* create a disk reader */
  filesrc = gst_element_factory_make ("filesrc", "disk_source");
  g_assert (filesrc != NULL);
  g_object_set (G_OBJECT (filesrc), "location", argv[1], NULL);
  g_signal_connect (G_OBJECT (filesrc), "eos", G_CALLBACK (eos), thread);

  queue = gst_element_factory_make ("queue", "queue");

  /* and an audio sink */
  osssink = gst_element_factory_make ("osssink", "play_audio");
  g_assert (osssink != NULL);

  parse = gst_element_factory_make ("mp3parse", "parse");
  decode = gst_element_factory_make ("mpg123", "decode");

  /* add objects to the main bin */
  gst_bin_add (GST_BIN (bin), filesrc);
  gst_bin_add (GST_BIN (bin), queue);

  gst_bin_add (GST_BIN (thread), parse);
  gst_bin_add (GST_BIN (thread), decode);
  gst_bin_add (GST_BIN (thread), osssink);

  gst_element_link_many (filesrc, queue, parse, decode, osssink, NULL);

  /* make it ready */
  gst_element_set_state (GST_ELEMENT (bin), GST_STATE_READY);
  /* start playing */
  gst_element_set_state (GST_ELEMENT (bin), GST_STATE_PLAYING);

  playing = TRUE;

  while (playing) {
    gst_bin_iterate (GST_BIN (bin));
  }

  gst_element_set_state (GST_ELEMENT (bin), GST_STATE_NULL);

  exit (0);
}
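
The eos handler connected to filesrc above is not shown; a sketch matching this
0.x-era API, assuming a global playing flag polled by the iterate loop:

static void
eos (GstElement * element, gpointer data)
{
  GstElement *thread = GST_ELEMENT (data);

  g_print ("have eos, quitting\n");

  /* stop the thread and drop out of the gst_bin_iterate() loop */
  gst_element_set_state (thread, GST_STATE_NULL);
  playing = FALSE;
}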
Exemplo n.º 15
gint
main (gint argc, gchar ** argv)
{
  GstElement *bin;
  GstElement *src, *fmt, *enc, *sink;
  GstCaps *caps;
  GstStructure *prog;

  /* init gstreamer */
  gst_init (&argc, &argv);

  /* create a new bin to hold the elements */
  bin = gst_pipeline_new ("camera");

  /* create elements */
  if (!(sink = gst_element_factory_make ("multifilesink", NULL))) {
    GST_WARNING ("Can't create element \"multifilesink\"");
    return -1;
  }
  g_object_set (sink, "location", "image%02d.jpg", NULL);

  if (!(enc = gst_element_factory_make ("jpegenc", NULL))) {
    GST_WARNING ("Can't create element \"jpegenc\"");
    return -1;
  }

  if (!(fmt = gst_element_factory_make ("capsfilter", NULL))) {
    GST_WARNING ("Can't create element \"capsfilter\"");
    return -1;
  }
  caps =
      gst_caps_from_string
      ("video/x-raw, width=640, height=480, framerate=(fraction)15/1");
  g_object_set (fmt, "caps", caps, NULL);

  if (!(src = gst_element_factory_make ("v4l2src", NULL))) {
    GST_WARNING ("Can't create element \"v4l2src\"");
    return -1;
  }
  g_object_set (src, "queue-size", 1, NULL);

  /* add objects to the main bin */
  gst_bin_add_many (GST_BIN (bin), src, fmt, enc, sink, NULL);

  /* link elements */
  if (!gst_element_link_many (src, fmt, enc, sink, NULL)) {
    GST_WARNING ("Can't link elements");
    return -1;
  }

  /* program a pattern of events */
#if 0
  prog = gst_structure_from_string ("program"
      ", image00=(structure)\"image\\,contrast\\=0.0\\;\""
      ", image01=(structure)\"image\\,contrast\\=0.3\\;\""
      ", image02=(structure)\"image\\,contrast\\=1.0\\;\""
      ", image03=(structure)\"image\\,contrast\\=0.05\\;\";", NULL);
#endif
#if 1
  prog = gst_structure_from_string ("program"
      ", image00=(structure)\"image\\,brightness\\=1.0\\,contrast\\=0.0\\;\""
      ", image01=(structure)\"image\\,brightness\\=0.5\\,contrast\\=0.3\\;\""
      ", image02=(structure)\"image\\,brightness\\=0.25\\,contrast\\=1.0\\;\""
      ", image03=(structure)\"image\\,brightness\\=0.0\\,contrast\\=0.05\\;\";",
      NULL);
#endif
  set_program (GST_OBJECT (src), prog);
  g_object_set (src, "num-buffers", gst_structure_n_fields (prog), NULL);

  /* prepare playback */
  gst_element_set_state (bin, GST_STATE_PAUSED);

  /* play and wait */
  gst_element_set_state (bin, GST_STATE_PLAYING);

  /* mainloop and wait for eos */
  event_loop (bin);

  /* stop and cleanup */
  gst_element_set_state (bin, GST_STATE_NULL);
  gst_object_unref (GST_OBJECT (bin));
  return 0;
}
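
The event_loop helper called above is not shown; a minimal sketch that blocks
on the bus until EOS or an error is posted:

static void
event_loop (GstElement * pipe)
{
  GstBus *bus = gst_element_get_bus (pipe);
  GstMessage *message;

  /* block until either EOS or an error is posted on the bus */
  message = gst_bus_poll (bus, GST_MESSAGE_EOS | GST_MESSAGE_ERROR, -1);

  if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ERROR) {
    GError *err;
    gchar *debug;

    gst_message_parse_error (message, &err, &debug);
    g_printerr ("error: %s\n", err->message);
    g_error_free (err);
    g_free (debug);
  }

  gst_message_unref (message);
  gst_object_unref (bus);
}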
Exemplo n.º 16
static void
kms_agnostic_bin2_link_to_tee (KmsAgnosticBin2 * self, GstPad * pad,
    GstElement * tee, GstCaps * caps)
{
  GstElement *queue = gst_element_factory_make ("queue", NULL);
  GstPad *target;
  GstProxyPad *proxy;

  gst_bin_add (GST_BIN (self), queue);
  gst_element_sync_state_with_parent (queue);

  if (!(gst_caps_is_any (caps) || gst_caps_is_empty (caps))
      && kms_utils_caps_are_raw (caps)) {
    GstElement *convert = kms_utils_create_convert_for_caps (caps);
    GstElement *rate = kms_utils_create_rate_for_caps (caps);
    GstElement *mediator = kms_utils_create_mediator_element (caps);

    if (kms_utils_caps_are_video (caps)) {
      g_object_set (queue, "leaky", 2, "max-size-time", LEAKY_TIME, NULL);
    }

    remove_element_on_unlinked (convert, "src", "sink");
    if (rate) {
      remove_element_on_unlinked (rate, "src", "sink");
    }
    remove_element_on_unlinked (mediator, "src", "sink");

    if (rate) {
      gst_bin_add (GST_BIN (self), rate);
    }

    gst_bin_add_many (GST_BIN (self), convert, mediator, NULL);

    gst_element_sync_state_with_parent (mediator);
    gst_element_sync_state_with_parent (convert);
    if (rate) {
      gst_element_sync_state_with_parent (rate);
    }

    if (rate) {
      gst_element_link_many (queue, rate, mediator, NULL);
    } else {
      gst_element_link (queue, mediator);
    }

    gst_element_link_many (mediator, convert, NULL);
    target = gst_element_get_static_pad (convert, "src");
  } else {
    target = gst_element_get_static_pad (queue, "src");
  }

  gst_ghost_pad_set_target (GST_GHOST_PAD (pad), target);

  proxy = gst_proxy_pad_get_internal (GST_PROXY_PAD (pad));
  gst_pad_set_query_function (GST_PAD_CAST (proxy),
      proxy_src_pad_query_function);
  g_object_unref (proxy);

  g_object_unref (target);
  link_element_to_tee (tee, queue);
}
Exemplo n.º 17
bool GstEnginePipeline::Init() {
  // Here we create all the parts of the gstreamer pipeline - from the source
  // to the sink.  The parts of the pipeline are split up into bins:
  //   uri decode bin -> audio bin
  // The uri decode bin is a gstreamer builtin that automatically picks the
  // right type of source and decoder for the URI.

  // The audio bin gets created here and contains:
  //   queue ! audioconvert ! <caps32>
  //         ! ( rgvolume ! rglimiter ! audioconvert2 ) ! tee
  // rgvolume and rglimiter are only created when replaygain is enabled.

  // After the tee the pipeline splits.  One split is converted to 16-bit int
  // samples for the scope, the other is kept as float32 and sent to the
  // speaker.
  //   tee1 ! probe_queue ! probe_converter ! <caps16> ! probe_sink
  //   tee2 ! audio_queue ! equalizer_preamp ! equalizer ! volume ! audioscale
  //        ! convert ! audiosink

  // Audio bin
  audiobin_ = gst_bin_new("audiobin");
  gst_bin_add(GST_BIN(pipeline_), audiobin_);

  // Create the sink
  if (!(audiosink_ = engine_->CreateElement(sink_, audiobin_))) return false;

  if (GstEngine::
          DoesThisSinkSupportChangingTheOutputDeviceToAUserEditableString(
              sink_) &&
      !device_.isEmpty())
    g_object_set(G_OBJECT(audiosink_), "device", device_.toUtf8().constData(),
                 nullptr);

  // Create all the other elements
  GstElement* tee, *probe_queue, *probe_converter, *probe_sink, *audio_queue,
      *convert;

  queue_ = engine_->CreateElement("queue2", audiobin_);
  audioconvert_ = engine_->CreateElement("audioconvert", audiobin_);
  tee = engine_->CreateElement("tee", audiobin_);

  probe_queue = engine_->CreateElement("queue", audiobin_);
  probe_converter = engine_->CreateElement("audioconvert", audiobin_);
  probe_sink = engine_->CreateElement("fakesink", audiobin_);

  audio_queue = engine_->CreateElement("queue", audiobin_);
  equalizer_preamp_ = engine_->CreateElement("volume", audiobin_);
  equalizer_ = engine_->CreateElement("equalizer-nbands", audiobin_);
  stereo_panorama_ = engine_->CreateElement("audiopanorama", audiobin_);
  volume_ = engine_->CreateElement("volume", audiobin_);
  audioscale_ = engine_->CreateElement("audioresample", audiobin_);
  convert = engine_->CreateElement("audioconvert", audiobin_);

  if (!queue_ || !audioconvert_ || !tee || !probe_queue || !probe_converter ||
      !probe_sink || !audio_queue || !equalizer_preamp_ || !equalizer_ ||
      !stereo_panorama_ || !volume_ || !audioscale_ || !convert) {
    return false;
  }

  // Create the replaygain elements if it's enabled.  event_probe is the
  // audioconvert element we attach the probe to, which will change depending
  // on whether replaygain is enabled.  convert_sink is the element after the
  // first audioconvert, which again will change.
  GstElement* event_probe = audioconvert_;
  GstElement* convert_sink = tee;

  if (rg_enabled_) {
    rgvolume_ = engine_->CreateElement("rgvolume", audiobin_);
    rglimiter_ = engine_->CreateElement("rglimiter", audiobin_);
    audioconvert2_ = engine_->CreateElement("audioconvert", audiobin_);
    event_probe = audioconvert2_;
    convert_sink = rgvolume_;

    if (!rgvolume_ || !rglimiter_ || !audioconvert2_) {
      return false;
    }

    // Set replaygain settings
    g_object_set(G_OBJECT(rgvolume_), "album-mode", rg_mode_, nullptr);
    g_object_set(G_OBJECT(rgvolume_), "pre-amp", double(rg_preamp_), nullptr);
    g_object_set(G_OBJECT(rglimiter_), "enabled", int(rg_compression_),
                 nullptr);
  }

  // Create a pad on the outside of the audiobin and connect it to the pad of
  // the first element.
  GstPad* pad = gst_element_get_static_pad(queue_, "sink");
  gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad));
  gst_object_unref(pad);

  // Add a data probe on the src pad of the audioconvert element for our scope.
  // We do it here because we want pre-equalized and pre-volume samples
  // so that our visualizations are not affected by them.
  pad = gst_element_get_static_pad(event_probe, "src");
  gst_pad_add_event_probe(pad, G_CALLBACK(EventHandoffCallback), this);
  gst_object_unref(pad);

  // Configure the fakesink properly
  g_object_set(G_OBJECT(probe_sink), "sync", TRUE, nullptr);

  // Set the equalizer bands
  g_object_set(G_OBJECT(equalizer_), "num-bands", 10, nullptr);

  int last_band_frequency = 0;
  for (int i = 0; i < kEqBandCount; ++i) {
    GstObject* band =
        gst_child_proxy_get_child_by_index(GST_CHILD_PROXY(equalizer_), i);

    const float frequency = kEqBandFrequencies[i];
    const float bandwidth = frequency - last_band_frequency;
    last_band_frequency = frequency;

    g_object_set(G_OBJECT(band), "freq", frequency, "bandwidth", bandwidth,
                 "gain", 0.0f, nullptr);
    g_object_unref(G_OBJECT(band));
  }

  // Set the stereo balance.
  g_object_set(G_OBJECT(stereo_panorama_), "panorama", stereo_balance_,
               nullptr);

  // Set the buffer duration.  We set this on this queue instead of the
  // decode bin (in ReplaceDecodeBin()) because setting it on the decode bin
  // only affects network sources.
  // Disable the default buffer and byte limits, so we only buffer based on
  // time.
  g_object_set(G_OBJECT(queue_), "max-size-buffers", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-bytes", 0, nullptr);
  g_object_set(G_OBJECT(queue_), "max-size-time", buffer_duration_nanosec_,
               nullptr);
  g_object_set(G_OBJECT(queue_), "low-percent", 1, nullptr);

  if (buffer_duration_nanosec_ > 0) {
    g_object_set(G_OBJECT(queue_), "use-buffering", true, nullptr);
  }

  gst_element_link(queue_, audioconvert_);

  // Create the caps to put in each path in the tee.  The scope path gets 16-bit
  // ints and the audiosink path gets float32.
  GstCaps* caps16 =
      gst_caps_new_simple("audio/x-raw-int", "width", G_TYPE_INT, 16, "signed",
                          G_TYPE_BOOLEAN, true, nullptr);
  GstCaps* caps32 = gst_caps_new_simple("audio/x-raw-float", "width",
                                        G_TYPE_INT, 32, nullptr);
  if (mono_playback_) {
    gst_caps_set_simple(caps32, "channels", G_TYPE_INT, 1, nullptr);
  }

  // Link the elements with special caps
  gst_element_link_filtered(probe_converter, probe_sink, caps16);
  gst_element_link_filtered(audioconvert_, convert_sink, caps32);
  gst_caps_unref(caps16);
  gst_caps_unref(caps32);

  // Link the outputs of tee to the queues on each path.
  gst_pad_link(gst_element_get_request_pad(tee, "src%d"),
               gst_element_get_static_pad(probe_queue, "sink"));
  gst_pad_link(gst_element_get_request_pad(tee, "src%d"),
               gst_element_get_static_pad(audio_queue, "sink"));

  // Link replaygain elements if enabled.
  if (rg_enabled_) {
    gst_element_link_many(rgvolume_, rglimiter_, audioconvert2_, tee, nullptr);
  }

  // Link everything else.
  gst_element_link(probe_queue, probe_converter);
  gst_element_link_many(audio_queue, equalizer_preamp_, equalizer_,
                        stereo_panorama_, volume_, audioscale_, convert,
                        audiosink_, nullptr);

  // Add probes and handlers.
  gst_pad_add_buffer_probe(gst_element_get_static_pad(probe_converter, "src"),
                           G_CALLBACK(HandoffCallback), this);
  gst_bus_set_sync_handler(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                           BusCallbackSync, this);
  bus_cb_id_ = gst_bus_add_watch(gst_pipeline_get_bus(GST_PIPELINE(pipeline_)),
                                 BusCallback, this);

  MaybeLinkDecodeToAudio();

  return true;
}
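
The probes above use the 0.10 API (gst_pad_add_buffer_probe /
gst_pad_add_event_probe); in GStreamer 1.x both were folded into
gst_pad_add_probe(). A sketch of the 1.x equivalent, with a hypothetical
probe_cb:

static GstPadProbeReturn
probe_cb (GstPad * pad, GstPadProbeInfo * info, gpointer user_data)
{
  GstBuffer *buffer = GST_PAD_PROBE_INFO_BUFFER (info);

  /* inspect the buffer here; event probes work the same way through
   * GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM and GST_PAD_PROBE_INFO_EVENT (info) */
  (void) buffer;
  return GST_PAD_PROBE_OK;
}

/* somewhere after the pad is obtained: */
/*   gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER, probe_cb, self, NULL); */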
Exemplo n.º 18
void EntradaFitxer::crea(int k, GstElement *pipeline, QString nom_fitxer)
{
    //Elements for the file input source
    QString sbin("bin_font%1"),  ssource("source_%1"), sdec("decoder%1"), svolumen_m("volumen_mix%1"), squeue("audio_queue%1");
    QString saconv("audio_conv_%1"), sabin("bin_audio_%1"), sconv("video_conv_%1"), ssink("video_sink_%1");

    //Create the file source and the decodebin, add them to the pipeline and link them.
    bin_font = gst_bin_new ((char*)sbin.arg(k).toStdString().c_str());

    source = gst_element_factory_make ("filesrc",   (char*)ssource.arg(k).toStdString().c_str());
    dec = gst_element_factory_make ("decodebin2",   (char*)sdec.arg(k).toStdString().c_str());

    //Check that all the input elements could be created
    if(!bin_font || !source || !dec){
      g_printerr ("One of the file input elements could not be created. Exiting.\n");
    }

    g_signal_connect (dec, "new-decoded-pad", G_CALLBACK (cb_newpad_audio), this);
    g_signal_connect (dec, "new-decoded-pad", G_CALLBACK (cb_newpad_video), this);
    gst_bin_add_many (GST_BIN (bin_font), source, dec, NULL);
    gst_element_link (source, dec);

    //Create the audio input
    a.bin = gst_bin_new ((char*)sabin.arg(k).toStdString().c_str());
    conv_audio =    gst_element_factory_make("audioconvert",    (char*)saconv.arg(k).toStdString().c_str());
    audiopad =      gst_element_get_static_pad (conv_audio, "sink");
    a.queue_mix=    gst_element_factory_make("queue2",          (char*)squeue.arg(k).toStdString().c_str());
    a.volume_mix =  gst_element_factory_make("volume",          (char*)svolumen_m.arg(k).toStdString().c_str());

    //Check that all the audio input elements could be created
    if(!a.bin || !conv_audio || !audiopad || !a.queue_mix || !a.volume_mix){
      g_printerr ("One of the audio input elements could not be created. Exiting.\n");
    }

    gst_bin_add_many (GST_BIN (a.bin), conv_audio, a.queue_mix, a.volume_mix, NULL);
    gst_element_link_many (conv_audio, a.queue_mix, a.volume_mix, NULL);
    gst_element_add_pad (a.bin, gst_ghost_pad_new ("sink", audiopad));
    gst_object_unref (audiopad);
    gst_bin_add (GST_BIN (bin_font), a.bin);

    //Create the video input
    v.creacomuns(k,"video_fitxer");
    v.creatransformadors(k);
    conv_video =    gst_element_factory_make ("ffmpegcolorspace",   (char*)sconv.arg(k).toStdString().c_str());
    videopad =      gst_element_get_static_pad (conv_video,         "sink");
    v.sink =        gst_element_factory_make ("xvimagesink",        (char*)ssink.arg(k).toStdString().c_str());

    //Check that all the video input elements could be created
    if( !videopad || !conv_video || !v.sink){
      g_printerr ("One of the video input elements could not be created. Exiting.\n");
    }

    gst_bin_add_many (GST_BIN (v.bin), conv_video, v.tee, v.queue, v.scale, v.sink, v.queue_mix, v.color_conv, v.scale_mix, NULL);
    gst_element_link_many (conv_video, v.tee, v.queue, v.scale, v.sink, NULL);
    gst_element_add_pad (v.bin, gst_ghost_pad_new ("sink", videopad));
    gst_object_unref (videopad);
    gst_bin_add (GST_BIN (bin_font), v.bin);

    //Select the input file (keep the std::string alive while its c_str() is in use;
    //taking c_str() of the temporary returned by toStdString() is a dangling pointer)
    std::string nom_fitxer_std = nom_fitxer.toStdString();
    g_object_set (G_OBJECT (source), "location", nom_fitxer_std.c_str(), NULL);
    gst_element_set_state(v.sink, GST_STATE_READY);

    //Add bin_font to the pipeline
    gst_bin_add (GST_BIN (pipeline),bin_font);
}
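
The cb_newpad_audio callback connected above is not part of this listing; a
hypothetical sketch (the real callback receives the EntradaFitxer instance as
user_data and reaches a.bin through it; here the audio bin itself is assumed to
be the user_data):

static void
cb_newpad_audio (GstElement * dec, GstPad * pad, gboolean last, gpointer data)
{
    GstElement *audio_bin = GST_ELEMENT (data);
    GstCaps *caps;
    GstPad *sinkpad;

    /* only link audio pads, and only once */
    caps = gst_pad_get_caps (pad);
    if (g_str_has_prefix (gst_structure_get_name (gst_caps_get_structure (caps, 0)),
            "audio")) {
        sinkpad = gst_element_get_static_pad (audio_bin, "sink");
        if (!gst_pad_is_linked (sinkpad))
            gst_pad_link (pad, sinkpad);
        gst_object_unref (sinkpad);
    }
    gst_caps_unref (caps);
}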
Exemplo n.º 19
static gboolean
bbd_pipeline_construct (BansheeBpmDetector *detector)
{
    g_return_val_if_fail (detector != NULL, FALSE);

    if (detector->pipeline != NULL) {
        return TRUE;
    }
        
    detector->pipeline = gst_pipeline_new ("pipeline");
    if (detector->pipeline == NULL) {
        bbd_raise_error (detector, _("Could not create pipeline"), NULL);
        return FALSE;
    }

    detector->filesrc = gst_element_factory_make ("filesrc", "filesrc");
    if (detector->filesrc == NULL) {
        bbd_raise_error (detector, _("Could not create filesrc element"), NULL);
        return FALSE;
    }
  
    detector->decodebin = gst_element_factory_make ("decodebin", "decodebin");
    if (detector->decodebin == NULL) {
        bbd_raise_error (detector, _("Could not create decodebin plugin"), NULL);
        return FALSE;
    }

    detector->audioconvert = gst_element_factory_make ("audioconvert", "audioconvert");
    if (detector->audioconvert == NULL) {
        bbd_raise_error (detector, _("Could not create audioconvert plugin"), NULL);
        return FALSE;
    }

    detector->bpmdetect = gst_element_factory_make ("bpmdetect", "bpmdetect");
    if (detector->bpmdetect == NULL) {
        bbd_raise_error (detector, _("Could not create bpmdetect plugin"), NULL);
        return FALSE;
    }
    
    detector->fakesink = gst_element_factory_make ("fakesink", "bpmfakesink");
    if (detector->fakesink == NULL) {
        bbd_raise_error (detector, _("Could not create fakesink plugin"), NULL);
        return FALSE;
    }

    gst_bin_add_many (GST_BIN (detector->pipeline),
        detector->filesrc, detector->decodebin, detector->audioconvert,
        detector->bpmdetect, detector->fakesink, NULL);

    if (!gst_element_link (detector->filesrc, detector->decodebin)) {
        bbd_raise_error (detector, _("Could not link pipeline elements"), NULL);
        return FALSE;
    }

    // decodebin and audioconvert are linked dynamically when the decodebin creates a new pad
    g_signal_connect(detector->decodebin, "new-decoded-pad", 
        G_CALLBACK(bbd_new_decoded_pad), detector);

    if (!gst_element_link_many (detector->audioconvert, detector->bpmdetect, detector->fakesink, NULL)) {
        bbd_raise_error (detector, _("Could not link pipeline elements"), NULL);
        return FALSE;
    }
        
    gst_bus_add_watch (gst_pipeline_get_bus (GST_PIPELINE (detector->pipeline)), bbd_pipeline_bus_callback, detector);

    return TRUE;
}
Exemplo n.º 20
static gboolean
brasero_transcode_create_pipeline (BraseroTranscode *transcode,
				   GError **error)
{
	gchar *uri;
	gboolean keep_dts;
	GstElement *decode;
	GstElement *source;
	GstBus *bus = NULL;
	GstCaps *filtercaps;
	GValue *value = NULL;
	GstElement *pipeline;
	GstElement *sink = NULL;
	BraseroJobAction action;
	GstElement *filter = NULL;
	GstElement *volume = NULL;
	GstElement *convert = NULL;
	BraseroTrack *track = NULL;
	GstElement *resample = NULL;
	BraseroTranscodePrivate *priv;

	priv = BRASERO_TRANSCODE_PRIVATE (transcode);

	BRASERO_JOB_LOG (transcode, "Creating new pipeline");

	priv->set_active_state = 0;

	/* free the possible current pipeline and create a new one */
	if (priv->pipeline) {
		gst_element_set_state (priv->pipeline, GST_STATE_NULL);
		gst_object_unref (G_OBJECT (priv->pipeline));
		priv->link = NULL;
		priv->sink = NULL;
		priv->source = NULL;
		priv->convert = NULL;
		priv->pipeline = NULL;
	}

	/* create three types of pipeline according to the needs (possibly adding rgvolume):
	 * - filesrc ! decodebin ! audioconvert ! fakesink (find size) and filesrc!mp3parse!fakesink for mp3s
	 * - filesrc ! decodebin ! audioresample ! audioconvert ! audio/x-raw-int,rate=44100,width=16,depth=16,endianness=4321,signed ! filesink
	 * - filesrc ! decodebin ! audioresample ! audioconvert ! audio/x-raw-int,rate=44100,width=16,depth=16,endianness=4321,signed ! fdsink
	 */
	pipeline = gst_pipeline_new (NULL);

	bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
	gst_bus_add_watch (bus,
			   (GstBusFunc) brasero_transcode_bus_messages,
			   transcode);
	gst_object_unref (bus);

	/* source */
	brasero_job_get_current_track (BRASERO_JOB (transcode), &track);
	uri = brasero_track_stream_get_source (BRASERO_TRACK_STREAM (track), TRUE);
	source = gst_element_make_from_uri (GST_URI_SRC, uri, NULL);
	g_free (uri);

	if (source == NULL) {
		g_set_error (error,
			     BRASERO_BURN_ERROR,
			     BRASERO_BURN_ERROR_GENERAL,
			     /* Translators: %s is the name of the object (as in
			      * GObject) from the Gstreamer library that could
			      * not be created */
			     _("%s element could not be created"),
			     "\"Source\"");
		goto error;
	}
	gst_bin_add (GST_BIN (pipeline), source);
	g_object_set (source,
		      "typefind", FALSE,
		      NULL);

	/* sink */
	brasero_job_get_action (BRASERO_JOB (transcode), &action);
	switch (action) {
	case BRASERO_JOB_ACTION_SIZE:
		if (priv->mp3_size_pipeline)
			return brasero_transcode_create_pipeline_size_mp3 (transcode, pipeline, source, error);

		sink = gst_element_factory_make ("fakesink", NULL);
		break;

	case BRASERO_JOB_ACTION_IMAGE:
		volume = brasero_transcode_create_volume (transcode, track);

		if (brasero_job_get_fd_out (BRASERO_JOB (transcode), NULL) != BRASERO_BURN_OK) {
			gchar *output;

			brasero_job_get_image_output (BRASERO_JOB (transcode),
						      &output,
						      NULL);
			sink = gst_element_factory_make ("filesink", NULL);
			g_object_set (sink,
				      "location", output,
				      NULL);
			g_free (output);
		}
		else {
			int fd;

			brasero_job_get_fd_out (BRASERO_JOB (transcode), &fd);
			sink = gst_element_factory_make ("fdsink", NULL);
			g_object_set (sink,
				      "fd", fd,
				      NULL);
		}
		break;

	default:
		goto error;
	}

	if (!sink) {
		g_set_error (error,
			     BRASERO_BURN_ERROR,
			     BRASERO_BURN_ERROR_GENERAL,
			     _("%s element could not be created"),
			     "\"Sink\"");
		goto error;
	}

	gst_bin_add (GST_BIN (pipeline), sink);
	g_object_set (sink,
		      "sync", FALSE,
		      NULL);

	brasero_job_tag_lookup (BRASERO_JOB (transcode),
				BRASERO_SESSION_STREAM_AUDIO_FORMAT,
				&value);
	if (value)
		keep_dts = (g_value_get_int (value) & BRASERO_AUDIO_FORMAT_DTS) != 0;
	else
		keep_dts = FALSE;

	if (keep_dts
	&&  action == BRASERO_JOB_ACTION_IMAGE
	&& (brasero_track_stream_get_format (BRASERO_TRACK_STREAM (track)) & BRASERO_AUDIO_FORMAT_DTS) != 0) {
		GstElement *wavparse;
		GstPad *sinkpad;

		BRASERO_JOB_LOG (transcode, "DTS wav pipeline");

		/* FIXME: volume normalization won't work here. We'd need to 
		 * reencode it afterwards otherwise. */
		/* This is a special case. This is DTS wav. So we only decode wav. */
		wavparse = gst_element_factory_make ("wavparse", NULL);
		if (wavparse == NULL) {
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("%s element could not be created"),
				     "\"Wavparse\"");
			goto error;
		}
		gst_bin_add (GST_BIN (pipeline), wavparse);

		if (!gst_element_link (source, wavparse)) {
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
			             _("Impossible to link plugin pads"));
			goto error;
		}

		g_signal_connect (wavparse,
		                  "pad-added",
		                  G_CALLBACK (brasero_transcode_wavparse_pad_added_cb),
		                  transcode);

		/* This is an ugly workaround for the lack of accuracy with
		 * gstreamer. Yet this is unfortunately a necessary evil. */
		priv->pos = 0;
		priv->size = 0;
		sinkpad = gst_element_get_pad (sink, "sink");
		priv->probe = gst_pad_add_buffer_probe (sinkpad,
							G_CALLBACK (brasero_transcode_buffer_handler),
							transcode);
		gst_object_unref (sinkpad);


		priv->link = NULL;
		priv->sink = sink;
		priv->decode = NULL;
		priv->source = source;
		priv->convert = NULL;
		priv->pipeline = pipeline;

		gst_element_set_state (pipeline, GST_STATE_PLAYING);
		return TRUE;
	}

	/* audioconvert */
	convert = gst_element_factory_make ("audioconvert", NULL);
	if (convert == NULL) {
		g_set_error (error,
			     BRASERO_BURN_ERROR,
			     BRASERO_BURN_ERROR_GENERAL,
			     _("%s element could not be created"),
			     "\"Audioconvert\"");
		goto error;
	}
	gst_bin_add (GST_BIN (pipeline), convert);

	if (action == BRASERO_JOB_ACTION_IMAGE) {
		BraseroStreamFormat session_format;
		BraseroTrackType *output_type;

		output_type = brasero_track_type_new ();
		brasero_job_get_output_type (BRASERO_JOB (transcode), output_type);
		session_format = brasero_track_type_get_stream_format (output_type);
		brasero_track_type_free (output_type);

		/* audioresample */
		resample = gst_element_factory_make ("audioresample", NULL);
		if (resample == NULL) {
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("%s element could not be created"),
				     "\"Audioresample\"");
			goto error;
		}
		gst_bin_add (GST_BIN (pipeline), resample);

		/* filter */
		filter = gst_element_factory_make ("capsfilter", NULL);
		if (!filter) {
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("%s element could not be created"),
				     "\"Filter\"");
			goto error;
		}
		gst_bin_add (GST_BIN (pipeline), filter);
		filtercaps = gst_caps_new_full (gst_structure_new ("audio/x-raw-int",
								   "channels", G_TYPE_INT, 2,
								   "width", G_TYPE_INT, 16,
								   "depth", G_TYPE_INT, 16,
								   /* NOTE: we use little endianness only for libburn which requires little */
								   "endianness", G_TYPE_INT, (session_format & BRASERO_AUDIO_FORMAT_RAW_LITTLE_ENDIAN) != 0 ? 1234:4321,
								   "rate", G_TYPE_INT, 44100,
								   "signed", G_TYPE_BOOLEAN, TRUE,
								   NULL),
						NULL);
		g_object_set (GST_OBJECT (filter), "caps", filtercaps, NULL);
		gst_caps_unref (filtercaps);
	}

	/* decode */
	decode = gst_element_factory_make ("decodebin", NULL);
	if (decode == NULL) {
		g_set_error (error,
			     BRASERO_BURN_ERROR,
			     BRASERO_BURN_ERROR_GENERAL,
			     _("%s element could not be created"),
			     "\"Decodebin\"");
		goto error;
	}
	gst_bin_add (GST_BIN (pipeline), decode);

	if (action == BRASERO_JOB_ACTION_IMAGE) {
		GstPad *sinkpad;
		gboolean res;

		if (!gst_element_link (source, decode)) {
			BRASERO_JOB_LOG (transcode, "Impossible to link plugin pads");
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
			             _("Impossible to link plugin pads"));
			goto error;
		}

		priv->link = resample;
		g_signal_connect (G_OBJECT (decode),
				  "new-decoded-pad",
				  G_CALLBACK (brasero_transcode_new_decoded_pad_cb),
				  transcode);

		if (volume) {
			gst_bin_add (GST_BIN (pipeline), volume);
			res = gst_element_link_many (resample,
			                             volume,
						     convert,
			                             filter,
			                             sink,
			                             NULL);
		}
		else
			res = gst_element_link_many (resample,
			                             convert,
			                             filter,
			                             sink,
			                             NULL);

		if (!res) {
			BRASERO_JOB_LOG (transcode, "Impossible to link plugin pads");
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("Impossible to link plugin pads"));
			goto error;
		}

		/* This is an ugly workaround for the lack of accuracy with
		 * gstreamer. Yet this is unfortunately a necessary evil. */
		priv->pos = 0;
		priv->size = 0;
		sinkpad = gst_element_get_pad (sink, "sink");
		priv->probe = gst_pad_add_buffer_probe (sinkpad,
							G_CALLBACK (brasero_transcode_buffer_handler),
							transcode);
		gst_object_unref (sinkpad);
	}
	else {
		if (!gst_element_link (source, decode)
		||  !gst_element_link (convert, sink)) {
			BRASERO_JOB_LOG (transcode, "Impossible to link plugin pads");
			g_set_error (error,
				     BRASERO_BURN_ERROR,
				     BRASERO_BURN_ERROR_GENERAL,
				     _("Impossible to link plugin pads"));
			goto error;
		}

		priv->link = convert;
		g_signal_connect (G_OBJECT (decode),
				  "new-decoded-pad",
				  G_CALLBACK (brasero_transcode_new_decoded_pad_cb),
				  transcode);
	}

	priv->sink = sink;
	priv->decode = decode;
	priv->source = source;
	priv->convert = convert;
	priv->pipeline = pipeline;

	gst_element_set_state (pipeline, GST_STATE_PLAYING);
	return TRUE;

error:

	if (error && (*error))
		BRASERO_JOB_LOG (transcode,
				 "can't create object : %s \n",
				 (*error)->message);

	gst_object_unref (GST_OBJECT (pipeline));
	return FALSE;
}
Exemplo n.º 21
int
main (int argc, char *argv[])
{
  GstElement *bin;
  GstElement *src, *spectrum, *audioconvert, *sink;
  GstBus *bus;
  GtkWidget *appwindow, *vbox, *widget;

  gst_init (&argc, &argv);
  gtk_init (&argc, &argv);

  bin = gst_pipeline_new ("bin");

  src = gst_element_factory_make ("audiotestsrc", "src");
  g_object_set (G_OBJECT (src), "wave", 0, NULL);

  spectrum = gst_element_factory_make ("spectrum", "spectrum");
  g_object_set (G_OBJECT (spectrum), "bands", spect_bands, "threshold", -80,
      "message", TRUE, NULL);

  audioconvert = gst_element_factory_make ("audioconvert", "audioconvert");

  sink = gst_element_factory_make (DEFAULT_AUDIOSINK, "sink");

  gst_bin_add_many (GST_BIN (bin), src, spectrum, audioconvert, sink, NULL);
  if (!gst_element_link_many (src, spectrum, audioconvert, sink, NULL)) {
    fprintf (stderr, "can't link elements\n");
    exit (1);
  }

  bus = gst_element_get_bus (bin);
  gst_bus_add_watch (bus, message_handler, NULL);
  gst_object_unref (bus);

  sync_clock = gst_pipeline_get_clock (GST_PIPELINE (bin));

  appwindow = gtk_window_new (GTK_WINDOW_TOPLEVEL);
  g_signal_connect (G_OBJECT (appwindow), "destroy",
      G_CALLBACK (on_window_destroy), NULL);
  vbox = gtk_vbox_new (FALSE, 6);

  widget = gtk_hscale_new_with_range (50.0, 20000.0, 10);
  gtk_scale_set_draw_value (GTK_SCALE (widget), TRUE);
  gtk_scale_set_value_pos (GTK_SCALE (widget), GTK_POS_TOP);
  gtk_range_set_value (GTK_RANGE (widget), 440.0);
  g_signal_connect (G_OBJECT (widget), "value-changed",
      G_CALLBACK (on_frequency_changed), (gpointer) src);
  gtk_box_pack_start (GTK_BOX (vbox), widget, FALSE, FALSE, 0);

  drawingarea = gtk_drawing_area_new ();
  gtk_widget_set_size_request (drawingarea, spect_bands, spect_height);
  g_signal_connect (G_OBJECT (drawingarea), "configure-event",
      G_CALLBACK (on_configure_event), (gpointer) spectrum);
  gtk_box_pack_start (GTK_BOX (vbox), drawingarea, TRUE, TRUE, 0);

  gtk_container_add (GTK_CONTAINER (appwindow), vbox);
  gtk_widget_show_all (appwindow);

  gst_element_set_state (bin, GST_STATE_PLAYING);
  gtk_main ();
  gst_element_set_state (bin, GST_STATE_NULL);

  gst_object_unref (sync_clock);
  gst_object_unref (bin);

  return 0;
}
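
The message_handler watching the bus above is where the spectrum data arrives;
a sketch of its usual shape, assuming 0.10-style float magnitudes (the real
handler also schedules the drawing into drawingarea):

static gboolean
message_handler (GstBus * bus, GstMessage * message, gpointer data)
{
  if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ELEMENT) {
    const GstStructure *s = gst_message_get_structure (message);

    if (gst_structure_has_name (s, "spectrum")) {
      const GValue *magnitudes = gst_structure_get_value (s, "magnitude");
      guint i;

      for (i = 0; i < spect_bands; ++i) {
        gfloat mag =
            g_value_get_float (gst_value_list_get_value (magnitudes, i));
        /* one magnitude (in dB, >= threshold) per frequency band */
        (void) mag;
      }
    }
  }
  return TRUE;
}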
Exemplo n.º 22
bool CDFKAFU050::CreatePipeline()
{
#ifdef USE_AFU050
 
  GstCaps* caps; 
  GstCaps* videoscalecaps; 
  GstElement *appsrc;
  g_print("Pipeline build start\n");
  
  _handle = afu050_open();
  if(!_handle )
  {
    printf("Failed to open camera.\n");
    return false;
  }

  afu050_set_video_format(_handle, _Formats[_SelectedCamera].internalFormat);

  _Pipeline.pipeline 	= gst_pipeline_new ("camera");

  
  if( !InstantiateGSTElements() )
  {
    return false;
  }
   
  appsrc = GetElement( "appsrc");
  g_object_set( appsrc, "block", FALSE, NULL);
    
  caps = gst_caps_from_string("image/jpeg");
  gst_app_src_set_caps(GST_APP_SRC( appsrc ), caps);

  // Create the output pads of the video tee. This must be done before the
  // tee is linked.

  // Size of the output window
    videoscalecaps = gst_caps_new_simple ("video/x-raw-rgb",
                              "width", G_TYPE_INT, 640, //GetWidth(),
                              "height", G_TYPE_INT, 480, //GetHeight(),
                                NULL); 

    g_object_set( GetElement("filter"), "caps", videoscalecaps, NULL);
    g_object_set( GetElement("ximagesink"), "sync", FALSE, NULL);
  
  
  // Pipeline up to the tee. Handles the camera control and the color image.
  if( !gst_element_link_many(appsrc,
		    GetElement("jpegdec"),
		    GetElement("cogcolorspace"),
		    GetElement("tee"),
			     NULL) )
  {
    g_critical ("Unable to link source pipeline");
    return false;
  }

    // Link the branch to the display of live video in the videowindow.
  if( ! gst_element_link_many(GetElement("queue1"), 
			      GetElement("videoscale1"),
			      GetElement("filter"),
			      GetElement("ximagesink"),
			      NULL))
  {
    g_critical ("Unable to link first branch.");
    return false;
  }

  // Link the branch to the fakesink for image processing.
  if( ! gst_element_link_many(GetElement("queue2"), GetElement("image_sink"), NULL))
  {
    g_critical ("Unable to link second branch.");
    return  false;
  }

  // Link the display pipeline to the first video tee pad.
  GstPadLinkReturn ret = gst_pad_link (tee_q1_pad, q1_pad);
  if(ret != GST_PAD_LINK_OK)
  {
    g_critical ("Unable to link tee with queue1");
    return false;
  }

  // Link the fakesink pipeline to the second video tee pad.
  ret = gst_pad_link (tee_q2_pad, q2_pad);
  if(ret != GST_PAD_LINK_OK)
  {
    g_critical ("Unable to link tee with queue2");
    return false;
  }
  
  _Pipeline.bus = gst_pipeline_get_bus (GST_PIPELINE (_Pipeline.pipeline));
  _Pipeline.bus_watch_id = gst_bus_add_watch (_Pipeline.bus, bus_call, _Pipeline.loop);
  
  gst_object_unref (_Pipeline.bus); 

  //gst_element_set_state (_Pipeline.pipeline, GST_STATE_READY);  
  
  gst_x_overlay_set_xwindow_id(GST_X_OVERLAY ( GetElement("ximagesink") ),WindowHandle);

  

  afu050_set_exposure_auto (_handle, 1);
  afu050_set_gain_auto (_handle, 1);
  afu050_set_white_balance_auto (_handle, 1);
  
  g_object_set(G_OBJECT( GetElement("image_sink" )),"signal-handoffs", TRUE, NULL);
  g_signal_connect(G_OBJECT( GetElement( "image_sink")), "handoff", G_CALLBACK(_cbfunc), _CallbackData);	

  
  GstStateChangeReturn sret = gst_element_set_state (_Pipeline.pipeline, GST_STATE_PLAYING);
  //gst_element_get_state(_Pipeline.pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
  if (sret == GST_STATE_CHANGE_FAILURE) 
  {
    g_printerr ("Playing set_state failed.\n");
    return false;
  }
  else
  {
    g_print ("Playing\n");
  }


  if (afu050_capture_start(_handle, new_frame_cb, appsrc) < 0)
  {
	  fprintf (stderr, "failed to start capture\n");
	  return false;
  }
  
  g_print("Pipeline built.\n");
#endif
 
  return true;
}
Exemplo n.º 23
int main(int argc, char *argv[]){

	GMainLoop *loop;
	GstElement *pipeline, *source, *demuxer, *decoder, *conv, *sink;
	GstBus *bus;
	guint bus_watch_id;
	
	/* Initialize libraries */
	gst_init (&argc, &argv);
	loop = g_main_loop_new (NULL, FALSE);

	/* Check arguments */
	if (argc != 2) {
		g_printerr ("Usage: %s <Ogg/Theora filename>\n", argv[0]);
		return -1;
	}
	
	/* Create gstreamer elements */
	pipeline = gst_pipeline_new ("audio-player");
	source = gst_element_factory_make ("filesrc", "file-source");
	demuxer = gst_element_factory_make ("oggdemux", "ogg-demuxer");
	decoder = gst_element_factory_make ("theoradec", "theoradec-decoder");
	conv = gst_element_factory_make ("videoconvert", "converter");
	sink = gst_element_factory_make ("ximagesink", "video_out");
	if (!pipeline || !source || !demuxer || !decoder || !conv || !sink) {
		g_printerr ("Error al crear algun elemento. Saliendo.\n");
		return -1;
	}

	/* Set up the pipeline */
	/* we set the input filename to the source element */
	g_object_set (G_OBJECT (source), "location", argv[1], NULL);

	/* we add a message handler */
	bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
	bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
	gst_object_unref (bus);

	/* we add all elements into the pipeline */
	/* file-source | ogg-demuxer | theora-decoder | converter | video-output */
	gst_bin_add_many (GST_BIN (pipeline), source, demuxer, decoder, conv, sink, NULL);

	/* we link the elements together */
	/* file-source -> ogg-demuxer ~> theora-decoder -> converter -> video-output */
	gst_element_link (source, demuxer);
	gst_element_link_many (decoder, conv, sink, NULL);

	g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), decoder);
	/* note that the demuxer will be linked to the decoder dynamically.
	The reason is that Ogg may contain various streams (for example
	audio and video). The source pad(s) will be created at run time,
	by the demuxer when it detects the amount and nature of streams.
	Therefore we connect a callback function which will be executed
	when the "pad-added" is emitted.*/

	/* Set the pipeline to "playing" state*/
	g_print ("Now playing: %s\n", argv[1]);
	gst_element_set_state (pipeline, GST_STATE_PLAYING);
	
	/* Iterate */
	g_print ("Running...\n");
	g_main_loop_run (loop);
	
	/* Out of the main loop, clean up nicely */
	g_print ("Returned, stopping playback\n");
	gst_element_set_state (pipeline, GST_STATE_NULL);
	g_print ("Deleting pipeline\n");
	gst_object_unref (GST_OBJECT (pipeline));
	g_source_remove (bus_watch_id);
	g_main_loop_unref (loop);

return 0;
}
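
The bus_call handler installed above follows the canonical shape (quit the main
loop on EOS or error); a sketch:

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
	GMainLoop *loop = (GMainLoop *) data;

	switch (GST_MESSAGE_TYPE (msg)) {
		case GST_MESSAGE_EOS:
			g_print ("End of stream\n");
			g_main_loop_quit (loop);
			break;
		case GST_MESSAGE_ERROR: {
			gchar *debug;
			GError *error;

			gst_message_parse_error (msg, &error, &debug);
			g_free (debug);
			g_printerr ("Error: %s\n", error->message);
			g_error_free (error);
			g_main_loop_quit (loop);
			break;
		}
		default:
			break;
	}
	return TRUE;
}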
Exemplo n.º 24
int
main (int   argc,
      char *argv[])
{
  GMainLoop *loop;
  GstElement *pipeline, *source, *demuxer, *decoder, *conv, *sink;
  GstElement * hora;
  GstBus *bus;

   //Initialisation
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

	/* Check arguments */
	if (argc != 2) {
		g_printerr ("Usage: %s <Ogg/Theora filename>\n", argv[0]);
		return -1;
	}
   
	/* Create the gstreamer elements */
	pipeline = gst_pipeline_new ("test_pipeline");
	source = gst_element_factory_make ("filesrc", "file-source");
	demuxer = gst_element_factory_make ("oggdemux", "ogg-demuxer");
	decoder = gst_element_factory_make ("theoradec", "theoradec-decoder");
  hora = gst_element_factory_make ("clockoverlay", "hora_actual");
	conv = gst_element_factory_make ("videoconvert", "converter");
	sink = gst_element_factory_make ("ximagesink", "video_out");

	if (!pipeline || !source || !demuxer || !decoder || !hora || !conv || !sink) {
		g_printerr ("One element could not be created. Exiting.\n");
		return -1;
	}

	/* we set the input filename to the source element */
	g_object_set (G_OBJECT (source), "location", argv[1], NULL);
	
	// Change this property so the current time overlay is drawn large
	g_object_set (G_OBJECT(hora), "auto-resize", FALSE, NULL);

  //Set up the pipeline
  //we set the input filename to the source element
  //we add a message handler
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);
 
	/* add all the elements to the pipeline */
	/* file-source | ogg-demuxer | theoradec-decoder | hora_actual | converter | video_out */
	gst_bin_add_many (GST_BIN (pipeline), source, demuxer, decoder, hora, conv, sink, NULL);

	/* we link the elements together */
	/* file-source -> demuxer -> theora-decoder -> hora -> converter -> sink */
	gst_element_link (source, demuxer);
	gst_element_link_many (decoder, hora, conv, sink, NULL);

	g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), decoder);

   //Set the pipeline to "playing" state
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
   //Iterate
  g_main_loop_run (loop);
  // Out of the main loop, clean up nicely
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (GST_OBJECT (pipeline));
  return 0;
}	
Exemplo n.º 25
/**
 * Set up the Gstreamer pipeline. The playbin element is used to decode
 * all kinds of different formats. The capsfilter is used to deliver the
 * audio in a fixed format (X Hz, 1-2 channels, 16 bit signed)
 *
 * The pipeline looks like this:
 *
 * <pre>
 *  .--------------.    .------------------------------------------.
 *  |    playbin   |    |mybin    .------------.   .------------.  |
 *  |----.    .----|    |-----.   | capsfilter |   |  fakesink  |  |
 *  |sink|    |src |--->|ghost|   |----.   .---|   |----.   .---|  |    handoff
 *  |----'    '----|    |pad  |-->|sink|   |src|-->|sink|   |src|--+--> handler
 *  |              |    |-----'   '------------'   '------------'  |
 *  '--------------'    '------------------------------------------'
 * </pre>
 */
static int gst_setup(struct ausrc_st *st)
{
	GstBus *bus;
	GstPad *pad;

	st->loop = g_main_loop_new(NULL, FALSE);

	st->pipeline = gst_pipeline_new("pipeline");
	if (!st->pipeline) {
		DEBUG_WARNING("failed to create pipeline element\n");
		return ENOMEM;
	}

	/********************* Player BIN **************************/

	st->source = gst_element_factory_make("playbin", "source");
	if (!st->source) {
		DEBUG_WARNING("failed to create playbin source element\n");
		return ENOMEM;
	}

	/********************* My BIN **************************/

	st->bin = gst_bin_new("mybin");

	st->capsfilt = gst_element_factory_make("capsfilter", NULL);
	if (!st->capsfilt) {
		DEBUG_WARNING("failed to create capsfilter element\n");
		return ENOMEM;
	}

	set_caps(st);

	st->sink = gst_element_factory_make("fakesink", "sink");
	if (!st->sink) {
		DEBUG_WARNING("failed to create sink element\n");
		return ENOMEM;
	}

	gst_bin_add_many(GST_BIN(st->bin), st->capsfilt, st->sink, NULL);
	gst_element_link_many(st->capsfilt, st->sink, NULL);

	/* add ghostpad */
	pad = gst_element_get_pad(st->capsfilt, "sink");
	gst_element_add_pad(st->bin, gst_ghost_pad_new("sink", pad));
	gst_object_unref(GST_OBJECT(pad));

	/* put all elements in a bin */
	gst_bin_add_many(GST_BIN(st->pipeline), st->source, NULL);

	/* Override audio-sink handoff handler */
	g_object_set(G_OBJECT(st->sink), "signal-handoffs", TRUE, NULL);
	g_signal_connect(st->sink, "handoff", G_CALLBACK(handoff_handler), st);
	g_object_set(G_OBJECT(st->source), "audio-sink", st->bin, NULL);

	/********************* Misc **************************/

	/* Bus watch */
	bus = gst_pipeline_get_bus(GST_PIPELINE(st->pipeline));
	gst_bus_add_watch(bus, bus_watch_handler, st);
	gst_object_unref(bus);

	/* Set URI */
	g_object_set(G_OBJECT(st->source), "uri", st->uri, NULL);

	return 0;
}
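
The handoff_handler connected to the fakesink above is where the fixed-format
samples come out; a sketch matching the 0.10-era API of this example (the real
handler forwards the samples to the application):

static void
handoff_handler (GstElement * sink, GstBuffer * buffer, GstPad * pad,
    gpointer user_data)
{
	struct ausrc_st *st = user_data;

	/* GST_BUFFER_DATA/GST_BUFFER_SIZE give the raw 16-bit samples */
	(void) st;
	g_print ("handoff: %u bytes of audio\n", GST_BUFFER_SIZE (buffer));
}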
Exemplo n.º 26
int main(int argc, char *argv[])
{
  GstElement *pipeline, *bin, *effect_element, *convert, *sink;
  GstPad *pad, *ghost_pad;
  char *pipeline_str;
  GIOChannel *io_stdin = g_io_channel_unix_new(fileno(stdin));
  CustomData data;
  GstStateChangeReturn ret;
  gboolean list_effects = FALSE;
  gchar *effect_name = NULL;
  GError *error = NULL;
  GstPlugin *gaudiplugin;
  gchar *props_str = NULL;
  GOptionContext *context;
  GOptionEntry options[] = {
    { "list-effects", 'l', 0, G_OPTION_ARG_NONE, &list_effects,
      "list available effects and exits", NULL },
    { "effect", 'e', 0, G_OPTION_ARG_STRING, &effect_name,
      "set the desired effect", NULL },
    { "props", 'p', 0, G_OPTION_ARG_STRING, &props_str,
      "for property setting (-p \"silent,bool,true;adjustement,uint,150\")",
      NULL },
    { NULL }
  };

  setlocale(LC_ALL, "fr_FR.utf8");

  gst_init(&argc, &argv);

  gaudiplugin = gst_registry_find_plugin(GET_PLUGIN_REGISTRY, "gaudieffects");
  if (gaudiplugin == NULL) {
    g_print("Pas de plugin “gaudieffects” trouvé !! :(\n");
    return -1;
  }

  context = g_option_context_new("");
  g_option_context_add_main_entries(context, options, "");
  if (!g_option_context_parse(context, &argc, &argv, &error)) {
    g_print("option parsing failed: %s\n", error->message);
    return -1;
  }
  g_option_context_free(context);

  if (list_effects == TRUE)
    return  list_gaudieffects_features();

  if (argc > 1) {
    if (g_str_has_prefix(argv[1], "http://") ||
	g_str_has_prefix(argv[1], "ftp://"))
      pipeline_str = g_strdup_printf("%s uri=\"%s\"", PLAYBIN, argv[1]);
    else if (argv[1][0] == '~')
      pipeline_str = g_strdup_printf("%s uri=\"file://%s%s\"", PLAYBIN,
				     g_get_home_dir(), argv[1]+1);
    else if (g_file_test(argv[1], G_FILE_TEST_IS_REGULAR))
      pipeline_str = g_strdup_printf("playbin uri=\"file://%s\"", argv[1]);
    else
      pipeline_str = g_strdup_printf("%s uri=%s", PLAYBIN, DEFAULT_URI);
  } else
    pipeline_str = g_strdup_printf("%s uri=%s", PLAYBIN, DEFAULT_URI);

  g_io_add_watch(io_stdin, G_IO_IN, (GIOFunc)handle_keyboard, &data);

  pipeline = gst_parse_launch(pipeline_str, NULL);

  if (gst_plugin_is_loaded(gaudiplugin) == FALSE)
    gst_plugin_load(gaudiplugin);

  if (effect_name == NULL)
    effect_name = "solarize";

  effect_element = gst_element_factory_make(effect_name, effect_name);
  convert = gst_element_factory_make("videoconvert", "convert");
  sink = gst_element_factory_make("autovideosink", "video_sink");
  if (!effect_element || !convert || !sink) {
    g_printerr("Not all elements could be created.\n");
    return -1;
  }

  bin = gst_bin_new("video_sink_bin");
  gst_bin_add_many(GST_BIN(bin), effect_element, convert, sink, NULL);
  gst_element_link_many(effect_element, convert, sink, NULL);
  pad = gst_element_get_static_pad(effect_element, "sink");
  ghost_pad = gst_ghost_pad_new("sink", pad);
  gst_pad_set_active(ghost_pad, TRUE);
  gst_element_add_pad(bin, ghost_pad);
  gst_object_unref(pad);

  g_object_set(GST_OBJECT(pipeline), "video-sink", bin, NULL);

  if (props_str != NULL)
    set_props(effect_element, props_str);

  ret = gst_element_set_state(pipeline, GST_STATE_PLAYING);
  if (ret == GST_STATE_CHANGE_FAILURE) {
    g_printerr("Unable to set the pipeline to the playing state.\n");
    gst_object_unref(pipeline);
    return -1;
  }

  data.loop = g_main_loop_new(NULL, FALSE);
  g_main_loop_run(data.loop);

  g_io_channel_unref(io_stdin);

  gst_element_set_state(pipeline, GST_STATE_NULL);

  gst_object_unref(pipeline);

  return 0;
}
Exemplo n.º 27
int
main (int argc, char *argv[])
{
  GstBus *bus;
  GstElement *filter, *convert, *sink;
  GstCaps *caps;
  gboolean res;
  SprinkleState *state;

  gst_init (&argc, &argv);

  loop = g_main_loop_new (NULL, TRUE);

  pipeline = gst_pipeline_new ("pipeline");

  /* add the fixed part to the pipeline. Remember that we need a capsfilter
   * after adder so that multiple sources are not racing to negotiate
   * a format */
  adder = gst_element_factory_make ("adder", "adder");
  filter = gst_element_factory_make ("capsfilter", "filter");
  convert = gst_element_factory_make ("audioconvert", "convert");
  sink = gst_element_factory_make ("autoaudiosink", "sink");

  caps = gst_caps_new_simple ("audio/x-raw-int",
      "endianness", G_TYPE_INT, G_LITTLE_ENDIAN,
      "channels", G_TYPE_INT, 2,
      "width", G_TYPE_INT, 16,
      "depth", G_TYPE_INT, 16,
      "rate", G_TYPE_INT, 44100, "signed", G_TYPE_BOOLEAN, TRUE, NULL);
  g_object_set (filter, "caps", caps, NULL);
  gst_caps_unref (caps);

  gst_bin_add_many (GST_BIN (pipeline), adder, filter, convert, sink, NULL);

  res = gst_element_link_many (adder, filter, convert, sink, NULL);
  g_assert (res);

  /* setup message handling */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  gst_bus_add_signal_watch_full (bus, G_PRIORITY_HIGH);
  g_signal_connect (bus, "message::error", (GCallback) message_received,
      pipeline);
  g_signal_connect (bus, "message::warning", (GCallback) message_received,
      pipeline);
  g_signal_connect (bus, "message::eos", (GCallback) eos_message_received,
      pipeline);

  /* we set the pipeline to PLAYING, the pipeline will not yet preroll because
   * there is no source providing data for it yet */
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* and add the function that modifies the pipeline every 100ms */
  state = create_state ();
  g_timeout_add (100, (GSourceFunc) do_sprinkle, state);

  /* go to main loop */
  g_main_loop_run (loop);

  gst_element_set_state (pipeline, GST_STATE_NULL);

  free_state (state);
  gst_object_unref (bus);
  gst_object_unref (pipeline);

  return 0;
}
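The dynamic part of this example lives in do_sprinkle(), defined earlier. The point the capsfilter comment alludes to is that new branches can be attached to adder while the pipeline is PLAYING; as a hedged sketch (the helper name add_tone_branch is an assumption, not code from this example), adding one branch looks roughly like this:

/* Sketch: attach a new test-tone branch to the running adder.
 * "pipeline" and "adder" are the globals set up in main() above. */
static void
add_tone_branch (gdouble freq)
{
  GstElement *src = gst_element_factory_make ("audiotestsrc", NULL);
  GstPad *srcpad, *sinkpad;

  g_object_set (src, "freq", freq, NULL);

  gst_bin_add (GST_BIN (pipeline), src);
  gst_element_set_state (src, GST_STATE_PLAYING);

  /* adder exposes request pads ("sink%d" in 0.10, "sink_%u" in 1.0) */
  srcpad = gst_element_get_static_pad (src, "src");
  sinkpad = gst_element_get_request_pad (adder, "sink%d");
  gst_pad_link (srcpad, sinkpad);

  /* a real teardown would gst_element_release_request_pad() first */
  gst_object_unref (srcpad);
  gst_object_unref (sinkpad);
}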
Example No. 28
/****************************************************
 *              GstElement vmethods                 *
 ****************************************************/
static GstPad *
_request_new_pad (GstElement * element, GstPadTemplate * templ,
    const gchar * name, const GstCaps * caps)
{
  GstPad *audioresample_srcpad, *audioconvert_sinkpad, *tmpghost;
  GstPad *ghost;
  GstElement *audioconvert, *audioresample;
  PadInfos *infos = g_slice_new0 (PadInfos);
  GESSmartAdder *self = GES_SMART_ADDER (element);

  infos->adder_pad = gst_element_request_pad (self->adder,
      gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (self->adder),
          "sink_%u"), NULL, caps);

  if (infos->adder_pad == NULL) {
    GST_WARNING_OBJECT (element, "Could not get any pad from GstAdder");
    g_slice_free (PadInfos, infos);

    return NULL;
  }

  infos->self = self;

  infos->bin = gst_bin_new (NULL);
  audioconvert = gst_element_factory_make ("audioconvert", NULL);
  audioresample = gst_element_factory_make ("audioresample", NULL);

  gst_bin_add_many (GST_BIN (infos->bin), audioconvert, audioresample, NULL);
  gst_element_link_many (audioconvert, audioresample, NULL);

  audioconvert_sinkpad = gst_element_get_static_pad (audioconvert, "sink");
  tmpghost = GST_PAD (gst_ghost_pad_new (NULL, audioconvert_sinkpad));
  gst_object_unref (audioconvert_sinkpad);
  gst_pad_set_active (tmpghost, TRUE);
  gst_element_add_pad (GST_ELEMENT (infos->bin), tmpghost);

  gst_bin_add (GST_BIN (self), infos->bin);
  ghost = gst_ghost_pad_new (NULL, tmpghost);
  gst_pad_set_active (ghost, TRUE);
  if (!gst_element_add_pad (GST_ELEMENT (self), ghost))
    goto could_not_add;

  audioresample_srcpad = gst_element_get_static_pad (audioresample, "src");
  tmpghost = GST_PAD (gst_ghost_pad_new (NULL, audioresample_srcpad));
  gst_object_unref (audioresample_srcpad);
  gst_pad_set_active (tmpghost, TRUE);
  gst_element_add_pad (GST_ELEMENT (infos->bin), tmpghost);
  gst_pad_link (tmpghost, infos->adder_pad);

  LOCK (self);
  g_hash_table_insert (self->pads_infos, ghost, infos);
  UNLOCK (self);

  GST_DEBUG_OBJECT (self, "Returning new pad %" GST_PTR_FORMAT, ghost);
  return ghost;

could_not_add:
  {
    GST_ERROR_OBJECT (self, "could not add pad");
    destroy_pad (infos);
    return NULL;
  }
}
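A vmethod like the one above gets installed in the element's class_init. As a minimal sketch (GES's real class_init also registers pad templates and more, so only the relevant line is shown; the function name follows the GObject naming convention for GESSmartAdder):

static void
ges_smart_adder_class_init (GESSmartAdderClass * klass)
{
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);

  /* route gst_element_request_pad() calls to the function above */
  element_class->request_new_pad = GST_DEBUG_FUNCPTR (_request_new_pad);
}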
Example No. 29
bool CvVideoWriter_GStreamer::open( const char * filename, int fourcc,
        double fps, CvSize frameSize, bool is_color )
{
    CV_FUNCNAME("CvVideoWriter_GStreamer::open");

    __BEGIN__;
    // This backend does not yet honour the fourcc parameter: it encodes an
    // AVI with jpegenc. We need a common API across backends to support
    // fourcc for AVI, and to choose codec and container format (ogg, dirac,
    // matroska) in a uniform way.

    // check arguments

    assert (filename);
    assert (fps > 0);
    assert (frameSize.width > 0  &&  frameSize.height > 0);
    std::map<int,char*>::iterator encit;
    encit=encs.find(fourcc);
    if (encit==encs.end())
        CV_ERROR( CV_StsUnsupportedFormat,"Gstreamer Opencv backend doesn't support this codec acutally.");
    if(!isInited) {
        gst_init (NULL, NULL);
        isInited = true;
    }
    close();
    source=gst_element_factory_make("appsrc",NULL);
    file=gst_element_factory_make("filesink", NULL);
    enc=gst_element_factory_make(encit->second, NULL);
    mux=gst_element_factory_make("avimux", NULL);
    color = gst_element_factory_make("ffmpegcolorspace", NULL);
    if (!enc)
        CV_ERROR( CV_StsUnsupportedFormat, "Your version of GStreamer does not support this codec, or a required plugin is missing.");
    g_object_set(G_OBJECT(file), "location", filename, NULL);
    pipeline = gst_pipeline_new (NULL);
    GstCaps* caps;
    if (is_color) {
        input_pix_fmt=1;
        caps= gst_video_format_new_caps(GST_VIDEO_FORMAT_BGR,
                                        frameSize.width,
                                        frameSize.height,
                                        (int) (fps * 1000),
                                        1000,
                                        1,
                                        1);
    }
    else  {
        input_pix_fmt=0;
        caps= gst_caps_new_simple("video/x-raw-gray",
                                  "width", G_TYPE_INT, frameSize.width,
                                  "height", G_TYPE_INT, frameSize.height,
                                  "framerate", GST_TYPE_FRACTION, int(fps),1,
                                  "bpp",G_TYPE_INT,8,
                                  "depth",G_TYPE_INT,8,
                                  NULL);
    }
    gst_app_src_set_caps(GST_APP_SRC(source), caps);
    gst_caps_unref(caps); // appsrc keeps its own reference to the caps
    if (fourcc == CV_FOURCC_DEFAULT) {
        /* raw frames go straight to the muxer; enc is not added to the bin
         * in this case, so it must not appear in the link chain either */
        gst_bin_add_many(GST_BIN(pipeline), source, color, mux, file, NULL);
        if (!gst_element_link_many(source, color, mux, file, NULL)) {
            CV_ERROR(CV_StsError, "GStreamer: cannot link elements\n");
        }
    }
    else {
        gst_bin_add_many(GST_BIN(pipeline), source, color, enc, mux, file, NULL);
        if (!gst_element_link_many(source, color, enc, mux, file, NULL)) {
            CV_ERROR(CV_StsError, "GStreamer: cannot link elements\n");
        }
    }

    if(gst_element_set_state(GST_ELEMENT(pipeline), GST_STATE_PLAYING) ==
        GST_STATE_CHANGE_FAILURE) {
            CV_ERROR(CV_StsError, "GStreamer: cannot put pipeline to play\n");
    }
    __END__;
    return true;
}
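As a usage sketch for the writer above (the output path and the MJPEG fourcc are arbitrary choices; whether a given fourcc is accepted depends on the encs map populated elsewhere in this backend), OpenCV's C API drives it like this:

#include <opencv/highgui.h>

/* Sketch: write 100 grey-ramp frames through the GStreamer-backed writer */
void write_gray_ramp (void)
{
  CvSize size = cvSize (320, 240);
  CvVideoWriter *writer = cvCreateVideoWriter ("/tmp/out.avi",
      CV_FOURCC ('M', 'J', 'P', 'G'), 25.0, size, 1);
  IplImage *frame = cvCreateImage (size, IPL_DEPTH_8U, 3);
  int i;

  for (i = 0; i < 100; i++) {
    cvSet (frame, cvScalar (i * 2, i * 2, i * 2, 0), NULL);
    cvWriteFrame (writer, frame);   /* pushes one buffer into appsrc */
  }

  cvReleaseImage (&frame);
  cvReleaseVideoWriter (&writer);   /* finalizes the AVI container */
}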
Example No. 30
int
main (int argc, char *argv[])
{
  GstElement *bin, *filesrc, *decoder, *audiosink;
  GstElement *conv, *resample;
	xmlfile = "helloworld_logs";
  std_log(LOG_FILENAME_LINE, "Test Started hellowworld");
  
  gst_init (&argc, &argv);
 
  if (argc != 2) {
    g_print ("usage: %s <mp3 file>\n", argv[0]);
    std_log(LOG_FILENAME_LINE, "Test Failed argument need to be passed");
    create_xml(1); 
    exit (-1);
  }

  /* create a new bin to hold the elements */
  bin = gst_pipeline_new ("pipeline");
  g_assert (bin);

  /* a real disk reader would be "filesrc"; this test build substitutes
   * audiotestsrc, which has no "location" property, so the g_object_set
   * below only triggers a GLib warning */
  filesrc = gst_element_factory_make (/*"filesrc"*/"audiotestsrc", "disk_source");
  g_assert (filesrc);
  g_object_set (G_OBJECT (filesrc), "location", argv[1], NULL);

  /* now it's time to get the decoder */
//  decoder = gst_element_factory_make ("mad", "decode");
//  if (!decoder) {
//    std_log(LOG_FILENAME_LINE, "could not find plugin mad");
//    g_print ("could not find plugin \"mad\"");
//    return -1;
//  }

  /* also, we need to add some converters to make sure the audio stream
   * from the decoder is converted into a format the audio sink can
   * understand (if necessary) */
//  conv = gst_element_factory_make ("audioconvert", "audioconvert");
//  if (!conv) {
//    std_log(LOG_FILENAME_LINE, "could not create \"audioconvert\" element!");
//    g_print ("could not create \"audioconvert\" element!");
//    return -1;
//  }
//  resample = gst_element_factory_make ("audioresample", "audioresample");
//  if (!resample) {
//    std_log(LOG_FILENAME_LINE, "could not create \"audioresample\" element!");
//    g_print ("could not create \"audioresample\" element!");
//    return -1;
//  }

  /* and an audio sink */
  audiosink = gst_element_factory_make ("devsoundsink", "play_audio");
  g_assert (audiosink);

  /* add objects to the main pipeline */
  gst_bin_add_many (GST_BIN (bin), filesrc/*, decoder, conv,
      resample*/, audiosink, NULL);

  /* link the elements */
  gst_element_link_many (filesrc, /*decoder, conv, resample,*/ audiosink, NULL);

  /* start playing */
  std_log(LOG_FILENAME_LINE, "START PLAYING");
  gst_element_set_state (bin, GST_STATE_PLAYING);
  std_log(LOG_FILENAME_LINE, "NOW PLAYING");
  /* Run event loop listening for bus messages until EOS or ERROR */
  event_loop (bin);

  /* stop the bin */
  std_log(LOG_FILENAME_LINE, "STOP BIN");
  gst_element_set_state (bin, GST_STATE_NULL);
  std_log(LOG_FILENAME_LINE, "BIN STOPPED");

	std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0); 
  exit (0);
}
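event_loop() is defined elsewhere in this test harness; as an illustration only (not the harness's actual code), a minimal sketch of such a loop, blocking on the bus until EOS or an error arrives, could be:

/* Sketch of an event_loop: block on the pipeline's bus until EOS or error */
static void
event_loop (GstElement * pipe)
{
  GstBus *bus = gst_element_get_bus (pipe);
  GstMessage *msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      (GstMessageType) (GST_MESSAGE_EOS | GST_MESSAGE_ERROR));

  if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ERROR) {
    GError *err = NULL;
    gchar *dbg = NULL;

    gst_message_parse_error (msg, &err, &dbg);
    g_printerr ("Error: %s\n", err->message);
    g_error_free (err);
    g_free (dbg);
  }

  gst_message_unref (msg);
  gst_object_unref (bus);
}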