Code example #1
static void
gst_ffmpegmux_base_init (gpointer g_class)
{
  GstFFMpegMuxClass *klass = (GstFFMpegMuxClass *) g_class;
  GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
  GstElementDetails details;
  GstPadTemplate *videosinktempl, *audiosinktempl, *srctempl;
  AVOutputFormat *in_plugin;
  GstCaps *srccaps, *audiosinkcaps, *videosinkcaps;
  enum CodecID *video_ids = NULL, *audio_ids = NULL;

  in_plugin =
      (AVOutputFormat *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
      GST_FFMUX_PARAMS_QDATA);
  g_assert (in_plugin != NULL);

  /* construct the element details struct */
  details.longname = g_strdup_printf ("FFmpeg %s muxer", in_plugin->long_name);
  details.klass = g_strdup ("Codec/Muxer");
  details.description = g_strdup_printf ("FFmpeg %s muxer",
      in_plugin->long_name);
  details.author = "Wim Taymans <*****@*****.**>, "
      "Ronald Bultje <*****@*****.**>";
  gst_element_class_set_details (element_class, &details);
  g_free (details.longname);
  g_free (details.klass);
  g_free (details.description);

  /* Try to find the caps that belong here */
  srccaps = gst_ffmpeg_formatid_to_caps (in_plugin->name);
  if (!srccaps) {
    GST_DEBUG ("Couldn't get source caps for muxer '%s', skipping format",
        in_plugin->name);
    goto beach;
  }

  if (!gst_ffmpeg_formatid_get_codecids (in_plugin->name,
          &video_ids, &audio_ids, in_plugin)) {
    gst_caps_unref (srccaps);
    GST_DEBUG
        ("Couldn't get sink caps for muxer '%s'. Most likely because no input format mapping exists.",
        in_plugin->name);
    goto beach;
  }

  videosinkcaps = video_ids ? gst_ffmpegmux_get_id_caps (video_ids) : NULL;
  audiosinkcaps = audio_ids ? gst_ffmpegmux_get_id_caps (audio_ids) : NULL;

  /* fix up allowed caps for some muxers */
  /* FIXME : This should be in gstffmpegcodecmap.c ! */
  if (strcmp (in_plugin->name, "flv") == 0) {
    const gint rates[] = { 44100, 22050, 11025 };

    gst_ffmpeg_mux_simple_caps_set_int_list (audiosinkcaps, "rate", 3, rates);
  } else if (strcmp (in_plugin->name, "gif") == 0) {
    if (videosinkcaps)
      gst_caps_unref (videosinkcaps);

    videosinkcaps =
        gst_caps_from_string ("video/x-raw-rgb, bpp=(int)24, depth=(int)24");
  }

  /* pad templates */
  srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
  gst_element_class_add_pad_template (element_class, srctempl);

  if (audiosinkcaps) {
    audiosinktempl = gst_pad_template_new ("audio_%d",
        GST_PAD_SINK, GST_PAD_REQUEST, audiosinkcaps);
    gst_element_class_add_pad_template (element_class, audiosinktempl);
  }

  if (videosinkcaps) {
    videosinktempl = gst_pad_template_new ("video_%d",
        GST_PAD_SINK, GST_PAD_REQUEST, videosinkcaps);
    gst_element_class_add_pad_template (element_class, videosinktempl);
  }

beach:
  klass->in_plugin = in_plugin;
}
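Note: this snippet targets GStreamer 0.10, where a GstElementDetails struct is filled in and freed by hand. In 1.x the same registration collapses to a single call that copies its strings; a minimal sketch of the equivalent (author string shortened, helper name hypothetical):

static void
gst_ffmpegmux_set_metadata_sketch (GstElementClass * element_class,
    AVOutputFormat * in_plugin)
{
  gchar *longname = g_strdup_printf ("FFmpeg %s muxer", in_plugin->long_name);

  /* gst_element_class_set_metadata() copies its arguments, so the
   * temporary string can be freed right away. */
  gst_element_class_set_metadata (element_class, longname, "Codec/Muxer",
      longname, "Wim Taymans, Ronald Bultje");
  g_free (longname);
}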
Code example #2
/*
 * Creates a RTP pipeline for one test.
 * @param frame_data Pointer to the frame data which is passed through the pay/depayloaders.
 * @param frame_data_size Frame data size in bytes.
 * @param frame_count Frame count.
 * @param filtercaps Caps filters.
 * @param pay Payloader name.
 * @param depay Depayloader name.
 * @return
 * Returns pointer to the RTP pipeline.
 * The user must free the RTP pipeline when it's not used anymore.
 */
static rtp_pipeline *
rtp_pipeline_create (const guint8 * frame_data, int frame_data_size,
    int frame_count, const char *filtercaps, const char *pay, const char *depay)
{
  gchar *pipeline_name;
  rtp_pipeline *p;
  GstCaps *caps;

  /* Check parameters. */
  if (!frame_data || !pay || !depay) {
    return NULL;
  }

  /* Allocate memory for the RTP pipeline. */
  p = (rtp_pipeline *) malloc (sizeof (rtp_pipeline));
  if (p == NULL) {
    return NULL;
  }

  p->frame_data = frame_data;
  p->frame_data_size = frame_data_size;
  p->frame_count = frame_count;

  /* Create elements. */
  pipeline_name = g_strdup_printf ("%s-%s-pipeline", pay, depay);
  p->pipeline = gst_pipeline_new (pipeline_name);
  g_free (pipeline_name);
  p->appsrc = gst_element_factory_make ("appsrc", NULL);
  p->rtppay = gst_element_factory_make (pay, NULL);
  p->rtpdepay = gst_element_factory_make (depay, NULL);
  p->fakesink = gst_element_factory_make ("fakesink", NULL);

  /* Did any of the elements fail to be created? */
  if (!p->pipeline || !p->appsrc || !p->rtppay || !p->rtpdepay || !p->fakesink) {
    /* Release created elements. */
    RELEASE_ELEMENT (p->pipeline);
    RELEASE_ELEMENT (p->appsrc);
    RELEASE_ELEMENT (p->rtppay);
    RELEASE_ELEMENT (p->rtpdepay);
    RELEASE_ELEMENT (p->fakesink);

    /* Release allocated memory. */
    free (p);

    return NULL;
  }

  /* Set src properties. */
  caps = gst_caps_from_string (filtercaps);
  g_object_set (p->appsrc, "do-timestamp", TRUE, "caps", caps,
      "format", GST_FORMAT_TIME, NULL);
  gst_caps_unref (caps);

  /* Add elements to the pipeline. */
  gst_bin_add (GST_BIN (p->pipeline), p->appsrc);
  gst_bin_add (GST_BIN (p->pipeline), p->rtppay);
  gst_bin_add (GST_BIN (p->pipeline), p->rtpdepay);
  gst_bin_add (GST_BIN (p->pipeline), p->fakesink);

  /* Link elements. */
  gst_element_link (p->appsrc, p->rtppay);
  gst_element_link (p->rtppay, p->rtpdepay);
  gst_element_link (p->rtpdepay, p->fakesink);

  return p;
}
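A minimal sketch of how a test might drive this helper (frame bytes, caps string, and payloader pair are illustrative; the real suite also pushes the frames through appsrc before tearing down):

static void
rtp_pipeline_usage_sketch (void)
{
  static const guint8 frame_data[] = { 0x00, 0x01, 0x02, 0x03 };
  rtp_pipeline *p;

  p = rtp_pipeline_create (frame_data, sizeof (frame_data), 1,
      "audio/x-gsm,rate=8000,channels=1", "rtpgsmpay", "rtpgsmdepay");
  if (p == NULL)
    return;

  /* ... push frame_data into p->appsrc and run the pipeline here ... */

  gst_element_set_state (p->pipeline, GST_STATE_NULL);
  gst_object_unref (p->pipeline);
  free (p);
}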
Code example #3
// gst-launch v4l2src ! videoscale ! videorate ! capsfilter "caps=video/x-raw-yuv,width=320,height=240,framerate=10/1" ! ffmpegcolorspace ! motioncells ! ffmpegcolorspace ! xvimagesink
int
main (int argc, char *argv[])
{
  GstElement *pipeline, *source, *videor, *videos, *decodebin, *capsf,
      *colorsp0, *colorsp1, *mcells, *sink;
  GstCaps *caps;
  gchar property[20] = { 0 };
  gchar prop_value[100] = { 0 };
  GParamSpec **property_specs;
  guint num_properties, i;
  GValue value = { 0, };
  gboolean found_property = FALSE;
  int ret;

  // Initialisation //
  gst_init (&argc, &argv);
  fprintf (stderr, "Usage: %s test or rtsp rtsp://your/cam/address\n", argv[0]);
  // Create gstreamer elements //
  pipeline = gst_pipeline_new ("motioncells-pipeline");
  if (argc == 2 && (g_strcmp0 (argv[1], "test") == 0))
    source = gst_element_factory_make ("videotestsrc", "vidsrc");
  else if (argc == 3 && (g_strcmp0 (argv[1], "rtsp") == 0))
    source = gst_element_factory_make ("rtspsrc", "rtspsrc0");
  else if (argc == 1)
    source = gst_element_factory_make ("v4l2src", "v4l2");
  else {
    fprintf (stderr, "Usage: %s test or rtsp rtsp://your/cam/address\n",
        argv[0]);
    exit (-1);
  }

  videor = gst_element_factory_make ("videorate", "videor");
  videos = gst_element_factory_make ("videoscale", "videos");
  capsf = gst_element_factory_make ("capsfilter", "capsf");
  if (argc == 3 && (g_strcmp0 (argv[1], "rtsp") == 0))
    decodebin = gst_element_factory_make ("decodebin", "decode");
  else
    decodebin = NULL;
  colorsp0 = gst_element_factory_make ("ffmpegcolorspace", "colorspace0");
  mcells = gst_element_factory_make ("motioncells", "mcells");
  colorsp1 = gst_element_factory_make ("ffmpegcolorspace", "colorspace1");
  sink = gst_element_factory_make ("xvimagesink", "xv-image-sink");
  if (!pipeline || !source || !videor || !videos || !capsf || !colorsp0
      || !mcells || !colorsp1 || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  if (argc == 3 && (g_strcmp0 (argv[1], "rtsp") == 0) && !decodebin) {
    g_printerr ("Decodebin could not be created. Exiting.\n");
    return -1;
  }
  if ((g_strcmp0 (argv[1], "rtsp") == 0)) {
    g_object_set (G_OBJECT (source), "location", argv[2], NULL);
    g_object_set (G_OBJECT (source), "latency", 1000, NULL);
  } else if ((g_strcmp0 (argv[1], "test") == 0))
    g_object_set (G_OBJECT (source), "pattern", 18, NULL);

  caps =
      gst_caps_from_string
      ("video/x-raw-yuv,width=320,height=240,framerate=10/1");
  g_object_set (G_OBJECT (capsf), "caps", caps, NULL);
  //g_object_set (G_OBJECT (sink), "sync",FALSE,NULL);

  if (argc > 1) {
    if (g_strcmp0 (argv[1], "test") == 0) {
      gst_bin_add_many (GST_BIN (pipeline),
          source, videor, videos, capsf, colorsp0, mcells, colorsp1, sink,
          NULL);

      gst_element_link_many (source, videor, videos, capsf, colorsp0, mcells,
          colorsp1, sink, NULL);
    } else if (g_strcmp0 (argv[1], "rtsp") == 0) {
      gst_bin_add_many (GST_BIN (pipeline),
          source, videor, videos, capsf, decodebin, colorsp0, mcells, colorsp1,
          sink, NULL);

      gst_element_link_many (source, videor, videos, capsf, decodebin, colorsp0,
          mcells, colorsp1, sink, NULL);
    }
  } else {                      //default
    gst_bin_add_many (GST_BIN (pipeline),
        source, videor, videos, capsf, colorsp0, mcells, colorsp1, sink, NULL);

    gst_element_link_many (source, videor, videos, capsf, colorsp0, mcells,
        colorsp1, sink, NULL);
  }

  g_print ("Now playing\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_print ("Running...\n");
  g_print ("You can use these properties : \n");
  gst_element_print_properties (mcells);
  g_print ("change property here: example  some_property property_value \n");
  g_print ("Quit with 'q' \n");
  //get all properties
  property_specs = g_object_class_list_properties (G_OBJECT_GET_CLASS (mcells),
      &num_properties);
  while (TRUE) {
    found_property = FALSE;
    i = 0;

    ret = scanf ("%19s %99s", property, prop_value);

    if (ret == EOF)
      break;
    if (ret < 1) {
      g_printerr ("Error parsing command.\n");
      continue;
    }

    if ((g_strcmp0 (property, "q") == 0) || (g_strcmp0 (prop_value, "q") == 0))
      break;
    printf ("property: %s -> value: %s \n", property, prop_value);
    for (i = 0; i < num_properties; i++) {
      GParamSpec *param = property_specs[i];
      g_value_init (&value, param->value_type);
      g_object_get_property (G_OBJECT (mcells), param->name, &value);
      //fprintf(stderr,"property: %s and param name: %s and property value: %s \n",property,param->name,prop_value);
      if ((g_strcmp0 (property, param->name) == 0) && !found_property &&
          (g_strcmp0 (prop_value, "") != 0)
          && (g_strcmp0 (prop_value, "\"") != 0)
          && (g_strcmp0 (prop_value, "\'") != 0)) {
        GType type;
        found_property = TRUE;
        type = param->value_type;
        setProperty (mcells, property, prop_value, type, &value);
      }
      g_value_unset (&value);
      if (found_property)
        break;
    }
  }

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}
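setProperty() is defined elsewhere in this example. For simple cases the same effect can be had with core GStreamer's string-to-property helper; a sketch:

static void
set_property_from_string (GstElement * element, const gchar * name,
    const gchar * value)
{
  /* gst_util_set_object_arg() parses the string into the property's
   * actual type (int, boolean, fraction, ...) before setting it. */
  gst_util_set_object_arg (G_OBJECT (element), name, value);
}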
Code example #4
static GstMultipartPad *
gst_multipart_find_pad_by_mime (GstMultipartDemux * demux, gchar * mime,
    gboolean * created)
{
  GSList *walk;

  walk = demux->srcpads;
  while (walk) {
    GstMultipartPad *pad = (GstMultipartPad *) walk->data;

    if (!strcmp (pad->mime, mime)) {
      if (created) {
        *created = FALSE;
      }
      return pad;
    }

    walk = walk->next;
  }
  /* pad not found, create it */
  {
    GstPad *pad;
    GstMultipartPad *mppad;
    gchar *name;
    const gchar *capsname;
    GstCaps *caps;
    gchar *stream_id;
    GstEvent *event;

    mppad = g_new0 (GstMultipartPad, 1);

    GST_DEBUG_OBJECT (demux, "creating pad with mime: %s", mime);

    name = g_strdup_printf ("src_%u", demux->numpads);
    pad =
        gst_pad_new_from_static_template (&multipart_demux_src_template_factory,
        name);
    g_free (name);

    mppad->pad = pad;
    mppad->mime = g_strdup (mime);
    mppad->last_ret = GST_FLOW_OK;
    mppad->last_ts = GST_CLOCK_TIME_NONE;
    mppad->discont = TRUE;

    demux->srcpads = g_slist_prepend (demux->srcpads, mppad);
    demux->numpads++;

    gst_pad_use_fixed_caps (pad);
    gst_pad_set_active (pad, TRUE);

    /* prepare and send stream-start */
    if (!demux->have_group_id) {
      event = gst_pad_get_sticky_event (demux->sinkpad,
          GST_EVENT_STREAM_START, 0);

      if (event) {
        demux->have_group_id =
            gst_event_parse_group_id (event, &demux->group_id);
        gst_event_unref (event);
      } else if (!demux->have_group_id) {
        demux->have_group_id = TRUE;
        demux->group_id = gst_util_group_id_next ();
      }
    }

    stream_id = gst_pad_create_stream_id (pad,
        GST_ELEMENT_CAST (demux), demux->mime_type);

    event = gst_event_new_stream_start (stream_id);
    if (demux->have_group_id)
      gst_event_set_group_id (event, demux->group_id);

    gst_pad_store_sticky_event (pad, event);
    g_free (stream_id);
    gst_event_unref (event);

    /* take the mime type, convert it to the caps name */
    capsname = gst_multipart_demux_get_gstname (demux, mime);
    caps = gst_caps_from_string (capsname);
    GST_DEBUG_OBJECT (demux, "caps for pad: %s", capsname);
    gst_pad_set_caps (pad, caps);
    gst_element_add_pad (GST_ELEMENT_CAST (demux), pad);
    gst_caps_unref (caps);

    if (created) {
      *created = TRUE;
    }

    if (demux->singleStream) {
      gst_element_no_more_pads (GST_ELEMENT_CAST (demux));
    }

    return mppad;
  }
}
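A caller in the demux loop would use this roughly as follows (mime string illustrative):

static void
example_find_pad_usage (GstMultipartDemux * demux)
{
  gboolean created = FALSE;
  GstMultipartPad *srcpad;

  srcpad = gst_multipart_find_pad_by_mime (demux, (gchar *) "image/jpeg",
      &created);
  if (srcpad != NULL && created) {
    /* a new src pad was just added and stream-start was pushed */
  }
}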
Code example #5
void
gst_play_main() {
    GstPad *audiopad, *videopad;
    //Stop before playing
    if (pipeline) {
        gst_element_set_state(pipeline, GST_STATE_NULL);
        gst_object_unref(GST_OBJECT(pipeline));
        pipeline = NULL;
    }
    {
        mutex = 0;

        /* setup */
        pipeline = gst_pipeline_new("pipeline");

        bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
        gst_bus_add_signal_watch(bus);
        g_signal_connect(G_OBJECT(bus), "message::state-changed",
                (GCallback)state_changed_cb, NULL);
        g_signal_connect(G_OBJECT(bus), "message::eos", (GCallback)eos_cb, NULL);
        gst_object_unref(bus);

        src = gst_element_factory_make("filesrc", "source");
        g_object_set(G_OBJECT(src), "location", filename, NULL);
        typefind = gst_element_factory_make("typefind", "typefinder");
        //g_signal_connect(typefind,"have-type",G_CALLBACK(typefound_cb),loop);
        dec = gst_element_factory_make("decodebin", "decoder");
        g_signal_connect(dec, "new-decoded-pad", G_CALLBACK(cb_newpad), NULL);
        //myplugin = gst_element_factory_make("myplugin","MyPlugin");
        audioqueue = gst_element_factory_make("queue", "audioqueue");
        videoqueue = gst_element_factory_make("queue", "videoqueue");
        gst_bin_add_many(GST_BIN(pipeline), src, typefind, dec, NULL);
        gst_element_link_many(src, typefind, dec, NULL);

        /* create audio output */
        audio = gst_bin_new("audiobin");
        conv = gst_element_factory_make("audioconvert", "aconv");
        typefind2 = gst_element_factory_make("typefind", "typefinder2");
        volume = gst_element_factory_make("volume", "volume");
        level = gst_element_factory_make("level", "level");
        //g_signal_connect(typefind2,"have-type",G_CALLBACK(typefound_cb),loop);
        audiopad = gst_element_get_static_pad(audioqueue, "sink");
        sink = gst_element_factory_make("alsasink", "sink");
        gst_bin_add_many(GST_BIN(audio), audioqueue, conv, typefind2, volume,
                level, sink, NULL);
        gst_element_link_many(audioqueue, conv, typefind2, volume, level,
                sink, NULL);
        gst_element_add_pad(audio,
                gst_ghost_pad_new("sink", audiopad));
        gst_object_unref(audiopad);
        gst_bin_add(GST_BIN(pipeline), audio);

        /* create video output */
        video = gst_bin_new("videobin");
        caps2 = gst_caps_from_string("video/x-raw-yuv,framerate=25/1");
        caps = gst_caps_from_string("video/x-raw-yuv,width=1024,height=768");
        
        videoRate = gst_element_factory_make("videorate", "Video Rate");
        capsFilter1 = gst_element_factory_make("capsfilter", "Caps Filter");
        g_object_set(G_OBJECT(capsFilter1), "caps", caps2, NULL);
        videoScale = gst_element_factory_make("videoscale", "Video Scale");
        //g_object_set(G_OBJECT(videoScale),"add-borders","true",NULL);
        capsFilter2 = gst_element_factory_make("capsfilter", "Caps Filter2");
        g_object_set(G_OBJECT(capsFilter2), "caps", caps, NULL);
        
        convVid = gst_element_factory_make("ffmpegcolorspace", "converter");
        videopad = gst_element_get_static_pad(videoqueue, "sink");
        videosink = gst_element_factory_make("xvimagesink", "videosink");
        g_object_set(G_OBJECT(videosink), "pixel-aspect-ratio", "3/4", NULL);
        g_object_set(G_OBJECT(videosink), "force-aspect-ratio", TRUE, NULL);
        gst_bin_add_many(GST_BIN(video), videoqueue, videoScale, capsFilter2,
                videoRate, capsFilter1, convVid, videosink, NULL);
        gst_element_link_many(videoqueue, videoScale, capsFilter2, videoRate,
                capsFilter1, convVid, videosink, NULL);
        gst_element_add_pad(video, gst_ghost_pad_new("sink", videopad));
        gst_object_unref(videopad);
        gst_bin_add(GST_BIN(pipeline), video);

	

        /* run */
        //gst_element_set_state (pipeline, GST_STATE_PLAYING);

        g_print("Now playing: %s\n", filename);
        if (GST_IS_X_OVERLAY (videosink))
        {
            gst_x_overlay_set_window_handle(GST_X_OVERLAY(videosink), GPOINTER_TO_UINT(GINT_TO_POINTER(GDK_WINDOW_XWINDOW(video_output->window))));
           /* if(gst_x_overlay_set_render_rectangle(GST_X_OVERLAY(videosink),0,0,800,600))
            {
                gst_x_overlay_expose(GST_X_OVERLAY(videosink));    
                g_print("Redraw");
            }*/
        }
        gst_element_set_state(pipeline, GST_STATE_PLAYING);
        gdouble curr_vol;
        g_object_get(volume, "volume", &curr_vol, NULL);
        gtk_scale_button_set_value(GTK_SCALE_BUTTON(volumeButton), curr_vol * 10);
        g_timeout_add(1, refresh_ui, NULL);
    }
}
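cb_newpad() is connected above but not shown here; the classic decodebin "new-decoded-pad" callback from the 0.10 era looks roughly like this (a sketch, reusing the audio/video bin globals from this example):

static void
cb_newpad (GstElement * decodebin, GstPad * pad, gboolean last, gpointer data)
{
    GstCaps *caps;
    GstStructure *str;
    GstPad *sinkpad;

    /* route the new pad to the audio or video bin based on its caps */
    caps = gst_pad_get_caps(pad);
    str = gst_caps_get_structure(caps, 0);
    if (g_strrstr(gst_structure_get_name(str), "audio"))
        sinkpad = gst_element_get_static_pad(audio, "sink");
    else
        sinkpad = gst_element_get_static_pad(video, "sink");
    gst_caps_unref(caps);

    if (!GST_PAD_IS_LINKED(sinkpad))
        gst_pad_link(pad, sinkpad);
    gst_object_unref(sinkpad);
}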
Code example #6
File: glive-client.c  Project: Rock1965/glive
/*******************************************************************************
	Gstreamer pipeline creation and init
*******************************************************************************/
int vc_gst_pipeline_init(vc_data *data)
{
	GstStateChangeReturn ret;
	GstElement *rtp_udpsrc, *rtcp_udpsrc, *rtcp_udpsink, *decoder,
		*depayloader, *converter, *sink, *rtpbin;
	GstCaps *caps;

	/* Request Pads */

	/* Template */
	GstPadTemplate* rtpbin_pad_template;
	/* TODO - Find a way to free the pads when the pipeline is closed */

	/* Create a new GMainLoop */
	data->gst_data.loop = g_main_loop_new (NULL, FALSE);
	data->gst_data.context = g_main_loop_get_context(data->gst_data.loop);

	/* Create gstreamer elements */
	data->gst_data.pipeline  = gst_pipeline_new ("videoclient");
	VC_CHECK_ELEMENT_ERROR(data->gst_data.pipeline, "pipeline");

	/*
	 * RTP UDP Source - for received RTP messages
	 */
	rtp_udpsrc = gst_element_factory_make ("udpsrc", "rtp-udpsrc");
	VC_CHECK_ELEMENT_ERROR(rtp_udpsrc,"rtp-udpsrc");

	g_print ("Setting RTP source port to: %d\n", data->cfg.rtp_recv_port);
	g_object_set (G_OBJECT (rtp_udpsrc),"port", data->cfg.rtp_recv_port, NULL);

	/* Create GstCaps structure from string.  This function allocates
	     memory for the structure */
	caps = gst_caps_from_string(
			"application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264");
	g_object_set (G_OBJECT (rtp_udpsrc), "caps", caps, NULL);
	gst_caps_unref(caps); /* Free the structure */

	/*
	 * RTCP UDP Source
	 */
	rtcp_udpsrc = gst_element_factory_make ("udpsrc", "rtcp-udpsrc");
	VC_CHECK_ELEMENT_ERROR(rtcp_udpsrc,"rtcp-udpsrc");

	g_print ("Setting RTCP udp source port to: %d\n",
			data->cfg.rtcp_recv_port);
	g_object_set (G_OBJECT (rtcp_udpsrc), "port", data->cfg.rtcp_recv_port, NULL);

	/*
	 * RTCP UDP Sink (transmits data from rtpbin back to server)
	 */
	rtcp_udpsink = gst_element_factory_make ("udpsink", "rtcp-udpsink");
	VC_CHECK_ELEMENT_ERROR(rtcp_udpsink,"rtcp-udpsink");

	g_print ("Setting RTCP udp sink port to: %d\n",
			data->cfg.rtcp_send_port);
	g_object_set (G_OBJECT (rtcp_udpsink),
			"host", data->cfg.server_ip_addr,
			"port", data->cfg.rtcp_send_port,
			"sync", FALSE,
			"async", FALSE,
			NULL);

	/*
	 * RTP Bin - Automates RTP/RTCP management
	 */
	rtpbin = gst_element_factory_make ("gstrtpbin", "rtpbin");
	VC_CHECK_ELEMENT_ERROR(rtpbin,"gstrtpbin");

	/*
	 * Request pads from rtpbin, starting with the RTP receive sink pad,
	 * This pad receives RTP data from the network (rtp-udpsrc).
	 */
	rtpbin_pad_template = gst_element_class_get_pad_template (
			GST_ELEMENT_GET_CLASS (rtpbin),
			"recv_rtp_sink_%d");
	/* Use the template to request the pad */
	data->gst_data.recv_rtp_sink_pad = gst_element_request_pad (rtpbin, rtpbin_pad_template,
			"recv_rtp_sink_0", NULL);
	/* Print the name for confirmation */
	g_print ("A new pad %s was created\n",
			gst_pad_get_name (data->gst_data.recv_rtp_sink_pad));

	rtpbin_pad_template = gst_element_class_get_pad_template (
			GST_ELEMENT_GET_CLASS (rtpbin),
			"recv_rtcp_sink_%d");
	data->gst_data.recv_rtcp_sink_pad = gst_element_request_pad (rtpbin,
			rtpbin_pad_template,
			"recv_rtcp_sink_0", NULL);
	g_print ("A new pad %s was created\n",
			gst_pad_get_name (data->gst_data.recv_rtcp_sink_pad));

	rtpbin_pad_template = gst_element_class_get_pad_template (
			GST_ELEMENT_GET_CLASS (rtpbin),
			"send_rtcp_src_%d");
	data->gst_data.send_rtcp_src_pad = gst_element_request_pad (rtpbin, rtpbin_pad_template,
			"send_rtcp_src_0", NULL);
	g_print ("A new pad %s was created\n",
			gst_pad_get_name (data->gst_data.send_rtcp_src_pad));

	/* Set the latency of the rtpbin */
	g_object_set (G_OBJECT (rtpbin),
			"latency", DEFAULT_LATENCY_MS,
			"rtcp-sync-interval",1000, NULL);

	/*
	 * RTP H.264 Depayloader
	 */
	depayloader = gst_element_factory_make ("rtph264depay","depayloader");
	VC_CHECK_ELEMENT_ERROR(depayloader,"rtph264depay");
	data->gst_data.depayloader = depayloader;

	/* If we are on an ARM architecture, assume an i.MX processor and build
	   the pipeline to decode and display using the i.MX plugins */
#ifdef __arm__
	int assume_imx = 1;
#else
	int assume_imx = 0;
#endif

	if (assume_imx){
		/*
	 	 * i.MX VPU decoder
	 	 */
		decoder = gst_element_factory_make ("vpudec", "decoder");
		VC_CHECK_ELEMENT_ERROR(decoder,"vpudec");

		/*
	 	 * i.MX Video sink
	 	 */
		sink = gst_element_factory_make ("mfw_v4lsink", "sink");
		VC_CHECK_ELEMENT_ERROR(sink,"mfw_v4lsink");

		/* Set max lateness to .5 seconds */
		g_object_set (G_OBJECT(sink), "max-lateness", (long long)50000000, NULL);
		g_object_set (G_OBJECT(sink), "sync", FALSE, NULL);
		g_object_set (G_OBJECT(sink), "device", "/dev/video16",NULL);
		
		/* Add elements into the pipeline */
		g_print("  Adding elements to pipeline...\n");

		gst_bin_add_many (GST_BIN (data->gst_data.pipeline),
			          rtp_udpsrc,
			          rtcp_udpsrc,
			          rtpbin,
			          rtcp_udpsink,
			          depayloader,
			          decoder,
			          sink,
			          NULL);

		/* Link some of the elements together */
		g_print("  Linking some elements...\n");

		if(!gst_element_link_many (depayloader, decoder,  sink, NULL))
			g_print("Error: could not link the depayloader, decoder, and sink\n");
		
	}
	else {
		/*
	 	 * ffmpeg decoder
	 	 */
		decoder = gst_element_factory_make ("ffdec_h264", "decoder");
		VC_CHECK_ELEMENT_ERROR(decoder,"ffdec_h264");
		
		/*
		 * Colorspace converter
		 */
		converter = gst_element_factory_make ("ffmpegcolorspace", "converter");
		VC_CHECK_ELEMENT_ERROR(converter,"ffmpegcolorspace");

		/*
		 * Automatically-selected video sink
		 */
		sink = gst_element_factory_make ("autovideosink", "sink");
		VC_CHECK_ELEMENT_ERROR(sink,"autovideosink");
		
		/* Add elements into the pipeline */
		g_print("  Adding elements to pipeline...\n");

		gst_bin_add_many (GST_BIN (data->gst_data.pipeline),
			          rtp_udpsrc,
			          rtcp_udpsrc,
			          rtpbin,
			          rtcp_udpsink,
			          depayloader,
			          converter,
			          decoder,
			          sink,
			          NULL);

		/* Link some of the elements together */
		g_print("  Linking some elements...\n");

		if(!gst_element_link_many (depayloader, decoder, converter, sink, NULL))
			g_print("Error: could not link the depayloader, decoder, converter, and sink\n");	
		
	}
	
	/*
	 * Connect to the pad-added signal for the rtpbin.  This allows us to link
	 * the dynamic RTP source pad to the depayloader when it is created.
	 */
	if(!g_signal_connect (rtpbin, "pad-added",
			G_CALLBACK (vc_pad_added_handler), data))
		g_print("Error: could not add signal handler\n");

	/*
	 * Connect the on-timeout signal
	 */
	if(!g_signal_connect (rtpbin, "on-timeout", G_CALLBACK (vc_on_timeout_handler), data))
		g_print("Error: could not add on-timeout signal handler\n");

	/* Link some of the elements together */
	g_print("  Linking RTP and RTCP sources to rtpbin...\n");
	/* Link the rtp_udpsrc src pad to the rtpbin recv_rtp_sink_0 pad */
	if(!gst_element_link_pads(rtp_udpsrc, "src", rtpbin, "recv_rtp_sink_0"))
		g_print("Error: could not link udp source to rtp sink\n");

	/* Link the rtcp_udpsrc src pad to the rtpbin recv_rtcp_sink_0 pad */
	if(!gst_element_link_pads(rtcp_udpsrc, "src", rtpbin, "recv_rtcp_sink_0"))
		g_print("Error: could not link udp source to rtcp sink\n");

	/* Link the rtpbin send_rtcp_src_0 pad to the rtcp_udpsink sink pad */
	if(!gst_element_link_pads(rtpbin, "send_rtcp_src_0", rtcp_udpsink, "sink"))
		g_print("Error: could not link rtcp source to udp sink\n");

	/* Set the pipeline to "playing" state*/
	g_print ("Now playing\n");
	ret = gst_element_set_state (data->gst_data.pipeline, GST_STATE_PLAYING);

	if (ret == GST_STATE_CHANGE_FAILURE) {
		g_printerr ("Unable to set the pipeline to the playing state.\n");
		gst_object_unref (data->gst_data.pipeline);
		return -1;
	}

	return 0;
}
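vc_pad_added_handler() is referenced above but not shown; a sketch consistent with this pipeline (vc_data field names taken from the code above):

static void
vc_pad_added_handler (GstElement * rtpbin, GstPad * new_pad, vc_data * data)
{
	gchar *name = gst_pad_get_name (new_pad);

	/* rtpbin adds recv_rtp_src_* pads once RTP packets start flowing;
	   link the new pad to the depayloader so decoding can begin */
	if (g_str_has_prefix (name, "recv_rtp_src_")) {
		GstPad *sinkpad =
			gst_element_get_static_pad (data->gst_data.depayloader, "sink");

		if (gst_pad_link (new_pad, sinkpad) != GST_PAD_LINK_OK)
			g_print ("Error: could not link rtpbin to depayloader\n");
		gst_object_unref (sinkpad);
	}
	g_free (name);
}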
Code example #7
int
main (int argc, char **argv)
{
    GError *err = NULL;
    gchar *outputuri = NULL;
    gchar *format = NULL;
    gchar *aformat = NULL;
    gchar *vformat = NULL;
    gboolean allmissing = FALSE;
    gboolean listcodecs = FALSE;
    GOptionEntry options[] = {
        {   "silent", 's', 0, G_OPTION_ARG_NONE, &silent,
            "Don't output the information structure", NULL
        },
        {   "outputuri", 'o', 0, G_OPTION_ARG_STRING, &outputuri,
            "URI to encode to", "URI (<protocol>://<location>)"
        },
        {   "format", 'f', 0, G_OPTION_ARG_STRING, &format,
            "Container format", "<GstCaps>"
        },
        {   "vformat", 'v', 0, G_OPTION_ARG_STRING, &vformat,
            "Video format", "<GstCaps>"
        },
        {   "aformat", 'a', 0, G_OPTION_ARG_STRING, &aformat,
            "Audio format", "<GstCaps>"
        },
        {   "allmissing", 'm', 0, G_OPTION_ARG_NONE, &allmissing,
            "encode to all matching format/codec that aren't specified", NULL
        },
        {   "list-codecs", 'l', 0, G_OPTION_ARG_NONE, &listcodecs,
            "list all available codecs and container formats", NULL
        },
        {NULL}
    };
    GOptionContext *ctx;
    GstEncodingProfile *prof;
    gchar *inputuri;

    ctx = g_option_context_new ("- encode URIs with GstProfile and encodebin");
    g_option_context_add_main_entries (ctx, options, NULL);
    g_option_context_add_group (ctx, gst_init_get_option_group ());

    if (!g_option_context_parse (ctx, &argc, &argv, &err)) {
        g_print ("Error initializing: %s\n", err->message);
        exit (1);
    }

    if (listcodecs) {
        list_codecs ();
        g_option_context_free (ctx);
        exit (0);
    }

    if (outputuri == NULL || argc != 2) {
        g_print ("%s", g_option_context_get_help (ctx, TRUE, NULL));
        g_option_context_free (ctx);
        exit (-1);
    }

    g_option_context_free (ctx);

    /* Fixup outputuri to be a URI */
    inputuri = ensure_uri (argv[1]);
    outputuri = ensure_uri (outputuri);

    if (allmissing) {
        GList *muxers;
        GstCaps *formats = NULL;
        GstCaps *vformats = NULL;
        GstCaps *aformats = NULL;
        guint f, v, a, flen, vlen, alen;

        if (!format)
            formats = gst_caps_list_container_formats (GST_RANK_NONE);
        else
            formats = gst_caps_from_string (format);

        if (!vformat)
            vformats = gst_caps_list_video_encoding_formats (GST_RANK_NONE);
        else
            vformats = gst_caps_from_string (vformat);

        if (!aformat)
            aformats = gst_caps_list_audio_encoding_formats (GST_RANK_NONE);
        else
            aformats = gst_caps_from_string (aformat);
        muxers =
            gst_element_factory_list_get_elements (GST_ELEMENT_FACTORY_TYPE_MUXER,
                    GST_RANK_NONE);

        flen = gst_caps_get_size (formats);

        for (f = 0; f < flen; f++) {
            GstCaps *container =
                gst_caps_new_full (gst_caps_steal_structure (formats, 0), NULL);
            GstCaps *compatv =
                gst_caps_list_compatible_codecs (container, vformats, muxers);
            GstCaps *compata =
                gst_caps_list_compatible_codecs (container, aformats, muxers);

            vlen = gst_caps_get_size (compatv);
            alen = gst_caps_get_size (compata);


            for (v = 0; v < vlen; v++) {
                GstCaps *vcodec =
                    gst_caps_new_full (gst_structure_copy (gst_caps_get_structure
                                       (compatv, v)), NULL);
                for (a = 0; a < alen; a++) {
                    GstCaps *acodec =
                        gst_caps_new_full (gst_structure_copy (gst_caps_get_structure
                                           (compata, a)), NULL);

                    prof =
                        create_profile ((GstCaps *) container, (GstCaps *) vcodec,
                                        (GstCaps *) acodec);
                    if (G_UNLIKELY (prof == NULL)) {
                        g_print ("Wrong arguments\n");
                        break;
                    }
                    outputuri =
                        ensure_uri (generate_filename (container, vcodec, acodec));
                    transcode_file (inputuri, outputuri, prof);
                    gst_encoding_profile_unref (prof);

                    gst_caps_unref (acodec);
                }
                gst_caps_unref (vcodec);
            }
            gst_caps_unref (container);
        }

    } else {

        /* Create the profile */
        prof = create_profile_from_string (format, vformat, aformat);
        if (G_UNLIKELY (prof == NULL)) {
            g_print ("Encoding arguments are not valid !\n");
            return 1;
        }

        /* Transcode file */
        transcode_file (inputuri, outputuri, prof);

        /* cleanup */
        gst_encoding_profile_unref (prof);

    }
    return 0;
}
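create_profile() and create_profile_from_string() live elsewhere in this example; a plausible sketch of the former using the GstPbutils encoding-profile API (profile name and NULL presets are arbitrary choices):

static GstEncodingProfile *
create_profile_sketch (GstCaps * cont, GstCaps * vcodec, GstCaps * acodec)
{
    GstEncodingContainerProfile *cprof;

    /* container profile wrapping one video and one audio stream profile */
    cprof = gst_encoding_container_profile_new ("transcode-profile", NULL,
            cont, NULL);
    if (vcodec)
        gst_encoding_container_profile_add_profile (cprof,
            (GstEncodingProfile *)
            gst_encoding_video_profile_new (vcodec, NULL, NULL, 0));
    if (acodec)
        gst_encoding_container_profile_add_profile (cprof,
            (GstEncodingProfile *)
            gst_encoding_audio_profile_new (acodec, NULL, NULL, 0));

    return (GstEncodingProfile *) cprof;
}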
Code example #8
static void
gst_video_mark_class_init (GstSimpleVideoMarkClass * klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GstBaseTransformClass *base_transform_class =
      GST_BASE_TRANSFORM_CLASS (klass);
  GstVideoFilterClass *video_filter_class = GST_VIDEO_FILTER_CLASS (klass);

  gst_element_class_add_pad_template (GST_ELEMENT_CLASS (klass),
      gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
          gst_caps_from_string (VIDEO_CAPS)));
  gst_element_class_add_pad_template (GST_ELEMENT_CLASS (klass),
      gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
          gst_caps_from_string (VIDEO_CAPS)));

  gst_element_class_set_static_metadata (GST_ELEMENT_CLASS (klass),
      "Video marker", "Filter/Effect/Video",
      "Marks a video signal with a pattern", "Wim Taymans <*****@*****.**>");

  gobject_class->set_property = gst_video_mark_set_property;
  gobject_class->get_property = gst_video_mark_get_property;
  gobject_class->dispose = gst_video_mark_dispose;
  gobject_class->finalize = gst_video_mark_finalize;
  base_transform_class->start = GST_DEBUG_FUNCPTR (gst_video_mark_start);
  base_transform_class->stop = GST_DEBUG_FUNCPTR (gst_video_mark_stop);
  video_filter_class->set_info = GST_DEBUG_FUNCPTR (gst_video_mark_set_info);
  video_filter_class->transform_frame_ip =
      GST_DEBUG_FUNCPTR (gst_video_mark_transform_frame_ip);

  g_object_class_install_property (gobject_class, PROP_PATTERN_WIDTH,
      g_param_spec_int ("pattern-width", "Pattern width",
          "The width of the pattern markers", 1, G_MAXINT,
          DEFAULT_PATTERN_WIDTH,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_PATTERN_HEIGHT,
      g_param_spec_int ("pattern-height", "Pattern height",
          "The height of the pattern markers", 1, G_MAXINT,
          DEFAULT_PATTERN_HEIGHT,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_PATTERN_COUNT,
      g_param_spec_int ("pattern-count", "Pattern count",
          "The number of pattern markers", 0, G_MAXINT,
          DEFAULT_PATTERN_COUNT,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_PATTERN_DATA_COUNT,
      g_param_spec_int ("pattern-data-count", "Pattern data count",
          "The number of extra data pattern markers", 0, 64,
          DEFAULT_PATTERN_DATA_COUNT,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_PATTERN_DATA,
      g_param_spec_uint64 ("pattern-data", "Pattern data",
          "The extra data pattern markers", 0, G_MAXUINT64,
          DEFAULT_PATTERN_DATA,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_ENABLED,
      g_param_spec_boolean ("enabled", "Enabled",
          "Enable or disable the filter",
          DEFAULT_ENABLED,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_LEFT_OFFSET,
      g_param_spec_int ("left-offset", "Left Offset",
          "The offset from the left border where the pattern starts", 0,
          G_MAXINT, DEFAULT_LEFT_OFFSET,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_BOTTOM_OFFSET,
      g_param_spec_int ("bottom-offset", "Bottom Offset",
          "The offset from the bottom border where the pattern starts", 0,
          G_MAXINT, DEFAULT_BOTTOM_OFFSET,
          G_PARAM_READWRITE | G_PARAM_CONSTRUCT | G_PARAM_STATIC_STRINGS));

}
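Once the class is registered, the properties installed above behave like any other GObject property; for example (factory name "simplevideomark" assumed):

static void
example_configure_videomark (void)
{
  GstElement *mark = gst_element_factory_make ("simplevideomark", NULL);

  /* property names match the g_param_spec_* calls above */
  g_object_set (mark, "pattern-width", 8, "pattern-count", 4,
      "enabled", TRUE, NULL);
  gst_object_unref (mark);
}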
Code example #9
static gboolean
setup_pipeline (void)
{
  GstBus *bus;
  gboolean res = TRUE;
  GstElement *vmux = NULL, *ienc = NULL, *sink = NULL, *aenc = NULL, *ipp =
      NULL;
  GstCaps *filter_caps = NULL;

  camera_bin = gst_element_factory_make ("camerabin", NULL);
  if (NULL == camera_bin) {
    g_warning ("can't create camerabin element\n");
    goto error;
  }
  g_object_set (camera_bin, "flags", flags, NULL);

  g_signal_connect (camera_bin, "image-done", (GCallback) img_capture_done,
      NULL);

  bus = gst_pipeline_get_bus (GST_PIPELINE (camera_bin));
  /* Add sync handler for time critical messages that need to be handled fast */
  gst_bus_set_sync_handler (bus, sync_bus_callback, NULL);
  /* Handle normal messages asynchronously */
  gst_bus_add_watch (bus, bus_callback, NULL);
  gst_object_unref (bus);

  GST_INFO_OBJECT (camera_bin, "camerabin created");

  /* configure used elements */
  res &= setup_pipeline_element ("viewfinder-sink", vfsink_name, &sink);
  res &= setup_pipeline_element ("audio-source", audiosrc_name, NULL);
  res &= setup_pipeline_element ("video-source", videosrc_name, NULL);
  res &= setup_pipeline_element ("video-source-filter", video_src_filter, NULL);
  res &= setup_pipeline_element ("viewfinder-filter", viewfinder_filter, NULL);

  if (audioenc_name) {
    aenc = create_audioencoder_bin ();
    if (aenc)
      g_object_set (camera_bin, "audio-encoder", aenc, NULL);
    else
      GST_WARNING ("Could not make audio encoder element");
  }

  if (imagepp_name) {
    ipp = create_ipp_bin ();
    if (ipp)
      g_object_set (camera_bin, "image-post-processing", ipp, NULL);
    else
      GST_WARNING ("Could not create ipp elements");
  }

  res &= setup_pipeline_element ("video-encoder", videoenc_name, NULL);
  res &= setup_pipeline_element ("image-encoder", imageenc_name, &ienc);
  res &= setup_pipeline_element ("image-formatter", imageformatter_name, NULL);
  res &= setup_pipeline_element ("video-muxer", videomux_name, &vmux);
  if (!res) {
    goto error;
  }
  GST_INFO_OBJECT (camera_bin, "elements created");

  /* set properties */
  if (src_format) {
    filter_caps = gst_caps_from_string (src_format);
  } else if (src_csp && strlen (src_csp) == 4) {
    /* Set requested colorspace format, this is needed if the default 
       colorspace negotiated for viewfinder doesn't match with e.g. encoders. */
    filter_caps = gst_caps_new_simple ("video/x-raw-yuv",
        "format", GST_TYPE_FOURCC,
        GST_MAKE_FOURCC (src_csp[0], src_csp[1], src_csp[2], src_csp[3]), NULL);
  }

  if (filter_caps) {
    g_object_set (camera_bin, "filter-caps", filter_caps, NULL);
    gst_caps_unref (filter_caps);
  }

  g_object_set (sink, "sync", TRUE, NULL);

  GST_INFO_OBJECT (camera_bin, "elements configured");

  /* configure a resolution and framerate */
  if (mode == 1) {
    g_signal_emit_by_name (camera_bin, "set-video-resolution-fps", image_width,
        image_height, view_framerate_num, view_framerate_den, NULL);
  } else {
    g_signal_emit_by_name (camera_bin, "set-image-resolution", image_width,
        image_height, NULL);
  }

  if (GST_STATE_CHANGE_FAILURE ==
      gst_element_set_state (camera_bin, GST_STATE_READY)) {
    g_warning ("can't set camerabin to ready\n");
    goto error;
  }
  GST_INFO_OBJECT (camera_bin, "camera ready");

  if (GST_STATE_CHANGE_FAILURE ==
      gst_element_set_state (camera_bin, GST_STATE_PLAYING)) {
    g_warning ("can't set camerabin to playing\n");
    goto error;
  }

  GST_INFO_OBJECT (camera_bin, "camera started");
  return TRUE;
error:
  cleanup_pipeline ();
  return FALSE;
}
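sync_bus_callback() and bus_callback() are defined elsewhere in this example; the sync handler's shape is fixed by the GstBusSyncHandler typedef, roughly:

static GstBusSyncReply
sync_bus_callback (GstBus * bus, GstMessage * message, gpointer data)
{
  /* handle time-critical messages here (this runs in the streaming
   * thread); returning GST_BUS_PASS forwards everything else to the
   * asynchronous bus_callback() watch */
  return GST_BUS_PASS;
}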
Code example #10
static gboolean
gst_dshowvideodec_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  gboolean ret = FALSE;
  HRESULT hres;
  GstStructure *s = gst_caps_get_structure (caps, 0);
  GstDshowVideoDec *vdec = (GstDshowVideoDec *) gst_pad_get_parent (pad);
  GstDshowVideoDecClass *klass =
      (GstDshowVideoDecClass *) G_OBJECT_GET_CLASS (vdec);
  GstBuffer *extradata = NULL;
  const GValue *v = NULL;
  guint size = 0;
  GstCaps *caps_out;
  AM_MEDIA_TYPE output_mediatype, input_mediatype;
  VIDEOINFOHEADER *input_vheader = NULL, *output_vheader = NULL;
  CComPtr<IPin> output_pin;
  CComPtr<IPin> input_pin;
  IBaseFilter *srcfilter = NULL;
  IBaseFilter *sinkfilter = NULL;
  const GValue *fps, *par;

  /* read data */
  if (!gst_structure_get_int (s, "width", &vdec->width) ||
      !gst_structure_get_int (s, "height", &vdec->height)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("error getting video width or height from caps"), (NULL));
    goto end;
  }
  fps = gst_structure_get_value (s, "framerate");
  if (fps) {
    vdec->fps_n = gst_value_get_fraction_numerator (fps);
    vdec->fps_d = gst_value_get_fraction_denominator (fps);
  }
  else {
    /* Invent a sane default framerate; the timestamps matter
     * more anyway. */
    vdec->fps_n = 25;
    vdec->fps_d = 1;
  }

  par = gst_structure_get_value (s, "pixel-aspect-ratio");
  if (par) {
    vdec->par_n = gst_value_get_fraction_numerator (par);
    vdec->par_d = gst_value_get_fraction_denominator (par);
  }
  else {
    vdec->par_n = vdec->par_d = 1;
  }

  if ((v = gst_structure_get_value (s, "codec_data")))
    extradata = gst_value_get_buffer (v);

  /* define the input type format */
  memset (&input_mediatype, 0, sizeof (AM_MEDIA_TYPE));
  input_mediatype.majortype = klass->entry->input_majortype;
  input_mediatype.subtype = klass->entry->input_subtype;
  input_mediatype.bFixedSizeSamples = FALSE;
  input_mediatype.bTemporalCompression = TRUE;

  if (strstr (klass->entry->sinkcaps, "video/mpeg, mpegversion= (int) 1")) {
    size =
        sizeof (MPEG1VIDEOINFO) + (extradata ? GST_BUFFER_SIZE (extradata) -
        1 : 0);
    input_vheader = (VIDEOINFOHEADER *)g_malloc0 (size);

    input_vheader->bmiHeader.biSize = sizeof (BITMAPINFOHEADER);
    if (extradata) {
      MPEG1VIDEOINFO *mpeg_info = (MPEG1VIDEOINFO *) input_vheader;

      memcpy (mpeg_info->bSequenceHeader,
          GST_BUFFER_DATA (extradata), GST_BUFFER_SIZE (extradata));
      mpeg_info->cbSequenceHeader = GST_BUFFER_SIZE (extradata);
    }
    input_mediatype.formattype = FORMAT_MPEGVideo;
  } else {
    size =
        sizeof (VIDEOINFOHEADER) +
        (extradata ? GST_BUFFER_SIZE (extradata) : 0);
    input_vheader = (VIDEOINFOHEADER *)g_malloc0 (size);

    input_vheader->bmiHeader.biSize = sizeof (BITMAPINFOHEADER);
    if (extradata) {            /* Codec data is appended after our header */
      memcpy (((guchar *) input_vheader) + sizeof (VIDEOINFOHEADER),
          GST_BUFFER_DATA (extradata), GST_BUFFER_SIZE (extradata));
      input_vheader->bmiHeader.biSize += GST_BUFFER_SIZE (extradata);
    }
    input_mediatype.formattype = FORMAT_VideoInfo;
  }
  input_vheader->rcSource.top = input_vheader->rcSource.left = 0;
  input_vheader->rcSource.right = vdec->width;
  input_vheader->rcSource.bottom = vdec->height;
  input_vheader->rcTarget = input_vheader->rcSource;
  input_vheader->bmiHeader.biWidth = vdec->width;
  input_vheader->bmiHeader.biHeight = vdec->height;
  input_vheader->bmiHeader.biPlanes = 1;
  input_vheader->bmiHeader.biBitCount = 16;
  input_vheader->bmiHeader.biCompression = klass->entry->format;
  input_vheader->bmiHeader.biSizeImage =
      (vdec->width * vdec->height) * (input_vheader->bmiHeader.biBitCount / 8);

  input_mediatype.cbFormat = size;
  input_mediatype.pbFormat = (BYTE *) input_vheader;
  input_mediatype.lSampleSize = input_vheader->bmiHeader.biSizeImage;

  vdec->fakesrc->GetOutputPin()->SetMediaType(&input_mediatype);

  /* set the sample size for fakesrc filter to the output buffer size */
  vdec->fakesrc->GetOutputPin()->SetSampleSize(input_mediatype.lSampleSize);

  /* connect our fake src to decoder */
  hres = vdec->fakesrc->QueryInterface(IID_IBaseFilter,
      (void **) &srcfilter);
  if (FAILED (hres)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
      ("Can't QT fakesrc to IBaseFilter: %x", hres), (NULL));
    goto end;
  }

  output_pin = gst_dshow_get_pin_from_filter (srcfilter, PINDIR_OUTPUT);
  if (!output_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get output pin from our directshow fakesrc filter"), (NULL));
    goto end;
  }
  input_pin = gst_dshow_get_pin_from_filter (vdec->decfilter, PINDIR_INPUT);
  if (!input_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get input pin from decoder filter"), (NULL));
    goto end;
  }

  hres = vdec->filtergraph->ConnectDirect (output_pin, input_pin, NULL);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't connect fakesrc with decoder (error=%x)", hres), (NULL));
    goto end;
  }

  /* get decoder output video format */
  if (!gst_dshowvideodec_get_filter_output_format (vdec,
          klass->entry->output_subtype, &output_vheader, &size)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get decoder output video format"), (NULL));
    goto end;
  }

  memset (&output_mediatype, 0, sizeof (AM_MEDIA_TYPE));
  output_mediatype.majortype = klass->entry->output_majortype;
  output_mediatype.subtype = klass->entry->output_subtype;
  output_mediatype.bFixedSizeSamples = TRUE;
  output_mediatype.bTemporalCompression = FALSE;
  output_mediatype.lSampleSize = output_vheader->bmiHeader.biSizeImage;
  output_mediatype.formattype = FORMAT_VideoInfo;
  output_mediatype.cbFormat = size;
  output_mediatype.pbFormat = (BYTE *) output_vheader;

  vdec->fakesink->SetMediaType (&output_mediatype);

  /* connect decoder to our fake sink */
  output_pin = gst_dshow_get_pin_from_filter (vdec->decfilter, PINDIR_OUTPUT);
  if (!output_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get output pin from our decoder filter"), (NULL));
    goto end;
  }

  hres = vdec->fakesink->QueryInterface(IID_IBaseFilter,
      (void **) &sinkfilter);
  if (FAILED (hres)) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
      ("Can't QT fakesink to IBaseFilter: %x", hres), (NULL));
    goto end;
  }

  input_pin = gst_dshow_get_pin_from_filter (sinkfilter, PINDIR_INPUT);
  if (!input_pin) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't get input pin from our directshow fakesink filter"), (NULL));
    goto end;
  }

  hres = vdec->filtergraph->ConnectDirect(output_pin, input_pin,
      &output_mediatype);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't connect decoder with fakesink (error=%x)", hres), (NULL));
    goto end;
  }

  /* negotiate output */
  caps_out = gst_caps_from_string (klass->entry->srccaps);
  gst_caps_set_simple (caps_out,
      "width", G_TYPE_INT, vdec->width,
      "height", G_TYPE_INT, vdec->height, NULL);

  if (vdec->fps_n && vdec->fps_d) {
      gst_caps_set_simple (caps_out,
          "framerate", GST_TYPE_FRACTION, vdec->fps_n, vdec->fps_d, NULL);
  }

  gst_caps_set_simple (caps_out, 
      "pixel-aspect-ratio", GST_TYPE_FRACTION, vdec->par_n, vdec->par_d, NULL);

  if (!gst_pad_set_caps (vdec->srcpad, caps_out)) {
    gst_caps_unref (caps_out);
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Failed to negotiate output"), (NULL));
    goto end;
  }
  gst_caps_unref (caps_out);

  hres = vdec->mediafilter->Run (-1);
  if (hres != S_OK) {
    GST_ELEMENT_ERROR (vdec, CORE, NEGOTIATION,
        ("Can't run the directshow graph (error=%d)", hres), (NULL));
    goto end;
  }

  ret = TRUE;
end:
  gst_object_unref (vdec);
  if (input_vheader)
    g_free (input_vheader);
  if (srcfilter)
    srcfilter->Release();
  if (sinkfilter)
    sinkfilter->Release();
  return ret;
}
Code example #11
static GstCaps *
gst_v4lsrc_palette_to_caps (int palette)
{
    guint32 fourcc;
    GstCaps *caps;

    switch (palette) {
    case VIDEO_PALETTE_YUV422:
    case VIDEO_PALETTE_YUYV:
        fourcc = GST_MAKE_FOURCC ('Y', 'U', 'Y', '2');
        break;
    case VIDEO_PALETTE_YUV420P:
        fourcc = GST_MAKE_FOURCC ('I', '4', '2', '0');
        break;
    case VIDEO_PALETTE_UYVY:
        fourcc = GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y');
        break;
    case VIDEO_PALETTE_YUV411P:
        fourcc = GST_MAKE_FOURCC ('Y', '4', '1', 'B');
        break;
    case VIDEO_PALETTE_YUV411:
        fourcc = GST_MAKE_FOURCC ('Y', '4', '1', 'P');
        break;
    case VIDEO_PALETTE_YUV422P:
        fourcc = GST_MAKE_FOURCC ('Y', '4', '2', 'B');
        break;
    case VIDEO_PALETTE_YUV410P:
        fourcc = GST_MAKE_FOURCC ('Y', 'U', 'V', '9');
        break;
    case VIDEO_PALETTE_RGB555:
    case VIDEO_PALETTE_RGB565:
    case VIDEO_PALETTE_RGB24:
    case VIDEO_PALETTE_RGB32:
        fourcc = GST_MAKE_FOURCC ('R', 'G', 'B', ' ');
        break;
    default:
        return NULL;
    }

    if (fourcc == GST_MAKE_FOURCC ('R', 'G', 'B', ' ')) {
        switch (palette) {
        case VIDEO_PALETTE_RGB555:
            caps = gst_caps_from_string ("video/x-raw-rgb, "
                                         "bpp = (int) 16, "
                                         "depth = (int) 15, "
                                         "endianness = (int) BYTE_ORDER, "
                                         "red_mask = 0x7c00, " "green_mask = 0x03e0, " "blue_mask = 0x001f");
            break;
        case VIDEO_PALETTE_RGB565:
            caps = gst_caps_from_string ("video/x-raw-rgb, "
                                         "bpp = (int) 16, "
                                         "depth = (int) 16, "
                                         "endianness = (int) BYTE_ORDER, "
                                         "red_mask = 0xf800, " "green_mask = 0x07f0, " "blue_mask = 0x001f");
            break;
        case VIDEO_PALETTE_RGB24:
            caps = gst_caps_from_string ("video/x-raw-rgb, "
                                         "bpp = (int) 24, "
                                         "depth = (int) 24, "
                                         "endianness = (int) BIG_ENDIAN, "
                                         "red_mask = 0xFF0000, "
                                         "green_mask = 0x00FF00, " "blue_mask = 0x0000FF");
            break;
        case VIDEO_PALETTE_RGB32:
            caps = gst_caps_from_string ("video/x-raw-rgb, "
                                         "bpp = (int) 32, "
                                         "depth = (int) 24, "
                                         "endianness = (int) BIG_ENDIAN, "
                                         "red_mask = 0xFF000000, "
                                         "green_mask = 0x00FF0000, " "blue_mask = 0x0000FF00");
            break;
        default:
            g_assert_not_reached ();
            return NULL;
        }
    } else {
        caps = gst_caps_new_simple ("video/x-raw-yuv",
                                    "format", GST_TYPE_FOURCC, fourcc, NULL);
    }

    return caps;
}
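A quick usage sketch for the mapping above (palette constant from the old V4L1 headers):

static void
example_palette_caps_usage (void)
{
    GstCaps *caps = gst_v4lsrc_palette_to_caps (VIDEO_PALETTE_YUV420P);

    if (caps) {
        gchar *str = gst_caps_to_string (caps);

        /* prints something like: video/x-raw-yuv, format=(fourcc)I420 */
        g_print ("palette maps to: %s\n", str);
        g_free (str);
        gst_caps_unref (caps);
    }
}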
Code example #12
File: gstkateenc.c  Project: 0p1pp1/gst-plugins-bad
static GstFlowReturn
gst_kate_enc_send_headers (GstKateEnc * ke)
{
  GstFlowReturn rflow = GST_FLOW_OK;
  GstCaps *caps;
  GList *headers = NULL, *item;

  if (G_UNLIKELY (ke->category == NULL || *ke->category == '\0')) {
    /* The error code is a bit of a lie, but seems most appropriate. */
    GST_ELEMENT_ERROR (ke, LIBRARY, SETTINGS, (NULL),
        ("The 'category' property must be set. For subtitles, set it to "
            "either 'SUB' (text subtitles) or 'K-SPU' (dvd-style subtitles)"));
    return GST_FLOW_ERROR;
  }

  gst_kate_enc_set_metadata (ke);

  /* encode headers and store them in a list */
  while (1) {
    kate_packet kp;
    int ret = kate_encode_headers (&ke->k, &ke->kc, &kp);
    if (ret == 0) {
      GstBuffer *buffer;

      buffer = gst_kate_enc_create_buffer (ke, &kp, 0, 0, 0, TRUE);
      if (!buffer) {
        GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL),
            ("Failed to create buffer, %u bytes", (guint) kp.nbytes));
        rflow = GST_FLOW_ERROR;
        break;
      }
      kate_packet_clear (&kp);

      GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_HEADER);
      headers = g_list_append (headers, buffer);
    } else if (ret > 0) {
      GST_LOG_OBJECT (ke, "Last header encoded");
      break;
    } else {
      GST_ELEMENT_ERROR (ke, STREAM, ENCODE, (NULL),
          ("Failed encoding headers: %s",
              gst_kate_util_get_error_message (ret)));
      rflow = GST_FLOW_ERROR;
      break;
    }
  }

  if (rflow == GST_FLOW_OK) {
    if (gst_kate_enc_is_simple_subtitle_category (ke, ke->category)) {
      caps = gst_kate_util_set_header_on_caps (&ke->element,
          gst_caps_from_string ("subtitle/x-kate"), headers);
    } else {
      caps = gst_kate_util_set_header_on_caps (&ke->element,
          gst_caps_from_string ("application/x-kate"), headers);
    }
    if (caps) {
      GST_DEBUG_OBJECT (ke, "here are the caps: %" GST_PTR_FORMAT, caps);
      gst_pad_set_caps (ke->srcpad, caps);
      gst_caps_unref (caps);

      if (ke->pending_segment)
        gst_pad_push_event (ke->srcpad, ke->pending_segment);
      ke->pending_segment = NULL;

      GST_LOG_OBJECT (ke, "pushing headers");
      item = headers;
      while (item) {
        GstBuffer *buffer = item->data;
        GST_LOG_OBJECT (ke, "pushing header %p", buffer);
        gst_kate_enc_push_buffer (ke, buffer);
        item = item->next;
      }
    } else {
      GST_ERROR_OBJECT (ke, "Failed to set headers on caps");
    }
  }

  g_list_free (headers);

  return rflow;
}
Code example #13
File: pipeline.cpp  Project: Igalia/aura
Pipeline::Pipeline(QObject *parent)
    : QObject(parent),
      camerabin(0),
      videoSrc(0),
      viewfinder(0),
      effectBin(0),
      effectValve(0),
      effect(0),
      effectPreCS(0),
      effectPostCS(0),
      effectCapsFilter(0),
      windowId(0)
{
    // camerabin
    camerabin = gst_element_factory_make("camerabin", NULL);
    g_object_set (camerabin, "framerate-rounding", TRUE, NULL);

    // video source
    videoSrc = gst_element_factory_make("subdevsrc", NULL);
    g_object_set(videoSrc, "queue-size", 5, NULL);
    g_object_set (G_OBJECT (camerabin), "video-source", videoSrc, NULL);

    // video encoder
    GstElement *videoEnc = gst_element_factory_make("dsph264enc", NULL);
    g_object_set(camerabin, "video-encoder", videoEnc, NULL);

    // videomux
    GstElement *videoMux = gst_element_factory_make("mp4mux", NULL);
    g_object_set(camerabin, "video-muxer", videoMux, NULL);

    // audio source
    GstElement *audioSrc = gst_element_factory_make("pulsesrc", NULL);
    g_object_set(camerabin, "audio-source", audioSrc, NULL);

    // audio encoder
    // we need to set a capsfilter in order to select the audio config
    GstElement *audioEncBin = gst_bin_new(NULL);
    GstElement *audioEnc = gst_element_factory_make("nokiaaacenc", NULL);
    GstElement *audioCapsFilter = gst_element_factory_make("capsfilter", NULL);
    GstCaps *audioCaps = gst_caps_from_string("audio/x-raw-int, channels=(int)2, width=(int)16, depth=(int)16, rate=(int)48000");
    g_object_set(audioCapsFilter, "caps", audioCaps, NULL);

    gst_bin_add_many(GST_BIN(audioEncBin), audioCapsFilter, audioEnc, NULL);
    gst_element_link(audioCapsFilter, audioEnc);

    GstPad * pad;
    pad = gst_element_get_static_pad (audioCapsFilter, "sink");
    gst_element_add_pad (audioEncBin, gst_ghost_pad_new ("sink", pad));
    gst_object_unref (pad);

    pad = gst_element_get_static_pad (audioEnc, "src");
    gst_element_add_pad (audioEncBin, gst_ghost_pad_new ("src", pad));
    gst_object_unref (pad);

    g_object_set(camerabin, "audio-encoder", audioEncBin, NULL);

    // viewfinder
    viewfinder = gst_element_factory_make("omapxvsink", NULL);
    g_object_set(viewfinder, "autopaint-colorkey", FALSE, NULL);
    g_object_set (G_OBJECT (camerabin), "viewfinder-sink", viewfinder, NULL);

    //error handler
    GstBus *bus = gst_pipeline_get_bus(GST_PIPELINE(camerabin));
    gst_bus_add_watch(bus, gStreamerMessageWatcher, this);
    gst_bus_set_sync_handler(bus, (GstBusSyncHandler) busSyncHandler, this);
    gst_object_unref(bus);

    // idle handler
    g_signal_connect(G_OBJECT(camerabin),
                     "notify::idle",
                     G_CALLBACK(idleNotificationCallback),
                     this);

    // set initial values
    setVideoMode();
    setupFileStorage();
    setupEffectBins();
    setResolution(VIDEO_RESOLUTION_DEFAULT);
}
Code example #14
static gboolean
kms_recorder_endpoint_query_caps (KmsElement * element, GstPad * pad,
    GstQuery * query)
{
  KmsRecorderEndpoint *self = KMS_RECORDER_ENDPOINT (element);
  GstCaps *allowed = NULL, *caps = NULL;
  GstCaps *filter, *result, *tcaps;
  GstElement *appsrc;

  gst_query_parse_caps (query, &filter);

  switch (kms_element_get_pad_type (element, pad)) {
    case KMS_ELEMENT_PAD_TYPE_VIDEO:
      g_object_get (self->priv->mux, KMS_MUXING_PIPELINE_VIDEO_APPSRC,
          &appsrc, NULL);
      allowed =
          kms_recorder_endpoint_allowed_caps (element,
          KMS_ELEMENT_PAD_TYPE_VIDEO);
      caps =
          kms_recorder_endpoint_get_caps_from_profile (self,
          KMS_ELEMENT_PAD_TYPE_VIDEO);
      result = gst_caps_from_string (KMS_AGNOSTIC_VIDEO_CAPS);
      break;
    case KMS_ELEMENT_PAD_TYPE_AUDIO:
      g_object_get (self->priv->mux, KMS_MUXING_PIPELINE_AUDIO_APPSRC,
          &appsrc, NULL);
      allowed =
          kms_recorder_endpoint_allowed_caps (element,
          KMS_ELEMENT_PAD_TYPE_AUDIO);
      caps =
          kms_recorder_endpoint_get_caps_from_profile (self,
          KMS_ELEMENT_PAD_TYPE_AUDIO);
      result = gst_caps_from_string (KMS_AGNOSTIC_AUDIO_CAPS);
      break;
    default:
      GST_ERROR_OBJECT (pad, "unknown pad");
      return FALSE;
  }

  /* make sure we only return results that intersect our padtemplate */
  tcaps = gst_pad_get_pad_template_caps (pad);
  if (tcaps != NULL) {
    /* Update result caps */
    gst_caps_unref (result);

    if (allowed == NULL) {
      result = gst_caps_ref (tcaps);
    } else {
      result = gst_caps_intersect (allowed, tcaps);
    }
    gst_caps_unref (tcaps);
  } else {
    GST_WARNING_OBJECT (pad,
        "Cannot get capabilities from pad's template. Using agnostic caps");
  }

  if (caps == NULL) {
    GST_ERROR_OBJECT (self, "No caps from profile");
  } else {
    GstPad *srcpad;

    /* Get encodebin's caps filtering by profile */
    srcpad = gst_element_get_static_pad (appsrc, "src");
    tcaps = gst_pad_peer_query_caps (srcpad, caps);
    if (tcaps != NULL) {
      /* Filter against filtered encodebin's caps */
      GstCaps *aux;

      aux = gst_caps_intersect (tcaps, result);
      gst_caps_unref (result);
      gst_caps_unref (tcaps);
      result = aux;
    } else if (caps != NULL) {
      /* Filter against profile */
      GstCaps *aux;

      GST_WARNING_OBJECT (appsrc, "Using generic profile's caps");
      aux = gst_caps_intersect (caps, result);
      gst_caps_unref (result);
      result = aux;
    }
    g_object_unref (srcpad);
  }

  g_object_unref (appsrc);

  /* filter against the query filter when needed */
  if (filter != NULL) {
    GstCaps *aux;

    aux = gst_caps_intersect (result, filter);
    gst_caps_unref (result);
    result = aux;
  }

  gst_query_set_caps_result (query, result);
  gst_caps_unref (result);

  if (allowed != NULL)
    gst_caps_unref (allowed);

  if (caps != NULL)
    gst_caps_unref (caps);

  return TRUE;
}
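Stripped of the recorder-specific lookups, the function above follows the usual caps-query recipe: start from the pad template caps, intersect with whatever constrains the pad, then intersect with the query's filter. A minimal sketch of just that recipe:

static gboolean
handle_caps_query (GstPad * pad, GstQuery * query, GstCaps * constraint)
{
  GstCaps *filter, *result, *tmpl;

  gst_query_parse_caps (query, &filter);

  /* start from the template, narrowed by the element's constraint */
  tmpl = gst_pad_get_pad_template_caps (pad);
  result = constraint ? gst_caps_intersect (constraint, tmpl)
      : gst_caps_ref (tmpl);
  gst_caps_unref (tmpl);

  /* then narrow by the query's filter, when one was supplied */
  if (filter != NULL) {
    GstCaps *tmp = gst_caps_intersect (result, filter);
    gst_caps_unref (result);
    result = tmp;
  }

  gst_query_set_caps_result (query, result);
  gst_caps_unref (result);
  return TRUE;
}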
Code Example #15
File: rtp-codec.c Project: nicobou/prototypes
int
main (int argc,
      char *argv[])
{
    
  /* Initialisation */
  gst_init (&argc, &argv);

  GList *element_list = gst_element_factory_list_get_elements (GST_ELEMENT_FACTORY_TYPE_DEPAYLOADER, 
							       GST_RANK_NONE);
  GList *iter = element_list;
  while (iter != NULL)
    {
      g_print ("+++++\n");
      g_print ("%s -- ", gst_element_factory_get_longname ((GstElementFactory *)iter->data));
      g_print ("%s\n", gst_plugin_feature_get_name ((GstPluginFeature *)iter->data));
	 
      const GList *static_pads = 
	gst_element_factory_get_static_pad_templates ((GstElementFactory *)iter->data);
	 
      while (NULL != static_pads)
	{
	  GstStaticPadTemplate *pad = (GstStaticPadTemplate *)static_pads->data; 
	  /* note: gst_caps_to_string (&pad->static_caps.caps) yields an
	   * empty string here, so parse static_caps.string instead */
	  GstCaps *caps = gst_caps_from_string (pad->static_caps.string);
	  guint caps_size = gst_caps_get_size (caps);
	  if (! gst_caps_is_any (caps))
	    for (guint i = caps_size; i > 0; i--) 
	      {
		GstStructure *caps_struct = gst_caps_get_structure (caps, i-1);
		if (gst_structure_has_name (caps_struct,"application/x-rtp")) 
		  {
		    g_print ("string: %s\n",   
			     gst_structure_to_string (caps_struct));   
		    
		    {//payload 
		      const GValue *val = gst_structure_get_value (caps_struct, "payload");  
		      if (NULL != val) 
			{ 
			  //g_print ("payload struct type %s\n", G_VALUE_TYPE_NAME (val));  
			  if(GST_VALUE_HOLDS_INT_RANGE(val)) 
			    { 
			      g_print ("payload min %d\n", gst_value_get_int_range_min (val));  
			    } 
			  if (GST_VALUE_HOLDS_LIST(val)) 
			    { 
			      for (guint i = 0; i < gst_value_list_get_size (val); i++) 
				{ 
				  const GValue *item_val = gst_value_list_get_value (val, i); 
				  g_print ("payload list %d\n", g_value_get_int (item_val)); 
				} 
			    } 
			  if (G_VALUE_HOLDS_INT (val)) 
			    { 
			      g_print ("payload int %d\n", g_value_get_int (val)); 
			    } 
			} 
		    } 
		    { // encoding-name
		      const GValue *val = gst_structure_get_value (caps_struct, "encoding-name");  
		      if (NULL != val) 
			{
			  //g_print ("encoding-name struct type %s\n", G_VALUE_TYPE_NAME (val));  
			  if (GST_VALUE_HOLDS_LIST(val)) 
			    { 
			      for (guint i = 0; i < gst_value_list_get_size (val); i++) 
				{ 
				  const GValue *item_val = gst_value_list_get_value (val, i); 
				  g_print ("encoding-name list %s\n", g_value_get_string (item_val)); 
				} 
			    } 
			  if (G_VALUE_HOLDS_STRING (val)) 
			    { 
			      g_print ("encoding-name string %s\n", g_value_get_string (val)); 
			    } 
				      
			}
		    } 
		    {//media
		      const GValue *val = gst_structure_get_value (caps_struct, "media");  
		      if (NULL != val) 
			{
			  if (GST_VALUE_HOLDS_LIST(val)) 
			    { 
			      for (guint i = 0; i < gst_value_list_get_size (val); i++) 
				{ 
				  const GValue *item_val = gst_value_list_get_value (val, i); 
				  g_print ("media list %s\n", g_value_get_string (item_val)); 
				} 
			    } 
			  if (G_VALUE_HOLDS_STRING (val)) 
			    { 
			      g_print ("media string %s\n", g_value_get_string (val)); 
			    } 
				      
			}
		    } 

		    {//clock rate 
		      const GValue *val = gst_structure_get_value (caps_struct, "clock-rate");  
		      if (NULL != val) 
			{ 
			  //g_print ("payload struct type %s\n", G_VALUE_TYPE_NAME (val));  
			  if(GST_VALUE_HOLDS_INT_RANGE(val)) 
			    { 
			      g_print ("clock-rate min %d\n", gst_value_get_int_range_min (val));  
			    } 
			  if (GST_VALUE_HOLDS_LIST(val)) 
			    { 
			      for (guint i = 0; i < gst_value_list_get_size (val); i++) 
				{ 
				  const GValue *item_val = gst_value_list_get_value (val, i); 
				  g_print ("clock-rate list %d\n", g_value_get_int (item_val)); 
				} 
			    } 
			  if (G_VALUE_HOLDS_INT (val)) 
			    { 
			      g_print ("clock-rate int %d\n", g_value_get_int (val)); 
			    } 
			} 
		    } 

		  }
	      }
	  static_pads = g_list_next (static_pads); 
	  gst_caps_unref (caps);
	}
	 
      iter = g_list_next (iter);
    }
  gst_plugin_feature_list_free (element_list);
    
  return 0;
}
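The payload, encoding-name and clock-rate blocks above are near-identical; a sketch of one helper that dispatches on the GValue type could replace all three:

/* Sketch: print a caps field that may hold an int, a string,
 * an int range, or a list of ints/strings (as in the loops above). */
static void
print_rtp_field (const GstStructure * s, const gchar * field)
{
  const GValue *val = gst_structure_get_value (s, field);
  guint i;

  if (val == NULL)
    return;
  if (G_VALUE_HOLDS_INT (val))
    g_print ("%s int %d\n", field, g_value_get_int (val));
  else if (G_VALUE_HOLDS_STRING (val))
    g_print ("%s string %s\n", field, g_value_get_string (val));
  else if (GST_VALUE_HOLDS_INT_RANGE (val))
    g_print ("%s min %d\n", field, gst_value_get_int_range_min (val));
  else if (GST_VALUE_HOLDS_LIST (val)) {
    for (i = 0; i < gst_value_list_get_size (val); i++) {
      const GValue *item = gst_value_list_get_value (val, i);
      if (G_VALUE_HOLDS_INT (item))
        g_print ("%s list %d\n", field, g_value_get_int (item));
      else if (G_VALUE_HOLDS_STRING (item))
        g_print ("%s list %s\n", field, g_value_get_string (item));
    }
  }
}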
Code Example #16
static gboolean
run_pipeline (gpointer user_data)
{
  GstCaps *preview_caps = NULL;
  gchar *filename_str = NULL;
  GstElement *video_source = NULL;
  const gchar *filename_suffix;

  g_object_set (camera_bin, "mode", mode, NULL);

  if (preview_caps_name != NULL) {
    preview_caps = gst_caps_from_string (preview_caps_name);
    if (preview_caps) {
      g_object_set (camera_bin, "preview-caps", preview_caps, NULL);
      GST_DEBUG ("Preview caps set");
    } else
      GST_DEBUG ("Preview caps set but could not create caps from string");
  }

  set_metadata (camera_bin);

  /* Construct filename */
  if (mode == 1)
    filename_suffix = ".mp4";
  else
    filename_suffix = ".jpg";
  filename_str =
      g_strdup_printf ("%s/test_%04u%s", filename->str, capture_count,
      filename_suffix);
  GST_DEBUG ("Setting filename: %s", filename_str);
  g_object_set (camera_bin, "filename", filename_str, NULL);
  g_free (filename_str);

  g_object_get (camera_bin, "video-source", &video_source, NULL);
  if (video_source) {
    if (GST_IS_ELEMENT (video_source) &&
        gst_element_implements_interface (video_source, GST_TYPE_PHOTOGRAPHY)) {
      /* Set GstPhotography interface options. If option not given as
         command-line parameter use default of the source element. */
      if (scene_mode != SCENE_MODE_NONE)
        g_object_set (video_source, "scene-mode", scene_mode, NULL);
      if (ev_compensation != EV_COMPENSATION_NONE)
        g_object_set (video_source, "ev-compensation", ev_compensation, NULL);
      if (aperture != APERTURE_NONE)
        g_object_set (video_source, "aperture", aperture, NULL);
      if (flash_mode != FLASH_MODE_NONE)
        g_object_set (video_source, "flash-mode", flash_mode, NULL);
      if (exposure != EXPOSURE_NONE)
        g_object_set (video_source, "exposure", exposure, NULL);
      if (iso_speed != ISO_SPEED_NONE)
        g_object_set (video_source, "iso-speed", iso_speed, NULL);
      if (wb_mode != WHITE_BALANCE_MODE_NONE)
        g_object_set (video_source, "white-balance-mode", wb_mode, NULL);
      if (color_mode != COLOR_TONE_MODE_NONE)
        g_object_set (video_source, "colour-tone-mode", color_mode, NULL);
    }
    g_object_unref (video_source);
  }
  g_object_set (camera_bin, "mute", mute, NULL);
  g_object_set (camera_bin, "zoom", zoom / 100.0f, NULL);

  capture_count++;
  g_timer_start (timer);
  g_signal_emit_by_name (camera_bin, "capture-start", 0);


  if (mode == 1) {
    g_timeout_add ((capture_time * 1000), (GSourceFunc) stop_capture, NULL);
  }

  return FALSE;
}
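The run of option ifs is a recurring pattern in these camera tests; a sketch of a small macro that condenses it (macro name hypothetical, sentinel names as in this file):

/* Sketch: set a property only when the option was given on the
 * command line, i.e. when it differs from its NONE sentinel. */
#define SET_IF_GIVEN(obj, prop, var, none) \
  G_STMT_START { \
    if ((var) != (none)) \
      g_object_set ((obj), (prop), (var), NULL); \
  } G_STMT_END

/* e.g.: SET_IF_GIVEN (video_source, "scene-mode", scene_mode, SCENE_MODE_NONE); */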
Code Example #17
static gboolean
gst_musepack_stream_init (GstMusepackDec * musepackdec)
{
  mpc_streaminfo i;
  GstTagList *tags;
  GstCaps *caps;

  /* set up reading */
  gst_musepack_init_reader (musepackdec->r, musepackdec);

#ifdef MPC_IS_OLD_API
  /* streaminfo */
  mpc_streaminfo_init (&i);
  if (mpc_streaminfo_read (&i, musepackdec->r) < 0) {
    GST_ELEMENT_ERROR (musepackdec, STREAM, WRONG_TYPE, (NULL), (NULL));
    return FALSE;
  }

  /* decoding */
  mpc_decoder_setup (musepackdec->d, musepackdec->r);
  mpc_decoder_scale_output (musepackdec->d, 1.0);
  if (!mpc_decoder_initialize (musepackdec->d, &i)) {
    GST_ELEMENT_ERROR (musepackdec, STREAM, WRONG_TYPE, (NULL), (NULL));
    return FALSE;
  }
#else
  musepackdec->d = mpc_demux_init (musepackdec->r);
  if (!musepackdec->d) {
    GST_ELEMENT_ERROR (musepackdec, STREAM, WRONG_TYPE, (NULL), (NULL));
    return FALSE;
  }

  mpc_demux_get_info (musepackdec->d, &i);
#endif

  /* capsnego */
  caps = gst_caps_from_string (BASE_CAPS);
  gst_caps_set_simple (caps,
      "endianness", G_TYPE_INT, G_BYTE_ORDER,
      "channels", G_TYPE_INT, i.channels,
      "rate", G_TYPE_INT, i.sample_freq, NULL);
  gst_pad_use_fixed_caps (musepackdec->srcpad);
  if (!gst_pad_set_caps (musepackdec->srcpad, caps)) {
    GST_ELEMENT_ERROR (musepackdec, CORE, NEGOTIATION, (NULL), (NULL));
    return FALSE;
  }

  g_atomic_int_set (&musepackdec->bps, 4 * i.channels);
  g_atomic_int_set (&musepackdec->rate, i.sample_freq);

  gst_segment_set_last_stop (&musepackdec->segment, GST_FORMAT_DEFAULT, 0);
  gst_segment_set_duration (&musepackdec->segment, GST_FORMAT_DEFAULT,
      mpc_streaminfo_get_length_samples (&i));

  /* send basic tags */
  tags = gst_tag_list_new ();
  gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
      GST_TAG_AUDIO_CODEC, "Musepack", NULL);

  if (i.encoder[0] != '\0' && i.encoder_version > 0) {
    gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
        GST_TAG_ENCODER, i.encoder,
        GST_TAG_ENCODER_VERSION, i.encoder_version, NULL);
  }

  if (i.bitrate > 0) {
    gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
        GST_TAG_BITRATE, i.bitrate, NULL);
  } else if (i.average_bitrate > 0.0) {
    gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
        GST_TAG_BITRATE, (guint) i.average_bitrate, NULL);
  }

  if (i.gain_title != 0 || i.gain_album != 0) {
    gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
        GST_TAG_TRACK_GAIN, (gdouble) i.gain_title / 100.0,
        GST_TAG_ALBUM_GAIN, (gdouble) i.gain_album / 100.0, NULL);
  }

  if (i.peak_title != 0 && i.peak_title != 32767 &&
      i.peak_album != 0 && i.peak_album != 32767) {
    gst_tag_list_add (tags, GST_TAG_MERGE_REPLACE,
        GST_TAG_TRACK_PEAK, (gdouble) i.peak_title / 32767.0,
        GST_TAG_ALBUM_PEAK, (gdouble) i.peak_album / 32767.0, NULL);
  }

  GST_LOG_OBJECT (musepackdec, "Posting tags: %" GST_PTR_FORMAT, tags);
  gst_element_found_tags (GST_ELEMENT (musepackdec), tags);

  return TRUE;
}
Code Example #18
GstCaps *
gst_opencv_caps_from_cv_image_type (int cv_type)
{
  GstCaps *c = gst_caps_new_empty ();
  switch (cv_type) {
    case CV_8UC1:
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("GRAY8")));
      break;
    case CV_8UC3:
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("RGB")));
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("BGR")));
      break;
    case CV_8UC4:
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("RGBx")));
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("xRGB")));
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("BGRx")));
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("xBGR")));
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("RGBA")));
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("ARGB")));
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("BGRA")));
      gst_caps_append (c, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("ABGR")));
      break;
    case CV_16UC1:
      gst_caps_append (c,
          gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("GRAY16_LE")));
      gst_caps_append (c,
          gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("GRAY16_BE")));
      break;
  }
  return c;
}
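A hypothetical call site, just to show the shape of the returned caps:

  /* Hypothetical usage: CV_8UC3 maps to both RGB and BGR. */
  GstCaps *caps = gst_opencv_caps_from_cv_image_type (CV_8UC3);
  gchar *desc = gst_caps_to_string (caps);
  g_print ("CV_8UC3 -> %s\n", desc);
  g_free (desc);
  gst_caps_unref (caps);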
Code Example #19
File: gstdshow.cpp Project: Haifen/gst-plugins-bad
GstCaps *
gst_dshow_new_video_caps (GstVideoFormat video_format, const gchar * name,
    GstCapturePinMediaType * pin_mediatype)
{
  GstCaps *video_caps = NULL;
  GstStructure *video_structure = NULL;
  gint min_w, max_w;
  gint min_h, max_h;
  gint min_fr, max_fr;

  /* raw video format */
  switch (video_format) {
    case GST_VIDEO_FORMAT_BGR:
      video_caps = gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("BGR"));
      break;
    case GST_VIDEO_FORMAT_I420:
      video_caps = gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("I420"));
      break;
    case GST_VIDEO_FORMAT_YUY2:
      video_caps = gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("YUY2"));
      break;
    case GST_VIDEO_FORMAT_UYVY:
      video_caps = gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("UYVY"));
      break;
    default:
      break;
  }

  /* other video format */
  if (!video_caps) {
    if (g_ascii_strncasecmp (name, "video/x-dv, systemstream=FALSE", 30) == 0) {
      video_caps = gst_caps_new_simple ("video/x-dv",
          "systemstream", G_TYPE_BOOLEAN, FALSE,
          "format", G_TYPE_STRING, "dvsd",
          NULL);
    } else if (g_ascii_strncasecmp (name, "video/x-dv, systemstream=TRUE", 29) == 0) {
      video_caps = gst_caps_new_simple ("video/x-dv",
          "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
      return video_caps;
    } else if (g_ascii_strncasecmp (name, "image/jpeg", 10) == 0) {
      video_caps = gst_caps_new_simple ("image/jpeg", NULL);
    } else if (g_ascii_strncasecmp (name, "video/x-h264", 12) == 0) {
      video_caps = gst_caps_new_simple ("video/x-h264", NULL);
    }
  }

  if (!video_caps)
    return NULL;

  video_structure = gst_caps_get_structure (video_caps, 0);

  /* Hope GST_TYPE_INT_RANGE_STEP will exist in future GStreamer releases,
   * because then we could use:
   * "width", GST_TYPE_INT_RANGE_STEP, video_default->minWidth, video_default->maxWidth, video_default->granularityWidth
   * instead of:
   * "width", GST_TYPE_INT_RANGE, video_default->minWidth, video_default->maxWidth
   *
   * For the framerate we do not need a step (granularity) because
   * "The IAMStreamConfig::SetFormat method will set the frame rate to the
   * closest value that the filter supports", as stated in the
   * VIDEO_STREAM_CONFIG_CAPS DirectShow documentation. */

  min_w = pin_mediatype->vscc.MinOutputSize.cx;
  max_w = pin_mediatype->vscc.MaxOutputSize.cx;
  min_h = pin_mediatype->vscc.MinOutputSize.cy;
  max_h = pin_mediatype->vscc.MaxOutputSize.cy;
  min_fr = (gint) (10000000 / pin_mediatype->vscc.MaxFrameInterval);
  max_fr = (gint) (10000000 / pin_mediatype->vscc.MinFrameInterval);

  if (min_w == max_w)
    gst_structure_set (video_structure, "width", G_TYPE_INT, min_w, NULL);
  else
    gst_structure_set (video_structure,
        "width", GST_TYPE_INT_RANGE, min_w, max_w, NULL);

  if (min_h == max_h)
    gst_structure_set (video_structure, "height", G_TYPE_INT, min_h, NULL);
  else
    gst_structure_set (video_structure,
        "height", GST_TYPE_INT_RANGE, min_h, max_h, NULL);

  if (min_fr == max_fr)
    gst_structure_set (video_structure, "framerate",
        GST_TYPE_FRACTION, min_fr, 1, NULL);
  else
    gst_structure_set (video_structure, "framerate",
        GST_TYPE_FRACTION_RANGE, min_fr, 1, max_fr, 1, NULL);

  return video_caps;
}
Code Example #20
static gboolean
run_pipeline (gpointer user_data)
{
  GstCaps *preview_caps = NULL;
  gchar *filename_str = NULL;
  GstElement *video_source = NULL;
  const gchar *filename_suffix;
  CaptureTiming *timing;

  g_object_set (camerabin, "mode", mode, NULL);

  if (preview_caps_name != NULL) {
    preview_caps = gst_caps_from_string (preview_caps_name);
    if (preview_caps) {
      g_object_set (camerabin, "preview-caps", preview_caps, NULL);
      GST_DEBUG ("Preview caps set");
    } else
      GST_DEBUG ("Preview caps set but could not create caps from string");
  }

  set_metadata (camerabin);

  /* Construct filename */
  if (mode == MODE_VIDEO)
    filename_suffix = ".mp4";
  else
    filename_suffix = ".jpg";
  filename_str =
      g_strdup_printf ("%s/test_%04u%s", filename->str, capture_count,
      filename_suffix);
  GST_DEBUG ("Setting filename: %s", filename_str);
  g_object_set (camerabin, "location", filename_str, NULL);
  g_free (filename_str);

  g_object_get (camerabin, "camera-source", &video_source, NULL);
  if (video_source) {
    if (GST_IS_ELEMENT (video_source) &&
        gst_element_implements_interface (video_source, GST_TYPE_PHOTOGRAPHY)) {
      /* Set GstPhotography interface options. If option not given as
         command-line parameter use default of the source element. */
      if (scene_mode != SCENE_MODE_NONE)
        g_object_set (video_source, "scene-mode", scene_mode, NULL);
      if (ev_compensation != EV_COMPENSATION_NONE)
        g_object_set (video_source, "ev-compensation", ev_compensation, NULL);
      if (aperture != APERTURE_NONE)
        g_object_set (video_source, "aperture", aperture, NULL);
      if (flash_mode != FLASH_MODE_NONE)
        g_object_set (video_source, "flash-mode", flash_mode, NULL);
      if (exposure != EXPOSURE_NONE)
        g_object_set (video_source, "exposure", exposure, NULL);
      if (iso_speed != ISO_SPEED_NONE)
        g_object_set (video_source, "iso-speed", iso_speed, NULL);
      if (wb_mode != WHITE_BALANCE_MODE_NONE)
        g_object_set (video_source, "white-balance-mode", wb_mode, NULL);
      if (color_mode != COLOR_TONE_MODE_NONE)
        g_object_set (video_source, "colour-tone-mode", color_mode, NULL);
    }
    g_object_unref (video_source);
  } else {
    video_source = gst_bin_get_by_name (GST_BIN (camerabin), "camerasrc");
    gst_object_unref (video_source);
  }
  g_object_set (camerabin, "zoom", zoom / 100.0f, NULL);

  capture_count++;

  timing = g_slice_new0 (CaptureTiming);
  capture_times = g_list_prepend (capture_times, timing);

  /* set pad probe to check when buffer leaves the camera source */
  if (mode == MODE_IMAGE) {
    GstPad *pad;

    pad = gst_element_get_static_pad (video_source, "imgsrc");
    camera_probe_id = gst_pad_add_buffer_probe (pad,
        (GCallback) camera_src_get_timestamp_probe, NULL);

    gst_object_unref (pad);
  }
  timing->start_capture = gst_util_get_timestamp ();
  g_signal_emit_by_name (camerabin, "start-capture", 0);

  if (mode == MODE_VIDEO) {
    g_timeout_add ((capture_time * 1000), (GSourceFunc) stop_capture, NULL);
  }

  return FALSE;
}
Code Example #21
/*
 * We generate:
 *
 * audiotestsrc ! <audiocaps> ! output-selector ! [enc1 .. enc3] ! input-selector
 * select-all = true ! fakesink
 *
 * <audiocaps> makes sure we only produce one format from the audiotestsrc.
 *
 * Each encX element consists of:
 *
 *  audioresample ! <enccaps> ! identity !
 *
 * This way we can simply switch encoders without having to renegotiate.
 */
static GstElement *
make_pipeline (void)
{
  GstElement *result;
  GstElement *audiotestsrc;
  GstElement *audiocaps;
  GstElement *outputselect;
  GstElement *inputselect;
  GstElement *sink;
  GstCaps *caps;
  GstCaps *capslist[3];
  gint i;

  /* create result pipeline */
  result = gst_pipeline_new (NULL);
  g_assert (result);

  /* create various elements */
  audiotestsrc = gst_element_factory_make ("audiotestsrc", NULL);
  g_object_set (audiotestsrc, "num-buffers", 1000, NULL);
  g_assert (audiotestsrc);

  audiocaps = gst_element_factory_make ("capsfilter", NULL);
  g_assert (audiocaps);

  caps =
      gst_caps_from_string ("audio/x-raw,format=S16LE,rate=48000,channels=1");
  g_object_set (audiocaps, "caps", caps, NULL);
  gst_caps_unref (caps);

  outputselect = gst_element_factory_make ("output-selector", "select");
  g_assert (outputselect);

  inputselect = gst_element_factory_make ("input-selector", NULL);
  g_assert (inputselect);
  g_object_set (inputselect, "select-all", TRUE, NULL);

  sink = gst_element_factory_make ("fakesink", NULL);
  g_object_set (sink, "sync", TRUE, NULL);
  g_object_set (sink, "silent", TRUE, NULL);
  g_assert (sink);

  /* add elements */
  gst_bin_add (GST_BIN (result), audiotestsrc);
  gst_bin_add (GST_BIN (result), audiocaps);
  gst_bin_add (GST_BIN (result), outputselect);
  gst_bin_add (GST_BIN (result), inputselect);
  gst_bin_add (GST_BIN (result), sink);

  /* link elements */
  gst_element_link_pads (audiotestsrc, "src", audiocaps, "sink");
  gst_element_link_pads (audiocaps, "src", outputselect, "sink");
  gst_element_link_pads (inputselect, "src", sink, "sink");

  /* make caps */
  capslist[0] =
      gst_caps_from_string ("audio/x-raw,format=S16LE,rate=48000,channels=1");
  capslist[1] =
      gst_caps_from_string ("audio/x-raw,format=S16LE,rate=16000,channels=1");
  capslist[2] =
      gst_caps_from_string ("audio/x-raw,format=S16LE,rate=8000,channels=1");

  /* create encoder elements */
  for (i = 0; i < 3; i++) {
    GstElement *encoder;
    GstPad *srcpad, *sinkpad;

    encoder = make_encoder (capslist[i]);
    g_assert (encoder);

    gst_bin_add (GST_BIN (result), encoder);

    srcpad = gst_element_get_request_pad (outputselect, "src_%u");
    sinkpad = gst_element_get_static_pad (encoder, "sink");
    gst_pad_link (srcpad, sinkpad);
    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);

    srcpad = gst_element_get_static_pad (encoder, "src");
    sinkpad = gst_element_get_request_pad (inputselect, "sink_%u");
    gst_pad_link (srcpad, sinkpad);
    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);
  }

  return result;
}
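Switching encoders then amounts to changing the output-selector's "active-pad" property at runtime; a small sketch (function name hypothetical, "select" is the name given to the output-selector above):

static void
switch_encoder (GstElement * pipeline, GstPad * new_src_pad)
{
  /* Sketch: route the stream through a different encoder branch by
   * pointing the output-selector at another of its request src pads. */
  GstElement *sel = gst_bin_get_by_name (GST_BIN (pipeline), "select");

  g_object_set (sel, "active-pad", new_src_pad, NULL);
  gst_object_unref (sel);
}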
Code Example #22
static void
gst_spectrum_class_init (GstSpectrumClass * klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
  GstBaseTransformClass *trans_class = GST_BASE_TRANSFORM_CLASS (klass);
  GstAudioFilterClass *filter_class = GST_AUDIO_FILTER_CLASS (klass);
  GstCaps *caps;

  gobject_class->set_property = gst_spectrum_set_property;
  gobject_class->get_property = gst_spectrum_get_property;
  gobject_class->finalize = gst_spectrum_finalize;

  trans_class->start = GST_DEBUG_FUNCPTR (gst_spectrum_start);
  trans_class->stop = GST_DEBUG_FUNCPTR (gst_spectrum_stop);
  trans_class->transform_ip = GST_DEBUG_FUNCPTR (gst_spectrum_transform_ip);
  trans_class->passthrough_on_same_caps = TRUE;

  filter_class->setup = GST_DEBUG_FUNCPTR (gst_spectrum_setup);

  g_object_class_install_property (gobject_class, PROP_POST_MESSAGES,
      g_param_spec_boolean ("post-messages", "Post Messages",
          "Whether to post a 'spectrum' element message on the bus for each "
          "passed interval", DEFAULT_POST_MESSAGES,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_MESSAGE_MAGNITUDE,
      g_param_spec_boolean ("message-magnitude", "Magnitude",
          "Whether to add a 'magnitude' field to the structure of any "
          "'spectrum' element messages posted on the bus",
          DEFAULT_MESSAGE_MAGNITUDE,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_MESSAGE_PHASE,
      g_param_spec_boolean ("message-phase", "Phase",
          "Whether to add a 'phase' field to the structure of any "
          "'spectrum' element messages posted on the bus",
          DEFAULT_MESSAGE_PHASE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_INTERVAL,
      g_param_spec_uint64 ("interval", "Interval",
          "Interval of time between message posts (in nanoseconds)",
          1, G_MAXUINT64, DEFAULT_INTERVAL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_BANDS,
      g_param_spec_uint ("bands", "Bands", "Number of frequency bands",
          0, G_MAXUINT, DEFAULT_BANDS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_THRESHOLD,
      g_param_spec_int ("threshold", "Threshold",
          "dB threshold for result. All lower values will be set to this",
          G_MININT, 0, DEFAULT_THRESHOLD,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_MULTI_CHANNEL,
      g_param_spec_boolean ("multi-channel", "Multichannel results",
          "Send separate results for each channel",
          DEFAULT_MULTI_CHANNEL, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  GST_DEBUG_CATEGORY_INIT (gst_spectrum_debug, "spectrum", 0,
      "audio spectrum analyser element");

  gst_element_class_set_static_metadata (element_class, "Spectrum analyzer",
      "Filter/Analyzer/Audio",
      "Run an FFT on the audio signal, output spectrum data",
      "Erik Walthinsen <*****@*****.**>, "
      "Stefan Kost <*****@*****.**>, "
      "Sebastian Dröge <*****@*****.**>");

  caps = gst_caps_from_string (ALLOWED_CAPS);
  gst_audio_filter_class_add_pad_templates (filter_class, caps);
  gst_caps_unref (caps);
}
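On the receiving side, the messages this element posts arrive as GST_MESSAGE_ELEMENT with a structure named "spectrum"; a minimal bus-watch sketch (assuming post-messages=TRUE and the default single-channel layout, where "magnitude" is a list with one value per band):

static gboolean
on_bus_message (GstBus * bus, GstMessage * msg, gpointer user_data)
{
  if (GST_MESSAGE_TYPE (msg) == GST_MESSAGE_ELEMENT) {
    const GstStructure *s = gst_message_get_structure (msg);

    if (gst_structure_has_name (s, "spectrum")) {
      const GValue *mags = gst_structure_get_value (s, "magnitude");

      if (mags != NULL && GST_VALUE_HOLDS_LIST (mags))
        g_print ("got spectrum with %u bands\n",
            gst_value_list_get_size (mags));
    }
  }
  return TRUE;
}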
Code Example #23
File: ges-pitivi-formatter.c Project: cfoch/ges
static void
make_source (GESFormatter * self, GList * reflist, GHashTable * source_table)
{
  GHashTable *props_table, *effect_table;
  gchar **prio_array;
  GESLayer *layer;
  GESPitiviFormatterPrivate *priv = GES_PITIVI_FORMATTER (self)->priv;

  gchar *fac_ref = NULL, *media_type = NULL, *filename = NULL, *prio_str;
  GList *tmp = NULL, *keys, *tmp_key;
  GESUriClip *src = NULL;
  gint prio;
  gboolean a_avail = FALSE, v_avail = FALSE, video;
  GHashTable *trackelement_table = priv->track_elements_table;

  for (tmp = reflist; tmp; tmp = tmp->next) {

    /* Get the layer */
    props_table = g_hash_table_lookup (trackelement_table, (gchar *) tmp->data);
    prio_str = (gchar *) g_hash_table_lookup (props_table, "priority");
    prio_array = g_strsplit (prio_str, ")", 0);
    prio = (gint) g_ascii_strtod (prio_array[1], NULL);
    g_strfreev (prio_array);

    /* If we do not have any layer with this priority, create it */
    if (!(layer = g_hash_table_lookup (priv->layers_table, &prio))) {
      layer = ges_layer_new ();
      g_object_set (layer, "auto-transition", TRUE, "priority", prio, NULL);
      ges_timeline_add_layer (self->timeline, layer);
      g_hash_table_insert (priv->layers_table, g_memdup (&prio,
              sizeof (guint64)), layer);
    }

    fac_ref = (gchar *) g_hash_table_lookup (props_table, "fac_ref");
    media_type = (gchar *) g_hash_table_lookup (props_table, "media_type");

    if (!g_strcmp0 (media_type, "pitivi.stream.VideoStream"))
      video = TRUE;
    else
      video = FALSE;

    /* FIXME I am sure we could reimplement this whole part
     * in a simpler way */

    if (g_strcmp0 (fac_ref, (gchar *) "effect")) {
      /* FIXME this is a hack to get a ref to the formatter when receiving
       * child-added */
      g_hash_table_insert (props_table, (gchar *) "current-formatter", self);
      if (a_avail && (!video)) {
        a_avail = FALSE;
      } else if (v_avail && (video)) {
        v_avail = FALSE;
      } else {

        /* If we only had audio or only video in the previous source,
         * mark it as such */
        if (a_avail) {
          ges_clip_set_supported_formats (GES_CLIP (src), GES_TRACK_TYPE_VIDEO);
        } else if (v_avail) {
          ges_clip_set_supported_formats (GES_CLIP (src), GES_TRACK_TYPE_AUDIO);
        }

        filename = (gchar *) g_hash_table_lookup (source_table, "filename");

        src = ges_uri_clip_new (filename);

        if (!video) {
          v_avail = TRUE;
          a_avail = FALSE;
        } else {
          a_avail = TRUE;
          v_avail = FALSE;
        }

        set_properties (G_OBJECT (src), props_table);
        ges_layer_add_clip (layer, GES_CLIP (src));

        g_signal_connect (src, "child-added",
            G_CALLBACK (track_element_added_cb), props_table);

        priv->sources_to_load = g_list_prepend (priv->sources_to_load, src);
      }

    } else {
      GESEffect *effect;
      gchar *active = (gchar *) g_hash_table_lookup (props_table, "active");

      effect = ges_effect_new ((gchar *)
          g_hash_table_lookup (props_table, (gchar *) "effect_name"));
      ges_track_element_set_track_type (GES_TRACK_ELEMENT (effect),
          (video ? GES_TRACK_TYPE_VIDEO : GES_TRACK_TYPE_AUDIO));
      effect_table =
          g_hash_table_lookup (props_table, (gchar *) "effect_props");

      ges_container_add (GES_CONTAINER (src), GES_TIMELINE_ELEMENT (effect));

      if (!g_strcmp0 (active, (gchar *) "(bool)False"))
        ges_track_element_set_active (GES_TRACK_ELEMENT (effect), FALSE);

      /* Set effect properties */
      keys = g_hash_table_get_keys (effect_table);
      for (tmp_key = keys; tmp_key; tmp_key = tmp_key->next) {
        GstStructure *structure;
        const GValue *value;
        GParamSpec *spec;
        GstCaps *caps;
        gchar *prop_val;

        prop_val = (gchar *) g_hash_table_lookup (effect_table,
            (gchar *) tmp_key->data);

        if (g_strstr_len (prop_val, -1, "(GEnum)")) {
          gchar **val = g_strsplit (prop_val, ")", 2);

          ges_track_element_set_child_properties (GES_TRACK_ELEMENT (effect),
              (gchar *) tmp_key->data, atoi (val[1]), NULL);
          g_strfreev (val);

        } else if (ges_track_element_lookup_child (GES_TRACK_ELEMENT (effect),
                (gchar *) tmp->data, NULL, &spec)) {
          gchar *caps_str = g_strdup_printf ("structure1, property1=%s;",
              prop_val);

          caps = gst_caps_from_string (caps_str);
          g_free (caps_str);
          structure = gst_caps_get_structure (caps, 0);
          value = gst_structure_get_value (structure, "property1");

          ges_track_element_set_child_property_by_pspec (GES_TRACK_ELEMENT
              (effect), spec, (GValue *) value);
          gst_caps_unref (caps);
        }
      }
    }
  }

  if (a_avail) {
    ges_clip_set_supported_formats (GES_CLIP (src), GES_TRACK_TYPE_VIDEO);
  } else if (v_avail) {
    ges_clip_set_supported_formats (GES_CLIP (src), GES_TRACK_TYPE_AUDIO);
  }
}
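The caps_str trick near the end is worth isolating: it abuses the caps parser to turn a serialised value such as "(float)0.5" into a typed GValue. A sketch of the same trick as a standalone helper (name hypothetical; the returned value stays owned by the caps):

static const GValue *
value_from_serialized_string (const gchar * prop_val, GstCaps ** caps_out)
{
  gchar *caps_str;
  GstCaps *caps;

  /* Sketch: wrap the serialised value in a throwaway caps string so
   * the caps parser deserialises it into a typed GValue for us. */
  caps_str = g_strdup_printf ("structure1, property1=%s;", prop_val);
  caps = gst_caps_from_string (caps_str);
  g_free (caps_str);
  if (caps == NULL)
    return NULL;
  *caps_out = caps;             /* caller unrefs once done with the value */
  return gst_structure_get_value (gst_caps_get_structure (caps, 0),
      "property1");
}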
Code Example #24
/**
 * parse_encoding_profile:
 * @in: a #GKeyFile
 * @parentprofilename: the parent profile name (including 'profile-' or 'streamprofile-' header)
 * @profilename: the profile name group to parse
 * @nbgroups: the number of top-level groups
 * @groups: the top-level groups
 */
static GstEncodingProfile *
parse_encoding_profile (GKeyFile * in, gchar * parentprofilename,
    gchar * profilename, gsize nbgroups, gchar ** groups)
{
  GstEncodingProfile *sprof = NULL;
  gchar **parent;
  gchar *proftype, *format, *preset, *restriction, *pname, *description,
      *locale;
  GstCaps *formatcaps = NULL;
  GstCaps *restrictioncaps = NULL;
  gboolean variableframerate;
  gint pass, presence;
  gsize i, nbencprofiles;

  GST_DEBUG ("parentprofilename : %s , profilename : %s",
      parentprofilename, profilename);

  if (parentprofilename) {
    gboolean found = FALSE;

    parent =
        g_key_file_get_string_list (in, profilename, "parent",
        &nbencprofiles, NULL);
    if (!parent || !nbencprofiles) {
      return NULL;
    }

    /* Check if this streamprofile is used in <profilename> */
    for (i = 0; i < nbencprofiles; i++) {
      if (!g_strcmp0 (parent[i], parentprofilename)) {
        found = TRUE;
        break;
      }
    }
    g_strfreev (parent);

    if (!found) {
      GST_DEBUG ("Stream profile '%s' isn't used in profile '%s'",
          profilename, parentprofilename);
      return NULL;
    }
  }

  pname = g_key_file_get_value (in, profilename, "name", NULL);

  locale = get_locale ();
  /* will try to fall back to untranslated string if no translation found */
  description = g_key_file_get_locale_string (in, profilename,
      "description", locale, NULL);
  g_free (locale);

  /* Note: a missing description is normal for non-container profiles */
  if (description == NULL) {
    GST_LOG ("Missing 'description' field for streamprofile %s", profilename);
  }

  /* Parse the remaining fields */
  proftype = g_key_file_get_value (in, profilename, "type", NULL);
  if (!proftype) {
    GST_WARNING ("Missing 'type' field for streamprofile %s", profilename);
    return NULL;
  }

  format = g_key_file_get_value (in, profilename, "format", NULL);
  if (format) {
    formatcaps = gst_caps_from_string (format);
    g_free (format);
  }

  preset = g_key_file_get_value (in, profilename, "preset", NULL);

  restriction = g_key_file_get_value (in, profilename, "restriction", NULL);
  if (restriction) {
    restrictioncaps = gst_caps_from_string (restriction);
    g_free (restriction);
  }

  presence = g_key_file_get_integer (in, profilename, "presence", NULL);
  pass = g_key_file_get_integer (in, profilename, "pass", NULL);
  variableframerate =
      g_key_file_get_boolean (in, profilename, "variableframerate", NULL);

  /* Build the streamprofile ! */
  if (!g_strcmp0 (proftype, "container")) {
    GstEncodingProfile *pprof;

    sprof =
        (GstEncodingProfile *) gst_encoding_container_profile_new (pname,
        description, formatcaps, preset);
    /* Now look for the stream profiles */
    for (i = 0; i < nbgroups; i++) {
      if (!g_ascii_strncasecmp (groups[i], "streamprofile-", 14)) {
        pprof = parse_encoding_profile (in, pname, groups[i], nbgroups, groups);
        if (pprof) {
          gst_encoding_container_profile_add_profile (
              (GstEncodingContainerProfile *) sprof, pprof);
        }
      }
    }
  } else if (!g_strcmp0 (proftype, "video")) {
    sprof =
        (GstEncodingProfile *) gst_encoding_video_profile_new (formatcaps,
        preset, restrictioncaps, presence);
    gst_encoding_video_profile_set_variableframerate ((GstEncodingVideoProfile
            *) sprof, variableframerate);
    gst_encoding_video_profile_set_pass ((GstEncodingVideoProfile *) sprof,
        pass);
    gst_encoding_profile_set_name (sprof, pname);
    gst_encoding_profile_set_description (sprof, description);
  } else if (!g_strcmp0 (proftype, "audio")) {
    sprof =
        (GstEncodingProfile *) gst_encoding_audio_profile_new (formatcaps,
        preset, restrictioncaps, presence);
    gst_encoding_profile_set_name (sprof, pname);
    gst_encoding_profile_set_description (sprof, description);
  } else
    GST_ERROR ("Unknown profile format '%s'", proftype);

  if (restrictioncaps)
    gst_caps_unref (restrictioncaps);
  if (formatcaps)
    gst_caps_unref (formatcaps);

  g_free (pname);
  g_free (description);
  g_free (preset);
  g_free (proftype);

  return sprof;
}
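A hypothetical driver, assuming a keyfile whose top-level groups are named "profile-<name>" and "streamprofile-<name>" as this parser expects:

/* Hypothetical caller: walk the top-level groups of a keyfile and
 * parse every "profile-" group with the function above. */
static void
load_profiles (const gchar * path)
{
  GKeyFile *in = g_key_file_new ();
  gchar **groups;
  gsize nbgroups, i;

  if (g_key_file_load_from_file (in, path, G_KEY_FILE_NONE, NULL)) {
    groups = g_key_file_get_groups (in, &nbgroups);
    for (i = 0; i < nbgroups; i++) {
      if (!g_ascii_strncasecmp (groups[i], "profile-", 8)) {
        GstEncodingProfile *prof =
            parse_encoding_profile (in, NULL, groups[i], nbgroups, groups);

        if (prof)
          gst_encoding_profile_unref (prof);
      }
    }
    g_strfreev (groups);
  }
  g_key_file_free (in);
}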
Code Example #25
File: videoscale.c Project: PeterXu/gst-mobile
static void
_test_negotiation (const gchar * src_templ, const gchar * sink_templ,
    gint width, gint height, gint par_n, gint par_d)
{
  GstElement *pipeline;
  GstElement *src, *capsfilter1, *scale, *capsfilter2, *sink;
  GstBus *bus;
  GMainLoop *loop;
  GstCaps *caps;
  TestNegotiationData data = { 0, 0, 0, 0, FALSE, NULL };
  GstPad *pad;

  GST_DEBUG ("Running test for src templ caps '%s' and sink templ caps '%s'",
      src_templ, sink_templ);

  pipeline = gst_element_factory_make ("pipeline", "pipeline");
  fail_unless (pipeline != NULL);

  src = gst_element_factory_make ("videotestsrc", "src");
  fail_unless (src != NULL);
  g_object_set (G_OBJECT (src), "num-buffers", 1, NULL);

  capsfilter1 = gst_element_factory_make ("capsfilter", "filter1");
  fail_unless (capsfilter1 != NULL);
  caps = gst_caps_from_string (src_templ);
  fail_unless (caps != NULL);
  g_object_set (G_OBJECT (capsfilter1), "caps", caps, NULL);
  gst_caps_unref (caps);

  scale = gst_element_factory_make ("videoscale", "scale");
  fail_unless (scale != NULL);

  capsfilter2 = gst_element_factory_make ("capsfilter", "filter2");
  fail_unless (capsfilter2 != NULL);
  caps = gst_caps_from_string (sink_templ);
  fail_unless (caps != NULL);
  g_object_set (G_OBJECT (capsfilter2), "caps", caps, NULL);
  gst_caps_unref (caps);

  pad = gst_element_get_static_pad (capsfilter2, "sink");
  fail_unless (pad != NULL);
  g_signal_connect (pad, "notify::caps",
      G_CALLBACK (_test_negotiation_notify_caps), &data);
  gst_object_unref (pad);

  sink = gst_element_factory_make ("fakesink", "sink");
  fail_unless (sink != NULL);
  g_object_set (sink, "async", FALSE, NULL);

  gst_bin_add_many (GST_BIN (pipeline), src, capsfilter1, scale, capsfilter2,
      sink, NULL);

  fail_unless (gst_element_link_pads_full (src, "src", capsfilter1, "sink",
          LINK_CHECK_FLAGS));
  fail_unless (gst_element_link_pads_full (capsfilter1, "src", scale, "sink",
          LINK_CHECK_FLAGS));
  fail_unless (gst_element_link_pads_full (scale, "src", capsfilter2, "sink",
          LINK_CHECK_FLAGS));
  fail_unless (gst_element_link_pads_full (capsfilter2, "src", sink, "sink",
          LINK_CHECK_FLAGS));

  loop = g_main_loop_new (NULL, FALSE);

  bus = gst_element_get_bus (pipeline);
  fail_unless (bus != NULL);
  gst_bus_add_signal_watch (bus);

  data.loop = loop;
  data.width = width;
  data.height = height;
  data.par_n = par_n;
  data.par_d = par_d;
  data.ok = FALSE;

  g_signal_connect (bus, "message", G_CALLBACK (_test_negotiation_message),
      &data);

  gst_object_unref (bus);

  fail_unless (gst_element_set_state (pipeline,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS);

  g_main_loop_run (loop);

  fail_unless (data.ok == TRUE);

  fail_unless (gst_element_set_state (pipeline,
          GST_STATE_NULL) == GST_STATE_CHANGE_SUCCESS);

  gst_object_unref (pipeline);
  g_main_loop_unref (loop);
}
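A hypothetical invocation, expecting videoscale to negotiate 640x480 with square pixels when the source caps are fixed and the sink template is unconstrained:

  /* Hypothetical test case for the helper above. */
  _test_negotiation
      ("video/x-raw,format=(string)I420,width=640,height=480",
      "video/x-raw,format=(string)I420", 640, 480, 1, 1);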
Code Example #26
static GstCaps *
_custom_video_decoder_getcaps (GstVideoDecoder * dec, GstCaps * filter)
{
  return gst_caps_from_string (GETCAPS_CAPS_STR);
}
Code Example #27
File: CQmedia.cpp Project: zhangxuran11/RoomMedia
CQMedia::CQMedia(int winID,QWidget* win, DevType type,QObject* parent):
    CQGstBasic(winID,win,parent)
{
    if(type == ROOM||type == SUBVIDEO)
        qosMax = 20;
    else if(type == CAR)
        qosMax = 20;
    devType = type;
    silenceState = false;
    this->pipeline=gst_pipeline_new( "pipeline" );
    // create the individual elements
    GstElement *udpsrc =  gst_element_factory_make("udpsrc","udpsrc");
    g_object_set(G_OBJECT(udpsrc),"do-timestamp",false,NULL);
    g_object_set(G_OBJECT(udpsrc),"timeout",(quint64)2000000,NULL);
    GstElement *queue0 = gst_element_factory_make("queue","queue0");
    g_object_set(G_OBJECT(queue0),"max-size-time",(guint64)0,NULL);
    GstElement *rtpmp2tdepay  =  gst_element_factory_make("rtpmp2tdepay","rtpmp2tdepay");

    GstElement *queue1 = gst_element_factory_make("queue","queue1");
    g_object_set(G_OBJECT(queue1),"max-size-time",(guint64)0,NULL);
    GstElement *aiurdemux = gst_element_factory_make("aiurdemux","aiurdemux");
    //g_object_set(G_OBJECT(aiurdemux),"streaming_latency",(guint64)5000,NULL);
    GstElement *queue2 = gst_element_factory_make("queue","queue2");
    g_object_set(G_OBJECT(queue2),"max-size-time",(guint64)0,NULL);
#ifdef ARM
    GstElement *v_dec = gst_element_factory_make("mfw_vpudecoder","v_dec");
//    g_object_set(G_OBJECT(v_dec),"parser",false,"dbkenable",false,"profiling",false,
//                 "framedrop",true,"min-latency",true,"fmt",(guint64)0,NULL);
    GstElement *v_sink  = gst_element_factory_make("imxv4l2sink","v_sink");
    g_object_set(G_OBJECT(v_sink),"sync",true,"x11enable",true,NULL);
#endif
#ifdef X86
    GstElement *v_dec = gst_element_factory_make("ffdec_h264","v_dec");
    GstElement *colorspace =  gst_element_factory_make("ffmpegcolorspace","colorspace");
    GstElement *v_sink = gst_element_factory_make("ximagesink", "v_sink");
    g_object_set(G_OBJECT(v_sink),"sync",true,NULL);
#endif
    GstElement *queue3 = gst_element_factory_make("queue","queue3");
    g_object_set(G_OBJECT(queue3),"max-size-time",(guint64)0,NULL);

#ifdef ARM
    GstElement *a_dec = gst_element_factory_make("beepdec","a_dec");
#endif

#ifdef X86
    GstElement *a_dec = gst_element_factory_make("ffdec_aac","a_dec");
#endif

    GstElement *a_conv = gst_element_factory_make("audioconvert","audioconvert");
    GstElement *capsfilter = gst_element_factory_make("capsfilter","capsfilter");
    GstCaps *a_caps = gst_caps_from_string("audio/x-raw-int, channels=2");
    g_object_set(G_OBJECT(capsfilter),"caps",a_caps,NULL);
    GstElement *audiovol = gst_element_factory_make("volume","audiovol");
    GstElement *a_sink = gst_element_factory_make("alsasink","a_sink");
    g_object_set(G_OBJECT(a_sink),"sync",true,NULL);
// link the elements together
#ifdef ARM
    if(pipeline==NULL||udpsrc==NULL||queue0==NULL||rtpmp2tdepay==NULL||queue1==NULL||aiurdemux==NULL||
            queue2==NULL||v_dec==NULL||v_sink==NULL||queue3==NULL||a_dec==NULL||a_conv==NULL||capsfilter==NULL||audiovol==NULL||a_sink==NULL)
    {
        qDebug("video create failed\n");
    }
    gst_bin_add_many (GST_BIN (pipeline), udpsrc,queue0,rtpmp2tdepay,queue1,aiurdemux,
                      queue2,v_dec,queue3,a_dec,a_conv,capsfilter,audiovol,a_sink,v_sink,NULL);
    gst_element_link_many ( udpsrc,queue0,rtpmp2tdepay,queue1,aiurdemux,NULL);
    gst_element_link_many ( queue2,v_dec,v_sink,NULL);
    g_signal_connect (aiurdemux, "pad-added", G_CALLBACK (pad_added_handler),queue2);
    gst_element_link_many ( queue3,a_dec,a_conv,capsfilter,audiovol,a_sink,NULL);
    g_signal_connect (aiurdemux, "pad-added", G_CALLBACK (pad_added_handler), queue3);
#endif
#ifdef X86
    if(pipeline==NULL||udpsrc==NULL||queue0==NULL||rtpmp2tdepay==NULL||queue1==NULL||aiurdemux==NULL||
            queue2==NULL||v_dec==NULL||colorspace==NULL||v_sink==NULL||queue3==NULL||a_dec==NULL||a_conv==NULL||capsfilter==NULL||audiovol==NULL||a_sink==NULL)
    {
        qDebug("video create failed\n");
    }
    gst_bin_add_many (GST_BIN (pipeline), udpsrc,queue0,rtpmp2tdepay,queue1,aiurdemux,
                      queue2,v_dec,colorspace,v_sink,queue3,a_dec,a_conv,capsfilter,audiovol,a_sink,NULL);
    gst_element_link_many ( udpsrc,queue0,rtpmp2tdepay,queue1,aiurdemux,NULL);

    gst_element_link_many ( queue2,v_dec,colorspace,v_sink,NULL);
    g_signal_connect (aiurdemux, "pad-added", G_CALLBACK (pad_added_handler), queue2);
    gst_element_link_many ( queue3,a_dec,a_conv,capsfilter,audiovol,a_sink,NULL);
    g_signal_connect (aiurdemux, "pad-added", G_CALLBACK (pad_added_handler), queue3);

#endif


    // install the bus message watch
    GstBus* bus=gst_pipeline_get_bus(GST_PIPELINE(pipeline));
    gst_bus_add_watch(bus,bus_callback,this);
    gst_object_unref(bus);
    gst_element_set_state(pipeline,GST_STATE_NULL);
    this->state=STOPPED;
}
Code Example #28
File: servicemp3record.cpp Project: Open-Plus/opgui
int eServiceMP3Record::doPrepare()
{
	if (m_state == stateIdle)
	{
		gchar *uri;
		size_t pos = m_ref.path.find('#');
		std::string stream_uri;
		if (pos != std::string::npos && (m_ref.path.compare(0, 4, "http") == 0 || m_ref.path.compare(0, 4, "rtsp") == 0))
		{
			stream_uri = m_ref.path.substr(0, pos);
			m_extra_headers = m_ref.path.substr(pos + 1);

			pos = m_extra_headers.find("User-Agent=");
			if (pos != std::string::npos)
			{
				size_t hpos_start = pos + 11;
				size_t hpos_end = m_extra_headers.find('&', hpos_start);
				if (hpos_end != std::string::npos)
					m_useragent = m_extra_headers.substr(hpos_start, hpos_end - hpos_start);
				else
					m_useragent = m_extra_headers.substr(hpos_start);
			}

		}
		else
		{
			stream_uri = m_ref.path;
		}
		eDebug("[eMP3ServiceRecord] doPrepare uri=%s", stream_uri.c_str());
		uri = g_strdup_printf ("%s", stream_uri.c_str());

		m_recording_pipeline = gst_pipeline_new ("recording-pipeline");
		m_source = gst_element_factory_make("uridecodebin", "uridec");
		GstElement* sink = gst_element_factory_make("filesink", "fsink");

		// set uridecodebin properties and notify
		g_object_set(m_source, "uri", uri, NULL);
		g_object_set(m_source, "caps", gst_caps_from_string("video/mpegts;video/x-flv;video/x-matroska;video/quicktime;video/x-msvideo;video/x-ms-asf;audio/mpeg;audio/x-flac;audio/x-ac3"), NULL);
		g_signal_connect(m_source, "notify::source", G_CALLBACK(handleUridecNotifySource), this);
		g_signal_connect(m_source, "pad-added", G_CALLBACK(handlePadAdded), sink);
		g_signal_connect(m_source, "autoplug-continue", G_CALLBACK(handleAutoPlugCont), this);

		// set sink properties
		g_object_set(sink, "location", m_filename.c_str(), NULL);

		g_free(uri);
		if (m_recording_pipeline && m_source && sink)
		{
			gst_bin_add_many(GST_BIN(m_recording_pipeline), m_source, sink, NULL);

			GstBus *bus = gst_pipeline_get_bus(GST_PIPELINE(m_recording_pipeline));
#if GST_VERSION_MAJOR < 1
			gst_bus_set_sync_handler(bus, gstBusSyncHandler, this);
#else
			gst_bus_set_sync_handler(bus, gstBusSyncHandler, this, NULL);
#endif
			gst_object_unref(bus);
		}
		else
		{
			m_recording_pipeline = 0;
			eDebug("[eServiceMP3Record] doPrepare Sorry, cannot record: Failed to create GStreamer pipeline!");
			return -1;
		}
	}
	return 0;
}
Code Example #29
int
main (int argc, char **argv)
{
    static const GOptionEntry test_goptions[] = {
        {   "videosink", '\0', 0, G_OPTION_ARG_STRING, &opt_videosink_str,
            "videosink to use (default: " DEFAULT_VIDEOSINK ")", NULL
        },
        {   "caps", '\0', 0, G_OPTION_ARG_STRING, &opt_filtercaps_str,
            "filter caps to narrow down formats to test", NULL
        },
        {   "with-ffmpegcolorspace", '\0', 0, G_OPTION_ARG_NONE,
            &opt_with_ffmpegcolorspace,
            "whether to add an ffmpegcolorspace element in front of the sink",
            NULL
        },
        {NULL, '\0', 0, 0, NULL, NULL, NULL}
    };
    GOptionContext *ctx;
    GError *opt_err = NULL;

    GstElement *pipeline, *src, *filter1, *crop, *scale, *filter2, *csp, *sink;
    GMainLoop *loop;
    GstCaps *filter_caps = NULL;
    GList *caps_list, *l;

    if (!g_thread_supported ())
        g_thread_init (NULL);

    /* command line option parsing */
    ctx = g_option_context_new ("");
    g_option_context_add_group (ctx, gst_init_get_option_group ());
    g_option_context_add_main_entries (ctx, test_goptions, NULL);

    if (!g_option_context_parse (ctx, &argc, &argv, &opt_err)) {
        g_error ("Error parsing command line options: %s", opt_err->message);
        return -1;
    }

    GST_DEBUG_CATEGORY_INIT (videocrop_test_debug, "videocroptest", 0, "vctest");

    loop = g_main_loop_new (NULL, FALSE);

    pipeline = gst_pipeline_new ("pipeline");
    src = gst_element_factory_make ("videotestsrc", "videotestsrc");
    g_assert (src != NULL);
    filter1 = gst_element_factory_make ("capsfilter", "capsfilter1");
    g_assert (filter1 != NULL);
    crop = gst_element_factory_make ("videocrop", "videocrop");
    g_assert (crop != NULL);
    scale = gst_element_factory_make ("videoscale", "videoscale");
    g_assert (scale != NULL);
    filter2 = gst_element_factory_make ("capsfilter", "capsfilter2");
    g_assert (filter2 != NULL);

    if (opt_with_ffmpegcolorspace) {
        g_print ("Adding ffmpegcolorspace\n");
        csp = gst_element_factory_make ("ffmpegcolorspace", "colorspace");
    } else {
        csp = gst_element_factory_make ("identity", "colorspace");
    }
    g_assert (csp != NULL);

    if (opt_filtercaps_str) {
        filter_caps = gst_caps_from_string (opt_filtercaps_str);
        if (filter_caps == NULL) {
            g_error ("Invalid filter caps string '%s'", opt_filtercaps_str);
        } else {
            g_print ("Using filter caps '%s'\n", opt_filtercaps_str);
        }
    }

    if (opt_videosink_str) {
        g_print ("Trying videosink '%s' ...", opt_videosink_str);
        sink = gst_element_factory_make (opt_videosink_str, "sink");
        g_print ("%s\n", (sink) ? "ok" : "element couldn't be created");
    } else {
        sink = NULL;
    }

    if (sink == NULL) {
        g_print ("Trying videosink '%s' ...", DEFAULT_VIDEOSINK);
        sink = gst_element_factory_make (DEFAULT_VIDEOSINK, "sink");
        g_print ("%s\n", (sink) ? "ok" : "element couldn't be created");
    }
    if (sink == NULL) {
        g_print ("Trying videosink '%s' ...", "xvimagesink");
        sink = gst_element_factory_make ("xvimagesink", "sink");
        g_print ("%s\n", (sink) ? "ok" : "element couldn't be created");
    }
    if (sink == NULL) {
        g_print ("Trying videosink '%s' ...", "ximagesink");
        sink = gst_element_factory_make ("ximagesink", "sink");
        g_print ("%s\n", (sink) ? "ok" : "element couldn't be created");
    }

    g_assert (sink != NULL);

    gst_bin_add_many (GST_BIN (pipeline), src, filter1, crop, scale, filter2,
                      csp, sink, NULL);

    if (!gst_element_link (src, filter1))
        g_error ("Failed to link videotestsrc to capsfilter1");

    if (!gst_element_link (filter1, crop))
        g_error ("Failed to link capsfilter1 to videocrop");

    if (!gst_element_link (crop, scale))
        g_error ("Failed to link videocrop to videoscale");

    if (!gst_element_link (scale, filter2))
        g_error ("Failed to link videoscale to capsfilter2");

    if (!gst_element_link (filter2, csp))
        g_error ("Failed to link capsfilter2 to ffmpegcolorspace");

    if (!gst_element_link (csp, sink))
        g_error ("Failed to link ffmpegcolorspace to video sink");

    caps_list = video_crop_get_test_caps (crop);
    for (l = caps_list; l != NULL; l = l->next) {
        GstStateChangeReturn ret;
        GstCaps *caps, *out_caps;
        gboolean skip = FALSE;
        gchar *s;

        if (filter_caps) {
            GstCaps *icaps;

            icaps = gst_caps_intersect (filter_caps, GST_CAPS (l->data));
            skip = gst_caps_is_empty (icaps);
            gst_caps_unref (icaps);
        }

        /* this is the size of our window (stays fixed) */
        out_caps = gst_caps_copy (GST_CAPS (l->data));
        gst_structure_set (gst_caps_get_structure (out_caps, 0), "width",
                           G_TYPE_INT, OUT_WIDTH, "height", G_TYPE_INT, OUT_HEIGHT, NULL);

        g_object_set (filter2, "caps", out_caps, NULL);

        /* filter1 gets these too to prevent videotestsrc from renegotiating */
        g_object_set (filter1, "caps", out_caps, NULL);
        gst_caps_unref (out_caps);

        caps = gst_caps_copy (GST_CAPS (l->data));
        GST_INFO ("testing format: %" GST_PTR_FORMAT, caps);

        s = gst_caps_to_string (caps);

        if (skip) {
            g_print ("Skipping format: %s\n", s);
            g_free (s);
            continue;
        }

        g_print ("Format: %s\n", s);

        caps = gst_caps_make_writable (caps);

        /* FIXME: check return values */
        ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
        if (ret != GST_STATE_CHANGE_FAILURE) {
            ret = gst_element_get_state (pipeline, NULL, NULL, -1);

            if (ret != GST_STATE_CHANGE_FAILURE) {
                test_with_caps (src, crop, caps);
            } else {
                g_print ("Format: %s not supported (failed to go to PLAYING)\n", s);
            }
        } else {
            g_print ("Format: %s not supported\n", s);
        }

        gst_element_set_state (pipeline, GST_STATE_NULL);

        gst_caps_unref (caps);
        g_free (s);
    }

    g_list_foreach (caps_list, (GFunc) gst_caps_unref, NULL);
    g_list_free (caps_list);

    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);

    return 0;
}
Code Example #30
File: multipartdemux.c Project: ChinnaSuhas/ossbuild
static GstMultipartPad *
gst_multipart_find_pad_by_mime (GstMultipartDemux * demux, gchar * mime,
    gboolean * created)
{
  GSList *walk;

  walk = demux->srcpads;
  while (walk) {
    GstMultipartPad *pad = (GstMultipartPad *) walk->data;

    if (!strcmp (pad->mime, mime)) {
      if (created) {
        *created = FALSE;
      }
      return pad;
    }

    walk = walk->next;
  }
  /* pad not found, create it */
  {
    GstPad *pad;
    GstMultipartPad *mppad;
    gchar *name;
    const gchar *capsname;
    GstCaps *caps;

    mppad = g_new0 (GstMultipartPad, 1);

    GST_DEBUG_OBJECT (demux, "creating pad with mime: %s", mime);

    name = g_strdup_printf ("src_%d", demux->numpads);
    pad =
        gst_pad_new_from_static_template (&multipart_demux_src_template_factory,
        name);
    g_free (name);

    /* take the mime type, convert it to the caps name */
    capsname = gst_multipart_demux_get_gstname (demux, mime);
    caps = gst_caps_from_string (capsname);
    GST_DEBUG_OBJECT (demux, "caps for pad: %s", capsname);
    gst_pad_use_fixed_caps (pad);
    gst_pad_set_caps (pad, caps);
    gst_caps_unref (caps);

    mppad->pad = pad;
    mppad->mime = g_strdup (mime);
    mppad->last_ret = GST_FLOW_OK;

    demux->srcpads = g_slist_prepend (demux->srcpads, mppad);
    demux->numpads++;

    gst_pad_set_active (pad, TRUE);
    gst_element_add_pad (GST_ELEMENT_CAST (demux), pad);

    if (created) {
      *created = TRUE;
    }

    if (demux->singleStream) {
      gst_element_no_more_pads (GST_ELEMENT_CAST (demux));
    }

    return mppad;
  }
}
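A hypothetical call from the demuxer's header-parsing loop, to show the intended use:

  /* Hypothetical usage: look up (or lazily create) the source pad
   * for the MIME type found in the part header. */
  gboolean created = FALSE;
  GstMultipartPad *srcpad =
      gst_multipart_find_pad_by_mime (demux, (gchar *) "image/jpeg", &created);

  if (created)
    GST_DEBUG_OBJECT (demux, "added new pad for mime %s", srcpad->mime);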