/* Inspect the playbin's streams and, if a video pad exists, report its
 * current caps to the player and watch for future caps changes. */
void
_bp_parse_stream_info (BansheePlayer *player)
{
    gint n_audio = 0, n_video = 0, n_text = 0;
    GstPad *video_pad = NULL;
    gint idx;

    /* Ask playbin2 how many streams of each kind it exposes. */
    g_object_get (G_OBJECT (player->playbin), "n-audio", &n_audio,
        "n-video", &n_video, "n-text", &n_text, NULL);

    /* Grab the first video pad playbin will hand out, if any. */
    for (idx = 0; idx < n_video && video_pad == NULL; idx++) {
        g_signal_emit_by_name (player->playbin, "get-video-pad", idx, &video_pad);
    }

    if (G_LIKELY (video_pad)) {
        GstCaps *caps;

        /* Already negotiated?  Report the geometry immediately. */
        caps = gst_pad_get_negotiated_caps (video_pad);
        if (G_LIKELY (caps)) {
            cb_caps_set (G_OBJECT (video_pad), NULL, player);
            gst_caps_unref (caps);
        }
        /* Re-report whenever the caps change later on. */
        g_signal_connect (video_pad, "notify::caps", G_CALLBACK (cb_caps_set), player);
        gst_object_unref (video_pad);
    }
}
Beispiel #2
0
/* Find the first video pad among the playbin's "video" stream-info objects
 * and, if it already has negotiated caps, push them into the preview state. */
static void
update_stream_info(signal_user_data_t *ud)
{
    GList *vstreams, *ll;
    GstPad *vpad = NULL;

    /* Each element of the returned list holds a ref we must drop below. */
    vstreams = get_stream_info_objects_for_type(ud->preview->play, "video");
    if (vstreams)
    {
        /* Stop at the first stream-info entry that yields a pad. */
        for (ll = vstreams; vpad == NULL && ll != NULL; ll = ll->next)
        {
            g_object_get(ll->data, "object", &vpad, NULL);
        }
    }
    if (vpad)
    {
        GstCaps *caps;

        /* NULL when the pad has not negotiated yet. */
        caps = gst_pad_get_negotiated_caps(vpad);
        if (caps)
        {
            caps_set(caps, ud);
            gst_caps_unref(caps);
        }
        //g_signal_connect(vpad, "notify::caps", G_CALLBACK(caps_set_cb), preview);
        gst_object_unref(vpad);
    }
    /* Release the per-element refs and the list itself. */
    g_list_foreach(vstreams, (GFunc)g_object_unref, NULL);
    g_list_free(vstreams);
}
/* Retrieve the video sink's negotiated Caps and tell the Java application
 * about the media size (width scaled by the pixel-aspect-ratio).
 * Now tolerates being called before negotiation: a missing sink, pad or
 * caps used to pass NULL into gst_video_format_parse_caps()/gst_caps_unref(),
 * which triggers GLib assertions. */
static void check_media_size (CustomData *data) {
  JNIEnv *env = get_jni_env ();
  GstElement *video_sink = NULL;
  GstPad *video_sink_pad;
  GstCaps *caps;
  GstVideoFormat fmt;
  int width;
  int height;

  /* Retrieve the Caps at the entrance of the video sink */
  g_object_get (data->pipeline, "video-sink", &video_sink, NULL);
  if (!video_sink) {
    GST_WARNING ("Pipeline has no video sink yet");
    return;
  }
  video_sink_pad = gst_element_get_static_pad (video_sink, "sink");
  if (!video_sink_pad) {
    gst_object_unref (video_sink);
    return;
  }
  caps = gst_pad_get_negotiated_caps (video_sink_pad);
  if (!caps) {
    /* Pad not negotiated yet; nothing to report. */
    gst_object_unref (video_sink_pad);
    gst_object_unref (video_sink);
    return;
  }

  if (gst_video_format_parse_caps (caps, &fmt, &width, &height)) {
    int par_n, par_d;
    /* Correct the width by the pixel aspect ratio for display. */
    if (gst_video_parse_caps_pixel_aspect_ratio (caps, &par_n, &par_d)) {
      width = width * par_n / par_d;
    }
    GST_DEBUG ("Media size is %dx%d, notifying application", width, height);

    (*env)->CallVoidMethod (env, data->app, on_media_size_changed_method_id, (jint)width, (jint)height);
    if ((*env)->ExceptionCheck (env)) {
      GST_ERROR ("Failed to call Java method");
      (*env)->ExceptionClear (env);
    }
  }

  gst_caps_unref (caps);
  gst_object_unref (video_sink_pad);
  gst_object_unref (video_sink);
}
/* Build a "stream-NN" GstStructure describing one discovered stream,
 * attaching its caps (negotiated if possible, otherwise the pad's template
 * range) and, when present, its tags.  Caller owns the returned structure. */
static GstStructure *
collect_stream_information (GstDiscoverer * dc, PrivateStream * ps, guint idx)
{
  GstCaps *caps;
  GstStructure *st;
  gchar *stname;

  /* Name the structure after the stream index, e.g. "stream-03". */
  stname = g_strdup_printf ("stream-%02d", idx);
  st = gst_structure_empty_new (stname);
  g_free (stname);

  /* Get caps */
  caps = gst_pad_get_negotiated_caps (ps->pad);
  if (!caps) {
    /* Not negotiated yet: fall back to the (possibly unfixed) pad caps. */
    GST_WARNING ("Couldn't get negotiated caps from %s:%s",
        GST_DEBUG_PAD_NAME (ps->pad));
    caps = gst_pad_get_caps (ps->pad);
  }
  if (caps) {
    GST_DEBUG ("Got caps %" GST_PTR_FORMAT, caps);
    /* gst_structure_id_set takes its own ref, so ours can be dropped. */
    gst_structure_id_set (st, _CAPS_QUARK, GST_TYPE_CAPS, caps, NULL);

    gst_caps_unref (caps);
  }
  if (ps->tags)
    gst_structure_id_set (st, _TAGS_QUARK, GST_TYPE_STRUCTURE, ps->tags, NULL);

  return st;
}
Beispiel #5
0
/* Preroll @filename with playbin2 (video routed to a fakesink) and read the
 * negotiated video width/height from the first video pad.
 * Fixes: the caps returned by gst_pad_get_negotiated_caps() were leaked, and
 * a NULL pad/caps would have been dereferenced; both are now asserted. */
static gboolean
get_file_info (const gchar * filename, gint * width, gint * height)
{
  GstElement *playbin = gst_element_factory_make ("playbin2", NULL);
  GstElement *fakesink = gst_element_factory_make ("fakesink", NULL);
  GstState state = GST_STATE_NULL;
  GstPad *pad;
  GstCaps *caps;
  gchar *uri = g_strdup_printf ("file://%s", filename);

  g_object_set (playbin, "video-sink", fakesink, NULL);
  g_object_set (playbin, "uri", uri, NULL);
  g_free (uri);

  /* PAUSED is enough to negotiate caps without rendering anything. */
  gst_element_set_state (playbin, GST_STATE_PAUSED);

  gst_element_get_state (playbin, &state, NULL, GST_SECOND * 5);

  fail_unless (state == GST_STATE_PAUSED);

  g_signal_emit_by_name (playbin, "get-video-pad", 0, &pad, NULL);
  fail_unless (pad != NULL);
  caps = gst_pad_get_negotiated_caps (pad);
  fail_unless (caps != NULL);
  fail_unless (gst_structure_get_int (gst_caps_get_structure (caps, 0), "width",
          width));
  fail_unless (gst_structure_get_int (gst_caps_get_structure (caps, 0),
          "height", height));
  gst_caps_unref (caps);        /* was leaked */

  gst_object_unref (pad);
  gst_element_set_state (playbin, GST_STATE_NULL);
  gst_object_unref (playbin);
  return TRUE;
}
/* this tests that the output is a correct discontinuous stream
 * if the input is; ie input drops in time come out the same way */
static void
test_discont_stream_instance (int inrate, int outrate, int samples,
    int numbuffers)
{
  GstElement *audioresample;
  GstBuffer *inbuffer, *outbuffer;
  GstCaps *caps;
  GstClockTime ints;

  int i, j;
  gint16 *p;

  /* Stereo resampler converting inrate -> outrate. */
  audioresample = setup_audioresample (2, inrate, outrate);
  caps = gst_pad_get_negotiated_caps (mysrcpad);
  fail_unless (gst_caps_is_fixed (caps));

  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");

  for (j = 1; j <= numbuffers; ++j) {

    /* samples frames * 2 channels * 2 bytes per 16-bit sample. */
    inbuffer = gst_buffer_new_and_alloc (samples * 4);
    GST_BUFFER_DURATION (inbuffer) = samples * GST_SECOND / inrate;
    /* "drop" half the buffers */
    ints = GST_BUFFER_DURATION (inbuffer) * 2 * (j - 1);
    GST_BUFFER_TIMESTAMP (inbuffer) = ints;
    GST_BUFFER_OFFSET (inbuffer) = (j - 1) * 2 * samples;
    GST_BUFFER_OFFSET_END (inbuffer) = j * 2 * samples + samples;

    gst_buffer_set_caps (inbuffer, caps);

    p = (gint16 *) GST_BUFFER_DATA (inbuffer);

    /* create a 16 bit signed ramp */
    for (i = 0; i < samples; ++i) {
      *p = -32767 + i * (65535 / samples);
      ++p;
      *p = -32767 + i * (65535 / samples);
      ++p;
    }

    /* pushing gives away my reference ... */
    fail_unless (gst_pad_push (mysrcpad, inbuffer) == GST_FLOW_OK);

    /* check if the timestamp of the pushed buffer matches the incoming one */
    outbuffer = g_list_nth_data (buffers, g_list_length (buffers) - 1);
    fail_if (outbuffer == NULL);
    fail_unless_equals_uint64 (ints, GST_BUFFER_TIMESTAMP (outbuffer));
    /* every buffer after the first follows a simulated gap, so the
     * resampler must mark it as a discontinuity */
    if (j > 1) {
      fail_unless (GST_BUFFER_IS_DISCONT (outbuffer),
          "expected discont buffer");
    }
  }

  /* cleanup */
  gst_caps_unref (caps);
  cleanup_audioresample (audioresample);
}
/* this tests that the output is a perfect stream if the input is */
static void
test_perfect_stream_instance (int inrate, int outrate, int samples,
    int numbuffers)
{
  GstElement *audioresample;
  GstBuffer *inbuffer, *outbuffer;
  GstCaps *caps;
  guint64 offset = 0;

  int i, j;
  gint16 *p;

  /* Stereo, 16-bit resampler converting inrate -> outrate. */
  audioresample = setup_audioresample (2, inrate, outrate, 16, FALSE);
  caps = gst_pad_get_negotiated_caps (mysrcpad);
  fail_unless (gst_caps_is_fixed (caps));

  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");

  for (j = 1; j <= numbuffers; ++j) {

    /* samples frames * 2 channels * 2 bytes per 16-bit sample;
     * consecutive buffers are contiguous in time and offset. */
    inbuffer = gst_buffer_new_and_alloc (samples * 4);
    GST_BUFFER_DURATION (inbuffer) = GST_FRAMES_TO_CLOCK_TIME (samples, inrate);
    GST_BUFFER_TIMESTAMP (inbuffer) = GST_BUFFER_DURATION (inbuffer) * (j - 1);
    GST_BUFFER_OFFSET (inbuffer) = offset;
    offset += samples;
    GST_BUFFER_OFFSET_END (inbuffer) = offset;

    gst_buffer_set_caps (inbuffer, caps);

    p = (gint16 *) GST_BUFFER_DATA (inbuffer);

    /* create a 16 bit signed ramp */
    for (i = 0; i < samples; ++i) {
      *p = -32767 + i * (65535 / samples);
      ++p;
      *p = -32767 + i * (65535 / samples);
      ++p;
    }

    /* pushing gives away my reference ... */
    fail_unless (gst_pad_push (mysrcpad, inbuffer) == GST_FLOW_OK);
    /* ... but it ends up being collected on the global buffer list */
    fail_unless_equals_int (g_list_length (buffers), j);
  }

  /* FIXME: we should make audioresample handle eos by flushing out the last
   * samples, which will give us one more, small, buffer */
  fail_if ((outbuffer = (GstBuffer *) buffers->data) == NULL);
  ASSERT_BUFFER_REFCOUNT (outbuffer, "outbuffer", 1);

  fail_unless_perfect_stream ();

  /* cleanup */
  gst_caps_unref (caps);
  cleanup_audioresample (audioresample);
}
Beispiel #8
0
/* Render the puzzle: copy each tile of @srcp into its permuted position in
 * @destp, draw the empty slot as a white rectangle, and copy any leftover
 * border area that doesn't divide evenly into tiles.
 * Fixes: the caps returned by gst_pad_get_negotiated_caps() were passed
 * straight into gst_caps_get_structure() and leaked on every first call. */
static void
draw_puzzle (GstVideofilter * videofilter, void *destp, void *srcp)
{
  GstPuzzle *puzzle;
  int width, height;
  guint i;
  GstVideoImage dest, src;

  puzzle = GST_PUZZLE (videofilter);
  if (!puzzle->format) {
    /* Lazily look up the video format from the negotiated sink caps.
     * Hold the caps in a local so the ref can be released (was leaked). */
    GstCaps *caps = gst_pad_get_negotiated_caps (videofilter->sinkpad);

    puzzle->format =
        gst_video_format_find_by_structure (gst_caps_get_structure (caps, 0));
    gst_caps_unref (caps);
  }
  width = gst_videofilter_get_input_width (videofilter);
  height = gst_videofilter_get_input_height (videofilter);
  gst_video_image_setup (&dest, puzzle->format, destp, width, height);
  gst_video_image_setup (&src, puzzle->format, srcp, width, height);
  /* use multiples of 4 here to get around drawing problems with YUV colorspaces */
  width = (width / puzzle->columns) & ~3;
  height = (height / puzzle->rows) & ~3;
  if (width == 0 || height == 0) {
    /* Frame too small to tile: just pass the image through unchanged. */
    gst_video_image_copy_area (&dest, 0, 0, &src, 0, 0,
        gst_videofilter_get_input_width (videofilter),
        gst_videofilter_get_input_height (videofilter));
    return;
  }
  /* Copy the right-hand border that doesn't fit into whole tiles. */
  if (width * puzzle->columns != gst_videofilter_get_input_width (videofilter)) {
    guint w =
        gst_videofilter_get_input_width (videofilter) - width * puzzle->columns;

    gst_video_image_copy_area (&dest, width * puzzle->columns, 0, &src,
        width * puzzle->columns, 0, w,
        gst_videofilter_get_input_height (videofilter));
  }
  /* Copy the bottom border that doesn't fit into whole tiles. */
  if (height * puzzle->rows != gst_videofilter_get_input_height (videofilter)) {
    guint h =
        gst_videofilter_get_input_height (videofilter) - height * puzzle->rows;

    gst_video_image_copy_area (&dest, 0, height * puzzle->rows, &src, 0,
        height * puzzle->rows, gst_videofilter_get_input_width (videofilter),
        h);
  }

  for (i = 0; i < puzzle->tiles; i++) {
    if (!puzzle->solved && i == puzzle->position) {
      /* The empty slot is drawn solid white while the puzzle is unsolved. */
      gst_video_image_draw_rectangle (&dest, width * (i % puzzle->columns),
          height * (i / puzzle->columns), width, height,
          &GST_VIDEO_COLOR_WHITE, TRUE);
    } else {
      /* Copy the source tile indicated by the permutation into slot i. */
      gst_video_image_copy_area (&dest, width * (i % puzzle->columns),
          height * (i / puzzle->columns), &src,
          width * (puzzle->permutation[i] % puzzle->columns),
          height * (puzzle->permutation[i] / puzzle->columns), width, height);
    }
  }
}
/*
 * metadata_write_probe:
 * @pad: sink pad of metadata muxer
 * @buffer: received buffer
 * @u_data: image bin object
 *
 * Buffer probe that sets Xmp.dc.type and Xmp.dc.format tags
 * to metadata muxer based on preceding element src pad caps.
 *
 * Returns: TRUE always
 */
static gboolean
metadata_write_probe (GstPad * pad, GstBuffer * buffer, gpointer u_data)
{
  /* Add XMP tags */
  GstCameraBinImage *img = NULL;
  GstTagSetter *setter = NULL;
  GstPad *srcpad = NULL;
  GstCaps *caps = NULL;
  GstStructure *st = NULL;

  img = GST_CAMERABIN_IMAGE (u_data);

  g_return_val_if_fail (img != NULL, TRUE);

  /* The formatter element must implement GstTagSetter to receive tags. */
  if (GST_IS_TAG_SETTER (img->formatter)) {
    setter = GST_TAG_SETTER (img->formatter);
  }

  if (!setter) {
    GST_WARNING_OBJECT (img, "setting tags failed");
    goto done;
  }

  /* Xmp.dc.type tag */
  gst_tag_setter_add_tags (setter, GST_TAG_MERGE_REPLACE,
      GST_TAG_CODEC, "Image", NULL);
  /* Xmp.dc.format tag */
  if (img->enc) {
    srcpad = gst_element_get_static_pad (img->enc, "src");
  }
  GST_LOG_OBJECT (img, "srcpad:%" GST_PTR_FORMAT, srcpad);
  if (srcpad) {
    caps = gst_pad_get_negotiated_caps (srcpad);
    GST_LOG_OBJECT (img, "caps:%" GST_PTR_FORMAT, caps);
    if (caps) {
      /* If there are many structures, we can't know which one to use */
      if (gst_caps_get_size (caps) != 1) {
        GST_WARNING_OBJECT (img, "can't decide structure for format tag");
        goto done;
      }
      /* Use the media type name (e.g. "image/jpeg") as the format tag. */
      st = gst_caps_get_structure (caps, 0);
      if (st) {
        GST_DEBUG_OBJECT (img, "Xmp.dc.format:%s", gst_structure_get_name (st));
        gst_tag_setter_add_tags (setter, GST_TAG_MERGE_REPLACE,
            GST_TAG_VIDEO_CODEC, gst_structure_get_name (st), NULL);
      }
    }
  }
done:
  /* Single cleanup path for all exits above. */
  if (caps)
    gst_caps_unref (caps);
  if (srcpad)
    gst_object_unref (srcpad);

  return TRUE;
}
Beispiel #10
0
/* Inspect the playbin's stream-info list, record whether audio/video are
 * present, and read the framerate from the first video pad's negotiated caps.
 *
 * Fixes two leaks in the old code: (1) the for-loop advanced 'streaminfo'
 * itself, so the g_list_foreach/g_list_free at the end always ran on NULL
 * and the copied, reffed list leaked; (2) the videopad ref obtained via
 * g_object_get() was never released. */
static void
update_stream_info (GthMediaViewerPage *self)
{
	GList  *streaminfo;
	GList  *scan;
	GstPad *videopad;

	streaminfo = NULL;
	videopad = NULL;

	g_object_get (self->priv->playbin, "stream-info", &streaminfo, NULL);
	/* Take our own refs so the list stays valid while we walk it. */
	streaminfo = g_list_copy (streaminfo);
	g_list_foreach (streaminfo, (GFunc) g_object_ref, NULL);

	/* Iterate with a separate pointer so the head survives for cleanup. */
	for (scan = streaminfo; scan; scan = scan->next) {
		GObject    *info;
		int         type;
		GParamSpec *pspec;
		GEnumValue *val;

		info = scan->data;
		if (info == NULL)
			continue;

		type = -1;

		/* Map the numeric "type" property back to its enum nickname. */
		g_object_get (info, "type", &type, NULL);
		pspec = g_object_class_find_property (G_OBJECT_GET_CLASS (info), "type");
		val = g_enum_get_value (G_PARAM_SPEC_ENUM (pspec)->enum_class, type);

		if (strcmp (val->value_nick, "audio") == 0) {
			self->priv->has_audio = TRUE;
		}
		else if (strcmp (val->value_nick, "video") == 0) {
			self->priv->has_video = TRUE;
			if (videopad == NULL)
				g_object_get (info, "object", &videopad, NULL);
		}
	}

	if (videopad != NULL) {
		GstCaps *caps;

		if ((caps = gst_pad_get_negotiated_caps (videopad)) != NULL) {
			GstStructure *structure;

			structure = gst_caps_get_structure (caps, 0);
			gst_structure_get_fraction (structure, "framerate", &self->priv->video_fps_n, &self->priv->video_fps_d);

			gst_caps_unref (caps);
		}
		gst_object_unref (videopad);	/* ref from g_object_get(); was leaked */
	}

	g_list_foreach (streaminfo, (GFunc) g_object_unref, NULL);
	g_list_free (streaminfo);
}
/* Verify that audioresample negotiates pass-through when the source can
 * already produce the downstream-required rate: after running the pipeline
 * to EOS, audiotestsrc's src pad must have negotiated rate=8000 directly. */
void test_preference_passthrough()
{
  GstStateChangeReturn ret;
  GstElement *pipeline, *src;
  GstStructure *s;
  GstMessage *msg;
  GstCaps *caps;
  GstPad *pad;
  GstBus *bus;
  GError *error = NULL;
  gint rate = 0;

  xmlfile = "test_preference_passthrough";
std_log(LOG_FILENAME_LINE, "Test Started test_preference_passthrough");
  /* Downstream caps filter forces 8000 Hz mono 16-bit. */
  pipeline = gst_parse_launch ("audiotestsrc num-buffers=1 name=src ! "
      "audioresample ! "
      "audio/x-raw-int,rate=8000,channels=1,width=16,depth=16,signed=(boolean)true,endianness=(int)BYTE_ORDER ! "
      "fakesink can-activate-pull=0 ", &error);
  fail_unless (pipeline != NULL, "Error parsing pipeline: %s",
      error ? error->message : "(invalid error)");

  ret = gst_element_set_state (pipeline, GST_STATE_PLAYING);
  fail_unless_equals_int (ret, GST_STATE_CHANGE_ASYNC);

  /* run until we receive EOS */
  bus = gst_element_get_bus (pipeline);
  fail_if (bus == NULL);
  msg = gst_bus_timed_pop_filtered (bus, -1, GST_MESSAGE_EOS);
  gst_message_unref (msg);
  gst_object_unref (bus);

  src = gst_bin_get_by_name (GST_BIN (pipeline), "src");
  fail_unless (src != NULL);
  pad = gst_element_get_static_pad (src, "src");
  fail_unless (pad != NULL);
  caps = gst_pad_get_negotiated_caps (pad);
  GST_LOG ("negotiated audiotestsrc caps: %" GST_PTR_FORMAT, caps);
  fail_unless (caps != NULL);
  s = gst_caps_get_structure (caps, 0);
  fail_unless (gst_structure_get_int (s, "rate", &rate));
  /* there's no need to resample, audiotestsrc supports any rate, so make
   * sure audioresample provided upstream with the right caps to negotiate
   * this correctly */
  fail_unless_equals_int (rate, 8000);
  gst_caps_unref (caps);
  gst_object_unref (pad);
  gst_object_unref (src);

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  
  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
/* Walk every element inside the playbin and check whether any demuxer or
 * decoder negotiated caps we cannot handle.  Returns NS_ERROR_FAILURE as
 * soon as one unsupported element is found, NS_OK otherwise. */
nsresult GStreamerReader::CheckSupportedFormats()
{
  bool done = false;
  bool unsupported = false;

  GstIterator *it = gst_bin_iterate_recurse(GST_BIN(mPlayBin));
  while (!done) {
    GstElement* element;
    GstIteratorResult res = gst_iterator_next(it, (void **)&element);
    switch(res) {
      case GST_ITERATOR_OK:
      {
        /* Classify the element by its factory klass string. */
        GstElementFactory* factory = gst_element_get_factory(element);
        if (factory) {
          const char* klass = gst_element_factory_get_klass(factory);
          GstPad* pad = gst_element_get_pad(element, "sink");
          if (pad) {
            GstCaps* caps = gst_pad_get_negotiated_caps(pad);

            if (caps) {
              /* check for demuxers but ignore elements like id3demux */
              if (strstr (klass, "Demuxer") && !strstr(klass, "Metadata"))
                unsupported = !GStreamerFormatHelper::Instance()->CanHandleContainerCaps(caps);
              else if (strstr (klass, "Decoder"))
                unsupported = !GStreamerFormatHelper::Instance()->CanHandleCodecCaps(caps);

              gst_caps_unref(caps);
            }
            gst_object_unref(pad);
          }
        }

        gst_object_unref(element);
        /* Stop early once something unsupported is detected. */
        done = unsupported;
        break;
      }
      case GST_ITERATOR_RESYNC:
        /* Bin changed under us: restart the scan from a clean slate. */
        unsupported = false;
        done = false;
        break;
      case GST_ITERATOR_ERROR:
        done = true;
        break;
      case GST_ITERATOR_DONE:
        done = true;
        break;
    }
  }

  return unsupported ? NS_ERROR_FAILURE : NS_OK;
}
Beispiel #13
0
/* notify::caps handler: resize the Pipeline's widget to the newly
 * negotiated video dimensions.
 * Fixes: gst_caps_to_string() returns a newly allocated string that the
 * old code leaked on every caps change; it is now freed after logging. */
void Pipeline::cb_video_size (GstPad* pad, GParamSpec* pspec, Pipeline* p)
{
    GstCaps* caps = gst_pad_get_negotiated_caps(pad);
    if (caps)
    {
        gchar* caps_str = gst_caps_to_string(caps);
        qDebug ("negotiated caps : %s", caps_str);
        g_free (caps_str);    /* was leaked */

        const GstStructure* str = gst_caps_get_structure (caps, 0);
        gint width = 0;
        gint height = 0;
        if (gst_structure_get_int (str, "width", &width)  &&
            gst_structure_get_int (str, "height", &height) )
        {
            p->resize(width, height);
        }
        gst_caps_unref(caps);
    }
}
void GStreamerReader::VideoPreroll()
{
  /* The first video buffer has reached the video sink. Get width and height */
  LOG(PR_LOG_DEBUG, ("Video preroll"));
  GstPad* sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
  /* NOTE(review): assumes the sink pad has negotiated caps by the time the
   * first buffer arrives; a NULL return here would crash below — confirm. */
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
  gst_video_format_parse_caps(caps, &mFormat, &mPicture.width, &mPicture.height);
  /* Read the framerate fraction directly from the caps structure. */
  GstStructure* structure = gst_caps_get_structure(caps, 0);
  gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);
  NS_ASSERTION(mPicture.width && mPicture.height, "invalid video resolution");
  mInfo.mDisplay = nsIntSize(mPicture.width, mPicture.height);
  mInfo.mHasVideo = true;
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}
Beispiel #15
0
/* Return TRUE when @pad currently has negotiated caps, FALSE otherwise
 * (including when @pad is NULL). */
static gboolean
gst_pad_is_negotiated (GstPad * pad)
{
  GstCaps *negotiated;
  gboolean result = FALSE;

  g_return_val_if_fail (pad != NULL, FALSE);

  /* A non-NULL return means negotiation happened; we only need the fact,
   * not the caps themselves, so drop the ref straight away. */
  negotiated = gst_pad_get_negotiated_caps (pad);
  if (negotiated != NULL) {
    gst_caps_unref (negotiated);
    result = TRUE;
  }

  return result;
}
void QGstreamerVideoWidgetControl::updateNativeVideoSize()
{
    if (m_videoSink) {
        //find video native size to update video widget size hint
        GstPad *pad = gst_element_get_static_pad(m_videoSink,"sink");
        GstCaps *caps = gst_pad_get_negotiated_caps(pad);

        if (caps) {
            m_widget->setNativeSize(QGstUtils::capsCorrectedResolution(caps));
            gst_caps_unref(caps);
        }
    } else {
        if (m_widget)
            m_widget->setNativeSize(QSize());
    }
}
Beispiel #17
0
/* Classify a subtitle stream, preferring the pad's caps (negotiated, else
 * allowed) and falling back to the codec tag string when no caps exist.
 * Fixes: the caps obtained from gst_pad_get_negotiated_caps() /
 * gst_pad_get_allowed_caps() were never unreffed and leaked per call. */
subtype_t getSubtitleType(GstPad* pad, gchar *g_codec=NULL)
{
	subtype_t type = stUnknown;
	GstCaps* caps = gst_pad_get_negotiated_caps(pad);
	if (!caps && !g_codec)
	{
		/* Not negotiated yet: fall back to what the pad could accept. */
		caps = gst_pad_get_allowed_caps(pad);
	}

	if ( caps )
	{
		GstStructure* str = gst_caps_get_structure(caps, 0);
		const gchar *g_type = gst_structure_get_name(str);
		eDebug("getSubtitleType::subtitle probe caps type=%s", g_type);

		if ( !strcmp(g_type, "video/x-dvd-subpicture") )
			type = stVOB;
		else if ( !strcmp(g_type, "text/x-pango-markup") )
			type = stSRT;
		else if ( !strcmp(g_type, "text/plain") )
			type = stPlainText;
		else if ( !strcmp(g_type, "subpicture/x-pgs") )
			type = stPGS;
		else
			eDebug("getSubtitleType::unsupported subtitle caps %s (%s)", g_type, g_codec);

		gst_caps_unref(caps);	/* was leaked */
	}
	else if ( g_codec )
	{
		eDebug("getSubtitleType::subtitle probe codec tag=%s", g_codec);
		if ( !strcmp(g_codec, "VOB") )
			type = stVOB;
		else if ( !strcmp(g_codec, "SubStation Alpha") || !strcmp(g_codec, "SSA") )
			type = stSSA;
		else if ( !strcmp(g_codec, "ASS") )
			type = stASS;
		else if ( !strcmp(g_codec, "SRT") )
			type = stSRT;
		else if ( !strcmp(g_codec, "UTF-8 plain text") )
			type = stPlainText;
		else
			eDebug("getSubtitleType::unsupported subtitle codec %s", g_codec);
	}
	else
		eDebug("getSubtitleType::unidentifiable subtitle stream!");

	return type;
}
//-------------------------------------------------------------------------------
// Name: GSTGetPadHeight
// Arguments: GstPad
// Description: Returns the video's pad height (0 if caps are not negotiated).
// usage: int width = object->GSTGetPadheight (GstPad *)pPad);
// Note: the caps reference returned by gst_pad_get_negotiated_caps() was
// previously leaked; it is now released after reading the height.
//-------------------------------------------------------------------------------
gint GSTVideoControl::GSTGetPadHeight (GstPad* pVideoPad)
{
	gint iHeight = 0;
	GstCaps* pCaps = NULL;
	GstStructure* pStructure = NULL;

	pCaps = gst_pad_get_negotiated_caps (pVideoPad);
	if (pCaps)
	{
		pStructure = gst_caps_get_structure (pCaps, 0);
		gst_structure_get_int (pStructure, "height", &iHeight);
		gst_caps_unref (pCaps);	/* was leaked */
	}
	else
		g_print ("gst_pad_height() - Could not get caps for the pad!\n");

	return iHeight;
}
//-------------------------------------------------------------------------------
// Name: GSTGetPadWidth
// Arguments: GstPad
// Description: Returns the video's pad width (0 if caps are not negotiated).
// usage: int width = object->GSTGetPadWidth (GstPad *)pPad);
// Note: the caps reference returned by gst_pad_get_negotiated_caps() was
// previously leaked; it is now released after reading the width.
//-------------------------------------------------------------------------------
gint GSTVideoControl::GSTGetPadWidth (GstPad* pVideoPad)
{
	gint iWidth = 0;
	GstCaps* pCaps = NULL;
	GstStructure* pStructure = NULL;

	pCaps = gst_pad_get_negotiated_caps (pVideoPad);
	if (pCaps)
	{
		pStructure = gst_caps_get_structure (pCaps, 0);
		gst_structure_get_int (pStructure, "width", &iWidth);
		gst_caps_unref (pCaps);	/* was leaked */
	}
	else
		g_print ("gst_pad_width() - Could not get caps for the pad!\n");

	return iWidth;
}
void GStreamerReader::AudioPreroll()
{
  /* The first audio buffer has reached the audio sink. Get rate and channels */
  LOG(PR_LOG_DEBUG, ("Audio preroll"));
  GstPad* sinkpad = gst_element_get_pad(GST_ELEMENT(mAudioAppSink), "sink");
  /* NOTE(review): assumes the sink pad has negotiated caps by the time the
   * first buffer arrives; NULL caps here would crash below — confirm. */
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
  GstStructure* s = gst_caps_get_structure(caps, 0);
  /* Zero first so a failed lookup is detectable by the assertions below. */
  mInfo.mAudioRate = mInfo.mAudioChannels = 0;
  gst_structure_get_int(s, "rate", (gint*) &mInfo.mAudioRate);
  gst_structure_get_int(s, "channels", (gint*) &mInfo.mAudioChannels);
  NS_ASSERTION(mInfo.mAudioRate != 0, ("audio rate is zero"));
  NS_ASSERTION(mInfo.mAudioChannels != 0, ("audio channels is zero"));
  NS_ASSERTION(mInfo.mAudioChannels > 0 && mInfo.mAudioChannels <= MAX_CHANNELS,
      "invalid audio channels number");
  mInfo.mHasAudio = true;
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}
/**
 * overrides the default buffer allocation for output port to allow
 * pad_alloc'ing from the srcpad
 */
static GstBuffer *
buffer_alloc (GOmxPort *port, gint len)
{
    GstOmxBaseFilter2 *self = port->core->object;
    GstBuffer *buf;
    GstFlowReturn ret;
	int i;

	/* Map the OMX port back to its srcpad index; unknown ports get nothing. */
	for (i = 0; i < NUM_OUTPUTS; i++)
		if (port == self->out_port[i]) break;
	if (i >= NUM_OUTPUTS) return NULL;

#if 1
    /** @todo remove this check */
    if (G_LIKELY (self->in_port->enabled))
    {
        GstCaps *caps = NULL;

        caps = gst_pad_get_negotiated_caps (self->srcpad[i]);

        if (!caps)
        {
            /** @todo We shouldn't be doing this. */
            /* No negotiated caps yet: force a settings-changed notification
             * so downstream caps get configured before allocating. */
            GOmxCore *gomx = self->gomx;
            GST_WARNING_OBJECT (self, "faking settings changed notification");
            if (gomx->settings_changed_cb)
                gomx->settings_changed_cb (gomx);
        }
        else
        {
            GST_LOG_OBJECT (self, "caps already fixed: %" GST_PTR_FORMAT, caps);
            gst_caps_unref (caps);
        }
    }
#endif

    /* Delegate the actual allocation to downstream via pad_alloc. */
    ret = gst_pad_alloc_buffer_and_set_caps (
            self->srcpad[i], GST_BUFFER_OFFSET_NONE,
            len, GST_PAD_CAPS (self->srcpad[i]), &buf);

    if (ret == GST_FLOW_OK) return buf;

    return NULL;
}
/* Verify that audioresample survives a downstream sink that can only
 * allocate/accept 48000 Hz buffers: pushes at 48000 (accepted), 40000
 * (alloc not passed on) and 50000 (forces renegotiation). */
void test_live_switch()
{
  GstElement *audioresample;
  GstEvent *newseg;
  GstCaps *caps;
  xmlfile = "test_live_switch";
std_log(LOG_FILENAME_LINE, "Test Started test_live_switch");
  audioresample = setup_audioresample (4, 48000, 48000, 16, FALSE);

  /* Let the sinkpad act like something that can only handle things of
   * rate 48000- and can only allocate buffers for that rate, but if someone
   * tries to get a buffer with a rate higher then 48000 tries to renegotiate
   * */
  gst_pad_set_bufferalloc_function (mysinkpad, live_switch_alloc_only_48000);
  gst_pad_set_getcaps_function (mysinkpad, live_switch_get_sink_caps);

  gst_pad_use_fixed_caps (mysrcpad);

  caps = gst_pad_get_negotiated_caps (mysrcpad);
  fail_unless (gst_caps_is_fixed (caps));

  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");

  /* Open-ended TIME segment starting at 0. */
  newseg = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, -1, 0);
  fail_unless (gst_pad_push_event (mysrcpad, newseg) != FALSE);

  /* downstream can provide the requested rate, a buffer alloc will be passed
   * on */
  live_switch_push (48000, caps);

  /* Downstream can never accept this rate, buffer alloc isn't passed on */
  live_switch_push (40000, caps);

  /* Downstream can provide the requested rate but will re-negotiate */
  live_switch_push (50000, caps);

  cleanup_audioresample (audioresample);
  gst_caps_unref (caps);
  
  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
Beispiel #23
0
/* First video buffer reached the sink: read resolution, pixel aspect ratio
 * and framerate from the negotiated caps and publish them, or stop playback
 * if the resulting video region is invalid. */
void GStreamerReader::VideoPreroll()
{
  /* The first video buffer has reached the video sink. Get width and height */
  LOG(PR_LOG_DEBUG, "Video preroll");
  GstPad* sinkpad = gst_element_get_static_pad(GST_ELEMENT(mVideoAppSink), "sink");
  int PARNumerator, PARDenominator;
#if GST_VERSION_MAJOR >= 1
  /* GStreamer 1.x: use GstVideoInfo to parse the caps. */
  GstCaps* caps = gst_pad_get_current_caps(sinkpad);
  memset (&mVideoInfo, 0, sizeof (mVideoInfo));
  gst_video_info_from_caps(&mVideoInfo, caps);
  mFormat = mVideoInfo.finfo->format;
  mPicture.width = mVideoInfo.width;
  mPicture.height = mVideoInfo.height;
  PARNumerator = GST_VIDEO_INFO_PAR_N(&mVideoInfo);
  PARDenominator = GST_VIDEO_INFO_PAR_D(&mVideoInfo);
#else
  /* GStreamer 0.10: parse format/size and PAR directly from the caps;
   * default to square pixels when no PAR is present. */
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
  gst_video_format_parse_caps(caps, &mFormat, &mPicture.width, &mPicture.height);
  if (!gst_video_parse_caps_pixel_aspect_ratio(caps, &PARNumerator, &PARDenominator)) {
    PARNumerator = 1;
    PARDenominator = 1;
  }
#endif
  NS_ASSERTION(mPicture.width && mPicture.height, "invalid video resolution");

  // Calculate display size according to pixel aspect ratio.
  nsIntRect pictureRect(0, 0, mPicture.width, mPicture.height);
  nsIntSize frameSize = nsIntSize(mPicture.width, mPicture.height);
  nsIntSize displaySize = nsIntSize(mPicture.width, mPicture.height);
  ScaleDisplayByAspectRatio(displaySize, float(PARNumerator) / float(PARDenominator));

  // If video frame size is overflow, stop playing.
  if (IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GstStructure* structure = gst_caps_get_structure(caps, 0);
    gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);
    mInfo.mVideo.mDisplay = ThebesIntSize(displaySize.ToIntSize());
    mInfo.mVideo.mHasVideo = true;
  } else {
    LOG(PR_LOG_DEBUG, "invalid video region");
    Eos();
  }
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}
/* Read the negotiated resolution and pixel-aspect-ratio from the identity
 * element's src pad and mirror them into the session's tag map, emitting
 * tagsChanged() only when something actually changed. */
void QGstreamerPlayerSession::updateVideoResolutionTag()
{
    QSize size;
    QSize aspectRatio;

    GstPad *pad = gst_element_get_static_pad(m_videoIdentity, "src");
    /* NOTE(review): pad is assumed non-NULL here ("src" is a static pad of
     * the identity element); gst_pad_get_negotiated_caps(NULL) would warn. */
    GstCaps *caps = gst_pad_get_negotiated_caps(pad);

    if (caps) {
        const GstStructure *structure = gst_caps_get_structure(caps, 0);
        gst_structure_get_int(structure, "width", &size.rwidth());
        gst_structure_get_int(structure, "height", &size.rheight());

        /* Only record a PAR when the size is valid and the fraction exists. */
        gint aspectNum = 0;
        gint aspectDenum = 0;
        if (!size.isEmpty() && gst_structure_get_fraction(
                    structure, "pixel-aspect-ratio", &aspectNum, &aspectDenum)) {
            if (aspectDenum > 0)
                aspectRatio = QSize(aspectNum, aspectDenum);
        }
        gst_caps_unref(caps);
    }

    gst_object_unref(GST_OBJECT(pad));

    QSize currentSize = m_tags.value("resolution").toSize();
    QSize currentAspectRatio = m_tags.value("pixel-aspect-ratio").toSize();

    /* Update the tag map and notify listeners only on a real change. */
    if (currentSize != size || currentAspectRatio != aspectRatio) {
        if (aspectRatio.isEmpty())
            m_tags.remove("pixel-aspect-ratio");

        if (size.isEmpty()) {
            m_tags.remove("resolution");
        } else {
            m_tags.insert("resolution", QVariant(size));
            if (!aspectRatio.isEmpty())
                m_tags.insert("pixel-aspect-ratio", QVariant(aspectRatio));
        }

        emit tagsChanged();
    }
}
Beispiel #25
0
void QGstreamerVideoWindow::updateNativeVideoSize()
{
    const QSize oldSize = m_nativeSize;
    m_nativeSize = QSize();

    if (m_videoSink) {
        //find video native size to update video widget size hint
        GstPad *pad = gst_element_get_static_pad(m_videoSink,"sink");
        GstCaps *caps = gst_pad_get_negotiated_caps(pad);

        if (caps) {
            m_nativeSize = QGstUtils::capsCorrectedResolution(caps);
            gst_caps_unref(caps);
        }
    }

    if (m_nativeSize != oldSize)
        emit nativeSizeChanged();
}
/* First audio buffer reached the sink: read sample rate and channel count
 * from the negotiated caps into mInfo.mAudio. */
void GStreamerReader::AudioPreroll()
{
  /* The first audio buffer has reached the audio sink. Get rate and channels */
  LOG(LogLevel::Debug, "Audio preroll");
  GstPad* sinkpad = gst_element_get_static_pad(GST_ELEMENT(mAudioAppSink), "sink");
#if GST_VERSION_MAJOR >= 1
  GstCaps *caps = gst_pad_get_current_caps(sinkpad);
#else
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
#endif
  /* NOTE(review): caps are assumed non-NULL once the first buffer arrives;
   * gst_caps_get_structure(NULL, 0) would crash — confirm. */
  GstStructure* s = gst_caps_get_structure(caps, 0);
  /* Zero first so a failed lookup is detectable by the assertions below. */
  mInfo.mAudio.mRate = mInfo.mAudio.mChannels = 0;
  gst_structure_get_int(s, "rate", (gint*) &mInfo.mAudio.mRate);
  gst_structure_get_int(s, "channels", (gint*) &mInfo.mAudio.mChannels);
  NS_ASSERTION(mInfo.mAudio.mRate != 0, ("audio rate is zero"));
  NS_ASSERTION(mInfo.mAudio.mChannels != 0, ("audio channels is zero"));
  NS_ASSERTION(mInfo.mAudio.mChannels > 0 && mInfo.mAudio.mChannels <= MAX_CHANNELS,
      "invalid audio channels number");
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}
/* notify::caps handler: extract width/height, framerate and pixel aspect
 * ratio from the pad's negotiated caps and notify the player engine.
 * Fixes: the early 'return' when framerate/width/height were missing leaked
 * the caps reference; all exits now funnel through a single unref. */
static void
cb_caps_set (GObject *obj, GParamSpec *pspec, BansheePlayer *p)
{
    GstStructure * s = NULL;
    GstCaps * caps = gst_pad_get_negotiated_caps (GST_PAD (obj));

    if (G_UNLIKELY (!caps)) {
        return;
    }

    /* Get video decoder caps */
    s = gst_caps_get_structure (caps, 0);
    if (s) {
        const GValue *par;

        /* We need at least width/height and framerate */
        if (!(gst_structure_get_fraction (s, "framerate", &p->fps_n, &p->fps_d) &&
            gst_structure_get_int (s, "width", &p->width) && gst_structure_get_int (s, "height", &p->height))) {
            goto out;   /* was a bare return, leaking caps */
        }

        /* Get the PAR if available */
        par = gst_structure_get_value (s, "pixel-aspect-ratio");
        if (par) {
            p->par_n = gst_value_get_fraction_numerator (par);
            p->par_d = gst_value_get_fraction_denominator (par);
        }
        else { /* Square pixels */
            p->par_n = 1;
            p->par_d = 1;
        }

        /* Notify PlayerEngine if a callback was set */
        if (p->video_geometry_notify_cb != NULL) {
            p->video_geometry_notify_cb (p, p->width, p->height, p->fps_n, p->fps_d, p->par_n, p->par_d);
        }
    }

out:
    gst_caps_unref (caps);
}
Beispiel #28
0
/* Allocate a downstream buffer of @size bytes whose data pointer is rounded
 * up to @alignment, using the negotiated caps of the source pad.  Returns
 * NULL (after posting an element error) when caps are missing or the
 * downstream allocation fails. */
static GstBuffer *
gst_ks_video_src_alloc_buffer (guint size, guint alignment, gpointer user_data)
{
  GstKsVideoSrc *self = GST_KS_VIDEO_SRC (user_data);
  GstBuffer *buf;
  GstCaps *caps;
  GstFlowReturn flow_ret;

  caps = gst_pad_get_negotiated_caps (GST_BASE_SRC_PAD (self));
  if (caps == NULL)
    goto error_no_caps;
  /* Over-allocate by alignment-1 so the data pointer can be rounded up. */
  flow_ret = gst_pad_alloc_buffer (GST_BASE_SRC_PAD (self), 0,
      size + (alignment - 1), caps, &buf);
  gst_caps_unref (caps);
  if (G_UNLIKELY (flow_ret != GST_FLOW_OK))
    goto error_alloc_buffer;

  /* Round the data pointer up to the next multiple of alignment and report
   * only the originally requested size. */
  GST_BUFFER_DATA (buf) =
      GSIZE_TO_POINTER ((GPOINTER_TO_SIZE (GST_BUFFER_DATA (buf)) + (alignment -
              1)) & ~(alignment - 1));
  GST_BUFFER_SIZE (buf) = size;

  return buf;

error_no_caps:
  {
    GST_ELEMENT_ERROR (self, CORE, NEGOTIATION,
        ("not negotiated"), ("maybe setcaps failed?"));

    return NULL;
  }
error_alloc_buffer:
  {
    GST_ELEMENT_ERROR (self, CORE, PAD, ("alloc_buffer failed"), (NULL));

    return NULL;
  }
}
Beispiel #29
0
void QGstreamerGLTextureRenderer::updateNativeVideoSize()
{
    const QSize oldSize = m_nativeSize;

    if (m_videoSink) {
        //find video native size to update video widget size hint
        GstPad *pad = gst_element_get_static_pad(m_videoSink,"sink");
        GstCaps *caps = gst_pad_get_negotiated_caps(pad);

        if (caps) {
            m_nativeSize = QGstUtils::capsCorrectedResolution(caps);
            gst_caps_unref(caps);
        }
    } else {
        m_nativeSize = QSize();
    }
#ifdef GL_TEXTURE_SINK_DEBUG
    qDebug() << Q_FUNC_INFO << oldSize << m_nativeSize << m_videoSink;
#endif

    if (m_nativeSize != oldSize)
        emit nativeSizeChanged();
}
Beispiel #30
0
/* First video buffer reached the sink: read format, resolution and framerate
 * from the negotiated caps and publish them into mInfo.mVideo. */
void GStreamerReader::VideoPreroll()
{
  /* The first video buffer has reached the video sink. Get width and height */
  LOG(PR_LOG_DEBUG, ("Video preroll"));
  GstPad* sinkpad = gst_element_get_static_pad(GST_ELEMENT(mVideoAppSink), "sink");
#if GST_VERSION_MAJOR >= 1
  /* GStreamer 1.x: use GstVideoInfo to parse the caps. */
  GstCaps* caps = gst_pad_get_current_caps(sinkpad);
  memset (&mVideoInfo, 0, sizeof (mVideoInfo));
  gst_video_info_from_caps(&mVideoInfo, caps);
  mFormat = mVideoInfo.finfo->format;
  mPicture.width = mVideoInfo.width;
  mPicture.height = mVideoInfo.height;
#else
  /* GStreamer 0.10: parse format and size directly from the caps. */
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
  gst_video_format_parse_caps(caps, &mFormat, &mPicture.width, &mPicture.height);
#endif
  /* Framerate fraction lives in the first caps structure. */
  GstStructure* structure = gst_caps_get_structure(caps, 0);
  gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);
  NS_ASSERTION(mPicture.width && mPicture.height, "invalid video resolution");
  mInfo.mVideo.mDisplay = ThebesIntSize(mPicture.Size());
  mInfo.mVideo.mHasVideo = true;
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}