static GstSample *
totem_gst_tag_list_get_cover_real (GstTagList *tag_list)
{
  GstSample *cover_sample = NULL;
  guint i;

  for (i = 0; ; i++) {
    GstSample *sample;
    GstCaps *caps;
    const GstStructure *caps_struct;
    int type = GST_TAG_IMAGE_TYPE_UNDEFINED; /* default in case image-type is absent */

    if (!gst_tag_list_get_sample_index (tag_list, GST_TAG_IMAGE, i, &sample))
      break;

    caps = gst_sample_get_caps (sample);
    caps_struct = gst_caps_get_structure (caps, 0);
    gst_structure_get_enum (caps_struct,
			    "image-type",
			    GST_TYPE_TAG_IMAGE_TYPE,
			    &type);
    if (type == GST_TAG_IMAGE_TYPE_UNDEFINED) {
      if (cover_sample == NULL) {
	/* take a ref here since we will continue and unref below */
	cover_sample = gst_sample_ref (sample);
      }
    } else if (type == GST_TAG_IMAGE_TYPE_FRONT_COVER) {
      /* drop the ref we took on an undefined image; the front cover wins */
      if (cover_sample != NULL)
	gst_sample_unref (cover_sample);
      cover_sample = sample;
      break;
    }
    gst_sample_unref (sample);
  }

  return cover_sample;
}
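
A minimal usage sketch for the helper above, assuming the tag list comes from elsewhere (in Totem it is read from a GST_MESSAGE_TAG bus message); show_cover is a hypothetical caller, and the returned sample is owned by the caller and must be unreffed:

static void
show_cover (GstTagList *tags)
{
  GstSample *cover = totem_gst_tag_list_get_cover_real (tags);

  if (cover != NULL) {
    GstBuffer *image = gst_sample_get_buffer (cover); /* borrowed from the sample */

    /* ... decode `image` into a pixbuf or texture here ... */
    gst_sample_unref (cover);
  }
}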
Example #2
// GRefPtr<GstSample> support: take a reference when a non-null sample is retained.
template<> GstSample* refGPtr<GstSample>(GstSample* ptr)
{
    if (ptr)
        gst_sample_ref(ptr);

    return ptr;
}
void PlaybackPipeline::enqueueSample(PassRefPtr<MediaSample> prsample)
{
    RefPtr<MediaSample> rsample = prsample;
    AtomicString trackId = rsample->trackID();

    TRACE_MEDIA_MESSAGE("enqueueing sample trackId=%s PTS=%f presentationSize=%.0fx%.0f at %" GST_TIME_FORMAT, trackId.string().utf8().data(), rsample->presentationTime().toFloat(), rsample->presentationSize().width(), rsample->presentationSize().height(), GST_TIME_ARGS(floatToGstClockTime(rsample->presentationTime().toDouble())));

    ASSERT(WTF::isMainThread());

    GST_OBJECT_LOCK(m_webKitMediaSrc.get());
    Stream* stream = getStreamByTrackId(m_webKitMediaSrc.get(), trackId);

    if (!stream) {
        WARN_MEDIA_MESSAGE("No stream!");
        GST_OBJECT_UNLOCK(m_webKitMediaSrc.get());
        return;
    }

    GstElement* appsrc = stream->appsrc;
    GST_OBJECT_UNLOCK(m_webKitMediaSrc.get());

    GStreamerMediaSample* sample = static_cast<GStreamerMediaSample*>(rsample.get());
    if (sample->sample() && gst_sample_get_buffer(sample->sample())) {
        GstSample* gstsample = gst_sample_ref(sample->sample());
        GST_BUFFER_FLAG_UNSET(gst_sample_get_buffer(gstsample), GST_BUFFER_FLAG_DECODE_ONLY);
        push_sample(GST_APP_SRC(appsrc), gstsample);
        // gst_app_src_push_sample() uses transfer-none for gstsample
        gst_sample_unref(gstsample);
    }
}
void MediaPlayerPrivateGStreamerBase::triggerRepaint(GstSample* sample)
{
    g_return_if_fail(GST_IS_SAMPLE(sample));

    {
        GMutexLocker<GMutex> lock(m_sampleMutex);
        if (m_sample)
            gst_sample_unref(m_sample);
        m_sample = gst_sample_ref(sample);
    }

#if USE(TEXTURE_MAPPER_GL) && !USE(COORDINATED_GRAPHICS)
    if (supportsAcceleratedRendering() && m_player->mediaPlayerClient()->mediaPlayerRenderingCanBeAccelerated(m_player) && client()) {
        client()->setPlatformLayerNeedsDisplay();
        return;
    }
#endif

    m_player->repaint();
}
Example #5
GstPlayerMediaInfo *
gst_player_media_info_copy (GstPlayerMediaInfo * ref)
{
  GList *l;
  GstPlayerMediaInfo *info;

  if (!ref)
    return NULL;

  info = gst_player_media_info_new (ref->uri);
  info->duration = ref->duration;
  info->seekable = ref->seekable;
  if (ref->tags)
    info->tags = gst_tag_list_ref (ref->tags);
  if (ref->title)
    info->title = g_strdup (ref->title);
  if (ref->container)
    info->container = g_strdup (ref->container);
  if (ref->image_sample)
    info->image_sample = gst_sample_ref (ref->image_sample);

  for (l = ref->stream_list; l != NULL; l = l->next) {
    GstPlayerStreamInfo *s;

    s = gst_player_stream_info_copy ((GstPlayerStreamInfo *) l->data);
    info->stream_list = g_list_append (info->stream_list, s);

    if (GST_IS_PLAYER_AUDIO_INFO (s))
      info->audio_stream_list = g_list_append (info->audio_stream_list, s);
    else if (GST_IS_PLAYER_VIDEO_INFO (s))
      info->video_stream_list = g_list_append (info->video_stream_list, s);
    else
      info->subtitle_stream_list =
          g_list_append (info->subtitle_stream_list, s);
  }

  return info;
}
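
A short, hedged sketch of how the copy above might be consumed: GstPlayerMediaInfo is a GObject, so the snapshot is released with g_object_unref(); print_info_snapshot is a hypothetical caller, and gst_player_get_media_info() returns its own full reference that must also be dropped:

static void
print_info_snapshot (GstPlayer *player)
{
  GstPlayerMediaInfo *info = gst_player_get_media_info (player); /* transfer-full */
  GstPlayerMediaInfo *snapshot = gst_player_media_info_copy (info);

  if (snapshot != NULL) {
    g_print ("uri: %s seekable: %d\n",
        gst_player_media_info_get_uri (snapshot),
        gst_player_media_info_is_seekable (snapshot));
    g_object_unref (snapshot);
  }
  if (info != NULL)
    g_object_unref (info);
}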
void PlaybackPipeline::flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample> > samples)
{
    ASSERT(WTF::isMainThread());

    if (samples.size() == 0) {
        LOG_MEDIA_MESSAGE("No samples, trackId unknown");
        return;
    }

    AtomicString trackId = samples[0]->trackID();
    LOG_MEDIA_MESSAGE("flushAndEnqueueNonDisplayingSamples: trackId=%s PTS[0]=%f ... PTS[n]=%f", trackId.string().utf8().data(), samples[0]->presentationTime().toFloat(), samples[samples.size()-1]->presentationTime().toFloat());

    GST_DEBUG_OBJECT(m_webKitMediaSrc.get(), "Flushing and re-enqueueing %zu samples for stream %s", samples.size(), trackId.string().utf8().data());

    GST_OBJECT_LOCK(m_webKitMediaSrc.get());
    Stream* stream = getStreamByTrackId(m_webKitMediaSrc.get(), trackId);

    if (!stream) {
        GST_OBJECT_UNLOCK(m_webKitMediaSrc.get());
        return;
    }

    GstElement* appsrc = stream->appsrc;
    GST_OBJECT_UNLOCK(m_webKitMediaSrc.get());

    // Actually no need to flush. The seek preparations have done it for us.

    for (Vector<RefPtr<MediaSample> >::iterator it = samples.begin(); it != samples.end(); ++it) {
        GStreamerMediaSample* sample = static_cast<GStreamerMediaSample*>(it->get());
        if (sample->sample() && gst_sample_get_buffer(sample->sample())) {
            GstSample* gstsample = gst_sample_ref(sample->sample());
            GST_BUFFER_FLAG_SET(gst_sample_get_buffer(gstsample), GST_BUFFER_FLAG_DECODE_ONLY);
            push_sample(GST_APP_SRC(appsrc), gstsample);
            // gst_app_src_push_sample() uses transfer-none for gstsample
            gst_sample_unref(gstsample);
        }
    }
}
Example #7
/**
 * gst_video_convert_sample_async:
 * @sample: a #GstSample
 * @to_caps: the #GstCaps to convert to
 * @timeout: the maximum amount of time allowed for the processing.
 * @callback: #GstVideoConvertSampleCallback that will be called after conversion.
 * @user_data: extra data that will be passed to the @callback
 * @destroy_notify: #GDestroyNotify to be called after @user_data is not needed anymore
 *
 * Converts a raw video buffer into the specified output caps.
 *
 * The output caps can be any raw video format or any image format (jpeg, png, ...).
 *
 * The width, height and pixel-aspect-ratio can also be specified in the output caps.
 *
 * @callback will be called after conversion, when an error occurs, or if conversion
 * doesn't finish within @timeout. @callback will always be called from the thread default
 * #GMainContext, see g_main_context_get_thread_default(). If GLib before 2.22 is used,
 * this will always be the global default main context.
 *
 * @destroy_notify will be called after the callback was called and @user_data is not needed
 * anymore.
 */
void
gst_video_convert_sample_async (GstSample * sample,
    const GstCaps * to_caps, GstClockTime timeout,
    GstVideoConvertSampleCallback callback, gpointer user_data,
    GDestroyNotify destroy_notify)
{
  GMainContext *context = NULL;
  GError *error = NULL;
  GstBus *bus;
  GstBuffer *buf;
  GstCaps *from_caps, *to_caps_copy = NULL;
  GstElement *pipeline, *src, *sink;
  guint i, n;
  GSource *source;
  GstVideoConvertSampleContext *ctx;

  g_return_if_fail (sample != NULL);
  buf = gst_sample_get_buffer (sample);
  g_return_if_fail (buf != NULL);

  g_return_if_fail (to_caps != NULL);

  from_caps = gst_sample_get_caps (sample);
  g_return_if_fail (from_caps != NULL);
  g_return_if_fail (callback != NULL);

  context = g_main_context_get_thread_default ();

  if (!context)
    context = g_main_context_default ();

  to_caps_copy = gst_caps_new_empty ();
  n = gst_caps_get_size (to_caps);
  for (i = 0; i < n; i++) {
    GstStructure *s = gst_caps_get_structure (to_caps, i);

    s = gst_structure_copy (s);
    gst_structure_remove_field (s, "framerate");
    gst_caps_append_structure (to_caps_copy, s);
  }

  pipeline =
      build_convert_frame_pipeline (&src, &sink, from_caps,
      gst_buffer_get_video_crop_meta (buf), to_caps_copy, &error);
  if (!pipeline)
    goto no_pipeline;

  bus = gst_element_get_bus (pipeline);

  ctx = g_slice_new0 (GstVideoConvertSampleContext);
  g_mutex_init (&ctx->mutex);
  ctx->sample = gst_sample_ref (sample);
  ctx->callback = callback;
  ctx->user_data = user_data;
  ctx->destroy_notify = destroy_notify;
  ctx->context = g_main_context_ref (context);
  ctx->finished = FALSE;
  ctx->pipeline = pipeline;

  if (timeout != GST_CLOCK_TIME_NONE) {
    ctx->timeout_source = g_timeout_source_new (timeout / GST_MSECOND);
    g_source_set_callback (ctx->timeout_source,
        (GSourceFunc) convert_frame_timeout_callback, ctx, NULL);
    g_source_attach (ctx->timeout_source, context);
  }

  g_signal_connect (src, "need-data",
      G_CALLBACK (convert_frame_need_data_callback), ctx);
  g_signal_connect (sink, "new-preroll",
      G_CALLBACK (convert_frame_new_preroll_callback), ctx);

  source = gst_bus_create_watch (bus);
  g_source_set_callback (source, (GSourceFunc) convert_frame_bus_callback,
      ctx, NULL);
  g_source_attach (source, context);
  g_source_unref (source);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  gst_object_unref (bus);
  gst_caps_unref (to_caps_copy);

  return;
  /* ERRORS */
no_pipeline:
  {
    GstVideoConvertSampleCallbackContext *ctx;
    GSource *source;

    gst_caps_unref (to_caps_copy);

    ctx = g_slice_new0 (GstVideoConvertSampleCallbackContext);
    ctx->callback = callback;
    ctx->user_data = user_data;
    ctx->destroy_notify = destroy_notify;
    ctx->sample = NULL;
    ctx->error = error;

    source = g_timeout_source_new (0);
    g_source_set_callback (source,
        (GSourceFunc) convert_frame_dispatch_callback, ctx,
        (GDestroyNotify) gst_video_convert_frame_callback_context_free);
    g_source_attach (source, context);
    g_source_unref (source);
  }
}
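
A hedged usage sketch for the function above: on_converted and convert_to_png are illustrative names, the five-second timeout is arbitrary, and the assumption that the callback takes ownership of the sample and the error follows from the dispatch path above (the callback context does not unref them itself):

static void
on_converted (GstSample *sample, GError *error, gpointer user_data)
{
  if (error != NULL) {
    g_warning ("conversion failed: %s", error->message);
    g_error_free (error);
    return;
  }
  /* ... use gst_sample_get_buffer (sample), e.g. write it to disk ... */
  gst_sample_unref (sample);
}

static void
convert_to_png (GstSample *sample)
{
  GstCaps *png_caps = gst_caps_new_empty_simple ("image/png");

  /* the converter keeps its own refs on the sample and caps, so the
   * caps can be dropped as soon as the call returns */
  gst_video_convert_sample_async (sample, png_caps, 5 * GST_SECOND,
      on_converted, NULL, NULL);
  gst_caps_unref (png_caps);
}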