static GstBufferPool *
gst_msdkvpp_create_buffer_pool (GstMsdkVPP * thiz, GstPadDirection direction,
    GstCaps * caps, guint min_num_buffers)
{
  GstBufferPool *pool = NULL;
  GstStructure *config;
  GstAllocator *allocator = NULL;
  GstVideoInfo info;
  GstVideoInfo *pool_info = NULL;
  GstVideoAlignment align;
  GstAllocationParams params = { 0, 31, 0, 0, };
  mfxFrameAllocResponse *alloc_resp = NULL;
  gboolean use_dmabuf = FALSE;

  if (direction == GST_PAD_SINK) {
    alloc_resp = &thiz->in_alloc_resp;
    pool_info = &thiz->sinkpad_buffer_pool_info;
    use_dmabuf = thiz->use_sinkpad_dmabuf;
  } else if (direction == GST_PAD_SRC) {
    alloc_resp = &thiz->out_alloc_resp;
    pool_info = &thiz->srcpad_buffer_pool_info;
    use_dmabuf = thiz->use_srcpad_dmabuf;
  }

  pool = gst_msdk_buffer_pool_new (thiz->context, alloc_resp);
  if (!pool)
    goto error_no_pool;

  if (!gst_video_info_from_caps (&info, caps))
    goto error_no_video_info;

  gst_msdk_set_video_alignment (&info, &align);
  gst_video_info_align (&info, &align);

  if (use_dmabuf)
    allocator =
        gst_msdk_dmabuf_allocator_new (thiz->context, &info, alloc_resp);
  else if (thiz->use_video_memory)
    allocator = gst_msdk_video_allocator_new (thiz->context, &info, alloc_resp);
  else
    allocator = gst_msdk_system_allocator_new (&info);

  if (!allocator)
    goto error_no_allocator;

  config = gst_buffer_pool_get_config (GST_BUFFER_POOL_CAST (pool));
  gst_buffer_pool_config_set_params (config, caps, info.size, min_num_buffers,
      0);

  gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META);
  gst_buffer_pool_config_add_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
  if (thiz->use_video_memory) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_MSDK_USE_VIDEO_MEMORY);
    if (use_dmabuf)
      gst_buffer_pool_config_add_option (config,
          GST_BUFFER_POOL_OPTION_MSDK_USE_DMABUF);
  }

  gst_buffer_pool_config_set_video_alignment (config, &align);
  gst_buffer_pool_config_set_allocator (config, allocator, &params);
  gst_object_unref (allocator);

  if (!gst_buffer_pool_set_config (pool, config))
    goto error_pool_config;

  /* Update pool_info with the aligned video info from the allocator */
  *pool_info = info;

  return pool;

error_no_pool:
  {
    GST_INFO_OBJECT (thiz, "Failed to create bufferpool");
    return NULL;
  }
error_no_video_info:
  {
    GST_INFO_OBJECT (thiz, "Failed to get video info from caps");
    gst_object_unref (pool);
    return NULL;
  }
error_no_allocator:
  {
    GST_INFO_OBJECT (thiz, "Failed to create allocator");
    if (pool)
      gst_object_unref (pool);
    return NULL;
  }
error_pool_config:
  {
    GST_INFO_OBJECT (thiz, "Failed to set config");
    gst_object_unref (pool);
    /* the allocator reference was already dropped after
     * gst_buffer_pool_config_set_allocator () */
    return NULL;
  }
}
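For context, a pool returned by a helper like the one above still has to be activated before buffers can be drawn from it. The following caller-side sketch uses only the standard GstBufferPool API; the caller and its error handling are hypothetical.

/* Hypothetical caller: activate the pool and acquire one buffer.
 * Uses only core GstBufferPool API. */
static GstFlowReturn
acquire_one_buffer (GstBufferPool * pool)
{
  GstBuffer *buf = NULL;
  GstFlowReturn ret;

  /* A pool must be active before buffers can be acquired from it */
  if (!gst_buffer_pool_set_active (pool, TRUE))
    return GST_FLOW_ERROR;

  /* Blocks until a buffer is available (unless the pool is flushing) */
  ret = gst_buffer_pool_acquire_buffer (pool, &buf, NULL);
  if (ret != GST_FLOW_OK)
    return ret;

  /* ... fill or read the buffer ... */

  /* Unreffing a pooled buffer returns it to the pool */
  gst_buffer_unref (buf);
  return GST_FLOW_OK;
}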
Example #2
static gboolean
video_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
{
  GstVideoBufferPool *vpool = GST_VIDEO_BUFFER_POOL_CAST (pool);
  GstVideoBufferPoolPrivate *priv = vpool->priv;
  GstVideoInfo info;
  GstCaps *caps;
  guint size, min_buffers, max_buffers;
  gint width, height;
  GstAllocator *allocator;
  GstAllocationParams params;

  if (!gst_buffer_pool_config_get_params (config, &caps, &size, &min_buffers,
          &max_buffers))
    goto wrong_config;

  if (caps == NULL)
    goto no_caps;

  /* now parse the caps from the config */
  if (!gst_video_info_from_caps (&info, caps))
    goto wrong_caps;

  if (size < info.size)
    goto wrong_size;

  if (!gst_buffer_pool_config_get_allocator (config, &allocator, &params))
    goto wrong_config;

  width = info.width;
  height = info.height;

  GST_LOG_OBJECT (pool, "%dx%d, caps %" GST_PTR_FORMAT, width, height, caps);

  if (priv->caps)
    gst_caps_unref (priv->caps);
  priv->caps = gst_caps_ref (caps);

  priv->params = params;
  if (priv->allocator)
    gst_object_unref (priv->allocator);
  if ((priv->allocator = allocator))
    gst_object_ref (allocator);

  /* enable metadata based on config of the pool */
  priv->add_videometa =
      gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_META);

  /* parse extra alignment info */
  priv->need_alignment = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);

  if (priv->need_alignment && priv->add_videometa) {
    /* get and apply the alignment to the info */
    gst_buffer_pool_config_get_video_alignment (config, &priv->video_align);

    /* apply the alignment to the info */
    if (!gst_video_info_align (&info, &priv->video_align))
      goto failed_to_align;

    gst_buffer_pool_config_set_video_alignment (config, &priv->video_align);
  }
  info.size = MAX (size, info.size);
  priv->info = info;

  gst_buffer_pool_config_set_params (config, caps, info.size, min_buffers,
      max_buffers);

  return GST_BUFFER_POOL_CLASS (parent_class)->set_config (pool, config);

  /* ERRORS */
wrong_config:
  {
    GST_WARNING_OBJECT (pool, "invalid config");
    return FALSE;
  }
no_caps:
  {
    GST_WARNING_OBJECT (pool, "no caps in config");
    return FALSE;
  }
wrong_caps:
  {
    GST_WARNING_OBJECT (pool,
        "failed getting geometry from caps %" GST_PTR_FORMAT, caps);
    return FALSE;
  }
wrong_size:
  {
    GST_WARNING_OBJECT (pool,
        "Provided size is to small for the caps: %u", size);
    return FALSE;
  }
failed_to_align:
  {
    GST_WARNING_OBJECT (pool, "Failed to align");
    return FALSE;
  }
}
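The core of this set_config is gst_video_info_align (), which rewrites the strides, offsets and size of a GstVideoInfo so that every plane covers the padded frame. Below is a minimal standalone sketch of that call; the NV12 format and the padding values are arbitrary, chosen only for illustration.

/* Sketch: applying a GstVideoAlignment to a GstVideoInfo.
 * Format and padding values are arbitrary example numbers. */
static void
align_info_example (void)
{
  GstVideoInfo info;
  GstVideoAlignment align;

  gst_video_info_set_format (&info, GST_VIDEO_FORMAT_NV12, 1920, 1080);

  gst_video_alignment_reset (&align);
  align.padding_right = 64;     /* padded width becomes 1984 */
  align.padding_bottom = 8;     /* padded height becomes 1088 */

  /* Recomputes info.stride[], info.offset[] and info.size so each
   * plane spans the padded 1984x1088 frame */
  gst_video_info_align (&info, &align);
}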
Example #3
nsresult GStreamerReader::ReadMetadata(MediaInfo* aInfo,
                                       MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  nsresult ret = NS_OK;

  /*
   * Parse MP3 headers before we kick off the GStreamer pipeline; otherwise
   * concurrent stream operations on the decode and GStreamer threads can
   * confuse the GStreamer state machine.
   */
  bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3);
  if (isMP3) {
    ParseMP3Headers();
  }


  /* We make 3 attempts here: decoding audio and video, decoding video only,
   * and decoding audio only. This allows us to play streams that have one
   * broken stream but that are otherwise decodable.
   */
  guint flags[3] = {GST_PLAY_FLAG_VIDEO|GST_PLAY_FLAG_AUDIO,
    static_cast<guint>(~GST_PLAY_FLAG_AUDIO), static_cast<guint>(~GST_PLAY_FLAG_VIDEO)};
  guint default_flags, current_flags;
  g_object_get(mPlayBin, "flags", &default_flags, nullptr);

  GstMessage* message = nullptr;
  for (unsigned int i = 0; i < G_N_ELEMENTS(flags); i++) {
    current_flags = default_flags & flags[i];
    g_object_set(G_OBJECT(mPlayBin), "flags", current_flags, nullptr);

    /* reset filter caps to ANY */
    GstCaps* caps = gst_caps_new_any();
    GstElement* filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
    g_object_set(filter, "caps", caps, nullptr);
    gst_object_unref(filter);

    filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");
    g_object_set(filter, "caps", caps, nullptr);
    gst_object_unref(filter);
    gst_caps_unref(caps);
    filter = nullptr;

    if (!(current_flags & GST_PLAY_FLAG_AUDIO))
      filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
    else if (!(current_flags & GST_PLAY_FLAG_VIDEO))
      filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");

    if (filter) {
      /* Little trick: set the target caps to "skip" so that playbin2 fails to
       * find a decoder for the stream we want to skip.
       */
      GstCaps* filterCaps = gst_caps_new_simple ("skip", nullptr, nullptr);
      g_object_set(filter, "caps", filterCaps, nullptr);
      gst_caps_unref(filterCaps);
      gst_object_unref(filter);
    }

    LOG(PR_LOG_DEBUG, "starting metadata pipeline");
    if (gst_element_set_state(mPlayBin, GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) {
      LOG(PR_LOG_DEBUG, "metadata pipeline state change failed");
      ret = NS_ERROR_FAILURE;
      continue;
    }

    /* Wait for ASYNC_DONE, which is emitted when the pipeline is built,
     * prerolled and ready to play. Also watch for errors.
     */
    message = gst_bus_timed_pop_filtered(mBus, GST_CLOCK_TIME_NONE,
                 (GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR | GST_MESSAGE_EOS));
    if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ASYNC_DONE) {
      LOG(PR_LOG_DEBUG, "read metadata pipeline prerolled");
      gst_message_unref(message);
      ret = NS_OK;
      break;
    } else {
      LOG(PR_LOG_DEBUG, "read metadata pipeline failed to preroll: %s",
            gst_message_type_get_name (GST_MESSAGE_TYPE (message)));

      if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ERROR) {
        GError* error;
        gchar* debug;
        gst_message_parse_error(message, &error, &debug);
        LOG(PR_LOG_ERROR, "read metadata error: %s: %s", error->message, debug);
        g_error_free(error);
        g_free(debug);
      }
      /* Unexpected stream close/EOS or other error. We'll give up if all
       * streams are in error/EOS. */
      gst_element_set_state(mPlayBin, GST_STATE_NULL);
      gst_message_unref(message);
      ret = NS_ERROR_FAILURE;
    }
  }

  if (NS_SUCCEEDED(ret))
    ret = CheckSupportedFormats();

  if (NS_FAILED(ret))
    /* we couldn't get this to play */
    return ret;

  /* report the duration */
  gint64 duration;

  if (isMP3 && mMP3FrameParser.IsMP3()) {
    // The MP3FrameParser has reported a duration; use that over the gstreamer
    // reported duration for inter-platform consistency.
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mUseParserDuration = true;
    mLastParserDuration = mMP3FrameParser.GetDuration();
    mDecoder->SetMediaDuration(mLastParserDuration);
  } else {
    LOG(PR_LOG_DEBUG, "querying duration");
    // Otherwise use the gstreamer duration.
#if GST_VERSION_MAJOR >= 1
    if (gst_element_query_duration(GST_ELEMENT(mPlayBin),
          GST_FORMAT_TIME, &duration)) {
#else
    GstFormat format = GST_FORMAT_TIME;
    if (gst_element_query_duration(GST_ELEMENT(mPlayBin),
      &format, &duration) && format == GST_FORMAT_TIME) {
#endif
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      LOG(PR_LOG_DEBUG, "have duration %" GST_TIME_FORMAT, GST_TIME_ARGS(duration));
      duration = GST_TIME_AS_USECONDS (duration);
      mDecoder->SetMediaDuration(duration);
    }
  }

  int n_video = 0, n_audio = 0;
  g_object_get(mPlayBin, "n-video", &n_video, "n-audio", &n_audio, nullptr);
  mInfo.mVideo.mHasVideo = n_video != 0;
  mInfo.mAudio.mHasAudio = n_audio != 0;

  *aInfo = mInfo;

  *aTags = nullptr;

  // Watch the pipeline for fatal errors
#if GST_VERSION_MAJOR >= 1
  gst_bus_set_sync_handler(mBus, GStreamerReader::ErrorCb, this, nullptr);
#else
  gst_bus_set_sync_handler(mBus, GStreamerReader::ErrorCb, this);
#endif

  /* set the pipeline to PLAYING so that it starts decoding and queueing data in
   * the appsinks */
  gst_element_set_state(mPlayBin, GST_STATE_PLAYING);

  return NS_OK;
}

bool
GStreamerReader::IsMediaSeekable()
{
  if (mUseParserDuration) {
    return true;
  }

  gint64 duration;
#if GST_VERSION_MAJOR >= 1
  if (gst_element_query_duration(GST_ELEMENT(mPlayBin), GST_FORMAT_TIME,
                                 &duration)) {
#else
  GstFormat format = GST_FORMAT_TIME;
  if (gst_element_query_duration(GST_ELEMENT(mPlayBin), &format, &duration) &&
      format == GST_FORMAT_TIME) {
#endif
    return true;
  }

  return false;
}

nsresult GStreamerReader::CheckSupportedFormats()
{
  bool done = false;
  bool unsupported = false;

  GstIterator* it = gst_bin_iterate_recurse(GST_BIN(mPlayBin));
  while (!done) {
    GstIteratorResult res;
    GstElement* element;

#if GST_VERSION_MAJOR >= 1
    GValue value = {0,};
    res = gst_iterator_next(it, &value);
#else
    res = gst_iterator_next(it, (void **) &element);
#endif
    switch(res) {
      case GST_ITERATOR_OK:
      {
#if GST_VERSION_MAJOR >= 1
        element = GST_ELEMENT (g_value_get_object (&value));
#endif
        GstElementFactory* factory = gst_element_get_factory(element);
        if (factory) {
          const char* klass = gst_element_factory_get_klass(factory);
          GstPad* pad = gst_element_get_static_pad(element, "sink");
          if (pad) {
            GstCaps* caps;

#if GST_VERSION_MAJOR >= 1
            caps = gst_pad_get_current_caps(pad);
#else
            caps = gst_pad_get_negotiated_caps(pad);
#endif

            if (caps) {
              /* check for demuxers but ignore elements like id3demux */
              if (strstr (klass, "Demuxer") && !strstr(klass, "Metadata"))
                unsupported = !GStreamerFormatHelper::Instance()->CanHandleContainerCaps(caps);
              else if (strstr (klass, "Decoder") && !strstr(klass, "Generic"))
                unsupported = !GStreamerFormatHelper::Instance()->CanHandleCodecCaps(caps);

              gst_caps_unref(caps);
            }
            gst_object_unref(pad);
          }
        }

#if GST_VERSION_MAJOR >= 1
        g_value_unset (&value);
#else
        gst_object_unref(element);
#endif
        done = unsupported;
        break;
      }
      case GST_ITERATOR_RESYNC:
        unsupported = false;
        done = false;
        break;
      case GST_ITERATOR_ERROR:
        done = true;
        break;
      case GST_ITERATOR_DONE:
        done = true;
        break;
    }
  }

  return unsupported ? NS_ERROR_FAILURE : NS_OK;
}

nsresult GStreamerReader::ResetDecode()
{
  nsresult res = NS_OK;

  LOG(PR_LOG_DEBUG, "reset decode");

  if (NS_FAILED(MediaDecoderReader::ResetDecode())) {
    res = NS_ERROR_FAILURE;
  }

  mVideoQueue.Reset();
  mAudioQueue.Reset();

  mVideoSinkBufferCount = 0;
  mAudioSinkBufferCount = 0;
  mReachedAudioEos = false;
  mReachedVideoEos = false;
#if GST_VERSION_MAJOR >= 1
  mConfigureAlignment = true;
#endif

  LOG(PR_LOG_DEBUG, "reset decode done");

  return res;
}

bool GStreamerReader::DecodeAudioData()
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  GstBuffer *buffer = nullptr;

  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);

    if (mReachedAudioEos && !mAudioSinkBufferCount) {
      return false;
    }

    /* Wait for something to be decoded before returning or continuing */
    if (!mAudioSinkBufferCount) {
      if (!mVideoSinkBufferCount) {
        /* We have nothing decoded, so it makes no sense to return to the
         * state machine: it would call us back immediately, we'd return
         * again, and so on, wasting CPU cycles for nothing. Block here
         * until either video or audio data is available.
         */
        mon.Wait();
        if (!mAudioSinkBufferCount) {
          /* There is still no audio data available, so either there is video
           * data or something else has happened (EOS, etc.). Return to the
           * state machine to process it.
           */
          return true;
        }
      }
      else {
        return true;
      }
    }

#if GST_VERSION_MAJOR >= 1
    GstSample *sample = gst_app_sink_pull_sample(mAudioAppSink);
    buffer = gst_buffer_ref(gst_sample_get_buffer(sample));
    gst_sample_unref(sample);
#else
    buffer = gst_app_sink_pull_buffer(mAudioAppSink);
#endif

    mAudioSinkBufferCount--;
  }

  int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
    timestamp = gst_segment_to_stream_time(&mAudioSegment,
                                           GST_FORMAT_TIME, timestamp);
  }
  timestamp = GST_TIME_AS_USECONDS(timestamp);

  int64_t offset = GST_BUFFER_OFFSET(buffer);
  guint8* data;
#if GST_VERSION_MAJOR >= 1
  GstMapInfo info;
  gst_buffer_map(buffer, &info, GST_MAP_READ);
  unsigned int size = info.size;
  data = info.data;
#else
  unsigned int size = GST_BUFFER_SIZE(buffer);
  data = GST_BUFFER_DATA(buffer);
#endif
  int32_t frames = (size / sizeof(AudioDataValue)) / mInfo.mAudio.mChannels;

  typedef AudioCompactor::NativeCopy GstCopy;
  mAudioCompactor.Push(offset,
                       timestamp,
                       mInfo.mAudio.mRate,
                       frames,
                       mInfo.mAudio.mChannels,
                       GstCopy(data,
                               size,
                               mInfo.mAudio.mChannels));
#if GST_VERSION_MAJOR >= 1
  gst_buffer_unmap(buffer, &info);
#endif

  gst_buffer_unref(buffer);

  return true;
}
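The #if blocks above capture the 0.10 vs 1.x split in buffer access: in GStreamer 1.x the payload is reached through gst_buffer_map (). A minimal read-only mapping sketch, mirroring what DecodeAudioData () does:

/* Sketch: read-only access to GstBuffer data in GStreamer 1.x. */
static gsize
read_buffer_size (GstBuffer * buffer)
{
  GstMapInfo info;
  gsize size = 0;

  if (gst_buffer_map (buffer, &info, GST_MAP_READ)) {
    size = info.size;           /* info.data points at the mapped bytes */
    gst_buffer_unmap (buffer, &info);
  }
  return size;
}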

bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
                                       int64_t aTimeThreshold)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  GstBuffer *buffer = nullptr;

  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);

    if (mReachedVideoEos && !mVideoSinkBufferCount) {
      return false;
    }

    /* Wait for something to be decoded before returning or continuing */
    if (!mVideoSinkBufferCount) {
      if (!mAudioSinkBufferCount) {
        /* We have nothing decoded, so it makes no sense to return to the
         * state machine: it would call us back immediately, we'd return
         * again, and so on, wasting CPU cycles for nothing. Block here
         * until either video or audio data is available.
         */
        mon.Wait();
        if (!mVideoSinkBufferCount) {
          /* There is still no video data available, so either there is audio
           * data or something else has happened (EOS, etc.). Return to the
           * state machine to process it.
           */
          return true;
        }
      }
      else {
        return true;
      }
    }

    mDecoder->NotifyDecodedFrames(0, 1);

#if GST_VERSION_MAJOR >= 1
    GstSample *sample = gst_app_sink_pull_sample(mVideoAppSink);
    buffer = gst_buffer_ref(gst_sample_get_buffer(sample));
    gst_sample_unref(sample);
#else
    buffer = gst_app_sink_pull_buffer(mVideoAppSink);
#endif
    mVideoSinkBufferCount--;
  }

  bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  if (aKeyFrameSkip && !isKeyframe) {
    gst_buffer_unref(buffer);
    return true;
  }

  int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
  {
    ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
    timestamp = gst_segment_to_stream_time(&mVideoSegment,
                                           GST_FORMAT_TIME, timestamp);
  }
  NS_ASSERTION(GST_CLOCK_TIME_IS_VALID(timestamp),
               "frame has invalid timestamp");

  timestamp = GST_TIME_AS_USECONDS(timestamp);
  int64_t duration = 0;
  if (GST_CLOCK_TIME_IS_VALID(GST_BUFFER_DURATION(buffer)))
    duration = GST_TIME_AS_USECONDS(GST_BUFFER_DURATION(buffer));
  else if (fpsNum && fpsDen)
    /* add 1-frame duration */
    duration = gst_util_uint64_scale(GST_USECOND, fpsDen, fpsNum);

  if (timestamp < aTimeThreshold) {
    LOG(PR_LOG_DEBUG, "skipping frame %" GST_TIME_FORMAT
                      " threshold %" GST_TIME_FORMAT,
                      GST_TIME_ARGS(timestamp * 1000),
                      GST_TIME_ARGS(aTimeThreshold * 1000));
    gst_buffer_unref(buffer);
    return true;
  }

  if (!buffer)
    /* no more frames */
    return true;

#if GST_VERSION_MAJOR >= 1
  if (mConfigureAlignment && buffer->pool) {
    GstStructure *config = gst_buffer_pool_get_config(buffer->pool);
    GstVideoAlignment align;
    if (gst_buffer_pool_config_get_video_alignment(config, &align))
      gst_video_info_align(&mVideoInfo, &align);
    gst_structure_free(config);
    mConfigureAlignment = false;
  }
#endif

  nsRefPtr<PlanarYCbCrImage> image = GetImageFromBuffer(buffer);
  if (!image) {
    /* Ugh, upstream is not calling gst_pad_alloc_buffer(). Fallback to
     * allocating a PlanarYCbCrImage backed GstBuffer here and memcpy.
     */
    GstBuffer* tmp = nullptr;
    CopyIntoImageBuffer(buffer, &tmp, image);
    gst_buffer_unref(buffer);
    buffer = tmp;
  }

  int64_t offset = mDecoder->GetResource()->Tell(); // Estimate location in media.
  VideoData* video = VideoData::CreateFromImage(mInfo.mVideo,
                                                mDecoder->GetImageContainer(),
                                                offset, timestamp, duration,
                                                static_cast<Image*>(image.get()),
                                                isKeyframe, -1, mPicture);
  mVideoQueue.Push(video);

  gst_buffer_unref(buffer);

  return true;
}
static gboolean
xvimage_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
{
  GstXvImageBufferPool *xvpool = GST_XVIMAGE_BUFFER_POOL_CAST (pool);
  GstVideoInfo info;
  GstCaps *caps;
  guint size, min_buffers, max_buffers;
  GstXvContext *context;

  if (!gst_buffer_pool_config_get_params (config, &caps, &size, &min_buffers,
          &max_buffers))
    goto wrong_config;

  if (caps == NULL)
    goto no_caps;

  /* now parse the caps from the config */
  if (!gst_video_info_from_caps (&info, caps))
    goto wrong_caps;

  GST_LOG_OBJECT (pool, "%dx%d, caps %" GST_PTR_FORMAT, info.width, info.height,
      caps);

  context = gst_xvimage_allocator_peek_context (xvpool->allocator);

  xvpool->im_format = gst_xvcontext_get_format_from_info (context, &info);
  if (xvpool->im_format == -1)
    goto unknown_format;

  if (xvpool->caps)
    gst_caps_unref (xvpool->caps);
  xvpool->caps = gst_caps_ref (caps);

  /* enable metadata based on config of the pool */
  xvpool->add_metavideo =
      gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_META);

  /* parse extra alignment info */
  xvpool->need_alignment = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);

  if (xvpool->need_alignment) {
    gst_buffer_pool_config_get_video_alignment (config, &xvpool->align);

    GST_LOG_OBJECT (pool, "padding %u-%ux%u-%u", xvpool->align.padding_top,
        xvpool->align.padding_left, xvpool->align.padding_left,
        xvpool->align.padding_bottom);

    /* do padding and alignment */
    gst_video_info_align (&info, &xvpool->align);

    gst_buffer_pool_config_set_video_alignment (config, &xvpool->align);

    /* we need the video metadata too now */
    xvpool->add_metavideo = TRUE;
  } else {
    gst_video_alignment_reset (&xvpool->align);
  }

  /* add the padding */
  xvpool->padded_width =
      GST_VIDEO_INFO_WIDTH (&info) + xvpool->align.padding_left +
      xvpool->align.padding_right;
  xvpool->padded_height =
      GST_VIDEO_INFO_HEIGHT (&info) + xvpool->align.padding_top +
      xvpool->align.padding_bottom;

  xvpool->info = info;
  xvpool->crop.x = xvpool->align.padding_left;
  xvpool->crop.y = xvpool->align.padding_top;
  xvpool->crop.w = xvpool->info.width;
  xvpool->crop.h = xvpool->info.height;

  gst_buffer_pool_config_set_params (config, caps, info.size, min_buffers,
      max_buffers);

  return GST_BUFFER_POOL_CLASS (parent_class)->set_config (pool, config);

  /* ERRORS */
wrong_config:
  {
    GST_WARNING_OBJECT (pool, "invalid config");
    return FALSE;
  }
no_caps:
  {
    GST_WARNING_OBJECT (pool, "no caps in config");
    return FALSE;
  }
wrong_caps:
  {
    GST_WARNING_OBJECT (pool,
        "failed getting geometry from caps %" GST_PTR_FORMAT, caps);
    return FALSE;
  }
unknown_format:
  {
    GST_WARNING_OBJECT (pool, "failed to get format from caps %"
        GST_PTR_FORMAT, caps);
    return FALSE;
  }
}
static gboolean
gst_gl_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
{
  GstGLBufferPool *glpool = GST_GL_BUFFER_POOL_CAST (pool);
  GstGLBufferPoolPrivate *priv = glpool->priv;
  GstVideoInfo info;
  GstCaps *caps = NULL;
  guint min_buffers, max_buffers;
  guint max_align, n;
  GstAllocator *allocator = NULL;
  GstAllocationParams alloc_params;
  GstGLTextureTarget tex_target;
  gboolean ret = TRUE;
  gint p;

  if (!gst_buffer_pool_config_get_params (config, &caps, NULL, &min_buffers,
          &max_buffers))
    goto wrong_config;

  if (caps == NULL)
    goto no_caps;

  /* now parse the caps from the config */
  if (!gst_video_info_from_caps (&info, caps))
    goto wrong_caps;

  GST_LOG_OBJECT (pool, "%dx%d, caps %" GST_PTR_FORMAT, info.width, info.height,
      caps);

  if (!gst_buffer_pool_config_get_allocator (config, &allocator, &alloc_params))
    goto wrong_config;

  gst_caps_replace (&priv->caps, caps);

  if (priv->allocator)
    gst_object_unref (priv->allocator);

  if (allocator) {
    if (!GST_IS_GL_MEMORY_ALLOCATOR (allocator)) {
      gst_object_unref (allocator);
      goto wrong_allocator;
    } else {
      priv->allocator = gst_object_ref (allocator);
    }
  } else {
    priv->allocator =
        GST_ALLOCATOR (gst_gl_memory_allocator_get_default (glpool->context));
    g_assert (priv->allocator);
  }

  priv->add_videometa = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_META);
  priv->add_glsyncmeta = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_GL_SYNC_META);

  if (priv->gl_params)
    gst_gl_allocation_params_free ((GstGLAllocationParams *) priv->gl_params);
  priv->gl_params = (GstGLVideoAllocationParams *)
      gst_buffer_pool_config_get_gl_allocation_params (config);
  if (!priv->gl_params)
    priv->gl_params = gst_gl_video_allocation_params_new (glpool->context,
        &alloc_params, &info, -1, NULL, 0, 0);

  max_align = alloc_params.align;

  if (gst_buffer_pool_config_has_option (config,
          GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT)) {
    priv->add_videometa = TRUE;

    gst_buffer_pool_config_get_video_alignment (config,
        priv->gl_params->valign);

    for (n = 0; n < GST_VIDEO_MAX_PLANES; ++n)
      max_align |= priv->gl_params->valign->stride_align[n];

    for (n = 0; n < GST_VIDEO_MAX_PLANES; ++n)
      priv->gl_params->valign->stride_align[n] = max_align;

    gst_video_info_align (priv->gl_params->v_info, priv->gl_params->valign);

    gst_buffer_pool_config_set_video_alignment (config,
        priv->gl_params->valign);
  }

  if (alloc_params.align < max_align) {
    GST_WARNING_OBJECT (pool, "allocation params alignment %u is smaller "
        "than the max specified video stride alignment %u, fixing",
        (guint) alloc_params.align, max_align);

    alloc_params.align = max_align;
    gst_buffer_pool_config_set_allocator (config, allocator, &alloc_params);
    if (priv->gl_params->parent.alloc_params)
      gst_allocation_params_free (priv->gl_params->parent.alloc_params);
    priv->gl_params->parent.alloc_params =
        gst_allocation_params_copy (&alloc_params);
  }

  {
    GstStructure *s = gst_caps_get_structure (caps, 0);
    const gchar *target_str = gst_structure_get_string (s, "texture-target");
    gboolean multiple_texture_targets = FALSE;

    tex_target = priv->gl_params->target;
    if (target_str)
      tex_target = gst_gl_texture_target_from_string (target_str);

    if (gst_buffer_pool_config_has_option (config,
            GST_BUFFER_POOL_OPTION_GL_TEXTURE_TARGET_2D)) {
      if (tex_target && tex_target != GST_GL_TEXTURE_TARGET_2D)
        multiple_texture_targets = TRUE;
      tex_target = GST_GL_TEXTURE_TARGET_2D;
    }
    if (gst_buffer_pool_config_has_option (config,
            GST_BUFFER_POOL_OPTION_GL_TEXTURE_TARGET_RECTANGLE)) {
      if (tex_target && tex_target != GST_GL_TEXTURE_TARGET_RECTANGLE)
        multiple_texture_targets = TRUE;
      tex_target = GST_GL_TEXTURE_TARGET_RECTANGLE;
    }
    if (gst_buffer_pool_config_has_option (config,
            GST_BUFFER_POOL_OPTION_GL_TEXTURE_TARGET_EXTERNAL_OES)) {
      if (tex_target && tex_target != GST_GL_TEXTURE_TARGET_EXTERNAL_OES)
        multiple_texture_targets = TRUE;
      tex_target = GST_GL_TEXTURE_TARGET_EXTERNAL_OES;
    }

    if (!tex_target)
      tex_target = GST_GL_TEXTURE_TARGET_2D;

    if (multiple_texture_targets) {
      GST_WARNING_OBJECT (pool, "Multiple texture targets configured either "
          "through caps or buffer pool options");
      ret = FALSE;
    }

    priv->gl_params->target = tex_target;
  }

  /* Recalculate the size and offsets as we don't add padding between planes. */
  priv->gl_params->v_info->size = 0;
  for (p = 0; p < GST_VIDEO_INFO_N_PLANES (priv->gl_params->v_info); p++) {
    priv->gl_params->v_info->offset[p] = priv->gl_params->v_info->size;
    priv->gl_params->v_info->size +=
        gst_gl_get_plane_data_size (priv->gl_params->v_info,
        priv->gl_params->valign, p);
  }

  gst_buffer_pool_config_set_params (config, caps,
      priv->gl_params->v_info->size, min_buffers, max_buffers);

  return GST_BUFFER_POOL_CLASS (parent_class)->set_config (pool, config) && ret;

  /* ERRORS */
wrong_config:
  {
    GST_WARNING_OBJECT (pool, "invalid config");
    return FALSE;
  }
no_caps:
  {
    GST_WARNING_OBJECT (pool, "no caps in config");
    return FALSE;
  }
wrong_caps:
  {
    GST_WARNING_OBJECT (pool,
        "failed getting geometry from caps %" GST_PTR_FORMAT, caps);
    return FALSE;
  }
wrong_allocator:
  {
    GST_WARNING_OBJECT (pool, "Incorrect allocator type for this pool");
    return FALSE;
  }
}
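Note how the loop above ORs the per-plane stride_align values into max_align: both GstAllocationParams.align and GstVideoAlignment.stride_align are expressed as alignment-minus-one bitmasks (e.g. 15 for 16-byte alignment), so for power-of-two alignments the OR of two masks satisfies both constraints. A tiny sketch of the two operations involved, under that assumption:

/* Sketch: combining alignment masks and rounding a value up to one.
 * Masks are (alignment - 1), e.g. 15 | 31 == 31, i.e. 32-byte alignment. */
static guint
combine_alignment_masks (guint a, guint b)
{
  return a | b;
}

static guint
align_up (guint value, guint mask)
{
  return (value + mask) & ~mask;  /* round up to a (mask + 1) boundary */
}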
static gboolean
gst_gl_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
{
  GstGLBufferPool *glpool = GST_GL_BUFFER_POOL_CAST (pool);
  GstGLBufferPoolPrivate *priv = glpool->priv;
  GstVideoInfo info;
  GstCaps *caps = NULL;
  guint min_buffers, max_buffers;
  GstAllocator *allocator = NULL;
  GstAllocationParams alloc_params;
  gboolean reset = TRUE;
  gint p;

  if (!gst_buffer_pool_config_get_params (config, &caps, NULL, &min_buffers,
          &max_buffers))
    goto wrong_config;

  if (caps == NULL)
    goto no_caps;

  /* now parse the caps from the config */
  if (!gst_video_info_from_caps (&info, caps))
    goto wrong_caps;

  GST_LOG_OBJECT (pool, "%dx%d, caps %" GST_PTR_FORMAT, info.width, info.height,
      caps);

  if (!gst_buffer_pool_config_get_allocator (config, &allocator, &alloc_params))
    goto wrong_config;

  if (priv->allocator)
    gst_object_unref (priv->allocator);

  if (!allocator) {
    gst_gl_memory_init ();
    priv->allocator = gst_allocator_find (GST_GL_MEMORY_ALLOCATOR);
  } else {
    priv->allocator = gst_object_ref (allocator);
  }

  priv->params = alloc_params;

  priv->im_format = GST_VIDEO_INFO_FORMAT (&info);
  if (priv->im_format == -1)
    goto unknown_format;

  if (priv->caps)
    reset = !gst_caps_is_equal (priv->caps, caps);

  gst_caps_replace (&priv->caps, caps);
  priv->info = info;

  priv->add_videometa = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_META);
  priv->add_uploadmeta = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_GL_TEXTURE_UPLOAD_META);
  priv->add_glsyncmeta = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_GL_SYNC_META);

#if GST_GL_HAVE_PLATFORM_EGL
  g_assert (priv->allocator != NULL);
  priv->want_eglimage =
      (g_strcmp0 (priv->allocator->mem_type, GST_EGL_IMAGE_MEMORY_TYPE) == 0);
#else
  priv->want_eglimage = FALSE;
#endif

  if (gst_buffer_pool_config_has_option (config,
          GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT)) {

    priv->add_videometa = TRUE;

    gst_buffer_pool_config_get_video_alignment (config, &priv->valign);
    gst_video_info_align (&priv->info, &priv->valign);

    gst_buffer_pool_config_set_video_alignment (config, &priv->valign);
  } else {
    gst_video_alignment_reset (&priv->valign);
  }

  if (reset) {
    if (glpool->upload)
      gst_object_unref (glpool->upload);

    glpool->upload = gst_gl_upload_meta_new (glpool->context);
  }

  /* Recalculate the size as we don't add padding between planes. */
  priv->info.size = 0;
  for (p = 0; p < GST_VIDEO_INFO_N_PLANES (&priv->info); p++) {
    priv->info.size +=
        gst_gl_get_plane_data_size (&priv->info, &priv->valign, p);
  }

  gst_buffer_pool_config_set_params (config, caps, priv->info.size,
      min_buffers, max_buffers);

  return GST_BUFFER_POOL_CLASS (parent_class)->set_config (pool, config);

  /* ERRORS */
wrong_config:
  {
    GST_WARNING_OBJECT (pool, "invalid config");
    return FALSE;
  }
no_caps:
  {
    GST_WARNING_OBJECT (pool, "no caps in config");
    return FALSE;
  }
wrong_caps:
  {
    GST_WARNING_OBJECT (pool,
        "failed getting geometry from caps %" GST_PTR_FORMAT, caps);
    return FALSE;
  }
unknown_format:
  {
    GST_WARNING_OBJECT (glpool, "failed to get format from caps %"
        GST_PTR_FORMAT, caps);
    GST_ELEMENT_ERROR (glpool, RESOURCE, WRITE,
        ("Failed to create output image buffer of %dx%d pixels",
            priv->info.width, priv->info.height),
        ("Invalid input caps %" GST_PTR_FORMAT, caps));
    return FALSE;
  }
}
Example #8
static bool gst_vlc_video_info_from_vout( GstVideoInfo *p_info,
        GstVideoAlignment *p_align, GstCaps *p_caps, decoder_t *p_dec,
        picture_t *p_pic_info )
{
    const GstVideoFormatInfo *p_vinfo = p_info->finfo;
    picture_t *p_pic = NULL;
    int i;

    /* Ensure the queue is empty */
    gst_vlc_dec_ensure_empty_queue( p_dec );
    gst_video_info_align( p_info, p_align );

    if( !gst_vlc_set_vout_fmt( p_info, p_align, p_caps, p_dec ))
    {
        msg_Err( p_dec, "failed to set output format to vout" );
        return false;
    }

    /* Acquire a picture and release it. This is to get the picture
     * stride/offset info for a GStreamer decoder that wants to use the
     * downstream bufferpool directly (zero-copy). */
    if( !decoder_UpdateVideoFormat( p_dec ) )
        p_pic = decoder_NewPicture( p_dec );
    if( !p_pic )
    {
        msg_Err( p_dec, "failed to acquire picture from vout; for pic info" );
        return false;
    }

    /* reject if strides don't match */
    for( i = 0; i < p_pic->i_planes; i++ )
        if( p_info->stride[i] != p_pic->p[i].i_pitch )
            goto strides_mismatch;

    p_info->offset[0] = 0;
    for( i = 1; i < p_pic->i_planes; i++ )
    {
        p_info->offset[i] = p_info->offset[i-1] +
            p_pic->p[i-1].i_pitch * p_pic->p[i-1].i_lines;
    }
    GST_VIDEO_INFO_SIZE( p_info ) = p_info->offset[i-1] +
        p_pic->p[i-1].i_pitch * p_pic->p[i-1].i_lines;

    for( i = 0; i < p_pic->i_planes; i++ )
    {
        int i_v_edge, i_h_edge;

        i_h_edge =
            GST_VIDEO_FORMAT_INFO_SCALE_WIDTH( p_vinfo, i,
                    p_align->padding_left);
        i_v_edge =
            GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT( p_vinfo, i,
                    p_align->padding_top);

        p_info->offset[i] += ( i_v_edge * p_info->stride[i] ) +
            ( i_h_edge * GST_VIDEO_FORMAT_INFO_PSTRIDE( p_vinfo, i ));
    }

    memcpy( p_pic_info, p_pic, sizeof( picture_t ));
    picture_Release( p_pic );

    return true;

strides_mismatch:
    msg_Err( p_dec, "strides mismatch" );
    picture_Release( p_pic );
    return false;
}
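The offset computation above packs the planes contiguously: each plane starts where the previous one ends, at pitch times line count. The same arithmetic in a standalone sketch; the fixed three-plane layout and the caller supplying pitches/lines from an acquired picture_t are assumptions:

/* Sketch: contiguous plane offsets from per-plane pitch and line counts.
 * A real caller would take pitches[]/lines[] from the acquired picture_t. */
static void
compute_plane_offsets (const int pitches[3], const int lines[3],
    size_t offsets[3], size_t *total_size)
{
  int i;

  offsets[0] = 0;
  for (i = 1; i < 3; i++)
    offsets[i] = offsets[i - 1] + (size_t) pitches[i - 1] * lines[i - 1];

  *total_size = offsets[2] + (size_t) pitches[2] * lines[2];
}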
Example #9
static gboolean
xvimage_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
{
  GstXvImageBufferPool *xvpool = GST_XVIMAGE_BUFFER_POOL_CAST (pool);
  GstXvImageBufferPoolPrivate *priv = xvpool->priv;
  GstVideoInfo info;
  GstCaps *caps;

  if (!gst_buffer_pool_config_get_params (config, &caps, NULL, NULL, NULL))
    goto wrong_config;

  if (caps == NULL)
    goto no_caps;

  /* now parse the caps from the config */
  if (!gst_video_info_from_caps (&info, caps))
    goto wrong_caps;

  GST_LOG_OBJECT (pool, "%dx%d, caps %" GST_PTR_FORMAT, info.width, info.height,
      caps);

  priv->im_format = gst_xvimagesink_get_format_from_info (xvpool->sink, &info);
  if (priv->im_format == -1)
    goto unknown_format;

  if (priv->caps)
    gst_caps_unref (priv->caps);
  priv->caps = gst_caps_ref (caps);

  /* enable metadata based on config of the pool */
  priv->add_metavideo =
      gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_META);

  /* parse extra alignment info */
  priv->need_alignment = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);

  if (priv->need_alignment) {
    gst_buffer_pool_config_get_video_alignment (config, &priv->align);

    GST_LOG_OBJECT (pool, "padding %u-%ux%u-%u", priv->align.padding_top,
        priv->align.padding_left, priv->align.padding_left,
        priv->align.padding_bottom);

    /* do padding and alignment */
    gst_video_info_align (&info, &priv->align);

    /* we need the video metadata too now */
    priv->add_metavideo = TRUE;
  } else {
    gst_video_alignment_reset (&priv->align);
  }

  /* add the padding */
  priv->padded_width =
      GST_VIDEO_INFO_WIDTH (&info) + priv->align.padding_left +
      priv->align.padding_right;
  priv->padded_height =
      GST_VIDEO_INFO_HEIGHT (&info) + priv->align.padding_top +
      priv->align.padding_bottom;

  priv->info = info;

  return GST_BUFFER_POOL_CLASS (parent_class)->set_config (pool, config);

  /* ERRORS */
wrong_config:
  {
    GST_WARNING_OBJECT (pool, "invalid config");
    return FALSE;
  }
no_caps:
  {
    GST_WARNING_OBJECT (pool, "no caps in config");
    return FALSE;
  }
wrong_caps:
  {
    GST_WARNING_OBJECT (pool,
        "failed getting geometry from caps %" GST_PTR_FORMAT, caps);
    return FALSE;
  }
unknown_format:
  {
    GST_WARNING_OBJECT (xvpool->sink, "failed to get format from caps %"
        GST_PTR_FORMAT, caps);
    GST_ELEMENT_ERROR (xvpool->sink, RESOURCE, WRITE,
        ("Failed to create output image buffer of %dx%d pixels",
            priv->info.width, priv->info.height),
        ("Invalid input caps %" GST_PTR_FORMAT, caps));
    return FALSE;
  }
}
Example #10
static gboolean
ximage_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
{
  GstXImageBufferPool *xpool = GST_XIMAGE_BUFFER_POOL_CAST (pool);
  GstXImageBufferPoolPrivate *priv = xpool->priv;
  GstVideoInfo info;
  GstCaps *caps;

  if (!gst_buffer_pool_config_get_params (config, &caps, NULL, NULL, NULL))
    goto wrong_config;

  if (caps == NULL)
    goto no_caps;

  /* now parse the caps from the config */
  if (!gst_video_info_from_caps (&info, caps))
    goto wrong_caps;

  GST_LOG_OBJECT (pool, "%dx%d, caps %" GST_PTR_FORMAT, info.width, info.height,
      caps);

  /* keep track of the width and height and caps */
  if (priv->caps)
    gst_caps_unref (priv->caps);
  priv->caps = gst_caps_ref (caps);

  /* check for the configured metadata */
  priv->add_metavideo =
      gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_META);

  /* parse extra alignment info */
  priv->need_alignment = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);

  if (priv->need_alignment) {
    gst_buffer_pool_config_get_video_alignment (config, &priv->align);

    GST_LOG_OBJECT (pool, "padding %u-%ux%u-%u", priv->align.padding_top,
        priv->align.padding_left, priv->align.padding_left,
        priv->align.padding_bottom);

    /* do padding and alignment */
    gst_video_info_align (&info, &priv->align);

    /* we need the video metadata too now */
    priv->add_metavideo = TRUE;
  } else {
    gst_video_alignment_reset (&priv->align);
  }

  /* add the padding */
  priv->padded_width =
      GST_VIDEO_INFO_WIDTH (&info) + priv->align.padding_left +
      priv->align.padding_right;
  priv->padded_height =
      GST_VIDEO_INFO_HEIGHT (&info) + priv->align.padding_top +
      priv->align.padding_bottom;

  priv->info = info;

  return GST_BUFFER_POOL_CLASS (parent_class)->set_config (pool, config);

  /* ERRORS */
wrong_config:
  {
    GST_WARNING_OBJECT (pool, "invalid config");
    return FALSE;
  }
no_caps:
  {
    GST_WARNING_OBJECT (pool, "no caps in config");
    return FALSE;
  }
wrong_caps:
  {
    GST_WARNING_OBJECT (pool,
        "failed getting geometry from caps %" GST_PTR_FORMAT, caps);
    return FALSE;
  }
}
static gboolean
gst_gl_buffer_pool_set_config (GstBufferPool * pool, GstStructure * config)
{
  GstGLBufferPool *glpool = GST_GL_BUFFER_POOL_CAST (pool);
  GstGLBufferPoolPrivate *priv = glpool->priv;
  GstVideoInfo info;
  GstCaps *caps = NULL;
  guint min_buffers, max_buffers;
  guint max_align, n;
  GstAllocator *allocator = NULL;
  GstAllocationParams alloc_params;
  gboolean reset = TRUE, ret = TRUE;
  gint p;

  if (!gst_buffer_pool_config_get_params (config, &caps, NULL, &min_buffers,
          &max_buffers))
    goto wrong_config;

  if (caps == NULL)
    goto no_caps;

  /* now parse the caps from the config */
  if (!gst_video_info_from_caps (&info, caps))
    goto wrong_caps;

  GST_LOG_OBJECT (pool, "%dx%d, caps %" GST_PTR_FORMAT, info.width, info.height,
      caps);

  if (!gst_buffer_pool_config_get_allocator (config, &allocator, &alloc_params))
    goto wrong_config;

  if (priv->allocator)
    gst_object_unref (priv->allocator);

  if (!allocator) {
    gst_gl_memory_init ();
    priv->allocator = gst_allocator_find (GST_GL_MEMORY_ALLOCATOR);
  } else {
    priv->allocator = gst_object_ref (allocator);
  }

  priv->params = alloc_params;

  priv->im_format = GST_VIDEO_INFO_FORMAT (&info);
  if (priv->im_format == -1)
    goto unknown_format;

  if (priv->caps)
    reset = !gst_caps_is_equal (priv->caps, caps);

  gst_caps_replace (&priv->caps, caps);
  priv->info = info;

  priv->add_videometa = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_META);
  priv->add_uploadmeta = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_GL_TEXTURE_UPLOAD_META);
  priv->add_glsyncmeta = gst_buffer_pool_config_has_option (config,
      GST_BUFFER_POOL_OPTION_GL_SYNC_META);

#if GST_GL_HAVE_PLATFORM_EGL
  g_assert (priv->allocator != NULL);
  priv->want_eglimage =
      (g_strcmp0 (priv->allocator->mem_type, GST_EGL_IMAGE_MEMORY_TYPE) == 0);
#else
  priv->want_eglimage = FALSE;
#endif

  max_align = alloc_params.align;

  if (gst_buffer_pool_config_has_option (config,
          GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT)) {

    priv->add_videometa = TRUE;

    gst_buffer_pool_config_get_video_alignment (config, &priv->valign);

    for (n = 0; n < GST_VIDEO_MAX_PLANES; ++n)
      max_align |= priv->valign.stride_align[n];

    for (n = 0; n < GST_VIDEO_MAX_PLANES; ++n)
      priv->valign.stride_align[n] = max_align;

    gst_video_info_align (&priv->info, &priv->valign);

    gst_buffer_pool_config_set_video_alignment (config, &priv->valign);
  } else {
    gst_video_alignment_reset (&priv->valign);
  }

  if (alloc_params.align < max_align) {
    GST_WARNING_OBJECT (pool, "allocation params alignment %u is smaller "
        "than the max specified video stride alignment %u, fixing",
        (guint) alloc_params.align, max_align);

    alloc_params.align = max_align;
    gst_buffer_pool_config_set_allocator (config, allocator, &alloc_params);
    priv->params = alloc_params;
  }

  if (reset) {
    if (glpool->upload)
      gst_object_unref (glpool->upload);

    glpool->upload = gst_gl_upload_meta_new (glpool->context);
  }

  priv->tex_target = 0;
  {
    GstStructure *s = gst_caps_get_structure (caps, 0);
    const gchar *target_str = gst_structure_get_string (s, "texture-target");
    gboolean multiple_texture_targets = FALSE;

    if (target_str)
      priv->tex_target = gst_gl_texture_target_from_string (target_str);

    if (gst_buffer_pool_config_has_option (config,
            GST_BUFFER_POOL_OPTION_GL_TEXTURE_TARGET_2D)) {
      if (priv->tex_target)
        multiple_texture_targets = TRUE;
      priv->tex_target = GST_GL_TEXTURE_TARGET_2D;
    }
    if (gst_buffer_pool_config_has_option (config,
            GST_BUFFER_POOL_OPTION_GL_TEXTURE_TARGET_RECTANGLE)) {
      if (priv->tex_target)
        multiple_texture_targets = TRUE;
      priv->tex_target = GST_GL_TEXTURE_TARGET_RECTANGLE;
    }
    if (gst_buffer_pool_config_has_option (config,
            GST_BUFFER_POOL_OPTION_GL_TEXTURE_TARGET_EXTERNAL_OES)) {
      if (priv->tex_target)
        multiple_texture_targets = TRUE;
      priv->tex_target = GST_GL_TEXTURE_TARGET_EXTERNAL_OES;
    }

    if (!priv->tex_target)
      priv->tex_target = GST_GL_TEXTURE_TARGET_2D;

    if (multiple_texture_targets) {
      GST_WARNING_OBJECT (pool, "Multiple texture targets configured either "
          "through caps or buffer pool options");
      ret = FALSE;
    }
  }

  /* Recalculate the size and offsets as we don't add padding between planes. */
  priv->info.size = 0;
  for (p = 0; p < GST_VIDEO_INFO_N_PLANES (&priv->info); p++) {
    priv->info.offset[p] = priv->info.size;
    priv->info.size +=
        gst_gl_get_plane_data_size (&priv->info, &priv->valign, p);
  }

  gst_buffer_pool_config_set_params (config, caps, priv->info.size,
      min_buffers, max_buffers);

  return GST_BUFFER_POOL_CLASS (parent_class)->set_config (pool, config) && ret;

  /* ERRORS */
wrong_config:
  {
    GST_WARNING_OBJECT (pool, "invalid config");
    return FALSE;
  }
no_caps:
  {
    GST_WARNING_OBJECT (pool, "no caps in config");
    return FALSE;
  }
wrong_caps:
  {
    GST_WARNING_OBJECT (pool,
        "failed getting geometry from caps %" GST_PTR_FORMAT, caps);
    return FALSE;
  }
unknown_format:
  {
    GST_WARNING_OBJECT (glpool, "failed to get format from caps %"
        GST_PTR_FORMAT, caps);
    GST_ELEMENT_ERROR (glpool, RESOURCE, WRITE,
        ("Failed to create output image buffer of %dx%d pixels",
            priv->info.width, priv->info.height),
        ("Invalid input caps %" GST_PTR_FORMAT, caps));
    return FALSE;
  }
}
static GstFlowReturn
handle_sequence (GstMpeg2dec * mpeg2dec, const mpeg2_info_t * info)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime latency;
  const mpeg2_sequence_t *sequence;
  GstVideoCodecState *state;
  GstVideoInfo *vinfo;
  GstVideoFormat format;

  sequence = info->sequence;

  if (sequence->frame_period == 0)
    goto invalid_frame_period;

  /* mpeg2 video can only be from 16x16 to 4096x4096. Everything
   * else is a corrupted file */
  if (sequence->width > 4096 || sequence->width < 16 ||
      sequence->height > 4096 || sequence->height < 16)
    goto invalid_size;

  GST_DEBUG_OBJECT (mpeg2dec,
      "widthxheight: %dx%d , decoded_widthxheight: %dx%d",
      sequence->picture_width, sequence->picture_height, sequence->width,
      sequence->height);

  gst_video_alignment_reset (&mpeg2dec->valign);

  if (sequence->picture_width < sequence->width ||
      sequence->picture_height < sequence->height) {
    GST_DEBUG_OBJECT (mpeg2dec, "we need to crop");
    mpeg2dec->valign.padding_right = sequence->width - sequence->picture_width;
    mpeg2dec->valign.padding_bottom =
        sequence->height - sequence->picture_height;
    mpeg2dec->need_alignment = TRUE;
  } else if (sequence->picture_width == sequence->width ||
      sequence->picture_height == sequence->height) {
    GST_DEBUG_OBJECT (mpeg2dec, "no cropping needed");
    mpeg2dec->need_alignment = FALSE;
  } else {
    goto invalid_picture;
  }

  /* get subsampling */
  if (sequence->chroma_width < sequence->width) {
    /* horizontally subsampled */
    if (sequence->chroma_height < sequence->height) {
      /* and vertically subsampled */
      format = GST_VIDEO_FORMAT_I420;
    } else {
      format = GST_VIDEO_FORMAT_Y42B;
    }
  } else {
    /* not subsampled */
    format = GST_VIDEO_FORMAT_Y444;
  }

  state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (mpeg2dec),
      format, sequence->picture_width, sequence->picture_height,
      mpeg2dec->input_state);
  vinfo = &state->info;

  /* If we don't have a valid upstream PAR override it */
  if (GST_VIDEO_INFO_PAR_N (vinfo) == 1 &&
      GST_VIDEO_INFO_PAR_D (vinfo) == 1 &&
      sequence->pixel_width != 0 && sequence->pixel_height != 0) {
#if MPEG2_RELEASE >= MPEG2_VERSION(0,5,0)
    guint pixel_width, pixel_height;
    if (mpeg2_guess_aspect (sequence, &pixel_width, &pixel_height)) {
      vinfo->par_n = pixel_width;
      vinfo->par_d = pixel_height;
    }
#else
    vinfo->par_n = sequence->pixel_width;
    vinfo->par_d = sequence->pixel_height;
#endif
    GST_DEBUG_OBJECT (mpeg2dec, "Setting PAR %d x %d",
        vinfo->par_n, vinfo->par_d);
  }
  vinfo->fps_n = 27000000;
  vinfo->fps_d = sequence->frame_period;

  if (!(sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE))
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_MIXED;
  else
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  vinfo->chroma_site = GST_VIDEO_CHROMA_SITE_MPEG2;
  vinfo->colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;

  if (sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION) {
    /* do color description */
    switch (sequence->colour_primaries) {
      case 1:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
        break;
      case 4:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
        break;
      case 5:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
        break;
      case 6:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
        break;
      case 7:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
        break;
    }
    /* matrix coefficients */
    switch (sequence->matrix_coefficients) {
      case 1:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
        break;
      case 4:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_FCC;
        break;
      case 5:
      case 6:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
        break;
      case 7:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_UNKNOWN;
        break;
    }
    /* transfer characteristics */
    switch (sequence->transfer_characteristics) {
      case 1:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 4:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA22;
        break;
      case 5:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA28;
        break;
      case 6:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 7:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_SMPTE240M;
        break;
      case 8:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 9-255 reserved */
      default:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
        break;
    }
  }

  GST_DEBUG_OBJECT (mpeg2dec,
      "sequence flags: %d, frame period: %d, frame rate: %d/%d",
      sequence->flags, sequence->frame_period, vinfo->fps_n, vinfo->fps_d);
  GST_DEBUG_OBJECT (mpeg2dec, "profile: %02x, colour_primaries: %d",
      sequence->profile_level_id, sequence->colour_primaries);
  GST_DEBUG_OBJECT (mpeg2dec, "transfer chars: %d, matrix coef: %d",
      sequence->transfer_characteristics, sequence->matrix_coefficients);
  GST_DEBUG_OBJECT (mpeg2dec,
      "FLAGS: CONSTRAINED_PARAMETERS:%d, PROGRESSIVE_SEQUENCE:%d",
      sequence->flags & SEQ_FLAG_CONSTRAINED_PARAMETERS,
      sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE);
  GST_DEBUG_OBJECT (mpeg2dec, "FLAGS: LOW_DELAY:%d, COLOUR_DESCRIPTION:%d",
      sequence->flags & SEQ_FLAG_LOW_DELAY,
      sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION);

  /* Save the padded video information */
  mpeg2dec->decoded_info = *vinfo;
  gst_video_info_align (&mpeg2dec->decoded_info, &mpeg2dec->valign);

  /* Mpeg2dec has 2 frames of latency to produce a picture and 1 frame of
   * latency in its parser */
  latency = gst_util_uint64_scale (3, vinfo->fps_d, vinfo->fps_n);
  gst_video_decoder_set_latency (GST_VIDEO_DECODER (mpeg2dec), latency,
      latency);

  if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (mpeg2dec)))
    goto negotiation_fail;

  gst_video_codec_state_unref (state);

  mpeg2_custom_fbuf (mpeg2dec->decoder, 1);

  init_dummybuf (mpeg2dec);

  /* Pump in some null buffers, because otherwise libmpeg2 doesn't
   * initialise the discard_fbuf->id */
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  gst_mpeg2dec_clear_buffers (mpeg2dec);

  return ret;

invalid_frame_period:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Frame period is 0!");
    return GST_FLOW_ERROR;
  }
invalid_size:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Invalid frame dimensions: %d x %d",
        sequence->width, sequence->height);
    return GST_FLOW_ERROR;
  }

invalid_picture:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Picture dimension bigger then frame: "
        "%d x %d is bigger then %d x %d", sequence->picture_width,
        sequence->picture_height, sequence->width, sequence->height);
    return GST_FLOW_ERROR;
  }


negotiation_fail:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Failed to negotiate with downstream");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }
}
Example #13
static gboolean
gst_msdkdec_set_src_caps (GstMsdkDec * thiz, gboolean need_allocation)
{
  GstVideoCodecState *output_state;
  GstVideoInfo *vinfo;
  GstVideoAlignment align;
  GstCaps *allocation_caps = NULL;
  GstVideoFormat format;
  guint width, height;
  const gchar *format_str;

  /* use display width and display height in the output state, which
   * will be used for caps negotiation */
  width =
      thiz->param.mfx.FrameInfo.CropW ? thiz->param.mfx.
      FrameInfo.CropW : GST_VIDEO_INFO_WIDTH (&thiz->input_state->info);
  height =
      thiz->param.mfx.FrameInfo.CropH ? thiz->param.mfx.
      FrameInfo.CropH : GST_VIDEO_INFO_HEIGHT (&thiz->input_state->info);

  format =
      gst_msdk_get_video_format_from_mfx_fourcc (thiz->param.mfx.
      FrameInfo.FourCC);

  if (format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_WARNING_OBJECT (thiz, "Failed to find a valid video format");
    return FALSE;
  }

  output_state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (thiz),
      format, width, height, thiz->input_state);
  if (!output_state)
    return FALSE;

  /* Ensure output_state->caps and info have the same width and height;
   * also mandate the 32-bit alignment */
  vinfo = &output_state->info;
  gst_msdk_set_video_alignment (vinfo, &align);
  gst_video_info_align (vinfo, &align);
  output_state->caps = gst_video_info_to_caps (vinfo);
  if (srcpad_can_dmabuf (thiz))
    gst_caps_set_features (output_state->caps, 0,
        gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_DMABUF, NULL));
  thiz->output_info = output_state->info;

  if (need_allocation) {
    /* Find allocation width and height */
    width =
        GST_ROUND_UP_16 (thiz->param.mfx.FrameInfo.Width ? thiz->param.mfx.
        FrameInfo.Width : GST_VIDEO_INFO_WIDTH (&output_state->info));
    height =
        GST_ROUND_UP_32 (thiz->param.mfx.FrameInfo.Height ? thiz->param.mfx.
        FrameInfo.Height : GST_VIDEO_INFO_HEIGHT (&output_state->info));

    /* set allocation width and height in allocation_caps
     * which may or may not be similar to the output_state caps */
    allocation_caps = gst_caps_copy (output_state->caps);
    format_str =
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (&thiz->output_info));
    gst_caps_set_simple (allocation_caps, "width", G_TYPE_INT, width, "height",
        G_TYPE_INT, height, "format", G_TYPE_STRING, format_str, NULL);
    GST_INFO_OBJECT (thiz, "new alloc caps = %" GST_PTR_FORMAT,
        allocation_caps);
    gst_caps_replace (&thiz->allocation_caps, allocation_caps);
  } else {
    /* We keep the allocation parameters as they are to avoid pool
     * renegotiation. For codecs like VP9, a dynamic resolution change
     * doesn't require an allocation reset if the new video frame
     * resolution is lower than the one already configured */
    allocation_caps = gst_caps_copy (thiz->allocation_caps);
  }

  gst_caps_replace (&output_state->allocation_caps, allocation_caps);
  if (allocation_caps)
    gst_caps_unref (allocation_caps);

  gst_video_codec_state_unref (output_state);
  return TRUE;
}
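The allocation path above rounds the coded dimensions up with GST_ROUND_UP_16 and GST_ROUND_UP_32, the standard rounding macros from gstutils.h. A tiny sketch with arbitrary example values:

/* Sketch: the rounding macros used above (defined in gstutils.h). */
static void
rounding_example (void)
{
  guint w = GST_ROUND_UP_16 (1912);     /* -> 1920 */
  guint h = GST_ROUND_UP_32 (1080);     /* -> 1088 */

  (void) w;
  (void) h;
}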
Example #14
static GstBufferPool *
gst_msdkdec_create_buffer_pool (GstMsdkDec * thiz, GstVideoInfo * info,
    guint num_buffers)
{
  GstBufferPool *pool = NULL;
  GstStructure *config;
  GstAllocator *allocator = NULL;
  GstVideoAlignment align;
  GstCaps *caps = NULL;
  GstAllocationParams params = { 0, 31, 0, 0, };
  mfxFrameAllocResponse *alloc_resp = NULL;

  g_return_val_if_fail (info, NULL);
  g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (info)
      && GST_VIDEO_INFO_HEIGHT (info), NULL);

  alloc_resp = &thiz->alloc_resp;

  pool = gst_msdk_buffer_pool_new (thiz->context, alloc_resp);
  if (!pool)
    goto error_no_pool;

  gst_video_alignment_reset (&align);
  if (G_UNLIKELY (!IS_ALIGNED (GST_VIDEO_INFO_WIDTH (info), 16)
          || !IS_ALIGNED (GST_VIDEO_INFO_HEIGHT (info), 32))) {
    gst_msdk_set_video_alignment (info, &align);
    gst_video_info_align (info, &align);
  }

  caps = gst_video_info_to_caps (info);

  /* allocators should use the same width/height/stride/height-alignment
   * as the negotiated output caps, which is what we configure in the msdk
   * allocator */
  if (thiz->use_dmabuf)
    allocator = gst_msdk_dmabuf_allocator_new (thiz->context, info, alloc_resp);
  else if (thiz->use_video_memory)
    allocator = gst_msdk_video_allocator_new (thiz->context, info, alloc_resp);
  else
    allocator = gst_msdk_system_allocator_new (info);

  if (!allocator)
    goto error_no_allocator;

  config = gst_buffer_pool_get_config (GST_BUFFER_POOL_CAST (pool));
  gst_buffer_pool_config_set_params (config, caps,
      GST_VIDEO_INFO_SIZE (info), num_buffers, 0);
  gst_caps_unref (caps);
  gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META);
  gst_buffer_pool_config_add_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);

  if (thiz->use_video_memory) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_MSDK_USE_VIDEO_MEMORY);
    if (thiz->use_dmabuf)
      gst_buffer_pool_config_add_option (config,
          GST_BUFFER_POOL_OPTION_MSDK_USE_DMABUF);
  }

  gst_buffer_pool_config_set_video_alignment (config, &align);
  gst_buffer_pool_config_set_allocator (config, allocator, &params);
  gst_object_unref (allocator);

  if (!gst_buffer_pool_set_config (pool, config))
    goto error_pool_config;

  return pool;

error_no_pool:
  {
    GST_INFO_OBJECT (thiz, "failed to create bufferpool");
    return NULL;
  }
error_no_allocator:
  {
    GST_INFO_OBJECT (thiz, "failed to create allocator");
    gst_caps_unref (caps);
    gst_object_unref (pool);
    return NULL;
  }
error_pool_config:
  {
    GST_INFO_OBJECT (thiz, "failed to set config");
    gst_object_unref (pool);
    /* the allocator reference was already dropped after
     * gst_buffer_pool_config_set_allocator () */
    return NULL;
  }
}