Example #1
static GstCaps *
_set_caps_features_with_passthrough (const GstCaps * caps,
    const gchar * feature_name, GstCapsFeatures * passthrough)
{
  guint i, j, m, n;
  GstCaps *tmp;

  tmp = gst_caps_new_empty ();

  n = gst_caps_get_size (caps);
  for (i = 0; i < n; i++) {
    GstCapsFeatures *features, *orig_features;
    GstStructure *s = gst_caps_get_structure (caps, i);

    orig_features = gst_caps_get_features (caps, i);
    features = gst_caps_features_new (feature_name, NULL);

    if (gst_caps_features_is_any (orig_features)) {
      /* if we have any features, we add both the features with and without @passthrough */
      gst_caps_append_structure_full (tmp, gst_structure_copy (s),
          gst_caps_features_copy (features));

      m = gst_caps_features_get_size (passthrough);
      for (j = 0; j < m; j++) {
        const gchar *feature = gst_caps_features_get_nth (passthrough, j);

        /* if we already have the features */
        if (gst_caps_features_contains (features, feature))
          continue;

        gst_caps_features_add (features, feature);
      }
    } else {
      m = gst_caps_features_get_size (orig_features);
      for (j = 0; j < m; j++) {
        const gchar *feature = gst_caps_features_get_nth (orig_features, j);

        /* if we already have the features */
        if (gst_caps_features_contains (features, feature))
          continue;

        if (g_strcmp0 (feature, GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY) == 0)
          continue;

        if (gst_caps_features_contains (passthrough, feature)) {
          gst_caps_features_add (features, feature);
        }
      }
    }

    gst_caps_append_structure_full (tmp, gst_structure_copy (s), features);
  }

  return tmp;
}
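A minimal usage sketch, not taken from the original source: a transform_caps-style caller could map incoming caps to their GLMemory variants while passing selected meta features through. The wrapper name and the passthrough feature set below are illustrative assumptions.
/* Sketch only: hypothetical caller, assuming the helper above is in scope. */
static GstCaps *
example_transform_caps_to_gl (const GstCaps * caps)
{
  /* assumed passthrough set: keep the overlay-composition meta */
  GstCapsFeatures *passthrough =
      gst_caps_features_from_string ("meta:GstVideoOverlayComposition");
  GstCaps *result;

  result = _set_caps_features_with_passthrough (caps, "memory:GLMemory",
      passthrough);

  gst_caps_features_free (passthrough);
  return result;
}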
/**
 * gst_dvbsub_overlay_intersect_by_feature:
 *
 * Creates a new #GstCaps based on the following filtering rule.
 *
 * For each individual caps contained in the given caps, if the
 * caps use the given caps feature, keep a version of the caps
 * with the feature and another one without. Otherwise, intersect
 * the caps with the given filter.
 *
 * Returns: the new #GstCaps
 */
static GstCaps *
gst_dvbsub_overlay_intersect_by_feature (GstCaps * caps,
    const gchar * feature, GstCaps * filter)
{
  int i, caps_size;
  GstCaps *new_caps;

  new_caps = gst_caps_new_empty ();

  caps_size = gst_caps_get_size (caps);
  for (i = 0; i < caps_size; i++) {
    GstStructure *caps_structure = gst_caps_get_structure (caps, i);
    GstCapsFeatures *caps_features =
        gst_caps_features_copy (gst_caps_get_features (caps, i));
    GstCaps *filtered_caps;
    GstCaps *simple_caps =
        gst_caps_new_full (gst_structure_copy (caps_structure), NULL);
    gst_caps_set_features (simple_caps, 0, caps_features);

    if (gst_caps_features_contains (caps_features, feature)) {
      gst_caps_append (new_caps, gst_caps_copy (simple_caps));

      gst_caps_features_remove (caps_features, feature);
      filtered_caps = gst_caps_ref (simple_caps);
    } else {
      filtered_caps = gst_caps_intersect_full (simple_caps, filter,
          GST_CAPS_INTERSECT_FIRST);
    }

    gst_caps_unref (simple_caps);
    gst_caps_append (new_caps, filtered_caps);
  }

  return new_caps;
}
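As a hedged illustration (the caller name is hypothetical), this helper would typically be fed the overlay-composition caps feature together with a downstream filter:
/* Sketch only: hypothetical call site for the helper above. */
static GstCaps *
example_filter_overlay_caps (GstCaps * upstream_caps, GstCaps * filter)
{
  return gst_dvbsub_overlay_intersect_by_feature (upstream_caps,
      GST_CAPS_FEATURE_META_GST_VIDEO_OVERLAY_COMPOSITION, filter);
}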
/* Checks whether the supplied caps contain VA surfaces */
gboolean
gst_caps_has_vaapi_surface (GstCaps * caps)
{
  gboolean found_caps = FALSE;
  guint i, num_structures;

  g_return_val_if_fail (caps != NULL, FALSE);

  num_structures = gst_caps_get_size (caps);
  if (num_structures < 1)
    return FALSE;

  for (i = 0; i < num_structures && !found_caps; i++) {
    GstCapsFeatures *const features = gst_caps_get_features (caps, i);

#if GST_CHECK_VERSION(1,3,0)
    /* Skip ANY features, we need an exact match for correct evaluation */
    if (gst_caps_features_is_any (features))
      continue;
#endif

    found_caps = gst_caps_features_contains (features,
        GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE);
  }
  return found_caps;
}
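A short usage sketch with an assumed caller name, checking the caps currently set on a pad:
/* Sketch only: query the current pad caps and test for VA surfaces. */
static gboolean
example_pad_has_vaapi_surface (GstPad * pad)
{
  GstCaps *caps = gst_pad_get_current_caps (pad);
  gboolean ret = FALSE;

  if (caps) {
    ret = gst_caps_has_vaapi_surface (caps);
    gst_caps_unref (caps);
  }
  return ret;
}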
Example #4
/***********************************************************
 * Name: init_textures
 *
 * Arguments:
 *       APP_STATE_T *state - holds OGLES model info
 *
 * Description:   Initialise OGL|ES texture surfaces to use image
 *                buffers
 *
 * Returns: void
 *
 ***********************************************************/
static void
init_textures (APP_STATE_T * state, GstBuffer * buffer)
{
    GstCapsFeatures *feature = gst_caps_get_features (state->caps, 0);

    if (gst_caps_features_contains (feature, "memory:EGLImage")) {
        /* nothing special to do */
        g_print ("Prepare texture for EGLImage\n");
        state->can_avoid_upload = FALSE;
        glGenTextures (1, &state->tex);
        glBindTexture (GL_TEXTURE_2D, state->tex);
    } else if (gst_caps_features_contains (feature, "memory:GLMemory")) {
        g_print ("Prepare texture for GLMemory\n");
        state->can_avoid_upload = TRUE;
        state->tex = 0;
    } else if (gst_caps_features_contains (feature,
                                           "meta:GstVideoGLTextureUploadMeta")) {
        GstVideoMeta *meta = NULL;
        g_print ("Prepare texture for GstVideoGLTextureUploadMeta\n");
        meta = gst_buffer_get_video_meta (buffer);
        state->can_avoid_upload = FALSE;
        glGenTextures (1, &state->tex);
        glBindTexture (GL_TEXTURE_2D, state->tex);
        glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA8, meta->width, meta->height, 0,
                      GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    } else {
        g_assert_not_reached ();
    }

#if 0
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
#else
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
#endif

    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    assert (glGetError () == GL_NO_ERROR);
}
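A hedged sketch of a possible caller, not taken from the original example: state->caps would normally be filled from the GstSample that delivers the buffer before init_textures() runs.
/* Sketch only: assumed caller that fills state->caps before init_textures(). */
static void
example_handle_sample (APP_STATE_T * state, GstSample * sample)
{
  GstBuffer *buffer = gst_sample_get_buffer (sample);

  gst_caps_replace (&state->caps, gst_sample_get_caps (sample));
  init_textures (state, buffer);
}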
static GstCaps *
_set_caps_features_with_passthrough (const GstCaps * caps,
    const gchar * feature_name, GstCapsFeatures * passthrough)
{
  guint i, j, m, n;
  GstCaps *tmp;

  tmp = gst_caps_copy (caps);

  n = gst_caps_get_size (caps);
  for (i = 0; i < n; i++) {
    GstCapsFeatures *features, *orig_features;

    orig_features = gst_caps_get_features (caps, i);
    features = gst_caps_features_new (feature_name, NULL);

    m = gst_caps_features_get_size (orig_features);
    for (j = 0; j < m; j++) {
      const gchar *feature = gst_caps_features_get_nth (orig_features, j);

      /* if we already have the features */
      if (gst_caps_features_contains (features, feature))
        continue;

      if (g_strcmp0 (feature, GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY) == 0)
        continue;

      if (passthrough && gst_caps_features_contains (passthrough, feature)) {
        gst_caps_features_add (features, feature);
      }
    }

    gst_caps_set_features (tmp, i, features);
  }

  return tmp;
}
Example #6
static gboolean
_gst_caps_has_feature (const GstCaps * caps, const gchar * feature)
{
  guint i;

  for (i = 0; i < gst_caps_get_size (caps); i++) {
    GstCapsFeatures *const features = gst_caps_get_features (caps, i);
    /* Skip ANY features, we need an exact match for correct evaluation */
    if (gst_caps_features_is_any (features))
      continue;
    if (gst_caps_features_contains (features, feature))
      return TRUE;
  }
  return FALSE;
}
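For example (a sketch with an assumed helper name), the check pairs naturally with a peer caps query when deciding whether a meta such as GstVideoGLTextureUploadMeta can be negotiated:
/* Sketch only: does the peer advertise the GL texture upload meta feature? */
static gboolean
example_peer_supports_gl_upload_meta (GstPad * pad)
{
  GstCaps *peercaps = gst_pad_peer_query_caps (pad, NULL);
  gboolean ret;

  ret = _gst_caps_has_feature (peercaps, "meta:GstVideoGLTextureUploadMeta");
  gst_caps_unref (peercaps);
  return ret;
}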
Example #7
static gboolean
gst_vtdec_negotiate (GstVideoDecoder * decoder)
{
  GstVideoCodecState *output_state = NULL;
  GstCaps *peercaps = NULL, *caps = NULL, *templcaps = NULL, *prevcaps = NULL;
  GstVideoFormat format;
  GstStructure *structure;
  const gchar *s;
  GstVtdec *vtdec;
  OSStatus err = noErr;
  GstCapsFeatures *features = NULL;
  gboolean output_textures = FALSE;     /* stays FALSE when no caps features */

  vtdec = GST_VTDEC (decoder);
  if (vtdec->session)
    gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE);

  output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  if (output_state) {
    prevcaps = gst_caps_ref (output_state->caps);
    gst_video_codec_state_unref (output_state);
  }

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);
  if (prevcaps && gst_caps_can_intersect (prevcaps, peercaps)) {
    /* The hardware decoder can become (temporarily) unavailable across
     * VTDecompressionSessionCreate/Destroy calls. So if the currently configured
     * caps are still accepted by downstream we keep them so we don't have to
     * destroy and recreate the session.
     */
    GST_INFO_OBJECT (vtdec,
        "current and peer caps are compatible, keeping current caps");
    caps = gst_caps_ref (prevcaps);
  } else {
    templcaps =
        gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder));
    caps =
        gst_caps_intersect_full (peercaps, templcaps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (templcaps);
  }
  gst_caps_unref (peercaps);

  caps = gst_caps_truncate (gst_caps_make_writable (caps));
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  format = gst_video_format_from_string (s);
  features = gst_caps_get_features (caps, 0);
  if (features)
    features = gst_caps_features_copy (features);

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      format, vtdec->video_info.width, vtdec->video_info.height,
      vtdec->input_state);
  output_state->caps = gst_video_info_to_caps (&output_state->info);
  if (features) {
    gst_caps_set_features (output_state->caps, 0, features);
    output_textures =
        gst_caps_features_contains (features,
        GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
    if (output_textures)
      gst_caps_set_simple (output_state->caps, "texture-target", G_TYPE_STRING,
#if !HAVE_IOS
          GST_GL_TEXTURE_TARGET_RECTANGLE_STR,
#else
          GST_GL_TEXTURE_TARGET_2D_STR,
#endif
          NULL);
  }
  gst_caps_unref (caps);

  if (!prevcaps || !gst_caps_is_equal (prevcaps, output_state->caps)) {
    gboolean renegotiating = vtdec->session != NULL;

    GST_INFO_OBJECT (vtdec,
        "negotiated output format %" GST_PTR_FORMAT " previous %"
        GST_PTR_FORMAT, output_state->caps, prevcaps);

    if (vtdec->session)
      gst_vtdec_invalidate_session (vtdec);

    err = gst_vtdec_create_session (vtdec, format, TRUE);
    if (err == noErr) {
      GST_INFO_OBJECT (vtdec, "using hardware decoder");
    } else if (err == kVTVideoDecoderNotAvailableNowErr && renegotiating) {
      GST_WARNING_OBJECT (vtdec, "hw decoder not available anymore");
      err = gst_vtdec_create_session (vtdec, format, FALSE);
    }

    if (err != noErr) {
      GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
          ("VTDecompressionSessionCreate returned %d", (int) err));
    }
  }

  if (vtdec->texture_cache != NULL && !output_textures) {
    gst_video_texture_cache_free (vtdec->texture_cache);
    vtdec->texture_cache = NULL;
  }

  if (err == noErr && output_textures) {
    /* call this regardless of whether caps have changed or not since a new
     * local context could have become available
     */
    gst_gl_context_helper_ensure_context (vtdec->ctxh);

    GST_INFO_OBJECT (vtdec, "pushing textures, context %p old context %p",
        vtdec->ctxh->context,
        vtdec->texture_cache ? vtdec->texture_cache->ctx : NULL);

    if (vtdec->texture_cache
        && vtdec->texture_cache->ctx != vtdec->ctxh->context) {
      gst_video_texture_cache_free (vtdec->texture_cache);
      vtdec->texture_cache = NULL;
    }
    if (!vtdec->texture_cache)
      setup_texture_cache (vtdec, vtdec->ctxh->context);
  }

  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (err != noErr)
    return FALSE;

  return GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->negotiate (decoder);
}
/* only negotiate/query video overlay composition support for now */
static gboolean
gst_dvbsub_overlay_negotiate (GstDVBSubOverlay * overlay, GstCaps * caps)
{
  gboolean ret;
  gboolean attach = FALSE;
  gboolean caps_has_meta = TRUE;
  GstCapsFeatures *f;

  GST_DEBUG_OBJECT (overlay, "performing negotiation");

  if (!caps) {
    caps = gst_pad_get_current_caps (overlay->srcpad);
  } else {
    gst_caps_ref (caps);
  }

  if (!caps || gst_caps_is_empty (caps))
    goto no_format;

  /* Try to use the overlay meta if possible */
  f = gst_caps_get_features (caps, 0);

  /* if the caps don't have the overlay meta, we query whether downstream
   * accepts it before trying the version without the meta.
   * If upstream is already using the meta then we can only use it */
  if (!f
      || !gst_caps_features_contains (f,
          GST_CAPS_FEATURE_META_GST_VIDEO_OVERLAY_COMPOSITION)) {
    GstCaps *overlay_caps;
    GstCaps *peercaps;

    /* In this case we added the meta, but we can work without it
     * so preserve the original caps so we can use it as a fallback */
    overlay_caps = gst_caps_copy (caps);

    f = gst_caps_get_features (overlay_caps, 0);
    gst_caps_features_add (f,
        GST_CAPS_FEATURE_META_GST_VIDEO_OVERLAY_COMPOSITION);

    /* FIXME: We should probably check if downstream *prefers* the
     * overlay meta, and only enforce usage of it if we can't handle
     * the format ourselves and thus would have to drop the overlays.
     * Otherwise we should prefer what downstream wants here.
     */
    peercaps = gst_pad_peer_query_caps (overlay->srcpad, NULL);
    caps_has_meta = gst_caps_can_intersect (peercaps, overlay_caps);
    gst_caps_unref (peercaps);

    GST_DEBUG_OBJECT (overlay, "Downstream accepts the overlay meta: %d",
        caps_has_meta);
    if (caps_has_meta) {
      gst_caps_unref (caps);
      caps = overlay_caps;

    } else {
      /* fallback to the original */
      gst_caps_unref (overlay_caps);
      caps_has_meta = FALSE;
    }

  }
  GST_DEBUG_OBJECT (overlay, "Using caps %" GST_PTR_FORMAT, caps);
  ret = gst_pad_set_caps (overlay->srcpad, caps);

  if (ret) {
    GstQuery *query;

    /* find supported meta */
    query = gst_query_new_allocation (caps, FALSE);

    if (!gst_pad_peer_query (overlay->srcpad, query)) {
      /* no problem, we use the query defaults */
      GST_DEBUG_OBJECT (overlay, "ALLOCATION query failed");
    }

    if (caps_has_meta && gst_query_find_allocation_meta (query,
            GST_VIDEO_OVERLAY_COMPOSITION_META_API_TYPE, NULL))
      attach = TRUE;

    overlay->attach_compo_to_buffer = attach;

    gst_query_unref (query);
  }
  gst_caps_unref (caps);

  return ret;

no_format:
  {
    if (caps)
      gst_caps_unref (caps);
    return FALSE;
  }
}
/* Based on gstbasetextoverlay.c */
static gboolean
gst_overlay_composition_negotiate (GstOverlayComposition * self, GstCaps * caps)
{
  gboolean upstream_has_meta = FALSE;
  gboolean caps_has_meta = FALSE;
  gboolean alloc_has_meta = FALSE;
  gboolean attach = FALSE;
  gboolean ret = TRUE;
  guint width, height;
  GstCapsFeatures *f;
  GstCaps *overlay_caps;
  GstQuery *query;
  guint alloc_index;

  GST_DEBUG_OBJECT (self, "performing negotiation");

  /* Clear any pending reconfigure to avoid negotiating twice */
  gst_pad_check_reconfigure (self->srcpad);

  self->window_width = self->window_height = 0;

  if (!caps)
    caps = gst_pad_get_current_caps (self->sinkpad);
  else
    gst_caps_ref (caps);

  if (!caps || gst_caps_is_empty (caps))
    goto no_format;

  /* Check if upstream caps have meta */
  if ((f = gst_caps_get_features (caps, 0))) {
    upstream_has_meta = gst_caps_features_contains (f,
        GST_CAPS_FEATURE_META_GST_VIDEO_OVERLAY_COMPOSITION);
  }

  /* Initialize dimensions */
  width = self->info.width;
  height = self->info.height;

  if (upstream_has_meta) {
    overlay_caps = gst_caps_ref (caps);
  } else {
    GstCaps *peercaps;

    /* BaseTransform requires caps for the allocation query to work */
    overlay_caps = gst_caps_copy (caps);
    f = gst_caps_get_features (overlay_caps, 0);
    gst_caps_features_add (f,
        GST_CAPS_FEATURE_META_GST_VIDEO_OVERLAY_COMPOSITION);

    /* Then check if downstream accept overlay composition in caps */
    /* FIXME: We should probably check if downstream *prefers* the
     * overlay meta, and only enforce usage of it if we can't handle
     * the format ourselves and thus would have to drop the overlays.
     * Otherwise we should prefer what downstream wants here.
     */
    peercaps = gst_pad_peer_query_caps (self->srcpad, overlay_caps);
    caps_has_meta = !gst_caps_is_empty (peercaps);
    gst_caps_unref (peercaps);

    GST_DEBUG_OBJECT (self, "caps have overlay meta %d", caps_has_meta);
  }

  if (upstream_has_meta || caps_has_meta) {
    /* Send caps immediately; GstBaseTransform needs them to get a reply
     * to the allocation query */
    ret = gst_pad_set_caps (self->srcpad, overlay_caps);

    /* First check if the allocation meta has composition */
    query = gst_query_new_allocation (overlay_caps, FALSE);

    if (!gst_pad_peer_query (self->srcpad, query)) {
      /* no problem, we use the query defaults */
      GST_DEBUG_OBJECT (self, "ALLOCATION query failed");

      /* In case we were flushing, mark reconfigure and fail this method,
       * will make it retry */
      if (GST_PAD_IS_FLUSHING (self->srcpad))
        ret = FALSE;
    }

    alloc_has_meta = gst_query_find_allocation_meta (query,
        GST_VIDEO_OVERLAY_COMPOSITION_META_API_TYPE, &alloc_index);

    GST_DEBUG_OBJECT (self, "sink alloc has overlay meta %d", alloc_has_meta);

    if (alloc_has_meta) {
      const GstStructure *params;

      gst_query_parse_nth_allocation_meta (query, alloc_index, &params);
      if (params) {
        if (gst_structure_get (params, "width", G_TYPE_UINT, &width,
                "height", G_TYPE_UINT, &height, NULL)) {
          GST_DEBUG_OBJECT (self, "received window size: %dx%d", width, height);
          g_assert (width != 0 && height != 0);
        }
      }
    }

    gst_query_unref (query);
  }

  /* Update render size if needed */
  self->window_width = width;
  self->window_height = height;

  /* For backward compatibility, we will prefer blitting if the downstream
   * allocation does not support the meta. Otherwise we will prefer
   * attaching, and will fail the negotiation in the unlikely case we are
   * forced to blit but the format isn't supported. */

  if (upstream_has_meta) {
    attach = TRUE;
  } else if (caps_has_meta) {
    if (alloc_has_meta) {
      attach = TRUE;
    } else {
      /* Don't attach unless we cannot handle the format */
      attach = !can_blend_caps (caps);
    }
  } else {
    ret = can_blend_caps (caps);
  }

  /* If we attach, then pick the overlay caps */
  if (attach) {
    GST_DEBUG_OBJECT (self, "Using caps %" GST_PTR_FORMAT, overlay_caps);
    /* Caps where already sent */
  } else if (ret) {
    GST_DEBUG_OBJECT (self, "Using caps %" GST_PTR_FORMAT, caps);
    ret = gst_pad_set_caps (self->srcpad, caps);
  }

  self->attach_compo_to_buffer = attach;

  if (!ret) {
    GST_DEBUG_OBJECT (self, "negotiation failed, schedule reconfigure");
    gst_pad_mark_reconfigure (self->srcpad);
  }

  g_signal_emit (self, overlay_composition_signals[SIGNAL_CAPS_CHANGED], 0,
      caps, self->window_width, self->window_height, NULL);

  gst_caps_unref (overlay_caps);
  gst_caps_unref (caps);

  return ret;

no_format:
  {
    if (caps)
      gst_caps_unref (caps);
    gst_pad_mark_reconfigure (self->srcpad);
    return FALSE;
  }
}
Example #10
static gboolean
gst_vtdec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  gboolean ret;
  GstCaps *caps;
  GstCapsFeatures *features;
  GstVtdec *vtdec = GST_VTDEC (decoder);

  ret =
      GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->decide_allocation
      (decoder, query);
  if (!ret)
    goto out;

  gst_query_parse_allocation (query, &caps, NULL);
  if (caps) {
    GstGLContext *gl_context = NULL;
    features = gst_caps_get_features (caps, 0);

    if (gst_caps_features_contains (features,
            GST_CAPS_FEATURE_MEMORY_GL_MEMORY)) {
      GstContext *context = NULL;
      GstQuery *context_query = gst_query_new_context ("gst.gl.local_context");

      if (gst_pad_peer_query (GST_VIDEO_DECODER_SRC_PAD (decoder),
              context_query)) {
        gst_query_parse_context (context_query, &context);
        if (context) {
          const GstStructure *s = gst_context_get_structure (context);
          gst_structure_get (s, "context", GST_GL_TYPE_CONTEXT, &gl_context,
              NULL);
        }
      }
      gst_query_unref (context_query);

      if (gl_context) {
        GstVideoFormat internal_format;
        GstVideoCodecState *output_state =
            gst_video_decoder_get_output_state (decoder);

        GST_INFO_OBJECT (decoder, "pushing textures. GL context %p", context);
        if (vtdec->texture_cache)
          gst_core_video_texture_cache_free (vtdec->texture_cache);

#ifdef HAVE_IOS
        internal_format = GST_VIDEO_FORMAT_NV12;
#else
        internal_format = GST_VIDEO_FORMAT_UYVY;
#endif
        vtdec->texture_cache = gst_core_video_texture_cache_new (gl_context);
        gst_core_video_texture_cache_set_format (vtdec->texture_cache,
            internal_format, output_state->caps);
        gst_video_codec_state_unref (output_state);
        gst_object_unref (gl_context);
      } else {
        GST_WARNING_OBJECT (decoder,
            "got memory:GLMemory caps but not GL context from downstream element");
      }
    }
  }

out:
  return ret;
}
Example #11
GstVaapiCapsFeature
gst_vaapi_find_preferred_caps_feature (GstPad * pad, GstCaps * allowed_caps,
    GstVideoFormat * out_format_ptr)
{
  GstVaapiCapsFeature feature = GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED;
  guint i, j, num_structures;
  GstCaps *peer_caps, *out_caps = NULL, *caps = NULL;
  static const guint feature_list[] = { GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE,
    GST_VAAPI_CAPS_FEATURE_DMABUF,
    GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META,
    GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY,
  };

  /* query with no filter */
  peer_caps = gst_pad_peer_query_caps (pad, NULL);
  if (!peer_caps)
    goto cleanup;
  if (gst_caps_is_empty (peer_caps))
    goto cleanup;

  /* filter against our allowed caps */
  out_caps = gst_caps_intersect_full (allowed_caps, peer_caps,
      GST_CAPS_INTERSECT_FIRST);

  /* default feature */
  feature = GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY;

  /* if downstream requests caps ANY, system memory is preferred */
  if (gst_caps_is_any (peer_caps))
    goto find_format;

  num_structures = gst_caps_get_size (out_caps);
  for (i = 0; i < num_structures; i++) {
    GstCapsFeatures *const features = gst_caps_get_features (out_caps, i);
    GstStructure *const structure = gst_caps_get_structure (out_caps, i);

    /* Skip ANY features, we need an exact match for correct evaluation */
    if (gst_caps_features_is_any (features))
      continue;

    gst_caps_replace (&caps, NULL);
    caps = gst_caps_new_full (gst_structure_copy (structure), NULL);
    if (!caps)
      continue;
    gst_caps_set_features (caps, 0, gst_caps_features_copy (features));

    for (j = 0; j < G_N_ELEMENTS (feature_list); j++) {
      if (gst_vaapi_caps_feature_contains (caps, feature_list[j])
          && feature < feature_list[j]) {
        feature = feature_list[j];
        break;
      }
    }

    /* Stop at the first match, the caps should already be sorted out
       by preference order from downstream elements */
    if (feature != GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)
      break;
  }

  if (!caps)
    goto cleanup;

find_format:
  if (out_format_ptr) {
    GstVideoFormat out_format;
    GstStructure *structure = NULL;
    const GValue *format_list;
    GstCapsFeatures *features;

    /* if the best feature is SystemMemory, we should choose the
     * video/x-raw caps in the filtered peer caps set. If not, use
     * the first caps, which is the one preferred by downstream. */
    if (feature == GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY) {
      gst_caps_replace (&caps, out_caps);
      num_structures = gst_caps_get_size (caps);
      for (i = 0; i < num_structures; i++) {
        structure = gst_caps_get_structure (caps, i);
        features = gst_caps_get_features (caps, i);
        if (!gst_caps_features_is_any (features)
            && gst_caps_features_contains (features,
                gst_vaapi_caps_feature_to_string
                (GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)))
          break;
      }
    } else {
      structure = gst_caps_get_structure (caps, 0);
    }
    if (!structure)
      goto cleanup;
    format_list = gst_structure_get_value (structure, "format");
    if (!format_list)
      goto cleanup;
    out_format = gst_vaapi_find_preferred_format (format_list, *out_format_ptr);
    if (out_format == GST_VIDEO_FORMAT_UNKNOWN)
      goto cleanup;

    *out_format_ptr = out_format;
  }

cleanup:
  gst_caps_replace (&caps, NULL);
  gst_caps_replace (&out_caps, NULL);
  gst_caps_replace (&peer_caps, NULL);
  return feature;
}
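A hedged usage sketch; the caller name and the default format below are assumptions, not part of the original source:
/* Sketch only: hypothetical caller of the helper above. */
static void
example_pick_preferred_feature (GstPad * srcpad, GstCaps * allowed_caps)
{
  GstVideoFormat format = GST_VIDEO_FORMAT_NV12;        /* assumed default */
  GstVaapiCapsFeature feature;

  feature = gst_vaapi_find_preferred_caps_feature (srcpad, allowed_caps,
      &format);
  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return;

  GST_INFO ("preferred caps feature %d, format %s", feature,
      gst_video_format_to_string (format));
}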