/* Configure the internal view converter with RGBA/GLMemory variants of the
 * negotiated caps: the view_convert component only operates on RGBA GL
 * textures, so both sides are rewritten before being handed over. */
static gboolean
gst_gl_view_convert_element_set_caps (GstGLFilter * filter, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstGLViewConvertElement *viewconvert_filter =
      GST_GL_VIEW_CONVERT_ELEMENT (filter);
  GstCaps *rgba_in, *rgba_out;
  gboolean ret;

  GST_DEBUG_OBJECT (filter, "incaps %" GST_PTR_FORMAT
      " outcaps %" GST_PTR_FORMAT, incaps, outcaps);

  /* The view_convert component needs RGBA caps; work on private copies so
   * the caller's caps stay untouched. */
  rgba_in = gst_caps_copy (incaps);
  gst_caps_set_simple (rgba_in, "format", G_TYPE_STRING, "RGBA", NULL);
  gst_caps_set_features (rgba_in, 0,
      gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));

  rgba_out = gst_caps_copy (outcaps);
  gst_caps_set_simple (rgba_out, "format", G_TYPE_STRING, "RGBA", NULL);
  gst_caps_set_features (rgba_out, 0,
      gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));

  ret = gst_gl_view_convert_set_caps (viewconvert_filter->viewconvert,
      rgba_in, rgba_out);

  gst_caps_unref (rgba_in);
  gst_caps_unref (rgba_out);

  return ret;
}
/**
 * gst_dvbsub_overlay_intersect_by_feature:
 *
 * Creates a new #GstCaps based on the following filtering rule.
 *
 * For each individual caps contained in given caps, if the
 * caps uses the given caps feature, keep a version of the caps
 * with the feature and an another one without. Otherwise, intersect
 * the caps with the given filter.
 *
 * Returns: the new #GstCaps
 */
static GstCaps *
gst_dvbsub_overlay_intersect_by_feature (GstCaps * caps,
    const gchar * feature, GstCaps * filter)
{
  int i, caps_size;
  GstCaps *new_caps;

  new_caps = gst_caps_new_empty ();
  caps_size = gst_caps_get_size (caps);

  for (i = 0; i < caps_size; i++) {
    GstStructure *caps_structure = gst_caps_get_structure (caps, i);
    /* copied so it can be mutated below without touching the input caps */
    GstCapsFeatures *caps_features =
        gst_caps_features_copy (gst_caps_get_features (caps, i));
    GstCaps *filtered_caps;
    GstCaps *simple_caps =
        gst_caps_new_full (gst_structure_copy (caps_structure), NULL);
    /* NOTE: this transfers ownership of caps_features to simple_caps;
     * the remove below therefore edits simple_caps' features in place */
    gst_caps_set_features (simple_caps, 0, caps_features);

    if (gst_caps_features_contains (caps_features, feature)) {
      /* keep a copy WITH the feature ... */
      gst_caps_append (new_caps, gst_caps_copy (simple_caps));

      /* ... and keep simple_caps itself as the version WITHOUT it */
      gst_caps_features_remove (caps_features, feature);
      filtered_caps = gst_caps_ref (simple_caps);
    } else {
      /* no feature: constrain by the supplied filter instead */
      filtered_caps = gst_caps_intersect_full (simple_caps, filter,
          GST_CAPS_INTERSECT_FIRST);
    }

    gst_caps_unref (simple_caps);
    gst_caps_append (new_caps, filtered_caps);
  }

  return new_caps;
}
/* Called after videoaggregator fixates our caps.
 *
 * Chains up to the parent GstAggregator implementation first, then (re)binds
 * the GL context on the view converter and configures it with GLMemory RGBA
 * input caps derived from mix_info and the fixated output caps.
 *
 * Returns: FALSE if the parent's negotiated_src_caps vfunc rejects the caps,
 * TRUE otherwise. */
static gboolean
_negotiated_caps (GstAggregator * agg, GstCaps * caps)
{
  GstVideoAggregator *vagg = GST_VIDEO_AGGREGATOR (agg);
  GstGLStereoMix *mix = GST_GL_STEREO_MIX (vagg);
  GstAggregatorClass *agg_class = GST_AGGREGATOR_CLASS (parent_class);
  GstCaps *in_caps;

  GST_LOG_OBJECT (mix, "Configured output caps %" GST_PTR_FORMAT, caps);

  if (agg_class->negotiated_src_caps)
    if (!agg_class->negotiated_src_caps (agg, caps))
      return FALSE;

  /* Update the glview_convert output */

  /* We can configure the view_converter now */
  gst_gl_view_convert_set_context (mix->viewconvert,
      GST_GL_BASE_MIXER (mix)->context);

  in_caps = gst_video_info_to_caps (&mix->mix_info);
  gst_caps_set_features (in_caps, 0,
      gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));
  gst_caps_set_simple (in_caps, "texture-target", G_TYPE_STRING,
      GST_GL_TEXTURE_TARGET_2D_STR, NULL);

  gst_gl_view_convert_set_caps (mix->viewconvert, in_caps, caps);

  /* FIX: gst_gl_view_convert_set_caps() does not take ownership of the caps
   * (transfer none), so drop our reference to avoid leaking one caps per
   * renegotiation. */
  gst_caps_unref (in_caps);

  return TRUE;
}
/* Decide whether @caps can be negotiated with @tree_bin's output tee.
 * Uses the bin's configured input caps when available, otherwise falls back
 * to the allowed caps of the tee sink pad. */
static gboolean
check_bin (KmsTreeBin * tree_bin, const GstCaps * caps)
{
  gboolean ret = FALSE;
  GstElement *output_tee = kms_tree_bin_get_output_tee (tree_bin);
  GstPad *tee_sink = gst_element_get_static_pad (output_tee, "sink");
  GstCaps *current_caps = kms_tree_bin_get_input_caps (tree_bin);

  if (current_caps != NULL) {
    GST_TRACE_OBJECT (tree_bin, "Current caps are: %" GST_PTR_FORMAT,
        current_caps);
  } else {
    current_caps = gst_pad_get_allowed_caps (tee_sink);
    GST_TRACE_OBJECT (tree_bin, "Allowed caps are: %" GST_PTR_FORMAT,
        current_caps);
  }

  if (current_caps != NULL) {
    GstCaps *stripped;

    //TODO: Remove this when problem in negotiation with features will be
    //resolved
    stripped = gst_caps_make_writable (current_caps);
    gst_caps_set_features (stripped, 0, gst_caps_features_new_empty ());

    ret = gst_caps_can_intersect (caps, stripped);

    gst_caps_unref (stripped);
  }

  g_object_unref (tee_sink);

  return ret;
}
/* Fixate @othercaps for the given negotiation direction and, when the peer
 * supports it, mark the result with the DMABuf caps feature.
 *
 * Returns: (transfer full) the fixated caps; @othercaps is consumed. */
static GstCaps *
gst_msdkvpp_fixate_caps (GstBaseTransform * trans, GstPadDirection direction,
    GstCaps * caps, GstCaps * othercaps)
{
  GstMsdkVPP *thiz = GST_MSDKVPP (trans);
  GstCaps *result = NULL;
  gboolean *use_dmabuf;

  if (direction == GST_PAD_SRC) {
    /* FIX: the original passed the still-NULL `result` to gst_caps_fixate(),
     * which would hit the GST_IS_CAPS assertion / crash on this path.
     * Fixate the candidate sink caps instead.  gst_caps_fixate() takes
     * ownership of its argument, so take an extra ref to keep the
     * unconditional gst_caps_unref (othercaps) below balanced. */
    result = gst_caps_fixate (gst_caps_ref (othercaps));
    use_dmabuf = &thiz->use_sinkpad_dmabuf;
  } else {
    result = gst_msdkvpp_fixate_srccaps (thiz, caps, othercaps);
    use_dmabuf = &thiz->use_srcpad_dmabuf;
  }

  GST_DEBUG_OBJECT (trans, "fixated to %" GST_PTR_FORMAT, result);
  gst_caps_unref (othercaps);

  /* Probe the opposite pad: if its peer accepts DMABuf memory, advertise it */
  if (pad_can_dmabuf (thiz,
          direction == GST_PAD_SRC ? GST_PAD_SINK : GST_PAD_SRC, result)) {
    gst_caps_set_features (result, 0,
        gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_DMABUF, NULL));
    *use_dmabuf = TRUE;
  }

  return result;
}
/* Lazily build the input chain of the splitter: an upload stage (sink caps ->
 * GLMemory), a color-convert stage (GLMemory -> RGBA GLMemory) and the view
 * converter (RGBA -> two separated views).  No-op when the upload stage
 * already exists. */
static void
_init_upload (GstGLStereoSplit * split)
{
  GstGLContext *context = split->context;

  if (!split->upload) {
    GstCaps *in_caps = gst_pad_get_current_caps (GST_PAD (split->sink_pad));
    GstCaps *split_caps = gst_pad_get_current_caps (split->left_pad);
    GstCaps *upload_caps = gst_caps_copy (in_caps);
    /* template features object; each consumer below gets its own copy and
     * the template itself is freed at the end */
    GstCapsFeatures *gl_features =
        gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
    GstCaps *gl_caps;

    split->upload = gst_gl_upload_new (context);

    /* upload_caps = input caps forced into GLMemory */
    gst_caps_set_features (upload_caps, 0,
        gst_caps_features_copy (gl_features));
    gst_gl_upload_set_caps (split->upload, in_caps, upload_caps);
    gst_caps_unref (in_caps);

    /* gl_caps = GLMemory RGBA, the working format of the view converter */
    gl_caps = gst_caps_copy (upload_caps);
    gst_caps_set_simple (gl_caps, "format", G_TYPE_STRING, "RGBA", NULL);
    gst_caps_set_features (gl_caps, 0, gst_caps_features_copy (gl_features));

    if (!split->convert) {
      split->convert = gst_gl_color_convert_new (context);
      gst_gl_color_convert_set_caps (split->convert, upload_caps, gl_caps);
    }

    gst_caps_unref (upload_caps);
    gst_caps_features_free (gl_features);

    gst_gl_view_convert_set_context (split->viewconvert, split->context);

    /* output side: request two separated views from the view converter */
    split_caps = gst_caps_make_writable (split_caps);
    gst_caps_set_simple (split_caps, "multiview-mode", G_TYPE_STRING,
        "separated", "views", G_TYPE_INT, 2, NULL);

    gst_gl_view_convert_set_caps (split->viewconvert, gl_caps, split_caps);

    gst_caps_unref (split_caps);
    gst_caps_unref (gl_caps);
  }
}
/* Build raw caps describing @payload's media, filling in defaults
 * (LIMITED_*) for any unset video dimension/framerate. Returns NULL for an
 * unknown media type. */
GstCaps * _owr_payload_create_raw_caps(OwrPayload *payload)
{
    OwrPayloadPrivate *priv;
    OwrMediaType media_type;
    GstCaps *caps = NULL;
    guint channels = 0;
    guint width = 0, height = 0;
    gdouble framerate = 0.0;
    gint fps_n = 0, fps_d = 1;

    g_return_val_if_fail(payload, NULL);
    priv = payload->priv;

    g_object_get(payload, "media-type", &media_type, NULL);

    switch (media_type) {
    case OWR_MEDIA_TYPE_AUDIO:
        if (OWR_IS_AUDIO_PAYLOAD(payload))
            g_object_get(OWR_AUDIO_PAYLOAD(payload), "channels", &channels, NULL);

        caps = gst_caps_new_simple("audio/x-raw",
            "rate", G_TYPE_INT, priv->clock_rate,
            NULL);
        if (channels > 0)
            gst_caps_set_simple(caps, "channels", G_TYPE_INT, channels, NULL);
        break;

    case OWR_MEDIA_TYPE_VIDEO:
        if (OWR_IS_VIDEO_PAYLOAD(payload)) {
            g_object_get(OWR_VIDEO_PAYLOAD(payload),
                "width", &width,
                "height", &height,
                "framerate", &framerate,
                NULL);
        }

        caps = gst_caps_new_empty_simple(
            _owr_codec_type_to_caps_mime(media_type, priv->codec_type));
#ifdef __APPLE__
        if (priv->codec_type == OWR_CODEC_TYPE_H264)
            gst_caps_set_features(caps, 0, gst_caps_features_new_any());
#endif
        gst_caps_set_simple(caps,
            "width", G_TYPE_INT, width > 0 ? width : LIMITED_WIDTH,
            "height", G_TYPE_INT, height > 0 ? height : LIMITED_HEIGHT,
            NULL);

        if (!(framerate > 0.0))
            framerate = LIMITED_FRAMERATE;
        gst_util_double_to_fraction(framerate, &fps_n, &fps_d);
        gst_caps_set_simple(caps, "framerate", GST_TYPE_FRACTION, fps_n, fps_d, NULL);
        break;

    default:
        g_return_val_if_reached(NULL);
    }

    return caps;
}
/* Return a copy of @caps in which the features of every structure have been
 * replaced by @feature_name. */
GstCaps *
gst_gl_caps_replace_all_caps_features (const GstCaps * caps,
    const gchar * feature_name)
{
  GstCaps *result = gst_caps_copy (caps);
  guint idx, count;

  count = gst_caps_get_size (result);
  for (idx = 0; idx < count; idx++) {
    gst_caps_set_features (result, idx,
        gst_caps_features_from_string (feature_name));
  }

  return result;
}
/* Probe the Vulkan surface and build the raw-video caps it can present:
 * VulkanBuffer memory, the surface's supported formats, and size ranges
 * bounded by the device's maxImageDimension2D.
 *
 * Returns: (transfer full) the probed caps, or NULL with @error set. */
GstCaps *
gst_vulkan_swapper_get_supported_caps (GstVulkanSwapper * swapper,
    GError ** error)
{
  GstStructure *s;
  GstCaps *caps;

  g_return_val_if_fail (GST_IS_VULKAN_SWAPPER (swapper), NULL);

  if (!_vulkan_swapper_retrieve_surface_properties (swapper, error))
    return NULL;

  caps = gst_caps_new_empty_simple ("video/x-raw");
  gst_caps_set_features (caps, 0,
      gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_VULKAN_BUFFER));
  s = gst_caps_get_structure (caps, 0);

  {
    int i;
    GValue list = G_VALUE_INIT;

    g_value_init (&list, GST_TYPE_LIST);

    /* a single VK_FORMAT_UNDEFINED entry means the surface accepts any
     * format; advertise a sane default in that case */
    if (swapper->n_surf_formats
        && swapper->surf_formats[0].format == VK_FORMAT_UNDEFINED) {
      _add_vk_format_to_list (&list, VK_FORMAT_B8G8R8A8_UNORM);
    } else {
      for (i = 0; i < swapper->n_surf_formats; i++) {
        _add_vk_format_to_list (&list, swapper->surf_formats[i].format);
      }
    }

    gst_structure_set_value (s, "format", &list);
    g_value_unset (&list);
  }

  {
    guint32 max_dim = swapper->device->gpu_props.limits.maxImageDimension2D;

    gst_structure_set (s, "width", GST_TYPE_INT_RANGE, 1, (gint) max_dim,
        "height", GST_TYPE_INT_RANGE, 1, (gint) max_dim, "pixel-aspect-ratio",
        GST_TYPE_FRACTION, 1, 1, "framerate", GST_TYPE_FRACTION_RANGE, 0, 1,
        G_MAXINT, 1, NULL);
  }

  GST_INFO_OBJECT (swapper, "Probed the following caps %" GST_PTR_FORMAT, caps);

  return caps;
}
/* Return a copy of @caps whose every structure carries exactly the single
 * caps feature @feature_name. */
static GstCaps *
_set_caps_features (const GstCaps * caps, const gchar * feature_name)
{
  GstCaps *copy = gst_caps_copy (caps);
  guint idx;

  for (idx = 0; idx < gst_caps_get_size (copy); idx++) {
    /* ownership of the new features is transferred to the caps */
    gst_caps_set_features (copy, idx,
        gst_caps_features_new (feature_name, NULL));
  }

  return copy;
}
/* Pick the preferred output format/features from downstream (template caps
 * intersected with the peer's, first match wins), create the VideoToolbox
 * session for it and publish the resulting output state.
 *
 * Returns: FALSE if the decompression session could not be created. */
static gboolean
gst_vtdec_negotiate_output_format (GstVtdec * vtdec,
    GstVideoCodecState * input_state)
{
  GstCaps *caps = NULL, *peercaps = NULL, *templcaps;
  GstVideoFormat output_format;
  GstVideoCodecState *output_state = NULL;
  GstCapsFeatures *features;
  GstStructure *structure;
  const gchar *s;

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);

  /* Check if output supports GL caps by preference */
  templcaps = gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec));
  caps =
      gst_caps_intersect_full (templcaps, peercaps, GST_CAPS_INTERSECT_FIRST);

  gst_caps_unref (peercaps);
  gst_caps_unref (templcaps);

  /* keep only the most-preferred structure, then read its format/features */
  caps = gst_caps_truncate (caps);
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  output_format = gst_video_format_from_string (s);
  /* copied because caps is unreffed right below */
  features = gst_caps_features_copy (gst_caps_get_features (caps, 0));

  gst_caps_unref (caps);

  if (!gst_vtdec_create_session (vtdec, output_format)) {
    gst_caps_features_free (features);
    return FALSE;
  }

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      output_format, vtdec->video_info.width, vtdec->video_info.height,
      input_state);
  output_state->caps = gst_video_info_to_caps (&output_state->info);
  /* ownership of features passes to the caps */
  gst_caps_set_features (output_state->caps, 0, features);

  return TRUE;
}
/* Build template caps for @format and tag them with the caps feature named
 * by @features_string.  Returns NULL when either step fails. */
GstCaps *
gst_vaapi_video_format_new_template_caps_with_features (GstVideoFormat format,
    const gchar * features_string)
{
  GstCapsFeatures *features;
  GstCaps *caps;

  caps = gst_vaapi_video_format_new_template_caps (format);
  if (!caps)
    return NULL;

  features = gst_caps_features_new (features_string, NULL);
  if (!features) {
    gst_caps_unref (caps);
    return NULL;
  }

  gst_caps_set_features (caps, 0, features);
  return caps;
}
/* Return a copy of @caps where every structure's features are replaced by
 * @feature_name, while carrying over any original feature that is listed in
 * @passthrough (SystemMemory and duplicates are never carried over). */
static GstCaps *
_set_caps_features_with_passthrough (const GstCaps * caps,
    const gchar * feature_name, GstCapsFeatures * passthrough)
{
  GstCaps *result = gst_caps_copy (caps);
  guint idx, count;

  count = gst_caps_get_size (caps);
  for (idx = 0; idx < count; idx++) {
    GstCapsFeatures *old_features = gst_caps_get_features (caps, idx);
    GstCapsFeatures *new_features = gst_caps_features_new (feature_name, NULL);
    guint k, old_count;

    old_count = gst_caps_features_get_size (old_features);
    for (k = 0; k < old_count; k++) {
      const gchar *name = gst_caps_features_get_nth (old_features, k);

      /* SystemMemory is never forwarded */
      if (g_strcmp0 (name, GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY) == 0)
        continue;
      /* skip features we already carry */
      if (gst_caps_features_contains (new_features, name))
        continue;
      if (passthrough && gst_caps_features_contains (passthrough, name))
        gst_caps_features_add (new_features, name);
    }

    gst_caps_set_features (result, idx, new_features);
  }

  return result;
}
/* Ask the peer of the pad in @direction whether it can handle @filter with
 * the DMABuf memory feature added.  TRUE only when the peer answered with
 * concrete caps that contain the DMABuf feature. */
static gboolean
pad_can_dmabuf (GstMsdkVPP * thiz, GstPadDirection direction, GstCaps * filter)
{
  GstBaseTransform *trans = GST_BASE_TRANSFORM (thiz);
  GstPad *pad;
  GstCaps *dmabuf_caps;
  GstCaps *peer_caps;
  gboolean supported = FALSE;

  pad = (direction == GST_PAD_SRC) ?
      GST_BASE_TRANSFORM_SRC_PAD (trans) : GST_BASE_TRANSFORM_SINK_PAD (trans);

  /* make a copy of filter caps since we need to alter the structure
   * by adding dmabuf-capsfeatures */
  dmabuf_caps = gst_caps_copy (filter);
  gst_caps_set_features (dmabuf_caps, 0,
      gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_DMABUF));

  peer_caps = gst_pad_peer_query_caps (pad, dmabuf_caps);

  if (peer_caps != NULL
      && !gst_caps_is_any (peer_caps)
      && !gst_caps_is_empty (peer_caps)
      && peer_caps != dmabuf_caps) {
    supported = _gst_caps_has_feature (peer_caps,
        GST_CAPS_FEATURE_MEMORY_DMABUF);
  }

  gst_caps_unref (dmabuf_caps);
  if (peer_caps)
    gst_caps_unref (peer_caps);

  return supported;
}
/* Return the possible output caps based on inputs and downstream prefs.
 * Scans all sink pads (under the object lock) for the largest per-view
 * width/height and the highest framerate, stores the blended RGBA/2-view
 * layout in mix->mix_info, and intersects the converted caps with @caps. */
static GstCaps *
_update_caps (GstVideoAggregator * vagg, GstCaps * caps)
{
  GstGLStereoMix *mix = GST_GL_STEREO_MIX (vagg);
  GList *l;
  gint best_width = -1, best_height = -1;
  gdouble best_fps = -1, cur_fps;
  gint best_fps_n = 0, best_fps_d = 1;
  GstVideoInfo *mix_info;
  GstCaps *blend_caps, *tmp_caps;
  GstCaps *out_caps;

  GST_OBJECT_LOCK (vagg);
  for (l = GST_ELEMENT (vagg)->sinkpads; l; l = l->next) {
    GstVideoAggregatorPad *pad = l->data;
    GstVideoInfo tmp = pad->info;
    gint this_width, this_height;
    gint fps_n, fps_d;

    if (!pad->info.finfo)
      continue;

    /* This can happen if we release a pad and another pad hasn't been
     * negotiated_caps yet */
    if (GST_VIDEO_INFO_FORMAT (&pad->info) == GST_VIDEO_FORMAT_UNKNOWN)
      continue;

    /* Convert to per-view width/height for unpacked forms */
    gst_video_multiview_video_info_change_mode (&tmp,
        GST_VIDEO_MULTIVIEW_MODE_SEPARATED, GST_VIDEO_MULTIVIEW_FLAGS_NONE);

    this_width = GST_VIDEO_INFO_WIDTH (&tmp);
    this_height = GST_VIDEO_INFO_HEIGHT (&tmp);
    fps_n = GST_VIDEO_INFO_FPS_N (&tmp);
    fps_d = GST_VIDEO_INFO_FPS_D (&tmp);

    GST_INFO_OBJECT (vagg, "Input pad %" GST_PTR_FORMAT " w %u h %u", pad,
        this_width, this_height);

    if (this_width == 0 || this_height == 0)
      continue;

    if (best_width < this_width)
      best_width = this_width;
    if (best_height < this_height)
      best_height = this_height;

    if (fps_d == 0)
      cur_fps = 0.0;
    else
      gst_util_fraction_to_double (fps_n, fps_d, &cur_fps);

    if (best_fps < cur_fps) {
      best_fps = cur_fps;
      best_fps_n = fps_n;
      best_fps_d = fps_d;
    }

    /* FIXME: Preserve PAR for at least one input when different sized
     * inputs */
  }
  GST_OBJECT_UNLOCK (vagg);

  /* blend target: RGBA, two separated views, the best size/rate found */
  mix_info = &mix->mix_info;
  gst_video_info_set_format (mix_info, GST_VIDEO_FORMAT_RGBA, best_width,
      best_height);
  GST_VIDEO_INFO_FPS_N (mix_info) = best_fps_n;
  GST_VIDEO_INFO_FPS_D (mix_info) = best_fps_d;
  GST_VIDEO_INFO_MULTIVIEW_MODE (mix_info) = GST_VIDEO_MULTIVIEW_MODE_SEPARATED;
  GST_VIDEO_INFO_VIEWS (mix_info) = 2;

  /* FIXME: If input is marked as flipped or flopped, preserve those flags */
  GST_VIDEO_INFO_MULTIVIEW_FLAGS (mix_info) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;

  /* Choose our output format based on downstream preferences */
  blend_caps = gst_video_info_to_caps (mix_info);
  gst_caps_set_features (blend_caps, 0,
      gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));

  tmp_caps = get_converted_caps (GST_GL_STEREO_MIX (vagg), blend_caps);
  gst_caps_unref (blend_caps);

  out_caps = gst_caps_intersect (caps, tmp_caps);
  gst_caps_unref (tmp_caps);

  GST_DEBUG_OBJECT (vagg, "Possible output caps %" GST_PTR_FORMAT, out_caps);

  return out_caps;
}
/* Negotiate the decoder's output caps with downstream, (re)creating the
 * VideoToolbox session when the caps actually change, and manage the GL
 * texture cache used when downstream accepts GLMemory.
 *
 * Returns: FALSE on session-creation failure, otherwise the result of
 * chaining up to the base class negotiate(). */
static gboolean
gst_vtdec_negotiate (GstVideoDecoder * decoder)
{
  GstVideoCodecState *output_state = NULL;
  GstCaps *peercaps = NULL, *caps = NULL, *templcaps = NULL, *prevcaps = NULL;
  GstVideoFormat format;
  GstStructure *structure;
  const gchar *s;
  GstVtdec *vtdec;
  OSStatus err = noErr;
  GstCapsFeatures *features = NULL;
  /* FIX: was uninitialized; it is read below (texture-cache teardown and the
   * output_textures branch) even on the path where `features` is NULL, which
   * was undefined behavior. */
  gboolean output_textures = FALSE;

  vtdec = GST_VTDEC (decoder);
  if (vtdec->session)
    gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE);

  output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec));
  if (output_state) {
    prevcaps = gst_caps_ref (output_state->caps);
    gst_video_codec_state_unref (output_state);
  }

  peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL);
  if (prevcaps && gst_caps_can_intersect (prevcaps, peercaps)) {
    /* The hardware decoder can become (temporarily) unavailable across
     * VTDecompressionSessionCreate/Destroy calls. So if the currently
     * configured caps are still accepted by downstream we keep them so we
     * don't have to destroy and recreate the session. */
    GST_INFO_OBJECT (vtdec,
        "current and peer caps are compatible, keeping current caps");
    caps = gst_caps_ref (prevcaps);
  } else {
    templcaps =
        gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder));
    caps =
        gst_caps_intersect_full (peercaps, templcaps, GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (templcaps);
  }
  gst_caps_unref (peercaps);

  /* keep only the preferred structure and read its format/features */
  caps = gst_caps_truncate (gst_caps_make_writable (caps));
  structure = gst_caps_get_structure (caps, 0);
  s = gst_structure_get_string (structure, "format");
  format = gst_video_format_from_string (s);
  features = gst_caps_get_features (caps, 0);
  if (features)
    features = gst_caps_features_copy (features);

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec),
      format, vtdec->video_info.width, vtdec->video_info.height,
      vtdec->input_state);
  output_state->caps = gst_video_info_to_caps (&output_state->info);
  if (features) {
    /* ownership of the copied features passes to the caps */
    gst_caps_set_features (output_state->caps, 0, features);
    output_textures =
        gst_caps_features_contains (features,
        GST_CAPS_FEATURE_MEMORY_GL_MEMORY);
    if (output_textures)
      gst_caps_set_simple (output_state->caps, "texture-target", G_TYPE_STRING,
#if !HAVE_IOS
          GST_GL_TEXTURE_TARGET_RECTANGLE_STR,
#else
          GST_GL_TEXTURE_TARGET_2D_STR,
#endif
          NULL);
  }
  gst_caps_unref (caps);

  if (!prevcaps || !gst_caps_is_equal (prevcaps, output_state->caps)) {
    gboolean renegotiating = vtdec->session != NULL;

    GST_INFO_OBJECT (vtdec,
        "negotiated output format %" GST_PTR_FORMAT " previous %"
        GST_PTR_FORMAT, output_state->caps, prevcaps);

    if (vtdec->session)
      gst_vtdec_invalidate_session (vtdec);

    err = gst_vtdec_create_session (vtdec, format, TRUE);
    if (err == noErr) {
      GST_INFO_OBJECT (vtdec, "using hardware decoder");
    } else if (err == kVTVideoDecoderNotAvailableNowErr && renegotiating) {
      /* hw went away mid-stream: retry with the software decoder */
      GST_WARNING_OBJECT (vtdec, "hw decoder not available anymore");
      err = gst_vtdec_create_session (vtdec, format, FALSE);
    }

    if (err != noErr) {
      GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL),
          ("VTDecompressionSessionCreate returned %d", (int) err));
    }
  }

  if (vtdec->texture_cache != NULL && !output_textures) {
    gst_video_texture_cache_free (vtdec->texture_cache);
    vtdec->texture_cache = NULL;
  }

  if (err == noErr && output_textures) {
    /* call this regardless of whether caps have changed or not since a new
     * local context could have become available */
    gst_gl_context_helper_ensure_context (vtdec->ctxh);

    GST_INFO_OBJECT (vtdec, "pushing textures, context %p old context %p",
        vtdec->ctxh->context,
        vtdec->texture_cache ? vtdec->texture_cache->ctx : NULL);

    if (vtdec->texture_cache
        && vtdec->texture_cache->ctx != vtdec->ctxh->context) {
      gst_video_texture_cache_free (vtdec->texture_cache);
      vtdec->texture_cache = NULL;
    }
    if (!vtdec->texture_cache)
      setup_texture_cache (vtdec, vtdec->ctxh->context);
  }

  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (err != noErr)
    return FALSE;

  return GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->negotiate (decoder);
}
/* Recompute and publish the decoder's source-pad caps from the decoded
 * stream info and downstream's preferred caps feature; also sets a distinct
 * allocation-caps when display size differs from coded size, and updates the
 * decoder latency from the framerate. */
static gboolean
gst_vaapidecode_update_src_caps (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstPad *const srcpad = GST_VIDEO_DECODER_SRC_PAD (vdec);
  GstCaps *allowed;
  GstVideoCodecState *state, *ref_state;
  GstVaapiCapsFeature feature;
  GstCapsFeatures *features;
  GstCaps *allocation_caps;
  GstVideoInfo *vi;
  GstVideoFormat format;
  GstClockTime latency;
  gint fps_d, fps_n;
  guint width, height;
  const gchar *format_str, *feature_str;

  if (!decode->input_state)
    return FALSE;

  ref_state = decode->input_state;

  format = GST_VIDEO_INFO_FORMAT (&decode->decoded_info);
  allowed = gst_vaapidecode_get_allowed_srcpad_caps (decode);
  /* may also adjust `format` to what the chosen feature requires */
  feature = gst_vaapi_find_preferred_caps_feature (srcpad, allowed, &format);
  gst_caps_unref (allowed);

  if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED)
    return FALSE;

#if (!USE_GLX && !USE_EGL)
  /* This is a very pathological situation. Should not happen. */
  if (feature == GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META)
    return FALSE;
#endif

  if ((feature == GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY
          || feature == GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE)
      && format != GST_VIDEO_INFO_FORMAT (&decode->decoded_info)) {
    GST_FIXME_OBJECT (decode, "validate if driver can convert from %s to %s",
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT
            (&decode->decoded_info)), gst_video_format_to_string (format));
  }

  /* prefer the display size; fall back to the input state's dimensions */
  width = decode->display_width;
  height = decode->display_height;

  if (!width || !height) {
    width = GST_VIDEO_INFO_WIDTH (&ref_state->info);
    height = GST_VIDEO_INFO_HEIGHT (&ref_state->info);
  }

  state = gst_video_decoder_set_output_state (vdec, format, width, height,
      ref_state);
  if (!state)
    return FALSE;

  if (GST_VIDEO_INFO_WIDTH (&state->info) == 0
      || GST_VIDEO_INFO_HEIGHT (&state->info) == 0) {
    gst_video_codec_state_unref (state);
    return FALSE;
  }

  vi = &state->info;
  state->caps = gst_video_info_to_caps (vi);

  switch (feature) {
    case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META:
    case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE:{
      GstStructure *structure = gst_caps_get_structure (state->caps, 0);

      /* Remove chroma-site and colorimetry from src caps,
       * which is unnecessary on downstream if using VASurface */
      gst_structure_remove_fields (structure, "chroma-site", "colorimetry",
          NULL);

      feature_str = gst_vaapi_caps_feature_to_string (feature);
      features = gst_caps_features_new (feature_str, NULL);
      gst_caps_set_features (state->caps, 0, features);
      break;
    }
    default:
      break;
  }

  /* Allocation query is different from pad's caps */
  allocation_caps = NULL;
  if (GST_VIDEO_INFO_WIDTH (&decode->decoded_info) != width
      || GST_VIDEO_INFO_HEIGHT (&decode->decoded_info) != height) {
    allocation_caps = gst_caps_copy (state->caps);
    format_str = gst_video_format_to_string (format);
    gst_caps_set_simple (allocation_caps,
        "width", G_TYPE_INT, GST_VIDEO_INFO_WIDTH (&decode->decoded_info),
        "height", G_TYPE_INT, GST_VIDEO_INFO_HEIGHT (&decode->decoded_info),
        "format", G_TYPE_STRING, format_str, NULL);
    GST_INFO_OBJECT (decode, "new alloc caps = %" GST_PTR_FORMAT,
        allocation_caps);
  }
  gst_caps_replace (&state->allocation_caps, allocation_caps);
  if (allocation_caps)
    gst_caps_unref (allocation_caps);

  GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps);
  gst_caps_replace (&decode->srcpad_caps, state->caps);
  gst_video_codec_state_unref (state);

  /* NOTE(review): vi points into `state`, which was just unreffed; this
   * relies on the decoder base class keeping its own reference to the output
   * state — confirm before reordering these reads. */
  fps_n = GST_VIDEO_INFO_FPS_N (vi);
  fps_d = GST_VIDEO_INFO_FPS_D (vi);
  if (fps_n <= 0 || fps_d <= 0) {
    GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation");
    fps_n = 25;
    fps_d = 1;
  }

  /* For parsing/preparation purposes we'd need at least 1 frame
   * latency in general, with perfectly known unit boundaries (NALU,
   * AU), and up to 2 frames when we need to wait for the second frame
   * start to determine the first frame is complete */
  latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n);
  gst_video_decoder_set_latency (vdec, latency, latency);

  return TRUE;
}
static gboolean gst_vaapidecode_update_src_caps (GstVaapiDecode * decode) { GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode); GstVideoCodecState *state, *ref_state; GstVideoInfo *vi; GstVideoFormat format = GST_VIDEO_FORMAT_I420; if (!decode->input_state) return FALSE; ref_state = decode->input_state; GstCapsFeatures *features = NULL; GstVaapiCapsFeature feature; feature = gst_vaapi_find_preferred_caps_feature (GST_VIDEO_DECODER_SRC_PAD (vdec), GST_VIDEO_INFO_FORMAT (&ref_state->info), &format); if (feature == GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED) return FALSE; switch (feature) { #if (USE_GLX || USE_EGL) case GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META: features = gst_caps_features_new (GST_CAPS_FEATURE_META_GST_VIDEO_GL_TEXTURE_UPLOAD_META, NULL); break; #endif #if GST_CHECK_VERSION(1,3,1) case GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE: features = gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE, NULL); break; #endif default: break; } state = gst_video_decoder_set_output_state (vdec, format, ref_state->info.width, ref_state->info.height, ref_state); if (!state || state->info.width == 0 || state->info.height == 0) return FALSE; vi = &state->info; state->caps = gst_video_info_to_caps (vi); if (features) gst_caps_set_features (state->caps, 0, features); GST_INFO_OBJECT (decode, "new src caps = %" GST_PTR_FORMAT, state->caps); gst_caps_replace (&decode->srcpad_caps, state->caps); gst_video_codec_state_unref (state); gint fps_n = GST_VIDEO_INFO_FPS_N (vi); gint fps_d = GST_VIDEO_INFO_FPS_D (vi); if (fps_n <= 0 || fps_d <= 0) { GST_DEBUG_OBJECT (decode, "forcing 25/1 framerate for latency calculation"); fps_n = 25; fps_d = 1; } /* For parsing/preparation purposes we'd need at least 1 frame * latency in general, with perfectly known unit boundaries (NALU, * AU), and up to 2 frames when we need to wait for the second frame * start to determine the first frame is complete */ GstClockTime latency = gst_util_uint64_scale (2 * GST_SECOND, fps_d, fps_n); 
gst_video_decoder_set_latency (vdec, latency, latency); return TRUE; }
/* Exercise GL color conversion for every (input, output) pair in @frames:
 * wraps each frame's planes as GL memories, converts, and byte-compares the
 * result against the expected plane data (IGNORE_MAGIC bytes are skipped).
 * The ref_count bookkeeping verifies every wrapped plane is released. */
static void
check_conversion (TestFrame * frames, guint size)
{
  GstGLBaseMemoryAllocator *base_mem_alloc;
  gint i, j, k, l;
  gint ref_count = 0;

  base_mem_alloc =
      GST_GL_BASE_MEMORY_ALLOCATOR (gst_allocator_find
      (GST_GL_MEMORY_ALLOCATOR_NAME));

  for (i = 0; i < size; i++) {
    GstBuffer *inbuf;
    GstVideoInfo in_info;
    gint in_width = frames[i].width;
    gint in_height = frames[i].height;
    GstVideoFormat in_v_format = frames[i].v_format;
    GstVideoFrame in_frame;
    GstCaps *in_caps;

    gst_video_info_set_format (&in_info, in_v_format, in_width, in_height);
    in_caps = gst_video_info_to_caps (&in_info);
    gst_caps_set_features (in_caps, 0,
        gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));

    /* create GL buffer */
    inbuf = gst_buffer_new ();
    for (j = 0; j < GST_VIDEO_INFO_N_PLANES (&in_info); j++) {
      GstVideoGLTextureType tex_type = gst_gl_texture_type_from_format (context,
          GST_VIDEO_INFO_FORMAT (&in_info), j);
      GstGLVideoAllocationParams *params;
      GstGLBaseMemory *mem;

      /* each wrapped plane bumps ref_count; _frame_unref decrements it */
      ref_count++;
      params = gst_gl_video_allocation_params_new_wrapped_data (context, NULL,
          &in_info, j, NULL, GST_GL_TEXTURE_TARGET_2D, tex_type,
          frames[i].data[j], &ref_count, _frame_unref);

      mem = gst_gl_base_memory_alloc (base_mem_alloc,
          (GstGLAllocationParams *) params);
      gst_buffer_append_memory (inbuf, GST_MEMORY_CAST (mem));

      gst_gl_allocation_params_free ((GstGLAllocationParams *) params);
    }

    fail_unless (gst_video_frame_map (&in_frame, &in_info, inbuf,
            GST_MAP_READ));

    /* sanity check that the correct values were wrapped */
    for (j = 0; j < GST_VIDEO_INFO_N_PLANES (&in_info); j++) {
      for (k = 0; k < _video_info_plane_size (&in_info, j); k++) {
        if (frames[i].data[j][k] != IGNORE_MAGIC)
          fail_unless (((gchar *) in_frame.data[j])[k] == frames[i].data[j][k]);
      }
    }

    for (j = 0; j < size; j++) {
      GstBuffer *outbuf;
      GstVideoInfo out_info;
      gint out_width = frames[j].width;
      gint out_height = frames[j].height;
      GstVideoFormat out_v_format = frames[j].v_format;
      gchar *out_data[GST_VIDEO_MAX_PLANES] = { 0 };
      GstVideoFrame out_frame;
      GstCaps *out_caps;

      gst_video_info_set_format (&out_info, out_v_format, out_width,
          out_height);
      out_caps = gst_video_info_to_caps (&out_info);
      gst_caps_set_features (out_caps, 0,
          gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));

      for (k = 0; k < GST_VIDEO_INFO_N_PLANES (&out_info); k++) {
        out_data[k] = frames[j].data[k];
      }

      gst_gl_color_convert_set_caps (convert, in_caps, out_caps);

      /* convert the data */
      outbuf = gst_gl_color_convert_perform (convert, inbuf);
      if (outbuf == NULL) {
        const gchar *in_str = gst_video_format_to_string (in_v_format);
        const gchar *out_str = gst_video_format_to_string (out_v_format);
        GST_WARNING ("failed to convert from %s to %s", in_str, out_str);
      }

      /* NOTE(review): a NULL outbuf is only warned about above and is then
       * passed straight into gst_video_frame_map(); presumably the map (or
       * this fail_unless) is expected to fail the test at that point —
       * confirm that is the intended failure mode. */
      fail_unless (gst_video_frame_map (&out_frame, &out_info, outbuf,
              GST_MAP_READ));

      /* check that the converted values are correct */
      for (k = 0; k < GST_VIDEO_INFO_N_PLANES (&out_info); k++) {
        for (l = 0; l < _video_info_plane_size (&out_info, k); l++) {
          gchar out_pixel = ((gchar *) out_frame.data[k])[l];
          if (out_data[k][l] != IGNORE_MAGIC && out_pixel != IGNORE_MAGIC)
            fail_unless (out_pixel == out_data[k][l]);
          /* FIXME: check alpha clobbering */
        }
      }

      gst_caps_unref (out_caps);
      gst_video_frame_unmap (&out_frame);
      gst_buffer_unref (outbuf);
    }

    gst_caps_unref (in_caps);
    gst_video_frame_unmap (&in_frame);
    gst_buffer_unref (inbuf);

    /* all wrapped planes must have been released by now */
    fail_unless_equals_int (ref_count, 0);
  }

  gst_object_unref (base_mem_alloc);
}
/* Tear down and rebuild the stream window's buffer pool from its current
 * configuration (count/size/usage/format).  On any failure the pool is
 * destroyed and win->pool left NULL; on success win->needs_reconfigure is
 * cleared.  Caller must hold the window lock (per the _locked suffix). */
static void
gst_droidcamsrc_stream_window_reset_buffer_pool_locked (GstDroidCamSrcStreamWindow * win)
{
  GstStructure *config;
  GstCaps *caps;
  GstCapsFeatures *feature;

  GST_DEBUG ("stream window configure buffer pool");

  if (win->pool) {
    /* we will ignore the error here */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (win->pool), FALSE)) {
      GST_WARNING ("Failed to deactivate buffer pool");
    }

    gst_object_unref (win->pool);
  }

  win->pool = gst_droid_cam_src_buffer_pool_new (win->info);

  if (!win->count || !win->width || !win->height || !win->usage || !win->format) {
    GST_ERROR ("incomplete configuration");
    goto clean_and_out;
  }

  config = gst_buffer_pool_get_config (GST_BUFFER_POOL (win->pool));
  if (!config) {
    GST_ERROR ("failed to get buffer pool config");
    goto clean_and_out;
  }

  /* TODO: 30 is hardcoded */
  caps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "ENCODED",
      "width", G_TYPE_INT, win->width, "height", G_TYPE_INT, win->height,
      "framerate", GST_TYPE_FRACTION, 30, 1, NULL);

  /* droid handle memory; ownership of feature passes to the caps */
  feature = gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_DROID_HANDLE, NULL);
  gst_caps_set_features (caps, 0, feature);

  gst_buffer_pool_config_set_params (config, caps, 0, win->count, win->count);
  gst_buffer_pool_config_set_allocator (config, win->allocator, NULL);
  gst_structure_set (config, GST_DROIDCAMSRC_BUFFER_POOL_USAGE_KEY, G_TYPE_INT,
      win->usage, GST_DROIDCAMSRC_BUFFER_POOL_WIDTH_KEY, G_TYPE_INT, win->width,
      GST_DROIDCAMSRC_BUFFER_POOL_HEIGHT_KEY, G_TYPE_INT, win->height,
      GST_DROIDCAMSRC_BUFFER_POOL_FORMAT_KEY, G_TYPE_INT, win->format, NULL);

  gst_caps_unref (caps);
  if (!gst_buffer_pool_set_config (GST_BUFFER_POOL (win->pool), config)) {
    GST_ERROR ("failed to set buffer pool config");
    goto clean_and_out;
  }

  if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (win->pool), TRUE)) {
    GST_ERROR ("failed to activate buffer pool");
    goto clean_and_out;
  }

  win->needs_reconfigure = FALSE;

  return;

clean_and_out:
  if (win->pool) {
    gst_object_unref (win->pool);
    win->pool = NULL;
  }
}
/* Query the peer of @pad and pick the most capable caps feature it accepts,
 * in the order VAAPI surface > GL texture-upload meta > system memory.
 * When @out_format_ptr is given it receives the output format to use (the
 * format advertised in the GL-upload caps when that feature wins, otherwise
 * @format, with ENCODED mapped to I420). */
GstVaapiCapsFeature
gst_vaapi_find_preferred_caps_feature (GstPad * pad, GstVideoFormat format,
    GstVideoFormat * out_format_ptr)
{
  GstVaapiCapsFeature feature = GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY;
  guint i, num_structures;
  GstCaps *caps = NULL;
  GstCaps *gl_texture_upload_caps = NULL;
  GstCaps *sysmem_caps = NULL;
  GstCaps *vaapi_caps = NULL;
  GstCaps *out_caps, *templ;
  GstVideoFormat out_format;

  templ = gst_pad_get_pad_template_caps (pad);
  out_caps = gst_pad_peer_query_caps (pad, templ);
  gst_caps_unref (templ);
  if (!out_caps) {
    feature = GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED;
    goto cleanup;
  }

  out_format = format == GST_VIDEO_FORMAT_ENCODED ?
      GST_VIDEO_FORMAT_I420 : format;

  /* reference caps per candidate feature, used for intersection tests */
  gl_texture_upload_caps = new_gl_texture_upload_meta_caps ();
  if (!gl_texture_upload_caps)
    goto cleanup;

  vaapi_caps =
      gst_vaapi_video_format_new_template_caps_with_features (out_format,
      GST_CAPS_FEATURE_MEMORY_VAAPI_SURFACE);
  if (!vaapi_caps)
    goto cleanup;

  sysmem_caps =
      gst_vaapi_video_format_new_template_caps_with_features (out_format,
      GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY);
  if (!sysmem_caps)
    goto cleanup;

  num_structures = gst_caps_get_size (out_caps);
  for (i = 0; i < num_structures; i++) {
    GstCapsFeatures *const features = gst_caps_get_features (out_caps, i);
    GstStructure *const structure = gst_caps_get_structure (out_caps, i);

#if GST_CHECK_VERSION(1,3,0)
    /* Skip ANY features, we need an exact match for correct evaluation */
    if (gst_caps_features_is_any (features))
      continue;
#endif

    /* test this single structure+features combination in isolation */
    caps = gst_caps_new_full (gst_structure_copy (structure), NULL);
    if (!caps)
      continue;
    gst_caps_set_features (caps, 0, gst_caps_features_copy (features));

    if (gst_caps_can_intersect (caps, vaapi_caps)
        && feature < GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE)
      feature = GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE;
    else if (gst_caps_can_intersect (caps, gl_texture_upload_caps)
        && feature < GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META)
      feature = GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META;
    else if (gst_caps_can_intersect (caps, sysmem_caps)
        && feature < GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)
      feature = GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY;
    gst_caps_replace (&caps, NULL);

#if GST_CHECK_VERSION(1,3,0)
    /* Stop at the first match, the caps should already be sorted out
       by preference order from downstream elements */
    if (feature != GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)
      break;
#endif
  }

  if (out_format_ptr) {
    if (feature == GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META) {
      GstStructure *structure;
      gchar *format_str;

      /* read the concrete format out of the GL-upload intersection */
      out_format = GST_VIDEO_FORMAT_UNKNOWN;
      do {
        caps = gst_caps_intersect_full (out_caps, gl_texture_upload_caps,
            GST_CAPS_INTERSECT_FIRST);
        if (!caps)
          break;
        structure = gst_caps_get_structure (caps, 0);
        if (!structure)
          break;
        if (!gst_structure_get (structure, "format", G_TYPE_STRING,
                &format_str, NULL))
          break;
        out_format = gst_video_format_from_string (format_str);
        g_free (format_str);
      } while (0);
      /* GST_VIDEO_FORMAT_UNKNOWN is 0, so this bails out on failure */
      if (!out_format)
        goto cleanup;
    }
    *out_format_ptr = out_format;
  }

cleanup:
  gst_caps_replace (&gl_texture_upload_caps, NULL);
  gst_caps_replace (&sysmem_caps, NULL);
  gst_caps_replace (&vaapi_caps, NULL);
  gst_caps_replace (&caps, NULL);
  gst_caps_replace (&out_caps, NULL);
  return feature;
}
/* Transform caps across the element: for the SRC direction return the
 * allowed sink caps; for non-fixed sink caps return the full allowed src
 * set; otherwise derive the expected fixed src caps from the fixated sink
 * caps, honouring user-set size/format/framerate and the preferred caps
 * feature of the downstream peer.
 *
 * Returns: (transfer full): the transformed caps, or NULL on failure. */
static GstCaps *
gst_mfxpostproc_transform_caps_impl (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps)
{
  GstMfxPostproc *const vpp = GST_MFXPOSTPROC (trans);
  GstVideoInfo vi, peer_vi;
  GstVideoFormat out_format;
  GstCaps *out_caps, *peer_caps;
  GstMfxCapsFeature feature;
  const gchar *feature_str;
  guint width, height;

  /* Generate the sink pad caps, that could be fixated afterwards */
  if (direction == GST_PAD_SRC) {
    if (!ensure_allowed_sinkpad_caps (vpp))
      return NULL;
    return gst_caps_ref (vpp->allowed_sinkpad_caps);
  }

  /* Generate complete set of src pad caps if non-fixated sink pad
   * caps are provided */
  if (!gst_caps_is_fixed (caps)) {
    if (!ensure_allowed_srcpad_caps (vpp))
      return NULL;
    return gst_caps_ref (vpp->allowed_srcpad_caps);
  }

  /* Generate the expected src pad caps, from the current fixated
   * sink pad caps */
  if (!gst_video_info_from_caps (&vi, caps))
    return NULL;

  if (vpp->deinterlace_mode)
    GST_VIDEO_INFO_INTERLACE_MODE (&vi) = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  /* Update size from user-specified parameters */
  find_best_size (vpp, &vi, &width, &height);

  /* Update format from user-specified parameters */
  peer_caps =
      gst_pad_peer_query_caps (GST_BASE_TRANSFORM_SRC_PAD (trans),
      vpp->allowed_srcpad_caps);

  /* ANY/EMPTY peer caps: nothing useful to derive, hand them back
   * (ownership transferred to the caller). */
  if (gst_caps_is_any (peer_caps) || gst_caps_is_empty (peer_caps))
    return peer_caps;

  /* gst_caps_fixate() takes ownership and returns the fixated caps. */
  if (!gst_caps_is_fixed (peer_caps))
    peer_caps = gst_caps_fixate (peer_caps);

  gst_video_info_from_caps (&peer_vi, peer_caps);
  /* BUGFIX: read the video format from the peer caps. The previous code
   * used GST_VIDEO_INFO_FPS_N here, storing the framerate numerator into
   * a GstVideoFormat variable. */
  out_format = GST_VIDEO_INFO_FORMAT (&peer_vi);

  /* Update width and height from the caps; a 1x1 peer is treated as
   * "no size constraint". */
  if (GST_VIDEO_INFO_HEIGHT (&peer_vi) != 1
      && GST_VIDEO_INFO_WIDTH (&peer_vi) != 1)
    find_best_size (vpp, &peer_vi, &width, &height);

  /* A user-forced output format overrides whatever the peer prefers. */
  if (vpp->format != DEFAULT_FORMAT)
    out_format = vpp->format;

  if (vpp->fps_n) {
    GST_VIDEO_INFO_FPS_N (&vi) = vpp->fps_n;
    GST_VIDEO_INFO_FPS_D (&vi) = vpp->fps_d;
    vpp->field_duration = gst_util_uint64_scale (GST_SECOND,
        vpp->fps_d, vpp->fps_n);
    if (DEFAULT_FRC_ALG == vpp->alg)
      vpp->alg = GST_MFX_FRC_PRESERVE_TIMESTAMP;
  }

  if (peer_caps)
    gst_caps_unref (peer_caps);

  /* May further adjust out_format to match the chosen caps feature. */
  feature =
      gst_mfx_find_preferred_caps_feature (GST_BASE_TRANSFORM_SRC_PAD (trans),
      &out_format);
  gst_video_info_change_format (&vi, out_format, width, height);

  out_caps = gst_video_info_to_caps (&vi);
  if (!out_caps)
    return NULL;

  if (feature) {
    feature_str = gst_mfx_caps_feature_to_string (feature);
    if (feature_str)
      gst_caps_set_features (out_caps, 0,
          gst_caps_features_new (feature_str, NULL));
  }

  if (vpp->format != out_format)
    vpp->format = out_format;

  return out_caps;
}
/* Build the caps set supported by the EGL framebuffer configuration.
 *
 * When an RGBA8888 EGL config is available, the returned caps contain
 * every supported raw video format three times: once tagged with the
 * EGLImage memory feature (skipped on iOS), once with the GL texture
 * upload meta feature, and once as plain system memory.
 *
 * Returns: (transfer full) (nullable): the supported caps, or NULL when
 * no RGBA8888 config exists. */
GstCaps *
gst_egl_adaptation_fill_supported_fbuffer_configs (GstEglAdaptationContext *
    ctx)
{
  /* Formats advertised for the RGBA8888 config, in preference order. */
  static const GstVideoFormat wanted_formats[] = {
    GST_VIDEO_FORMAT_RGBA, GST_VIDEO_FORMAT_BGRA,
    GST_VIDEO_FORMAT_ARGB, GST_VIDEO_FORMAT_ABGR,
    GST_VIDEO_FORMAT_RGBx, GST_VIDEO_FORMAT_BGRx,
    GST_VIDEO_FORMAT_xRGB, GST_VIDEO_FORMAT_xBGR,
    GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_Y444,
    GST_VIDEO_FORMAT_RGB, GST_VIDEO_FORMAT_BGR,
    GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YV12,
    GST_VIDEO_FORMAT_NV12, GST_VIDEO_FORMAT_NV21,
    GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_Y41B,
    GST_VIDEO_FORMAT_RGB16
  };
  GstCaps *caps = NULL, *upload_meta_caps, *sysmem_caps;
  guint idx, count;

  GST_DEBUG_OBJECT (ctx->element,
      "Building initial list of wanted eglattribs per format");

  /* Init supported format/caps list */
  if (!_gst_egl_choose_config (ctx, TRUE, NULL)) {
    GST_INFO_OBJECT (ctx->element,
        "EGL display doesn't support RGBA8888 config");
    return NULL;
  }

  caps = gst_caps_new_empty ();
  for (idx = 0; idx < G_N_ELEMENTS (wanted_formats); idx++)
    gst_caps_append (caps,
        _gst_video_format_new_template_caps (wanted_formats[idx]));

  /* Two extra copies: one gets the texture-upload-meta feature, the
   * other stays as plain system memory. */
  upload_meta_caps = gst_caps_copy (caps);
  sysmem_caps = gst_caps_copy (caps);

#ifndef HAVE_IOS
  /* Tag the primary list with the EGLImage memory feature. */
  count = gst_caps_get_size (caps);
  for (idx = 0; idx < count; idx++) {
    GstCapsFeatures *features =
        gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_EGL_IMAGE, NULL);
    gst_caps_set_features (caps, idx, features);
  }
#endif

  count = gst_caps_get_size (upload_meta_caps);
  for (idx = 0; idx < count; idx++) {
    GstCapsFeatures *features =
        gst_caps_features_new
        (GST_CAPS_FEATURE_META_GST_VIDEO_GL_TEXTURE_UPLOAD_META, NULL);
    gst_caps_set_features (upload_meta_caps, idx, features);
  }

  /* gst_caps_append() takes ownership of the appended caps. */
  gst_caps_append (caps, upload_meta_caps);
  gst_caps_append (caps, sysmem_caps);

  return caps;
}
/* Configure the decoder's source-pad output state from the current MFX
 * frame parameters: sets the output format, display (cropped) size, a
 * 32-bit-aligned video info, and — when @need_allocation is TRUE — fresh
 * allocation caps sized to the full coded surface.
 *
 * Returns: TRUE on success, FALSE when the MFX fourcc is unknown or the
 * output state cannot be created. */
static gboolean
gst_msdkdec_set_src_caps (GstMsdkDec * thiz, gboolean need_allocation)
{
  GstVideoCodecState *output_state;
  GstVideoInfo *vinfo;
  GstVideoAlignment align;
  GstCaps *allocation_caps = NULL;
  GstVideoFormat format;
  guint width, height;
  const gchar *format_str;

  /* use display width and display height in output state which
   * will be using for caps negotiation */
  width =
      thiz->param.mfx.FrameInfo.CropW ? thiz->param.mfx.
      FrameInfo.CropW : GST_VIDEO_INFO_WIDTH (&thiz->input_state->info);
  height =
      thiz->param.mfx.FrameInfo.CropH ? thiz->param.mfx.
      FrameInfo.CropH : GST_VIDEO_INFO_HEIGHT (&thiz->input_state->info);

  format =
      gst_msdk_get_video_format_from_mfx_fourcc (thiz->param.mfx.
      FrameInfo.FourCC);
  if (format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_WARNING_OBJECT (thiz, "Failed to find a valid video format\n");
    return FALSE;
  }

  output_state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (thiz),
      format, width, height, thiz->input_state);
  if (!output_state)
    return FALSE;

  /* Ensure output_state->caps and info has same width and height
   * Also mandate the 32 bit alignment */
  vinfo = &output_state->info;
  gst_msdk_set_video_alignment (vinfo, &align);
  gst_video_info_align (vinfo, &align);
  output_state->caps = gst_video_info_to_caps (vinfo);
  if (srcpad_can_dmabuf (thiz))
    gst_caps_set_features (output_state->caps, 0,
        gst_caps_features_new (GST_CAPS_FEATURE_MEMORY_DMABUF, NULL));
  thiz->output_info = output_state->info;

  if (need_allocation) {
    /* Find allocation width and height */
    width =
        GST_ROUND_UP_16 (thiz->param.mfx.FrameInfo.Width ? thiz->param.mfx.
        FrameInfo.Width : GST_VIDEO_INFO_WIDTH (&output_state->info));
    height =
        GST_ROUND_UP_32 (thiz->param.mfx.FrameInfo.Height ? thiz->param.mfx.
        FrameInfo.Height : GST_VIDEO_INFO_HEIGHT (&output_state->info));

    /* set allocation width and height in allocation_caps
     * which may or may not be similar to the output_state caps */
    allocation_caps = gst_caps_copy (output_state->caps);
    format_str =
        gst_video_format_to_string (GST_VIDEO_INFO_FORMAT
        (&thiz->output_info));
    gst_caps_set_simple (allocation_caps, "width", G_TYPE_INT, width,
        "height", G_TYPE_INT, height, "format", G_TYPE_STRING, format_str,
        NULL);
    GST_INFO_OBJECT (thiz, "new alloc caps = %" GST_PTR_FORMAT,
        allocation_caps);
    /* Keep our own reference to the freshly built allocation caps. */
    gst_caps_replace (&thiz->allocation_caps, allocation_caps);
  } else {
    /* We keep the allocation parameters as it is to avoid pool renegotiation.
     * For codecs like VP9, dynamic resolution change doesn't requires allocation
     * reset if the new video frame resolution is lower than the
     * already configured one */
    /* NOTE(review): assumes thiz->allocation_caps was populated by an
     * earlier need_allocation=TRUE call — gst_caps_copy(NULL) would be
     * invalid. TODO confirm against the callers. */
    allocation_caps = gst_caps_copy (thiz->allocation_caps);
  }
  /* gst_caps_replace() adds its own ref; drop ours afterwards. */
  gst_caps_replace (&output_state->allocation_caps, allocation_caps);
  if (allocation_caps)
    gst_caps_unref (allocation_caps);

  gst_video_codec_state_unref (output_state);
  return TRUE;
}
/**
 * gst_vaapi_find_preferred_caps_feature:
 * @pad: the pad whose downstream peer is queried
 * @allowed_caps: our own supported caps, used to filter the peer caps
 * @out_format_ptr: (inout) (optional): on input the current preferred
 *   format, on output the format chosen from the negotiated caps
 *
 * Queries the downstream peer of @pad and selects the highest-ranked
 * caps feature both sides support, in the order: VA-API surface,
 * DMABuf, GL texture upload meta, system memory.
 *
 * Returns: the preferred #GstVaapiCapsFeature, or
 *   %GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED when negotiation fails.
 */
GstVaapiCapsFeature
gst_vaapi_find_preferred_caps_feature (GstPad * pad, GstCaps * allowed_caps,
    GstVideoFormat * out_format_ptr)
{
  GstVaapiCapsFeature feature = GST_VAAPI_CAPS_FEATURE_NOT_NEGOTIATED;
  guint i, j, num_structures;
  GstCaps *peer_caps, *out_caps = NULL, *caps = NULL;
  /* Candidate features in descending preference order. */
  static const guint feature_list[] = { GST_VAAPI_CAPS_FEATURE_VAAPI_SURFACE,
    GST_VAAPI_CAPS_FEATURE_DMABUF,
    GST_VAAPI_CAPS_FEATURE_GL_TEXTURE_UPLOAD_META,
    GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY,
  };

  /* query with no filter */
  peer_caps = gst_pad_peer_query_caps (pad, NULL);
  if (!peer_caps)
    goto cleanup;
  if (gst_caps_is_empty (peer_caps))
    goto cleanup;

  /* filter against our allowed caps */
  out_caps = gst_caps_intersect_full (allowed_caps, peer_caps,
      GST_CAPS_INTERSECT_FIRST);

  /* default feature */
  feature = GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY;

  /* if downstream requests caps ANY, system memory is preferred */
  if (gst_caps_is_any (peer_caps))
    goto find_format;

  num_structures = gst_caps_get_size (out_caps);
  for (i = 0; i < num_structures; i++) {
    GstCapsFeatures *const features = gst_caps_get_features (out_caps, i);
    GstStructure *const structure = gst_caps_get_structure (out_caps, i);

    /* Skip ANY features, we need an exact match for correct evaluation */
    if (gst_caps_features_is_any (features))
      continue;

    /* Rebuild a single-structure caps carrying this structure's
     * features, so the feature checks below see an exact match. */
    gst_caps_replace (&caps, NULL);
    caps = gst_caps_new_full (gst_structure_copy (structure), NULL);
    if (!caps)
      continue;
    gst_caps_set_features (caps, 0, gst_caps_features_copy (features));

    for (j = 0; j < G_N_ELEMENTS (feature_list); j++) {
      if (gst_vaapi_caps_feature_contains (caps, feature_list[j])
          && feature < feature_list[j]) {
        feature = feature_list[j];
        break;
      }
    }

    /* Stop at the first match, the caps should already be sorted out
       by preference order from downstream elements */
    if (feature != GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)
      break;
  }

  /* No structure survived the loop: negotiation failed. */
  if (!caps)
    goto cleanup;

find_format:
  if (out_format_ptr) {
    GstVideoFormat out_format;
    GstStructure *structure = NULL;
    const GValue *format_list;
    GstCapsFeatures *features;

    /* if the best feature is SystemMemory, we should choose the
     * video/x-raw caps in the filtered peer caps set. If not, use
     * the first caps, which is the preferred by downstream. */
    if (feature == GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY) {
      gst_caps_replace (&caps, out_caps);
      num_structures = gst_caps_get_size (caps);
      for (i = 0; i < num_structures; i++) {
        structure = gst_caps_get_structure (caps, i);
        features = gst_caps_get_features (caps, i);
        if (!gst_caps_features_is_any (features)
            && gst_caps_features_contains (features,
                gst_vaapi_caps_feature_to_string
                (GST_VAAPI_CAPS_FEATURE_SYSTEM_MEMORY)))
          break;
      }
    } else {
      structure = gst_caps_get_structure (caps, 0);
    }
    if (!structure)
      goto cleanup;
    format_list = gst_structure_get_value (structure, "format");
    if (!format_list)
      goto cleanup;
    out_format =
        gst_vaapi_find_preferred_format (format_list, *out_format_ptr);
    if (out_format == GST_VIDEO_FORMAT_UNKNOWN)
      goto cleanup;

    *out_format_ptr = out_format;
  }

cleanup:
  /* Release whichever caps were acquired; replace(&p, NULL) is safe on
   * pointers that stayed NULL. */
  gst_caps_replace (&caps, NULL);
  gst_caps_replace (&out_caps, NULL);
  gst_caps_replace (&peer_caps, NULL);
  return feature;
}
/* Test helper: for every pair (i, j) of frames, wraps frames[i]'s raw
 * planes into GL memory, converts them to frames[j]'s format with the
 * file-scope `convert` object, and verifies the output pixels against
 * frames[j]'s expected data. Bytes equal to IGNORE_MAGIC are skipped on
 * either side. Fails via check's fail_unless on any mismatch. */
static void
check_conversion (TestFrame * frames, guint size)
{
  gint i, j, k, l;
  /* Running count of wrapped planes; _frame_unref decrements it, so it
   * must be back to 0 once all buffers are released. */
  gint ref_count = 0;

  for (i = 0; i < size; i++) {
    GstBuffer *inbuf;
    GstVideoInfo in_info;
    gint in_width = frames[i].width;
    gint in_height = frames[i].height;
    GstVideoFormat in_v_format = frames[i].v_format;
    gchar *in_data[GST_VIDEO_MAX_PLANES] = { 0 };
    GstGLMemory *in_mem[GST_VIDEO_MAX_PLANES] = { 0 };
    GstVideoFrame in_frame;
    GstCaps *in_caps;

    gst_video_info_set_format (&in_info, in_v_format, in_width, in_height);
    in_caps = gst_video_info_to_caps (&in_info);
    gst_caps_set_features (in_caps, 0,
        gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));

    for (j = 0; j < GST_VIDEO_INFO_N_PLANES (&in_info); j++) {
      in_data[j] = frames[i].data[j];
    }

    /* create GL buffer */
    ref_count += GST_VIDEO_INFO_N_PLANES (&in_info);
    inbuf = gst_buffer_new ();
    fail_unless (gst_gl_memory_setup_wrapped (context,
            GST_GL_TEXTURE_TARGET_2D, &in_info, NULL, (gpointer *) in_data,
            in_mem, &ref_count, _frame_unref));

    for (j = 0; j < GST_VIDEO_INFO_N_PLANES (&in_info); j++) {
      gst_buffer_append_memory (inbuf, (GstMemory *) in_mem[j]);
    }

    fail_unless (gst_video_frame_map (&in_frame, &in_info, inbuf,
            GST_MAP_READ));

    /* sanity check that the correct values were wrapped */
    for (j = 0; j < GST_VIDEO_INFO_N_PLANES (&in_info); j++) {
      for (k = 0; k < _video_info_plane_size (&in_info, j); k++) {
        if (in_data[j][k] != IGNORE_MAGIC)
          fail_unless (((gchar *) in_frame.data[j])[k] == in_data[j][k]);
      }
    }

    /* Convert this input to every frame format in the table. */
    for (j = 0; j < size; j++) {
      GstBuffer *outbuf;
      GstVideoInfo out_info;
      gint out_width = frames[j].width;
      gint out_height = frames[j].height;
      GstVideoFormat out_v_format = frames[j].v_format;
      gchar *out_data[GST_VIDEO_MAX_PLANES] = { 0 };
      GstVideoFrame out_frame;
      GstCaps *out_caps;

      gst_video_info_set_format (&out_info, out_v_format, out_width,
          out_height);
      out_caps = gst_video_info_to_caps (&out_info);
      gst_caps_set_features (out_caps, 0,
          gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY));

      for (k = 0; k < GST_VIDEO_INFO_N_PLANES (&out_info); k++) {
        out_data[k] = frames[j].data[k];
      }

      gst_gl_color_convert_set_caps (convert, in_caps, out_caps);

      /* convert the data */
      outbuf = gst_gl_color_convert_perform (convert, inbuf);
      if (outbuf == NULL) {
        const gchar *in_str = gst_video_format_to_string (in_v_format);
        const gchar *out_str = gst_video_format_to_string (out_v_format);
        GST_WARNING ("failed to convert from %s to %s", in_str, out_str);
      }
      /* NOTE(review): on a failed conversion the warning above is
       * followed by mapping a NULL outbuf; the fail_unless here relies
       * on gst_video_frame_map rejecting that. TODO confirm. */

      fail_unless (gst_video_frame_map (&out_frame, &out_info, outbuf,
              GST_MAP_READ));

      /* check that the converted values are correct */
      for (k = 0; k < GST_VIDEO_INFO_N_PLANES (&out_info); k++) {
        for (l = 0; l < _video_info_plane_size (&out_info, k); l++) {
          gchar out_pixel = ((gchar *) out_frame.data[k])[l];
          if (out_data[k][l] != IGNORE_MAGIC && out_pixel != IGNORE_MAGIC)
            fail_unless (out_pixel == out_data[k][l]);
          /* FIXME: check alpha clobbering */
        }
      }

      gst_caps_unref (out_caps);
      gst_video_frame_unmap (&out_frame);
      gst_buffer_unref (outbuf);
    }

    gst_caps_unref (in_caps);
    gst_video_frame_unmap (&in_frame);
    gst_buffer_unref (inbuf);

    /* All wrapped planes must have been released by _frame_unref. */
    fail_unless_equals_int (ref_count, 0);
  }
}
/* Upload a pad's sink buffer to GL memory and convert it to RGBA.
 *
 * Lazily creates the pad's upload and color-convert objects on first
 * use, waits on any GL sync meta attached to the incoming buffer, then
 * uploads and converts it. On success, stores the resulting texture id
 * in frame->texture and returns the converted GL buffer (transfer full);
 * returns NULL on upload/convert/map failure. The @buffer parameter is
 * unused here; the data comes from vaggpad->buffer. */
static GstBuffer *
_default_pad_upload_buffer (GstGLMixer * mix, GstGLMixerFrameData * frame,
    GstBuffer * buffer)
{
  GstVideoAggregatorPad *vaggpad = GST_VIDEO_AGGREGATOR_PAD (frame->pad);
  GstGLMixerPad *pad = frame->pad;
  GstBuffer *uploaded_buf, *gl_buffer;
  GstCaps *gl_caps;
  GstCapsFeatures *gl_features;
  GstVideoInfo gl_info;
  GstVideoFrame gl_frame;
  GstGLSyncMeta *sync_meta;

  /* Target format: RGBA at the pad's own dimensions. */
  gst_video_info_set_format (&gl_info,
      GST_VIDEO_FORMAT_RGBA,
      GST_VIDEO_INFO_WIDTH (&vaggpad->info),
      GST_VIDEO_INFO_HEIGHT (&vaggpad->info));
  gl_features =
      gst_caps_features_from_string (GST_CAPS_FEATURE_MEMORY_GL_MEMORY);

  gl_caps = gst_video_info_to_caps (&gl_info);
  /* Copies of gl_features are handed to the caps; the original is
   * freed below once all consumers have their copy. */
  gst_caps_set_features (gl_caps, 0, gst_caps_features_copy (gl_features));

  /* One-time setup of the upload and convert objects for this pad. */
  if (!pad->upload) {
    GstCaps *in_caps = gst_pad_get_current_caps (GST_PAD (pad));
    GstCaps *upload_caps = gst_caps_copy (in_caps);

    pad->upload = gst_gl_upload_new (mix->context);

    /* Upload keeps the input format but moves it into GL memory. */
    gst_caps_set_features (upload_caps, 0,
        gst_caps_features_copy (gl_features));
    gst_gl_upload_set_caps (pad->upload, in_caps, upload_caps);

    if (!pad->convert) {
      pad->convert = gst_gl_color_convert_new (mix->context);

      gst_gl_color_convert_set_caps (pad->convert, upload_caps, gl_caps);
    }

    gst_caps_unref (upload_caps);
    gst_caps_unref (in_caps);
  }

  gst_caps_features_free (gl_features);
  gst_caps_unref (gl_caps);

  /* Honour any pending GL fence on the incoming buffer before reading. */
  sync_meta = gst_buffer_get_gl_sync_meta (vaggpad->buffer);
  if (sync_meta)
    gst_gl_sync_meta_wait (sync_meta);

  if (gst_gl_upload_perform_with_buffer (pad->upload,
          vaggpad->buffer, &uploaded_buf) != GST_GL_UPLOAD_DONE) {
    return NULL;
  }

  if (!(gl_buffer = gst_gl_color_convert_perform (pad->convert,
              uploaded_buf))) {
    gst_buffer_unref (uploaded_buf);
    return NULL;
  }

  /* Map read-only in GL domain to fetch the texture id of plane 0. */
  if (!gst_video_frame_map (&gl_frame, &gl_info, gl_buffer,
          GST_MAP_READ | GST_MAP_GL)) {
    gst_buffer_unref (uploaded_buf);
    gst_buffer_unref (gl_buffer);
    return NULL;
  }

  frame->texture = *(guint *) gl_frame.data[0];

  gst_buffer_unref (uploaded_buf);
  gst_video_frame_unmap (&gl_frame);

  return gl_buffer;
}