// Extracts video geometry and pixel layout from fixed |caps|.
// Returns false when the caps are not fixed or cannot be parsed; on success
// fills |size|, |format|, the pixel-aspect-ratio fraction and plane-0 |stride|.
bool getVideoSizeAndFormatFromCaps(GstCaps* caps, WebCore::IntSize& size, GstVideoFormat& format, int& pixelAspectRatioNumerator, int& pixelAspectRatioDenominator, int& stride)
{
#ifdef GST_API_VERSION_1
    // GStreamer 1.x: GstVideoInfo yields all fields in one parse.
    GstVideoInfo info;
    if (!gst_caps_is_fixed(caps) || !gst_video_info_from_caps(&info, caps))
        return false;

    format = GST_VIDEO_INFO_FORMAT(&info);
    size.setWidth(GST_VIDEO_INFO_WIDTH(&info));
    size.setHeight(GST_VIDEO_INFO_HEIGHT(&info));
    pixelAspectRatioNumerator = GST_VIDEO_INFO_PAR_N(&info);
    pixelAspectRatioDenominator = GST_VIDEO_INFO_PAR_D(&info);
    stride = GST_VIDEO_INFO_PLANE_STRIDE(&info, 0);
#else
    // GStreamer 0.10: parse the individual caps fields by hand.
    gint width, height;
    if (!GST_IS_CAPS(caps) || !gst_caps_is_fixed(caps)
        || !gst_video_format_parse_caps(caps, &format, &width, &height)
        || !gst_video_parse_caps_pixel_aspect_ratio(caps, &pixelAspectRatioNumerator, &pixelAspectRatioDenominator))
        return false;

    size.setWidth(width);
    size.setHeight(height);
    // 0.10 has no stride helper; assumes packed 4-byte-per-pixel layout.
    stride = size.width() * 4;
#endif
    return true;
}
/* Test pad event handler: on a CAPS event, compares the event caps against
 * the caps previously stashed on the pad under the "caps" key (ignoring any
 * ssrc field).  All other events are simply consumed.  Always returns TRUE
 * and takes ownership of @event. */
static gboolean
event_func (GstPad * pad, GstObject * noparent, GstEvent * event)
{
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_CAPS:
    {
      GstCaps *caps;
      GstCaps **caps2 = g_object_get_data (G_OBJECT (pad), "caps");
      GstCaps *caps_no_ssrc;
      GstCaps *caps2_no_ssrc;

      /* Validate the stashed pointer BEFORE dereferencing it: the original
       * code called remove_ssrc_from_caps (*caps2) first, so a missing
       * "caps" entry crashed the test instead of failing it cleanly. */
      fail_unless (caps2 != NULL && *caps2 != NULL);

      gst_event_parse_caps (event, &caps);
      caps_no_ssrc = remove_ssrc_from_caps (caps);
      caps2_no_ssrc = remove_ssrc_from_caps (*caps2);

      fail_unless (gst_caps_is_fixed (caps));
      fail_unless (gst_caps_is_fixed (*caps2));
      fail_unless (gst_caps_is_equal_fixed (caps_no_ssrc, caps2_no_ssrc));

      gst_caps_unref (caps_no_ssrc);
      gst_caps_unref (caps2_no_ssrc);
      break;
    }
    default:
      break;
  }

  gst_event_unref (event);
  return TRUE;
}
/* Test fixture: creates an audioresample element with fixed caps on both
 * sides.  @channels/@inrate/@outrate/@width parameterize the caps; @fp
 * selects float vs. integer sample caps.  Stores the created check pads in
 * the file-global mysrcpad/mysinkpad (GStreamer 0.10 check API). */
static GstElement *
setup_audioresample (int channels, int inrate, int outrate, int width,
    gboolean fp)
{
  GstElement *audioresample;
  GstCaps *caps;
  GstStructure *structure;

  GST_DEBUG ("setup_audioresample");
  audioresample = gst_check_setup_element ("audioresample");

  /* Input side: fix channels/rate/width on the template caps. */
  if (fp)
    caps = gst_caps_from_string (RESAMPLE_CAPS_FLOAT);
  else
    caps = gst_caps_from_string (RESAMPLE_CAPS_INT);
  structure = gst_caps_get_structure (caps, 0);
  gst_structure_set (structure, "channels", G_TYPE_INT, channels,
      "rate", G_TYPE_INT, inrate, "width", G_TYPE_INT, width, NULL);
  /* integer formats additionally need a depth field to become fixed */
  if (!fp)
    gst_structure_set (structure, "depth", G_TYPE_INT, width, NULL);
  fail_unless (gst_caps_is_fixed (caps));
  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_PAUSED) == GST_STATE_CHANGE_SUCCESS,
      "could not set to paused");

  mysrcpad = gst_check_setup_src_pad (audioresample, &srctemplate, caps);
  gst_pad_set_caps (mysrcpad, caps);
  gst_caps_unref (caps);

  /* Output side: identical caps except for the output rate. */
  if (fp)
    caps = gst_caps_from_string (RESAMPLE_CAPS_FLOAT);
  else
    caps = gst_caps_from_string (RESAMPLE_CAPS_INT);
  structure = gst_caps_get_structure (caps, 0);
  gst_structure_set (structure, "channels", G_TYPE_INT, channels,
      "rate", G_TYPE_INT, outrate, "width", G_TYPE_INT, width, NULL);
  if (!fp)
    gst_structure_set (structure, "depth", G_TYPE_INT, width, NULL);
  fail_unless (gst_caps_is_fixed (caps));

  mysinkpad = gst_check_setup_sink_pad (audioresample, &sinktemplate, caps);
  /* this installs a getcaps func that will always return the caps we set
   * later */
  gst_pad_set_caps (mysinkpad, caps);
  gst_pad_use_fixed_caps (mysinkpad);

  gst_pad_set_active (mysinkpad, TRUE);
  gst_pad_set_active (mysrcpad, TRUE);
  gst_caps_unref (caps);

  return audioresample;
}
// Returns the size of the video IntSize MediaPlayerPrivate::naturalSize() const { if (!hasVideo()) return IntSize(); // TODO: handle possible clean aperture data. See // https://bugzilla.gnome.org/show_bug.cgi?id=596571 // TODO: handle possible transformation matrix. See // https://bugzilla.gnome.org/show_bug.cgi?id=596326 int width = 0, height = 0; if (GstPad* pad = gst_element_get_static_pad(m_videoSink, "sink")) { GstCaps* caps = GST_PAD_CAPS(pad); gfloat pixelAspectRatio; gint pixelAspectRatioNumerator, pixelAspectRatioDenominator; if (!GST_IS_CAPS(caps) || !gst_caps_is_fixed(caps) || !gst_video_format_parse_caps(caps, NULL, &width, &height) || !gst_video_parse_caps_pixel_aspect_ratio(caps, &pixelAspectRatioNumerator, &pixelAspectRatioDenominator)) { gst_object_unref(GST_OBJECT(pad)); return IntSize(); } pixelAspectRatio = (gfloat) pixelAspectRatioNumerator / (gfloat) pixelAspectRatioDenominator; width *= pixelAspectRatio; height /= pixelAspectRatio; gst_object_unref(GST_OBJECT(pad)); } return IntSize(width, height); }
// Converts fixed GStreamer caps into a tcam_video_format description.
// Returns false on null/unfixed caps, a null output pointer, or when the
// width/height/framerate fields are missing from the caps structure.
bool gst_caps_to_tcam_video_format (GstCaps* caps, struct tcam_video_format* format)
{
    if (!caps || !gst_caps_is_fixed(caps) || !format)
    {
        return false;
    }

    *format = {};

    GstStructure* struc = gst_caps_get_structure(caps, 0);

    format->fourcc = tcam_fourcc_from_gst_1_0_caps_string(gst_structure_get_name(struc),
                                                          gst_structure_get_string(struc, "format"));

    // Check the getters: gst_structure_get_int() leaves the output untouched
    // when the field is absent, so the original code read uninitialized
    // stack memory for width/height (undefined behaviour).
    gint tmp_w = 0;
    gint tmp_h = 0;
    if (!gst_structure_get_int(struc, "width", &tmp_w)
        || !gst_structure_get_int(struc, "height", &tmp_h))
    {
        return false;
    }

    format->width = tmp_w < 0 ? 0 : tmp_w;
    format->height = tmp_h < 0 ? 0 : tmp_h;

    int num = 0;
    int den = 0;
    if (!gst_structure_get_fraction(struc, "framerate", &num, &den) || den == 0)
    {
        return false;
    }

    // A caps framerate fraction means frames-per-second = num/den; the
    // original computed den/num, which yields the frame period instead of
    // the rate (and could divide by zero for a missing field).
    format->framerate = (double) num / (double) den;

    return true;
}
/** * gst_pb_utils_get_encoder_description: * @caps: the (fixed) #GstCaps for which an encoder description is needed * * Returns a localised string describing an encoder for the format specified * in @caps, for use in error dialogs or other messages to be seen by the user. * Should never return NULL unless @factory_name or @caps are invalid. * * This function is mainly for internal use, applications would typically * use gst_missing_plugin_message_get_description() to get a description of * a missing feature from a missing-plugin message. * * Returns: a newly-allocated description string, or NULL on error. Free * string with g_free() when not needed any longer. */ gchar * gst_pb_utils_get_encoder_description (const GstCaps * caps) { gchar *str, *ret; GstCaps *tmp; g_return_val_if_fail (caps != NULL, NULL); g_return_val_if_fail (GST_IS_CAPS (caps), NULL); tmp = copy_and_clean_caps (caps); g_return_val_if_fail (gst_caps_is_fixed (tmp), NULL); /* special-case RTP caps */ if (caps_are_rtp_caps (tmp, "video", &str)) { ret = g_strdup_printf (_("%s video RTP payloader"), str); } else if (caps_are_rtp_caps (tmp, "audio", &str)) { ret = g_strdup_printf (_("%s audio RTP payloader"), str); } else if (caps_are_rtp_caps (tmp, "application", &str)) { ret = g_strdup_printf (_("%s RTP payloader"), str); } else { const FormatInfo *info; str = gst_pb_utils_get_codec_description (tmp); info = find_format_info (tmp); if (info != NULL && (info->flags & FLAG_CONTAINER) != 0) { ret = g_strdup_printf (_("%s muxer"), str); } else { ret = g_strdup_printf (_("%s encoder"), str); } } g_free (str); gst_caps_unref (tmp); return ret; }
/**
 * gst_missing_encoder_message_new:
 * @element: the #GstElement posting the message
 * @encode_caps: the (fixed) caps for which an encoder element is needed
 *
 * Builds a missing-plugin element message so the application learns that
 * no encoder element is available for the given fixed @encode_caps.
 * Intended primarily for use from within plugins.
 *
 * Returns: (transfer full): a new #GstMessage, or NULL on error
 */
GstMessage *
gst_missing_encoder_message_new (GstElement * element,
    const GstCaps * encode_caps)
{
  GstStructure *details;
  GstCaps *cleaned_caps;
  gchar *descr;

  /* preconditions: a valid element and fixed, non-trivial caps */
  g_return_val_if_fail (element != NULL, NULL);
  g_return_val_if_fail (GST_IS_ELEMENT (element), NULL);
  g_return_val_if_fail (encode_caps != NULL, NULL);
  g_return_val_if_fail (GST_IS_CAPS (encode_caps), NULL);
  g_return_val_if_fail (!gst_caps_is_any (encode_caps), NULL);
  g_return_val_if_fail (!gst_caps_is_empty (encode_caps), NULL);
  g_return_val_if_fail (gst_caps_is_fixed (encode_caps), NULL);

  descr = gst_pb_utils_get_encoder_description (encode_caps);
  cleaned_caps = copy_and_clean_caps (encode_caps);

  details = gst_structure_new ("missing-plugin",
      "type", G_TYPE_STRING, "encoder",
      "detail", GST_TYPE_CAPS, cleaned_caps,
      "name", G_TYPE_STRING, descr, NULL);

  g_free (descr);
  gst_caps_unref (cleaned_caps);

  /* the message takes ownership of @details */
  return gst_message_new_element (GST_OBJECT_CAST (element), details);
}
/*
 * _gst_caps_set_buffer_array:
 * @caps: (transfer full): a #GstCaps
 * @field: field in caps to set
 * @buf: header buffers
 *
 * Adds given buffers to an array of buffers set as the given @field
 * on the given @caps. List of buffer arguments must be NULL-terminated.
 *
 * Returns: (transfer full): input caps with a streamheader field added, or NULL
 * if some error occurred
 */
static GstCaps *
_gst_caps_set_buffer_array (GstCaps * caps, const gchar * field,
    GstBuffer * buf, ...)
{
  GstStructure *structure = NULL;
  va_list va;
  GValue array = { 0 };
  GValue value = { 0 };

  g_return_val_if_fail (caps != NULL, NULL);
  g_return_val_if_fail (gst_caps_is_fixed (caps), NULL);
  g_return_val_if_fail (field != NULL, NULL);

  /* we modify the caps, so get a writable copy (no-op if refcount is 1) */
  caps = gst_caps_make_writable (caps);
  structure = gst_caps_get_structure (caps, 0);

  g_value_init (&array, GST_TYPE_ARRAY);

  va_start (va, buf);
  /* put buffers in a fixed list */
  while (buf) {
    /* each buffer gets its own GValue; the array copies it on append */
    g_value_init (&value, GST_TYPE_BUFFER);
    gst_value_set_buffer (&value, buf);
    gst_value_array_append_value (&array, &value);
    g_value_unset (&value);
    buf = va_arg (va, GstBuffer *);
  }
  va_end (va);

  /* _take_value transfers ownership of @array to the structure */
  gst_structure_take_value (structure, field, &array);

  return caps;
}
/* Print information regarding a stream: its type nick, a human-readable
 * codec description (or raw caps string when the caps are not fixed),
 * and any attached tags, indented by @depth. */
void
print_stream_info (GstDiscovererStreamInfo *info, gint depth)
{
  const GstTagList *tag_list;
  GstCaps *stream_caps;
  gchar *description = NULL;

  stream_caps = gst_discoverer_stream_info_get_caps (info);
  if (stream_caps) {
    /* fixed caps get a friendly codec name, otherwise dump the caps */
    description = gst_caps_is_fixed (stream_caps)
        ? gst_pb_utils_get_codec_description (stream_caps)
        : gst_caps_to_string (stream_caps);
    gst_caps_unref (stream_caps);
  }

  g_print ("%*s%s: %s\n", 2 * depth, " ",
      gst_discoverer_stream_info_get_stream_type_nick (info),
      (description ? description : ""));
  g_free (description);         /* g_free (NULL) is a no-op */
  description = NULL;

  tag_list = gst_discoverer_stream_info_get_tags (info);
  if (tag_list) {
    g_print ("%*sTags:\n", 2 * (depth + 1), " ");
    gst_tag_list_foreach (tag_list, print_tag_foreach,
        GINT_TO_POINTER (depth + 2));
  }
}
/* Sink setcaps handler for the H.263+ RTP payloader.
 * Picks the RTP encoding-name preferred by the downstream peer (from the
 * intersection of the peer caps with our src pad template), falling back
 * to "H263-1998".  Returns TRUE when the output caps were set. */
static gboolean
gst_rtp_h263p_pay_setcaps (GstRTPBasePayload * payload, GstCaps * caps)
{
  gboolean res;
  GstCaps *peercaps;
  gchar *encoding_name = NULL;

  g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);

  peercaps =
      gst_pad_peer_query_caps (GST_RTP_BASE_PAYLOAD_SRCPAD (payload), NULL);
  if (peercaps) {
    GstCaps *intersect = gst_caps_intersect (peercaps,
        gst_pad_get_pad_template_caps (GST_RTP_BASE_PAYLOAD_SRCPAD (payload)));

    gst_caps_unref (peercaps);
    if (!gst_caps_is_empty (intersect)) {
      GstStructure *s = gst_caps_get_structure (intersect, 0);

      /* may still be NULL if the peer did not constrain encoding-name */
      encoding_name = g_strdup (gst_structure_get_string (s, "encoding-name"));
    }
    gst_caps_unref (intersect);
  }

  if (!encoding_name)
    encoding_name = g_strdup ("H263-1998");

  gst_rtp_base_payload_set_options (payload, "video", TRUE,
      (gchar *) encoding_name, 90000);
  res = gst_rtp_base_payload_set_outcaps (payload, NULL);
  g_free (encoding_name);

  return res;
}
/**
 * gst_pb_utils_get_codec_description:
 * @caps: the (fixed) #GstCaps for which an format description is needed
 *
 * Returns a localised (as far as this is possible) string describing the
 * media format specified in @caps, for use in error dialogs or other messages
 * to be seen by the user. Should never return NULL unless @caps is invalid.
 *
 * Also see the convenience function
 * gst_pb_utils_add_codec_description_to_tag_list().
 *
 * Returns: a newly-allocated description string, or NULL on error. Free
 * string with g_free() when not needed any longer.
 */
gchar *
gst_pb_utils_get_codec_description (const GstCaps * caps)
{
  const FormatInfo *info;
  gchar *str, *comma;
  GstCaps *tmp;

  g_return_val_if_fail (caps != NULL, NULL);
  g_return_val_if_fail (GST_IS_CAPS (caps), NULL);

  tmp = copy_and_clean_caps (caps);
  if (!gst_caps_is_fixed (tmp)) {
    /* the original used g_return_val_if_fail() here, which leaked the
     * copied caps on the failure path; unref before bailing out */
    gst_caps_unref (tmp);
    g_return_val_if_reached (NULL);
  }

  info = find_format_info (tmp);

  if (info) {
    str = format_info_get_desc (info, tmp);
  } else {
    str = gst_caps_to_string (tmp);

    /* cut off everything after the media type, if there is anything */
    if ((comma = strchr (str, ','))) {
      *comma = '\0';
      g_strchomp (str);
      /* we could do something more elaborate here, like taking into account
       * audio/, video/, image/ and application/ prefixes etc. */
    }

    GST_WARNING ("No description available for media type: %s", str);
  }
  gst_caps_unref (tmp);

  return str;
}
/* this tests that the output is a correct discontinuous stream
 * if the input is; ie input drops in time come out the same way */
/* @inrate/@outrate: resample rates; @samples: frames per buffer;
 * @numbuffers: number of buffers pushed.  GStreamer 0.10 check test. */
static void
test_discont_stream_instance (int inrate, int outrate, int samples,
    int numbuffers)
{
  GstElement *audioresample;
  GstBuffer *inbuffer, *outbuffer;
  GstCaps *caps;
  GstClockTime ints;
  int i, j;
  gint16 *p;

  audioresample = setup_audioresample (2, inrate, outrate);
  caps = gst_pad_get_negotiated_caps (mysrcpad);
  fail_unless (gst_caps_is_fixed (caps));

  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");

  for (j = 1; j <= numbuffers; ++j) {
    /* 2 channels x 2 bytes per sample */
    inbuffer = gst_buffer_new_and_alloc (samples * 4);
    GST_BUFFER_DURATION (inbuffer) = samples * GST_SECOND / inrate;
    /* "drop" half the buffers */
    ints = GST_BUFFER_DURATION (inbuffer) * 2 * (j - 1);
    GST_BUFFER_TIMESTAMP (inbuffer) = ints;
    GST_BUFFER_OFFSET (inbuffer) = (j - 1) * 2 * samples;
    GST_BUFFER_OFFSET_END (inbuffer) = j * 2 * samples + samples;
    gst_buffer_set_caps (inbuffer, caps);

    p = (gint16 *) GST_BUFFER_DATA (inbuffer);

    /* create a 16 bit signed ramp */
    for (i = 0; i < samples; ++i) {
      *p = -32767 + i * (65535 / samples);
      ++p;
      *p = -32767 + i * (65535 / samples);
      ++p;
    }

    /* pushing gives away my reference ... */
    fail_unless (gst_pad_push (mysrcpad, inbuffer) == GST_FLOW_OK);

    /* check if the timestamp of the pushed buffer matches the incoming one */
    outbuffer = g_list_nth_data (buffers, g_list_length (buffers) - 1);
    fail_if (outbuffer == NULL);
    fail_unless_equals_uint64 (ints, GST_BUFFER_TIMESTAMP (outbuffer));
    if (j > 1) {
      /* every buffer after the first simulated gap must be flagged DISCONT */
      fail_unless (GST_BUFFER_IS_DISCONT (outbuffer),
          "expected discont buffer");
    }
  }

  /* cleanup */
  gst_caps_unref (caps);
  cleanup_audioresample (audioresample);
}
/* this tests that the output is a perfect stream if the input is */
/* Pushes @numbuffers contiguous ramp buffers of @samples frames through
 * audioresample (stereo S16, @inrate -> @outrate) and verifies the output
 * forms a perfect stream.  GStreamer 1.0 check test. */
static void
test_perfect_stream_instance (int inrate, int outrate, int samples,
    int numbuffers)
{
  GstElement *audioresample;
  GstBuffer *inbuffer, *outbuffer;
  GstCaps *caps;
  guint64 offset = 0;
  int i, j;
  GstMapInfo map;
  gint16 *p;

  /* 0x3 = front-left | front-right channel mask */
  audioresample =
      setup_audioresample (2, 0x3, inrate, outrate, GST_AUDIO_NE (S16));
  caps = gst_pad_get_current_caps (mysrcpad);
  fail_unless (gst_caps_is_fixed (caps));

  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");

  for (j = 1; j <= numbuffers; ++j) {
    /* 2 channels x 2 bytes per sample */
    inbuffer = gst_buffer_new_and_alloc (samples * 4);
    GST_BUFFER_DURATION (inbuffer) = GST_FRAMES_TO_CLOCK_TIME (samples,
        inrate);
    GST_BUFFER_TIMESTAMP (inbuffer) = GST_BUFFER_DURATION (inbuffer) * (j - 1);
    GST_BUFFER_OFFSET (inbuffer) = offset;
    offset += samples;
    GST_BUFFER_OFFSET_END (inbuffer) = offset;

    gst_buffer_map (inbuffer, &map, GST_MAP_WRITE);
    p = (gint16 *) map.data;

    /* create a 16 bit signed ramp */
    for (i = 0; i < samples; ++i) {
      *p = -32767 + i * (65535 / samples);
      ++p;
      *p = -32767 + i * (65535 / samples);
      ++p;
    }
    gst_buffer_unmap (inbuffer, &map);

    /* pushing gives away my reference ... */
    fail_unless (gst_pad_push (mysrcpad, inbuffer) == GST_FLOW_OK);
    /* ... but it ends up being collected on the global buffer list */
    fail_unless_equals_int (g_list_length (buffers), j);
  }

  /* FIXME: we should make audioresample handle eos by flushing out the last
   * samples, which will give us one more, small, buffer */
  fail_if ((outbuffer = (GstBuffer *) buffers->data) == NULL);
  ASSERT_BUFFER_REFCOUNT (outbuffer, "outbuffer", 1);

  fail_unless_perfect_stream ();

  /* cleanup */
  gst_caps_unref (caps);
  cleanup_audioresample (audioresample);
}
/* Test fixture (GStreamer 1.0 variant): creates an audioresample element
 * with fixed caps on both sides.  @channels/@mask/@inrate/@outrate/@format
 * parameterize the caps.  Stores the created check pads in the file-global
 * mysrcpad/mysinkpad. */
static GstElement *
setup_audioresample (int channels, guint64 mask, int inrate, int outrate,
    const gchar * format)
{
  GstElement *audioresample;
  GstCaps *caps;
  GstStructure *structure;

  GST_DEBUG ("setup_audioresample");
  audioresample = gst_check_setup_element ("audioresample");

  /* Input side: fix channels/rate/format/channel-mask. */
  caps = gst_caps_from_string (RESAMPLE_CAPS);
  structure = gst_caps_get_structure (caps, 0);
  gst_structure_set (structure, "channels", G_TYPE_INT, channels,
      "rate", G_TYPE_INT, inrate, "format", G_TYPE_STRING, format,
      "channel-mask", GST_TYPE_BITMASK, mask, NULL);
  fail_unless (gst_caps_is_fixed (caps));
  fail_unless (gst_element_set_state (audioresample,
          GST_STATE_PAUSED) == GST_STATE_CHANGE_SUCCESS,
      "could not set to paused");

  mysrcpad = gst_check_setup_src_pad (audioresample, &srctemplate);
  gst_pad_set_active (mysrcpad, TRUE);
  gst_pad_set_caps (mysrcpad, caps);
  gst_caps_unref (caps);

  /* Output side: same caps but with the output rate. */
  caps = gst_caps_from_string (RESAMPLE_CAPS);
  structure = gst_caps_get_structure (caps, 0);
  gst_structure_set (structure, "channels", G_TYPE_INT, channels,
      "rate", G_TYPE_INT, outrate, "format", G_TYPE_STRING, format, NULL);
  fail_unless (gst_caps_is_fixed (caps));

  mysinkpad = gst_check_setup_sink_pad (audioresample, &sinktemplate);
  gst_pad_set_active (mysinkpad, TRUE);
  /* this installs a getcaps func that will always return the caps we set
   * later */
  gst_pad_set_caps (mysinkpad, caps);
  gst_pad_use_fixed_caps (mysinkpad);
  gst_caps_unref (caps);

  return audioresample;
}
/* Returns TRUE when @caps describe an interlaced raw video format.
 * The caps must be fixed and parsable into a GstVideoInfo; both
 * preconditions are asserted through the check framework. */
static gboolean
gst_caps_is_interlaced (GstCaps * caps)
{
  GstVideoInfo vinfo;

  fail_unless (gst_caps_is_fixed (caps));
  fail_unless (gst_video_info_from_caps (&vinfo, caps));

  return GST_VIDEO_INFO_IS_INTERLACED (&vinfo);
}
/**
 * gst_type_find_suggest:
 * @find: The #GstTypeFind object the function was called with
 * @probability: The probability in percent that the suggestion is right
 * @caps: The fixed #GstCaps to suggest
 *
 * If a #GstTypeFindFunction calls this function it suggests the caps with the
 * given probability. A #GstTypeFindFunction may supply different suggestions
 * in one call.
 * It is up to the caller of the #GstTypeFindFunction to interpret these values.
 */
void
gst_type_find_suggest (GstTypeFind * find, guint probability, GstCaps * caps)
{
  g_return_if_fail (find->suggest != NULL);
  g_return_if_fail (probability <= 100);
  g_return_if_fail (caps != NULL);
  g_return_if_fail (gst_caps_is_fixed (caps));

  /* forward to the typefind implementation's suggest callback */
  find->suggest (find->data, probability, caps);
}
/* Sink setcaps handler for the two-input OMX filter.
 * Determines which sink pad is being configured, records the negotiated
 * width/height/stride for that input, and tracks the lowest input
 * framerate as the output framerate.  Returns TRUE on success. */
static gboolean
sink_setcaps (GstPad *pad, GstCaps *caps)
{
  GstStructure *structure;
  GstOmxBaseFilter21 *self;
  GOmxCore *gomx;
  GstVideoFormat format;
  int sink_number;

  self = GST_OMX_BASE_FILTER21 (GST_PAD_PARENT (pad));

  /* Map the pad name to its input index; reject unknown pads.  The
   * original left sink_number uninitialized when the name matched
   * neither "sink_00" nor "sink_01" and then used it as an array
   * index, which is undefined behaviour. */
  if (strcmp (GST_PAD_NAME (pad), "sink_00") == 0) {
    sink_number = 0;
  }
  else if (strcmp (GST_PAD_NAME (pad), "sink_01") == 0) {
    sink_number = 1;
  }
  else {
    GST_WARNING_OBJECT (self, "setcaps on unexpected pad %s",
        GST_PAD_NAME (pad));
    return FALSE;
  }

  gomx = (GOmxCore *) self->gomx;

  GST_INFO_OBJECT (self, "setcaps (sink): %d", sink_number);
  GST_INFO_OBJECT (self, "setcaps (sink): %" GST_PTR_FORMAT, caps);

  g_return_val_if_fail (caps, FALSE);
  g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);

  structure = gst_caps_get_structure (caps, 0);

  g_return_val_if_fail (structure, FALSE);

  if (!gst_video_format_parse_caps_strided (caps, &format,
          &self->in_width[sink_number], &self->in_height[sink_number],
          &self->in_stride[sink_number])) {
    GST_WARNING_OBJECT (self, "width and/or height is not set in caps");
    return FALSE;
  }

  if (!self->in_stride[sink_number]) {
    /* caps carried no stride; derive one from the width and format */
    self->in_stride[sink_number] =
        gstomx_calculate_stride (self->in_width[sink_number], format);
  }

  {
    /* Output framerate correspond to the minimum input framerate */
    const GValue *sink_framerate = NULL;

    sink_framerate = gst_structure_get_value (structure, "framerate");
    if (GST_VALUE_HOLDS_FRACTION (sink_framerate)) {
      if (self->out_framerate == NULL
          || gst_value_compare (sink_framerate,
              self->out_framerate) == GST_VALUE_LESS_THAN) {
        /* NOTE(review): this stores a pointer into @caps' structure
         * without copying the GValue; it dangles if the caps are
         * freed before out_framerate is next used — verify. */
        self->out_framerate = sink_framerate;
        self->duration = gst_util_uint64_scale_int (GST_SECOND,
            gst_value_get_fraction_denominator (sink_framerate),
            gst_value_get_fraction_numerator (sink_framerate));
      }
    }
  }

  return gst_pad_set_caps (pad, caps);
}
/* Maps fixed video/x-raw-rgb caps onto a VdpRGBAFormat by comparing the
 * bpp/depth/endianness/channel-mask fields against the static rgba_formats
 * table.  Returns TRUE and sets @rgba_format on a match, FALSE otherwise. */
gboolean
gst_vdp_caps_to_rgba_format (GstCaps * caps, VdpRGBAFormat * rgba_format)
{
  GstStructure *structure;
  gint c_bpp, c_depth, c_endianness, c_red_mask, c_green_mask, c_blue_mask,
      c_alpha_mask;
  gint i;

  g_return_val_if_fail (GST_IS_CAPS (caps), FALSE);

  if (!gst_caps_is_fixed (caps))
    return FALSE;

  structure = gst_caps_get_structure (caps, 0);
  if (!gst_structure_has_name (structure, "video/x-raw-rgb"))
    return FALSE;

  /* every field must be present for a meaningful comparison */
  if (!gst_structure_get_int (structure, "bpp", &c_bpp) ||
      !gst_structure_get_int (structure, "depth", &c_depth) ||
      !gst_structure_get_int (structure, "endianness", &c_endianness) ||
      !gst_structure_get_int (structure, "red_mask", &c_red_mask) ||
      !gst_structure_get_int (structure, "green_mask", &c_green_mask) ||
      !gst_structure_get_int (structure, "blue_mask", &c_blue_mask) ||
      !gst_structure_get_int (structure, "alpha_mask", &c_alpha_mask))
    return FALSE;

  /* linear search through the known RGBA layouts */
  for (i = 0; i < G_N_ELEMENTS (rgba_formats); i++) {
    gint bpp, depth, endianness, red_mask, green_mask, blue_mask, alpha_mask;
    GstCaps *rgb_caps = gst_static_caps_get (&rgba_formats[i].caps);

    structure = gst_caps_get_structure (rgb_caps, 0);
    gst_structure_get_int (structure, "bpp", &bpp);
    gst_structure_get_int (structure, "depth", &depth);
    gst_structure_get_int (structure, "endianness", &endianness);
    gst_structure_get_int (structure, "red_mask", &red_mask);
    gst_structure_get_int (structure, "green_mask", &green_mask);
    gst_structure_get_int (structure, "blue_mask", &blue_mask);
    gst_structure_get_int (structure, "alpha_mask", &alpha_mask);

    if (c_bpp == bpp && c_depth == depth && c_endianness == endianness &&
        c_red_mask == red_mask && c_green_mask == green_mask &&
        c_blue_mask == blue_mask && c_alpha_mask == alpha_mask) {
      gst_caps_unref (rgb_caps);
      *rgba_format = rgba_formats[i].format;
      return TRUE;
    }
    gst_caps_unref (rgb_caps);
  }

  return FALSE;
}
/* Accept-caps handler for alsasink: checks @caps against the pad caps and,
 * for fixed caps, verifies they parse into a ring-buffer spec.  Compressed
 * formats (AC3/EAC3/DTS/MPEG) must additionally be framed or parsed and
 * payloadable as IEC 61937. */
static gboolean
gst_alsasink_acceptcaps (GstPad * pad, GstCaps * caps)
{
  GstAlsaSink *alsa = GST_ALSA_SINK (gst_pad_get_parent_element (pad));
  GstCaps *pad_caps;
  GstStructure *st;
  gboolean ret = FALSE;
  GstRingBufferSpec spec = { 0 };

  pad_caps = gst_pad_get_caps_reffed (pad);
  if (pad_caps) {
    ret = gst_caps_can_intersect (pad_caps, caps);
    gst_caps_unref (pad_caps);
    if (!ret)
      goto done;
  }

  /* If we've not got fixed caps, creating a stream might fail, so let's just
   * return from here with default acceptcaps behaviour */
  if (!gst_caps_is_fixed (caps))
    goto done;

  /* parse helper expects this set, so avoid nasty warning
   * will be set properly later on anyway */
  spec.latency_time = GST_SECOND;
  if (!gst_ring_buffer_parse_caps (&spec, caps))
    goto done;

  /* Make sure input is framed (one frame per buffer) and can be payloaded */
  switch (spec.type) {
    case GST_BUFTYPE_AC3:
    case GST_BUFTYPE_EAC3:
    case GST_BUFTYPE_DTS:
    case GST_BUFTYPE_MPEG:
    {
      gboolean framed = FALSE, parsed = FALSE;
      st = gst_caps_get_structure (caps, 0);

      gst_structure_get_boolean (st, "framed", &framed);
      gst_structure_get_boolean (st, "parsed", &parsed);
      if ((!framed && !parsed) || gst_audio_iec61937_frame_size (&spec) <= 0)
        goto done;
    }
      /* fall through — the default case is intentionally empty */
    default:{
    }
  }
  ret = TRUE;

done:
  /* releases the ref parse_caps may have stored in spec.caps */
  gst_caps_replace (&spec.caps, NULL);
  gst_object_unref (alsa);
  return ret;
}
/* Output buffer preparation... if the buffer has no caps, and
 * our allowed output caps is fixed, then give the caps to the
 * buffer.
 * This ensures that outgoing buffers have caps if we can, so
 * that pipelines like:
 *   gst-launch filesrc location=rawsamples.raw !
 *       audio/x-raw-int,width=16,depth=16,rate=48000,channels=2,
 *       endianness=4321,signed='(boolean)'true ! alsasink
 * will work.
 */
static GstFlowReturn
gst_capsfilter_prepare_buf (GstBaseTransform * trans, GstBuffer * input,
    gint size, GstCaps * caps, GstBuffer ** buf)
{
  if (GST_BUFFER_CAPS (input) != NULL) {
    /* Output buffer already has caps */
    GST_DEBUG_OBJECT (trans,
        "Input buffer already has caps (implicitely fixed)");
    /* FIXME : Move this behaviour to basetransform. The given caps are the ones
     * of the source pad, therefore our outgoing buffers should always have
     * those caps. */
    gst_buffer_set_caps (input, caps);
    gst_buffer_ref (input);
    *buf = input;
  } else {
    /* Buffer has no caps. See if the output pad only supports fixed caps */
    GstCaps *out_caps;

    /* prefer already-negotiated srcpad caps; fall back to allowed caps */
    out_caps = GST_PAD_CAPS (trans->srcpad);

    if (out_caps != NULL) {
      gst_caps_ref (out_caps);
    } else {
      out_caps = gst_pad_get_allowed_caps (trans->srcpad);
      g_return_val_if_fail (out_caps != NULL, GST_FLOW_ERROR);
    }

    out_caps = gst_caps_make_writable (out_caps);
    gst_caps_do_simplify (out_caps);

    if (gst_caps_is_fixed (out_caps) && !gst_caps_is_empty (out_caps)) {
      GST_DEBUG_OBJECT (trans, "Have fixed output caps %"
          GST_PTR_FORMAT " to apply to buffer with no caps", out_caps);

      /* reuse the input buffer when its metadata can be modified in place,
       * otherwise wrap it in a sub-buffer so we can attach caps */
      if (gst_buffer_is_metadata_writable (input)) {
        gst_buffer_ref (input);
        *buf = input;
      } else {
        GST_DEBUG_OBJECT (trans, "Creating sub-buffer and setting caps");
        *buf = gst_buffer_create_sub (input, 0, GST_BUFFER_SIZE (input));
      }

      /* direct assignment transfers our out_caps ref to the buffer */
      GST_BUFFER_CAPS (*buf) = out_caps;

      if (GST_PAD_CAPS (trans->srcpad) == NULL)
        gst_pad_set_caps (trans->srcpad, out_caps);
    } else {
      GST_DEBUG_OBJECT (trans, "Have unfixed output caps %" GST_PTR_FORMAT,
          out_caps);
      gst_caps_unref (out_caps);
    }
  }

  return GST_FLOW_OK;
}
/**
 * gst_video_parse_caps_framerate:
 * @caps: pointer to a #GstCaps instance
 * @fps_n: pointer to integer to hold numerator of frame rate (output)
 * @fps_d: pointer to integer to hold denominator of frame rate (output)
 *
 * Reads the "framerate" fraction out of @caps into @fps_n/@fps_d.
 * Works for any media type, not just the formats covered by
 * #GstVideoFormat, but requires @caps to be fixed.
 *
 * Since: 0.10.16
 *
 * Returns: TRUE if @caps was parsed correctly.
 */
gboolean
gst_video_parse_caps_framerate (GstCaps * caps, int *fps_n, int *fps_d)
{
  GstStructure *s;

  if (!gst_caps_is_fixed (caps))
    return FALSE;

  s = gst_caps_get_structure (caps, 0);

  return gst_structure_get_fraction (s, "framerate", fps_n, fps_d);
}
/**
 * @brief Parse caps and set tensor info.
 * Validates that @caps describe an other/tensor stream, stores the parsed
 * config as in_config, and derives out_config by scaling the dimension at
 * frames_dim by frames_out/frames_in.  Returns TRUE on success.
 */
static gboolean
gst_tensor_aggregator_parse_caps (GstTensorAggregator * self,
    const GstCaps * caps)
{
  GstStructure *structure;
  GstTensorConfig config;
  uint32_t per_frame;
  guint count;

  g_return_val_if_fail (caps != NULL, FALSE);
  g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);

  structure = gst_caps_get_structure (caps, 0);

  if (!gst_structure_has_name (structure, "other/tensor")) {
    GST_ERROR_OBJECT (self, "Invalid caps");
    return FALSE;
  }

  if (!gst_tensor_config_from_structure (&config, structure) ||
      !gst_tensor_config_validate (&config)) {
    GST_ERROR_OBJECT (self, "Cannot configure tensor info");
    return FALSE;
  }

  /**
   * @todo flush data
   * Check properties to detect invalid case.
   * Assertion when in=5 out=10 flush=20 or in=10 out=5 flush=20
   */
  /* number of input batches needed to produce one output batch */
  count = (self->frames_out + self->frames_in - 1) / self->frames_in;
  g_assert (self->frames_in * count >= self->frames_flush);

  self->in_config = config;

  /**
   * update dimension in output tensor.
   * e.g, in-dimension 2:200:200:1
   * if frames_out=10 and frames_dim=3, then out-dimension is 2:200:200:10.
   * if frames_out=10 and frames_dim=2, then out-dimension is 2:200:2000:1.
   */
  g_assert (self->frames_dim < NNS_TENSOR_RANK_LIMIT);
  /* the aggregated dimension must divide evenly into input frames */
  g_assert ((config.info.dimension[self->frames_dim] % self->frames_in) == 0);
  per_frame = config.info.dimension[self->frames_dim] / self->frames_in;
  config.info.dimension[self->frames_dim] = per_frame * self->frames_out;
  self->out_config = config;

  self->tensor_configured = TRUE;

  silent_debug_config (&self->in_config, "in-tensor");
  silent_debug_config (&self->out_config, "out-tensor");
  return TRUE;
}
/* Fills the encoder's sbc_t parameters from fixed @caps and cross-checks
 * them against every explicitly-configured element property (rate, channels,
 * blocks, subbands, mode, allocation, bitpool).  On any mismatch the sbc
 * state is zeroed and FALSE is returned; on success the derived codesize,
 * frame length and frame duration are cached on @enc. */
gboolean gst_sbc_enc_fill_sbc_params(GstSbcEnc *enc, GstCaps *caps)
{
	if (!gst_caps_is_fixed(caps)) {
		GST_DEBUG_OBJECT(enc, "didn't receive fixed caps, "
				"returning false");
		return FALSE;
	}

	if (!gst_sbc_util_fill_sbc_params(&enc->sbc, caps))
		return FALSE;

	/* each check only applies when the property was explicitly set
	 * (non-zero / non-default sentinel) */
	if (enc->rate != 0 && gst_sbc_parse_rate_from_sbc(
				enc->sbc.frequency) != enc->rate)
		goto fail;

	if (enc->channels != 0 && gst_sbc_get_channel_number(
				enc->sbc.mode) != enc->channels)
		goto fail;

	if (enc->blocks != 0 && gst_sbc_parse_blocks_from_sbc(
				enc->sbc.blocks) != enc->blocks)
		goto fail;

	if (enc->subbands != 0 && gst_sbc_parse_subbands_from_sbc(
				enc->sbc.subbands) != enc->subbands)
		goto fail;

	if (enc->mode != SBC_ENC_DEFAULT_MODE && enc->sbc.mode != enc->mode)
		goto fail;

	if (enc->allocation != SBC_AM_AUTO &&
			enc->sbc.allocation != enc->allocation)
		goto fail;

	if (enc->bitpool != SBC_ENC_BITPOOL_AUTO &&
			enc->sbc.bitpool != enc->bitpool)
		goto fail;

	/* cache derived sizes for the streaming path */
	enc->codesize = sbc_get_codesize(&enc->sbc);
	enc->frame_length = sbc_get_frame_length(&enc->sbc);
	enc->frame_duration = sbc_get_frame_duration(&enc->sbc);
	GST_DEBUG_OBJECT(enc, "codesize: %d, frame_length: %d, frame_duration:"
			" %d", enc->codesize, enc->frame_length,
			enc->frame_duration);

	return TRUE;

fail:
	memset(&enc->sbc, 0, sizeof(sbc_t));
	return FALSE;
}
/* Sink setcaps handler for the OMX audio filter: propagates the negotiated
 * sample rate to the OMX component's PCM parameters, then fixates and sets
 * caps on the src pad. */
static gboolean
sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstStructure *structure;
  GstOmxBaseFilter *omx_base;
  GOmxCore *gomx;
  gint rate = 0;

  omx_base = GST_OMX_BASE_FILTER (GST_PAD_PARENT (pad));
  gomx = (GOmxCore *) omx_base->gomx;

  GST_INFO_OBJECT (omx_base, "setcaps (sink): %" GST_PTR_FORMAT, caps);

  structure = gst_caps_get_structure (caps, 0);
  /* rate stays 0 if the field is absent */
  gst_structure_get_int (structure, "rate", &rate);

  /* Input port configuration. */
  {
    OMX_AUDIO_PARAM_PCMMODETYPE param;

    G_OMX_INIT_PARAM (param);

    /* NOTE(review): this uses out_port->port_index although the comment
     * above says "Input port configuration" — confirm which port is
     * actually meant here. */
    param.nPortIndex = omx_base->out_port->port_index;
    OMX_GetParameter (gomx->omx_handle, OMX_IndexParamAudioPcm, &param);

    param.nSamplingRate = rate;

    OMX_SetParameter (gomx->omx_handle, OMX_IndexParamAudioPcm, &param);
  }

  /* set caps on the srcpad */
  {
    GstCaps *tmp_caps;

    /* narrow the allowed caps down to one fixed set and apply it */
    tmp_caps = gst_pad_get_allowed_caps (omx_base->srcpad);
    tmp_caps = gst_caps_make_writable (tmp_caps);
    gst_caps_truncate (tmp_caps);

    gst_pad_fixate_caps (omx_base->srcpad, tmp_caps);

    if (gst_caps_is_fixed (tmp_caps)) {
      GST_INFO_OBJECT (omx_base, "fixated to: %" GST_PTR_FORMAT, tmp_caps);
      gst_pad_set_caps (omx_base->srcpad, tmp_caps);
    }

    gst_caps_unref (tmp_caps);
  }

  return gst_pad_set_caps (pad, caps);
}
/* Returns TRUE when @caps are fixed, use plain system memory, and name
 * the video/x-raw media type. */
gboolean
gst_caps_is_video_raw (GstCaps * caps)
{
  g_return_val_if_fail (caps != NULL, FALSE);

  if (!gst_caps_is_fixed (caps) ||
      !_gst_caps_has_feature (caps, GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY))
    return FALSE;

  return gst_structure_has_name (gst_caps_get_structure (caps, 0),
      "video/x-raw");
}
/**
 * gst_buffer_pool_config_set_params:
 * @config: a #GstBufferPool configuration
 * @caps: caps for the buffers
 * @size: the size of each buffer, not including prefix and padding
 * @min_buffers: the minimum amount of buffers to allocate.
 * @max_buffers: the maximum amount of buffers to allocate or 0 for unlimited.
 *
 * Configure @config with the given parameters.
 */
void
gst_buffer_pool_config_set_params (GstStructure * config, GstCaps * caps,
    guint size, guint min_buffers, guint max_buffers)
{
  g_return_if_fail (config != NULL);
  g_return_if_fail (max_buffers == 0 || min_buffers <= max_buffers);
  /* NULL caps are allowed; non-NULL caps must be fixed */
  g_return_if_fail (caps == NULL || gst_caps_is_fixed (caps));

  gst_structure_id_set (config,
      GST_QUARK (CAPS), GST_TYPE_CAPS, caps,
      GST_QUARK (SIZE), G_TYPE_UINT, size,
      GST_QUARK (MIN_BUFFERS), G_TYPE_UINT, min_buffers,
      GST_QUARK (MAX_BUFFERS), G_TYPE_UINT, max_buffers, NULL);
}
/**
 * @brief This function handles sink pad query.
 * Answers CAPS and ACCEPT_CAPS queries itself and delegates everything
 * else to the default pad query handler.
 */
static gboolean
gst_tensor_aggregator_sink_query (GstPad * pad, GstObject * parent,
    GstQuery * query)
{
  GstTensorAggregator *self = GST_TENSOR_AGGREGATOR (parent);

  GST_DEBUG_OBJECT (self, "Received %s query: %" GST_PTR_FORMAT,
      GST_QUERY_TYPE_NAME (query), query);

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CAPS:{
      GstCaps *filter;
      GstCaps *result;

      gst_query_parse_caps (query, &filter);
      result = gst_tensor_aggregator_query_caps (self, pad, filter);

      gst_query_set_caps_result (query, result);
      gst_caps_unref (result);
      return TRUE;
    }
    case GST_QUERY_ACCEPT_CAPS:{
      GstCaps *accept_caps;
      gboolean acceptable = FALSE;

      gst_query_parse_accept_caps (query, &accept_caps);
      silent_debug_caps (accept_caps, "accept-caps");

      /* only fixed caps can be checked against the pad template */
      if (gst_caps_is_fixed (accept_caps)) {
        GstCaps *tmpl = gst_pad_get_pad_template_caps (pad);

        acceptable = gst_caps_can_intersect (tmpl, accept_caps);
        gst_caps_unref (tmpl);
      }

      gst_query_set_accept_caps_result (query, acceptable);
      return TRUE;
    }
    default:
      return gst_pad_query_default (pad, parent, query);
  }
}
/* Sink setcaps handler for srtpdec.  When the caps carry a full SRTP
 * parameter set (ssrc, roc, ciphers, auths) a session stream is created
 * for the ssrc.  The key/cipher/auth fields are then stripped from a copy
 * of the caps, the media type is rewritten to plain RTP or RTCP depending
 * on @is_rtcp, and the result is set on the matching output pad. */
static gboolean
gst_srtp_dec_sink_setcaps (GstPad * pad, GstObject * parent,
    GstCaps * caps, gboolean is_rtcp)
{
  GstSrtpDec *filter = GST_SRTP_DEC (parent);
  GstPad *otherpad;
  GstStructure *ps;
  gboolean ret = FALSE;

  g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);

  ps = gst_caps_get_structure (caps, 0);

  /* all six fields must be present and correctly typed before we try to
   * build a session for this ssrc */
  if (gst_structure_has_field_typed (ps, "ssrc", G_TYPE_UINT) &&
      gst_structure_has_field_typed (ps, "roc", G_TYPE_UINT) &&
      gst_structure_has_field_typed (ps, "srtp-cipher", G_TYPE_STRING) &&
      gst_structure_has_field_typed (ps, "srtp-auth", G_TYPE_STRING) &&
      gst_structure_has_field_typed (ps, "srtcp-cipher", G_TYPE_STRING) &&
      gst_structure_has_field_typed (ps, "srtcp-auth", G_TYPE_STRING)) {
    guint ssrc;

    gst_structure_get_uint (ps, "ssrc", &ssrc);

    if (!update_session_stream_from_caps (filter, ssrc, caps)) {
      GST_WARNING_OBJECT (pad, "Could not create session from pad caps: %"
          GST_PTR_FORMAT, caps);
      return FALSE;
    }
  }

  /* work on a copy from here on: the local `caps` now shadows the caller's
   * caps and is unreffed below */
  caps = gst_caps_copy (caps);
  ps = gst_caps_get_structure (caps, 0);

  /* never forward key material downstream */
  gst_structure_remove_fields (ps, "srtp-key", "srtp-cipher", "srtp-auth",
      "srtcp-cipher", "srtcp-auth", NULL);

  if (is_rtcp)
    gst_structure_set_name (ps, "application/x-rtcp");
  else
    gst_structure_set_name (ps, "application/x-rtp");

  otherpad = gst_pad_get_element_private (pad);

  ret = gst_pad_set_caps (otherpad, caps);

  gst_caps_unref (caps);

  return ret;
}
// Parses fixed |caps| into video geometry: fills |size|, |format|, the
// pixel-aspect-ratio fraction and plane-0 |stride|. Returns false when the
// caps are not fixed or cannot be converted to a GstVideoInfo.
bool getVideoSizeAndFormatFromCaps(GstCaps* caps, WebCore::IntSize& size, GstVideoFormat& format, int& pixelAspectRatioNumerator, int& pixelAspectRatioDenominator, int& stride)
{
    GstVideoInfo videoInfo;
    if (!gst_caps_is_fixed(caps) || !gst_video_info_from_caps(&videoInfo, caps))
        return false;

    size = WebCore::IntSize(GST_VIDEO_INFO_WIDTH(&videoInfo), GST_VIDEO_INFO_HEIGHT(&videoInfo));
    format = GST_VIDEO_INFO_FORMAT(&videoInfo);
    pixelAspectRatioNumerator = GST_VIDEO_INFO_PAR_N(&videoInfo);
    pixelAspectRatioDenominator = GST_VIDEO_INFO_PAR_D(&videoInfo);
    stride = GST_VIDEO_INFO_PLANE_STRIDE(&videoInfo, 0);
    return true;
}
/**
 * gst_event_new_caps:
 * @caps: (transfer none): a #GstCaps
 *
 * Create a new CAPS event for @caps. The caps event can only travel
 * downstream synchronized with the buffer flow and describes the format
 * of the buffers that follow the event.
 *
 * Returns: (transfer full): the new CAPS event.
 */
GstEvent *
gst_event_new_caps (GstCaps * caps)
{
  GstStructure *s;

  g_return_val_if_fail (caps != NULL, NULL);
  g_return_val_if_fail (gst_caps_is_fixed (caps), NULL);

  GST_CAT_INFO (GST_CAT_EVENT, "creating caps event %" GST_PTR_FORMAT, caps);

  s = gst_structure_new_id (GST_QUARK (EVENT_CAPS),
      GST_QUARK (CAPS), GST_TYPE_CAPS, caps, NULL);

  return gst_event_new_custom (GST_EVENT_CAPS, s);
}