/**
 * gst_video_info_is_equal:
 * @info: a #GstVideoInfo
 * @other: a #GstVideoInfo
 *
 * Compares two #GstVideoInfo and returns whether they are equal or not
 *
 * Returns: %TRUE if @info and @other are equal, else %FALSE.
 */
gboolean
gst_video_info_is_equal (const GstVideoInfo * info, const GstVideoInfo * other)
{
  gint i;

  if (GST_VIDEO_INFO_FORMAT (info) != GST_VIDEO_INFO_FORMAT (other))
    return FALSE;
  if (GST_VIDEO_INFO_INTERLACE_MODE (info) !=
      GST_VIDEO_INFO_INTERLACE_MODE (other))
    return FALSE;
  if (GST_VIDEO_INFO_FLAGS (info) != GST_VIDEO_INFO_FLAGS (other))
    return FALSE;
  if (GST_VIDEO_INFO_WIDTH (info) != GST_VIDEO_INFO_WIDTH (other))
    return FALSE;
  if (GST_VIDEO_INFO_HEIGHT (info) != GST_VIDEO_INFO_HEIGHT (other))
    return FALSE;
  if (GST_VIDEO_INFO_SIZE (info) != GST_VIDEO_INFO_SIZE (other))
    return FALSE;
  if (GST_VIDEO_INFO_PAR_N (info) != GST_VIDEO_INFO_PAR_N (other))
    return FALSE;
  if (GST_VIDEO_INFO_PAR_D (info) != GST_VIDEO_INFO_PAR_D (other))
    return FALSE;
  if (GST_VIDEO_INFO_FPS_N (info) != GST_VIDEO_INFO_FPS_N (other))
    return FALSE;
  if (GST_VIDEO_INFO_FPS_D (info) != GST_VIDEO_INFO_FPS_D (other))
    return FALSE;
  if (!gst_video_colorimetry_is_equal (&GST_VIDEO_INFO_COLORIMETRY (info),
          &GST_VIDEO_INFO_COLORIMETRY (other)))
    return FALSE;
  if (GST_VIDEO_INFO_CHROMA_SITE (info) != GST_VIDEO_INFO_CHROMA_SITE (other))
    return FALSE;
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (info) !=
      GST_VIDEO_INFO_MULTIVIEW_MODE (other))
    return FALSE;
  if (GST_VIDEO_INFO_MULTIVIEW_FLAGS (info) !=
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (other))
    return FALSE;
  if (GST_VIDEO_INFO_VIEWS (info) != GST_VIDEO_INFO_VIEWS (other))
    return FALSE;

  for (i = 0; i < info->finfo->n_planes; i++) {
    if (info->stride[i] != other->stride[i])
      return FALSE;
    if (info->offset[i] != other->offset[i])
      return FALSE;
  }

  return TRUE;
}
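/* A minimal usage sketch (not from the sources above): build two GstVideoInfo
 * with gst_video_info_set_format() and compare them with
 * gst_video_info_is_equal(). Format, size and strides start out identical,
 * so only the framerate tweak below makes the comparison fail. */
#include <gst/video/video.h>

int
main (int argc, char **argv)
{
  GstVideoInfo a, b;

  gst_init (&argc, &argv);

  gst_video_info_set_format (&a, GST_VIDEO_FORMAT_I420, 320, 240);
  gst_video_info_set_format (&b, GST_VIDEO_FORMAT_I420, 320, 240);

  /* identical format/size/strides -> equal */
  g_assert (gst_video_info_is_equal (&a, &b));

  /* a differing framerate is enough to make them unequal */
  GST_VIDEO_INFO_FPS_N (&b) = 30;
  GST_VIDEO_INFO_FPS_D (&b) = 1;
  g_assert (!gst_video_info_is_equal (&a, &b));

  return 0;
}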
static void
_mixer_pad_get_output_size (GstGLVideoMixer * mix,
    GstGLVideoMixerPad * mix_pad, gint * width, gint * height)
{
  GstVideoAggregator *vagg = GST_VIDEO_AGGREGATOR (mix);
  GstVideoAggregatorPad *vagg_pad = GST_VIDEO_AGGREGATOR_PAD (mix_pad);
  gint pad_width, pad_height;
  guint dar_n, dar_d;

  /* FIXME: Anything better we can do here? */
  if (!vagg_pad->info.finfo
      || vagg_pad->info.finfo->format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_DEBUG_OBJECT (mix_pad, "Have no caps yet");
    *width = 0;
    *height = 0;
    return;
  }

  pad_width =
      mix_pad->width <= 0 ? GST_VIDEO_INFO_WIDTH (&vagg_pad->info) :
      mix_pad->width;
  pad_height =
      mix_pad->height <= 0 ? GST_VIDEO_INFO_HEIGHT (&vagg_pad->info) :
      mix_pad->height;

  gst_video_calculate_display_ratio (&dar_n, &dar_d, pad_width, pad_height,
      GST_VIDEO_INFO_PAR_N (&vagg_pad->info),
      GST_VIDEO_INFO_PAR_D (&vagg_pad->info),
      GST_VIDEO_INFO_PAR_N (&vagg->info), GST_VIDEO_INFO_PAR_D (&vagg->info));
  GST_LOG_OBJECT (mix_pad, "scaling %ux%u by %u/%u (%u/%u / %u/%u)",
      pad_width, pad_height, dar_n, dar_d,
      GST_VIDEO_INFO_PAR_N (&vagg_pad->info),
      GST_VIDEO_INFO_PAR_D (&vagg_pad->info),
      GST_VIDEO_INFO_PAR_N (&vagg->info), GST_VIDEO_INFO_PAR_D (&vagg->info));

  if (pad_height % dar_n == 0) {
    pad_width = gst_util_uint64_scale_int (pad_height, dar_n, dar_d);
  } else if (pad_width % dar_d == 0) {
    pad_height = gst_util_uint64_scale_int (pad_width, dar_d, dar_n);
  } else {
    pad_width = gst_util_uint64_scale_int (pad_height, dar_n, dar_d);
  }

  if (width)
    *width = pad_width;
  if (height)
    *height = pad_height;
}
bool getVideoSizeAndFormatFromCaps(GstCaps* caps, WebCore::IntSize& size, GstVideoFormat& format, int& pixelAspectRatioNumerator, int& pixelAspectRatioDenominator, int& stride)
{
#ifdef GST_API_VERSION_1
    GstVideoInfo info;
    if (!gst_video_info_from_caps(&info, caps))
        return false;

    format = GST_VIDEO_INFO_FORMAT(&info);
    size.setWidth(GST_VIDEO_INFO_WIDTH(&info));
    size.setHeight(GST_VIDEO_INFO_HEIGHT(&info));
    pixelAspectRatioNumerator = GST_VIDEO_INFO_PAR_N(&info);
    pixelAspectRatioDenominator = GST_VIDEO_INFO_PAR_D(&info);
    stride = GST_VIDEO_INFO_PLANE_STRIDE(&info, 0);
#else
    gint width, height;
    if (!GST_IS_CAPS(caps) || !gst_caps_is_fixed(caps)
        || !gst_video_format_parse_caps(caps, &format, &width, &height)
        || !gst_video_parse_caps_pixel_aspect_ratio(caps, &pixelAspectRatioNumerator, &pixelAspectRatioDenominator))
        return false;

    size.setWidth(width);
    size.setHeight(height);
    stride = size.width() * 4;
#endif

    return true;
}
bool gst_vlc_set_vout_fmt( GstVideoInfo *p_info, GstVideoAlignment *p_align,
        GstCaps *p_caps, decoder_t *p_dec )
{
    es_format_t *p_outfmt = &p_dec->fmt_out;
    video_format_t *p_voutfmt = &p_dec->fmt_out.video;
    GstStructure *p_str = gst_caps_get_structure( p_caps, 0 );
    vlc_fourcc_t i_chroma;
    int i_padded_width, i_padded_height;

    i_chroma = p_outfmt->i_codec = vlc_fourcc_GetCodecFromString( VIDEO_ES,
            gst_structure_get_string( p_str, "format" ) );
    if( !i_chroma )
    {
        msg_Err( p_dec, "video chroma type not supported" );
        return false;
    }

    i_padded_width = GST_VIDEO_INFO_WIDTH( p_info ) + p_align->padding_left +
        p_align->padding_right;
    i_padded_height = GST_VIDEO_INFO_HEIGHT( p_info ) + p_align->padding_top +
        p_align->padding_bottom;

    video_format_Setup( p_voutfmt, i_chroma, i_padded_width, i_padded_height,
            GST_VIDEO_INFO_WIDTH( p_info ), GST_VIDEO_INFO_HEIGHT( p_info ),
            GST_VIDEO_INFO_PAR_N( p_info ), GST_VIDEO_INFO_PAR_D( p_info ));
    p_voutfmt->i_x_offset = p_align->padding_left;
    p_voutfmt->i_y_offset = p_align->padding_top;

    p_voutfmt->i_frame_rate = GST_VIDEO_INFO_FPS_N( p_info );
    p_voutfmt->i_frame_rate_base = GST_VIDEO_INFO_FPS_D( p_info );

    return true;
}
static inline GstBuffer *
gst_y4m_encode_get_stream_header (GstY4mEncode * filter, gboolean tff)
{
  gpointer header;
  GstBuffer *buf;
  gchar interlaced;
  gsize len;

  if (GST_VIDEO_INFO_IS_INTERLACED (&filter->info)) {
    if (tff)
      interlaced = 't';
    else
      interlaced = 'b';
  } else {
    interlaced = 'p';
  }

  header = g_strdup_printf ("YUV4MPEG2 C%s W%d H%d I%c F%d:%d A%d:%d\n",
      filter->colorspace, GST_VIDEO_INFO_WIDTH (&filter->info),
      GST_VIDEO_INFO_HEIGHT (&filter->info), interlaced,
      GST_VIDEO_INFO_FPS_N (&filter->info),
      GST_VIDEO_INFO_FPS_D (&filter->info),
      GST_VIDEO_INFO_PAR_N (&filter->info),
      GST_VIDEO_INFO_PAR_D (&filter->info));
  len = strlen (header);

  buf = gst_buffer_new ();
  gst_buffer_append_memory (buf,
      gst_memory_new_wrapped (0, header, len, 0, len, header, g_free));

  return buf;
}
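/* A quick hedged demo of the stream header built above: with colorspace
 * string "420", 640x480 progressive video at 30000/1001 fps and square
 * pixels, the same printf format yields
 *   YUV4MPEG2 C420 W640 H480 Ip F30000:1001 A1:1
 * (all values here are illustrative, not taken from the element). */
#include <glib.h>

int
main (void)
{
  gchar *header = g_strdup_printf ("YUV4MPEG2 C%s W%d H%d I%c F%d:%d A%d:%d\n",
      "420", 640, 480, 'p', 30000, 1001, 1, 1);
  g_print ("%s", header);
  g_free (header);
  return 0;
}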
static gboolean
_calculate_par (GtkGstWidget * widget, GstVideoInfo * info)
{
  gboolean ok;
  gint width, height;
  gint par_n, par_d;
  gint display_par_n, display_par_d;
  guint display_ratio_num, display_ratio_den;

  width = GST_VIDEO_INFO_WIDTH (info);
  height = GST_VIDEO_INFO_HEIGHT (info);

  par_n = GST_VIDEO_INFO_PAR_N (info);
  par_d = GST_VIDEO_INFO_PAR_D (info);

  if (!par_n)
    par_n = 1;

  /* get display's PAR */
  if (widget->priv->par_n != 0 && widget->priv->par_d != 0) {
    display_par_n = widget->priv->par_n;
    display_par_d = widget->priv->par_d;
  } else {
    display_par_n = 1;
    display_par_d = 1;
  }

  ok = gst_video_calculate_display_ratio (&display_ratio_num,
      &display_ratio_den, width, height, par_n, par_d, display_par_n,
      display_par_d);

  if (!ok)
    return FALSE;

  GST_LOG ("PAR: %u/%u DAR:%u/%u", par_n, par_d, display_par_n,
      display_par_d);

  if (height % display_ratio_den == 0) {
    GST_DEBUG ("keeping video height");
    widget->priv->display_width = (guint)
        gst_util_uint64_scale_int (height, display_ratio_num,
        display_ratio_den);
    widget->priv->display_height = height;
  } else if (width % display_ratio_num == 0) {
    GST_DEBUG ("keeping video width");
    widget->priv->display_width = width;
    widget->priv->display_height = (guint)
        gst_util_uint64_scale_int (width, display_ratio_den,
        display_ratio_num);
  } else {
    GST_DEBUG ("approximating while keeping video height");
    widget->priv->display_width = (guint)
        gst_util_uint64_scale_int (height, display_ratio_num,
        display_ratio_den);
    widget->priv->display_height = height;
  }
  GST_DEBUG ("scaling to %dx%d", widget->priv->display_width,
      widget->priv->display_height);

  return TRUE;
}
static void
_mixer_pad_get_output_size (GstCompositor * comp, GstCompositorPad * comp_pad,
    gint out_par_n, gint out_par_d, gint * width, gint * height)
{
  GstVideoAggregatorPad *vagg_pad = GST_VIDEO_AGGREGATOR_PAD (comp_pad);
  gint pad_width, pad_height;
  guint dar_n, dar_d;

  /* FIXME: Anything better we can do here? */
  if (!vagg_pad->info.finfo
      || vagg_pad->info.finfo->format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_DEBUG_OBJECT (comp_pad, "Have no caps yet");
    *width = 0;
    *height = 0;
    return;
  }

  pad_width =
      comp_pad->width <= 0 ? GST_VIDEO_INFO_WIDTH (&vagg_pad->info) :
      comp_pad->width;
  pad_height =
      comp_pad->height <= 0 ? GST_VIDEO_INFO_HEIGHT (&vagg_pad->info) :
      comp_pad->height;

  if (!gst_video_calculate_display_ratio (&dar_n, &dar_d, pad_width,
          pad_height, GST_VIDEO_INFO_PAR_N (&vagg_pad->info),
          GST_VIDEO_INFO_PAR_D (&vagg_pad->info), out_par_n, out_par_d)) {
    GST_WARNING_OBJECT (comp_pad, "Cannot calculate display aspect ratio");
    *width = *height = 0;
    /* dar_n/dar_d are undefined on failure, so bail out here */
    return;
  }
  GST_LOG_OBJECT (comp_pad, "scaling %ux%u by %u/%u (%u/%u / %u/%u)",
      pad_width, pad_height, dar_n, dar_d,
      GST_VIDEO_INFO_PAR_N (&vagg_pad->info),
      GST_VIDEO_INFO_PAR_D (&vagg_pad->info), out_par_n, out_par_d);

  if (pad_height % dar_n == 0) {
    pad_width = gst_util_uint64_scale_int (pad_height, dar_n, dar_d);
  } else if (pad_width % dar_d == 0) {
    pad_height = gst_util_uint64_scale_int (pad_width, dar_d, dar_n);
  } else {
    pad_width = gst_util_uint64_scale_int (pad_height, dar_n, dar_d);
  }

  *width = pad_width;
  *height = pad_height;
}
void
gst_video_info_change_format (GstVideoInfo * vip, GstVideoFormat format,
    guint width, guint height)
{
  GstVideoInfo vi = *vip;

  gst_video_info_set_format (vip, format, width, height);

  GST_VIDEO_INFO_INTERLACE_MODE (vip) = GST_VIDEO_INFO_INTERLACE_MODE (&vi);
  GST_VIDEO_INFO_FLAGS (vip) = GST_VIDEO_INFO_FLAGS (&vi);
  GST_VIDEO_INFO_VIEWS (vip) = GST_VIDEO_INFO_VIEWS (&vi);
  GST_VIDEO_INFO_PAR_N (vip) = GST_VIDEO_INFO_PAR_N (&vi);
  GST_VIDEO_INFO_PAR_D (vip) = GST_VIDEO_INFO_PAR_D (&vi);
  GST_VIDEO_INFO_FPS_N (vip) = GST_VIDEO_INFO_FPS_N (&vi);
  GST_VIDEO_INFO_FPS_D (vip) = GST_VIDEO_INFO_FPS_D (&vi);
  GST_VIDEO_INFO_MULTIVIEW_MODE (vip) = GST_VIDEO_INFO_MULTIVIEW_MODE (&vi);
  GST_VIDEO_INFO_MULTIVIEW_FLAGS (vip) = GST_VIDEO_INFO_MULTIVIEW_FLAGS (&vi);
}
static gboolean
theora_enc_set_format (GstVideoEncoder * benc, GstVideoCodecState * state)
{
  GstTheoraEnc *enc = GST_THEORA_ENC (benc);
  GstVideoInfo *info = &state->info;

  enc->width = GST_VIDEO_INFO_WIDTH (info);
  enc->height = GST_VIDEO_INFO_HEIGHT (info);

  th_info_clear (&enc->info);
  th_info_init (&enc->info);
  /* Theora has a divisible-by-sixteen restriction for the encoded video size but
   * we can define a picture area using pic_width/pic_height */
  enc->info.frame_width = GST_ROUND_UP_16 (enc->width);
  enc->info.frame_height = GST_ROUND_UP_16 (enc->height);
  enc->info.pic_width = enc->width;
  enc->info.pic_height = enc->height;
  switch (GST_VIDEO_INFO_FORMAT (info)) {
    case GST_VIDEO_FORMAT_I420:
      enc->info.pixel_fmt = TH_PF_420;
      break;
    case GST_VIDEO_FORMAT_Y42B:
      enc->info.pixel_fmt = TH_PF_422;
      break;
    case GST_VIDEO_FORMAT_Y444:
      enc->info.pixel_fmt = TH_PF_444;
      break;
    default:
      g_assert_not_reached ();
  }

  enc->info.fps_numerator = enc->fps_n = GST_VIDEO_INFO_FPS_N (info);
  enc->info.fps_denominator = enc->fps_d = GST_VIDEO_INFO_FPS_D (info);
  enc->info.aspect_numerator = GST_VIDEO_INFO_PAR_N (info);
  enc->info.aspect_denominator = GST_VIDEO_INFO_PAR_D (info);

  enc->info.colorspace = TH_CS_UNSPECIFIED;

  /* Save input state */
  if (enc->input_state)
    gst_video_codec_state_unref (enc->input_state);
  enc->input_state = gst_video_codec_state_ref (state);

  /* as done in theora */
  enc->info.keyframe_granule_shift = _ilog (enc->keyframe_force - 1);
  GST_DEBUG_OBJECT (enc,
      "keyframe_frequency_force is %d, granule shift is %d",
      enc->keyframe_force, enc->info.keyframe_granule_shift);

  theora_enc_reset (enc);
  enc->initialised = TRUE;

  return TRUE;
}
static gboolean
gst_kms_sink_calculate_display_ratio (GstKMSSink * self, GstVideoInfo * vinfo)
{
  guint dar_n, dar_d;
  guint video_width, video_height;
  guint video_par_n, video_par_d;
  guint dpy_par_n, dpy_par_d;

  video_width = GST_VIDEO_INFO_WIDTH (vinfo);
  video_height = GST_VIDEO_INFO_HEIGHT (vinfo);
  video_par_n = GST_VIDEO_INFO_PAR_N (vinfo);
  video_par_d = GST_VIDEO_INFO_PAR_D (vinfo);

  gst_video_calculate_device_ratio (self->hdisplay, self->vdisplay,
      self->mm_width, self->mm_height, &dpy_par_n, &dpy_par_d);

  if (!gst_video_calculate_display_ratio (&dar_n, &dar_d, video_width,
          video_height, video_par_n, video_par_d, dpy_par_n, dpy_par_d))
    return FALSE;

  GST_DEBUG_OBJECT (self, "video calculated display ratio: %d/%d", dar_n,
      dar_d);

  /* now find a width x height that respects this display ratio.
   * prefer those that have one of w/h the same as the incoming video
   * using wd / hd = dar_n / dar_d */

  /* start with same height, because of interlaced video */
  /* check hd / dar_d is an integer scale factor, and scale wd with the PAR */
  if (video_height % dar_d == 0) {
    GST_DEBUG_OBJECT (self, "keeping video height");
    GST_VIDEO_SINK_WIDTH (self) = (guint)
        gst_util_uint64_scale_int (video_height, dar_n, dar_d);
    GST_VIDEO_SINK_HEIGHT (self) = video_height;
  } else if (video_width % dar_n == 0) {
    GST_DEBUG_OBJECT (self, "keeping video width");
    GST_VIDEO_SINK_WIDTH (self) = video_width;
    GST_VIDEO_SINK_HEIGHT (self) = (guint)
        gst_util_uint64_scale_int (video_width, dar_d, dar_n);
  } else {
    GST_DEBUG_OBJECT (self, "approximating while keeping video height");
    GST_VIDEO_SINK_WIDTH (self) = (guint)
        gst_util_uint64_scale_int (video_height, dar_n, dar_d);
    GST_VIDEO_SINK_HEIGHT (self) = video_height;
  }
  GST_DEBUG_OBJECT (self, "scaling to %dx%d", GST_VIDEO_SINK_WIDTH (self),
      GST_VIDEO_SINK_HEIGHT (self));

  return TRUE;
}
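/* Hedged sketch of the keep-one-dimension scaling rule shared by the sinks
 * and widgets above: gst_video_calculate_display_ratio() reduces video size
 * plus pixel aspect ratios to a display aspect ratio (DAR), and one dimension
 * is kept while the other is scaled to match. scale_to_dar() is a
 * hypothetical helper; the display PAR is assumed to be 1/1. */
#include <gst/video/video.h>

static void
scale_to_dar (gint width, gint height, gint par_n, gint par_d,
    gint * out_w, gint * out_h)
{
  guint dar_n, dar_d;

  if (!gst_video_calculate_display_ratio (&dar_n, &dar_d,
          width, height, par_n, par_d, 1, 1)) {
    *out_w = *out_h = 0;
    return;
  }

  if (height % dar_d == 0) {
    /* keep height, derive width */
    *out_w = (gint) gst_util_uint64_scale_int (height, dar_n, dar_d);
    *out_h = height;
  } else if (width % dar_n == 0) {
    /* keep width, derive height */
    *out_w = width;
    *out_h = (gint) gst_util_uint64_scale_int (width, dar_d, dar_n);
  } else {
    /* neither divides evenly: approximate, keeping height */
    *out_w = (gint) gst_util_uint64_scale_int (height, dar_n, dar_d);
    *out_h = height;
  }
}

/* e.g. 720x576 with PAR 16/15 gives DAR 4/3, so the frame maps to 768x576
 * on a square-pixel display */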
bool getVideoSizeAndFormatFromCaps(GstCaps* caps, WebCore::IntSize& size, GstVideoFormat& format, int& pixelAspectRatioNumerator, int& pixelAspectRatioDenominator, int& stride)
{
    GstVideoInfo info;

    if (!gst_caps_is_fixed(caps) || !gst_video_info_from_caps(&info, caps))
        return false;

    format = GST_VIDEO_INFO_FORMAT(&info);
    size.setWidth(GST_VIDEO_INFO_WIDTH(&info));
    size.setHeight(GST_VIDEO_INFO_HEIGHT(&info));
    pixelAspectRatioNumerator = GST_VIDEO_INFO_PAR_N(&info);
    pixelAspectRatioDenominator = GST_VIDEO_INFO_PAR_D(&info);
    stride = GST_VIDEO_INFO_PLANE_STRIDE(&info, 0);

    return true;
}
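/* Hedged companion sketch: the gst_video_info_from_caps() call both versions
 * of getVideoSizeAndFormatFromCaps() rely on, driven from a hand-written
 * fixed caps string (the caps values are illustrative). */
#include <gst/video/video.h>

int
main (int argc, char **argv)
{
  GstCaps *caps;
  GstVideoInfo info;

  gst_init (&argc, &argv);

  caps = gst_caps_from_string ("video/x-raw, format=I420, width=320, "
      "height=240, framerate=30/1, pixel-aspect-ratio=1/1");
  if (gst_caps_is_fixed (caps) && gst_video_info_from_caps (&info, caps)) {
    g_print ("%dx%d PAR %d/%d FPS %d/%d stride %d\n",
        GST_VIDEO_INFO_WIDTH (&info), GST_VIDEO_INFO_HEIGHT (&info),
        GST_VIDEO_INFO_PAR_N (&info), GST_VIDEO_INFO_PAR_D (&info),
        GST_VIDEO_INFO_FPS_N (&info), GST_VIDEO_INFO_FPS_D (&info),
        GST_VIDEO_INFO_PLANE_STRIDE (&info, 0));
  }
  gst_caps_unref (caps);
  return 0;
}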
void GStreamerReader::VideoPreroll()
{
  /* The first video buffer has reached the video sink. Get width and height */
  LOG(PR_LOG_DEBUG, "Video preroll");
  GstPad* sinkpad = gst_element_get_static_pad(GST_ELEMENT(mVideoAppSink), "sink");
  int PARNumerator, PARDenominator;
#if GST_VERSION_MAJOR >= 1
  GstCaps* caps = gst_pad_get_current_caps(sinkpad);
  memset (&mVideoInfo, 0, sizeof (mVideoInfo));
  gst_video_info_from_caps(&mVideoInfo, caps);
  mFormat = mVideoInfo.finfo->format;
  mPicture.width = mVideoInfo.width;
  mPicture.height = mVideoInfo.height;
  PARNumerator = GST_VIDEO_INFO_PAR_N(&mVideoInfo);
  PARDenominator = GST_VIDEO_INFO_PAR_D(&mVideoInfo);
#else
  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
  gst_video_format_parse_caps(caps, &mFormat, &mPicture.width, &mPicture.height);
  if (!gst_video_parse_caps_pixel_aspect_ratio(caps, &PARNumerator, &PARDenominator)) {
    PARNumerator = 1;
    PARDenominator = 1;
  }
#endif
  NS_ASSERTION(mPicture.width && mPicture.height, "invalid video resolution");

  // Calculate display size according to pixel aspect ratio.
  nsIntRect pictureRect(0, 0, mPicture.width, mPicture.height);
  nsIntSize frameSize = nsIntSize(mPicture.width, mPicture.height);
  nsIntSize displaySize = nsIntSize(mPicture.width, mPicture.height);
  ScaleDisplayByAspectRatio(displaySize, float(PARNumerator) / float(PARDenominator));

  // If the video frame size overflows, stop playing.
  if (IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
    GstStructure* structure = gst_caps_get_structure(caps, 0);
    gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);

    mInfo.mVideo.mDisplay = ThebesIntSize(displaySize.ToIntSize());
    mInfo.mVideo.mHasVideo = true;
  } else {
    LOG(PR_LOG_DEBUG, "invalid video region");
    Eos();
  }
  gst_caps_unref(caps);
  gst_object_unref(sinkpad);
}
static gboolean
_calculate_par (GtkGstBaseWidget * widget, GstVideoInfo * info)
{
  gboolean ok;
  gint width, height;
  gint par_n, par_d;
  gint display_par_n, display_par_d;

  width = GST_VIDEO_INFO_WIDTH (info);
  height = GST_VIDEO_INFO_HEIGHT (info);

  par_n = GST_VIDEO_INFO_PAR_N (info);
  par_d = GST_VIDEO_INFO_PAR_D (info);

  if (!par_n)
    par_n = 1;

  /* get display's PAR */
  if (widget->par_n != 0 && widget->par_d != 0) {
    display_par_n = widget->par_n;
    display_par_d = widget->par_d;
  } else {
    display_par_n = 1;
    display_par_d = 1;
  }

  ok = gst_video_calculate_display_ratio (&widget->display_ratio_num,
      &widget->display_ratio_den, width, height, par_n, par_d, display_par_n,
      display_par_d);

  if (ok) {
    GST_LOG ("PAR: %u/%u DAR:%u/%u", par_n, par_d, display_par_n,
        display_par_d);
    return TRUE;
  }

  return FALSE;
}
static gboolean
gst_aml_vdec_set_format(GstVideoDecoder *dec, GstVideoCodecState *state)
{
  gboolean ret = FALSE;
  GstStructure *structure;
  const char *name;
  GstVideoInfo *info;
  gint par_num, par_den;
  GstVideoFormat fmt;
  GstAmlVdec *amlvdec = GST_AMLVDEC(dec);

  g_return_val_if_fail(state != NULL, FALSE);

  if (amlvdec->input_state)
    gst_video_codec_state_unref(amlvdec->input_state);
  amlvdec->input_state = gst_video_codec_state_ref(state);

  structure = gst_caps_get_structure(state->caps, 0);
  name = gst_structure_get_name(structure);
  GST_INFO_OBJECT(amlvdec, "%s = %s", __FUNCTION__, name);
  if (name) {
    ret = gst_set_vstream_info(amlvdec, state->caps);
    if (!amlvdec->output_state) {
      info = &amlvdec->input_state->info;
      fmt = GST_VIDEO_FORMAT_xRGB;

      GST_VIDEO_INFO_WIDTH (info) = amlvdec->pcodec->am_sysinfo.width;
      GST_VIDEO_INFO_HEIGHT (info) = amlvdec->pcodec->am_sysinfo.height;
      par_num = GST_VIDEO_INFO_PAR_N(info);
      par_den = GST_VIDEO_INFO_PAR_D(info);

      amlvdec->output_state =
          gst_video_decoder_set_output_state(GST_VIDEO_DECODER(amlvdec),
          fmt, info->width, info->height, amlvdec->input_state);
      gst_video_decoder_negotiate (GST_VIDEO_DECODER (amlvdec));
    }
  }
  return ret;
}
static void
gst_raw_video_parse_set_property (GObject * object, guint prop_id,
    GValue const *value, GParamSpec * pspec)
{
  GstBaseParse *base_parse = GST_BASE_PARSE (object);
  GstRawBaseParse *raw_base_parse = GST_RAW_BASE_PARSE (object);
  GstRawVideoParse *raw_video_parse = GST_RAW_VIDEO_PARSE (object);
  GstRawVideoParseConfig *props_cfg = &(raw_video_parse->properties_config);

  /* All properties are handled similarly:
   * - if the new value is the same as the current value, nothing is done
   * - the parser lock is held while the new value is set
   * - if the properties config is the current config, the source caps are
   *   invalidated to ensure that the code in handle_frame pushes a new CAPS
   *   event out
   * - properties that affect the video frame size call the function to update
   *   the info and also call gst_base_parse_set_min_frame_size() to ensure
   *   that the minimum frame size can hold 1 frame; to ensure that the min
   *   frame size includes any extra padding, it is set to the result of
   *   gst_raw_video_parse_get_config_frame_size()
   * - property configuration values that require video info updates aren't
   *   written directly into the video info structure, but into the extra
   *   fields instead (gst_raw_video_parse_update_info() then copies the
   *   values from these fields into the video info); see the documentation
   *   inside gst_raw_video_parse_update_info() for the reason why
   */

  switch (prop_id) {
    case PROP_WIDTH:
    {
      gint new_width = g_value_get_int (value);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      if (new_width != props_cfg->width) {
        props_cfg->width = new_width;
        gst_raw_video_parse_update_info (props_cfg);

        if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse)) {
          gst_raw_base_parse_invalidate_src_caps (raw_base_parse);
          gst_base_parse_set_min_frame_size (base_parse,
              gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                  GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));
        }
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_HEIGHT:
    {
      gint new_height = g_value_get_int (value);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      if (new_height != props_cfg->height) {
        props_cfg->height = new_height;
        gst_raw_video_parse_update_info (props_cfg);

        if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse)) {
          gst_raw_base_parse_invalidate_src_caps (raw_base_parse);
          gst_base_parse_set_min_frame_size (base_parse,
              gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                  GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));
        }
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_FORMAT:
    {
      GstVideoFormat new_format = g_value_get_enum (value);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      if (new_format != props_cfg->format) {
        props_cfg->format = new_format;
        gst_raw_video_parse_update_info (props_cfg);

        if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse)) {
          gst_raw_base_parse_invalidate_src_caps (raw_base_parse);
          gst_base_parse_set_min_frame_size (base_parse,
              gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                  GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));
        }
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_PIXEL_ASPECT_RATIO:
    {
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      /* The pixel aspect ratio does not affect the video frame size,
       * so it is just set directly without any updates */
      props_cfg->pixel_aspect_ratio_n =
          GST_VIDEO_INFO_PAR_N (&(props_cfg->info)) =
          gst_value_get_fraction_numerator (value);
      props_cfg->pixel_aspect_ratio_d =
          GST_VIDEO_INFO_PAR_D (&(props_cfg->info)) =
          gst_value_get_fraction_denominator (value);
      GST_DEBUG_OBJECT (raw_video_parse, "setting pixel aspect ratio to %u/%u",
          props_cfg->pixel_aspect_ratio_n, props_cfg->pixel_aspect_ratio_d);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_FRAMERATE:
    {
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      /* The framerate does not affect the video frame size,
       * so it is just set directly without any updates */
      props_cfg->framerate_n = GST_VIDEO_INFO_FPS_N (&(props_cfg->info)) =
          gst_value_get_fraction_numerator (value);
      props_cfg->framerate_d = GST_VIDEO_INFO_FPS_D (&(props_cfg->info)) =
          gst_value_get_fraction_denominator (value);
      GST_DEBUG_OBJECT (raw_video_parse, "setting framerate to %u/%u",
          props_cfg->framerate_n, props_cfg->framerate_d);

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_INTERLACED:
    {
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      /* Interlacing does not affect the video frame size,
       * so it is just set directly without any updates */
      props_cfg->interlaced = g_value_get_boolean (value);
      GST_VIDEO_INFO_INTERLACE_MODE (&(props_cfg->info)) =
          props_cfg->interlaced ? GST_VIDEO_INTERLACE_MODE_INTERLEAVED :
          GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_TOP_FIELD_FIRST:
    {
      /* The top-field-first flag is a detail related to
       * interlacing, so no video info update is needed */
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);
      props_cfg->top_field_first = g_value_get_boolean (value);
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_PLANE_STRIDES:
    {
      GValueArray *valarray = g_value_get_boxed (value);
      guint n_planes;
      guint i;

      /* If no valarray is given, then disable custom
       * plane strides & offsets and stick to the
       * standard computed ones */
      if (valarray == NULL) {
        GST_DEBUG_OBJECT (raw_video_parse,
            "custom plane strides & offsets disabled");
        props_cfg->custom_plane_strides = FALSE;
        gst_raw_video_parse_update_info (props_cfg);
        break;
      }

      /* Sanity check - reject empty arrays */
      if ((valarray != NULL) && (valarray->n_values == 0)) {
        GST_ELEMENT_ERROR (raw_video_parse, LIBRARY, SETTINGS,
            ("plane strides property holds an empty array"), (NULL));
        break;
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      n_planes = GST_VIDEO_INFO_N_PLANES (&(props_cfg->info));

      /* Check that the valarray holds the right number of values */
      if (valarray->n_values != n_planes) {
        GST_ELEMENT_ERROR (raw_video_parse, LIBRARY, SETTINGS,
            ("incorrect number of elements in plane strides property"),
            ("expected: %u, got: %u", n_planes, valarray->n_values));
        GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
        break;
      }

      /* Copy the values to the stride array */
      for (i = 0; i < n_planes; ++i) {
        GValue *val = g_value_array_get_nth (valarray, i);
        props_cfg->plane_strides[i] = g_value_get_uint (val);
        GST_DEBUG_OBJECT (raw_video_parse, "plane #%u stride: %d", i,
            props_cfg->plane_strides[i]);
      }

      props_cfg->custom_plane_strides = TRUE;

      gst_raw_video_parse_update_info (props_cfg);

      if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse))
        gst_base_parse_set_min_frame_size (base_parse,
            gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_PLANE_OFFSETS:
    {
      GValueArray *valarray = g_value_get_boxed (value);
      guint n_planes;
      guint i;

      /* If no valarray is given, then disable custom
       * plane strides & offsets and stick to the
       * standard computed ones */
      if (valarray == NULL) {
        GST_DEBUG_OBJECT (raw_video_parse,
            "custom plane strides & offsets disabled");
        props_cfg->custom_plane_strides = FALSE;
        gst_raw_video_parse_update_info (props_cfg);
        break;
      }

      /* Sanity check - reject empty arrays */
      if ((valarray != NULL) && (valarray->n_values == 0)) {
        GST_ELEMENT_ERROR (raw_video_parse, LIBRARY, SETTINGS,
            ("plane offsets property holds an empty array"), (NULL));
        break;
      }

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);

      n_planes = GST_VIDEO_INFO_N_PLANES (&(props_cfg->info));

      /* Check that the valarray holds the right number of values */
      if (valarray->n_values != n_planes) {
        GST_ELEMENT_ERROR (raw_video_parse, LIBRARY, SETTINGS,
            ("incorrect number of elements in plane offsets property"),
            ("expected: %u, got: %u", n_planes, valarray->n_values));
        GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
        break;
      }

      /* Copy the values to the offset array */
      for (i = 0; i < n_planes; ++i) {
        GValue *val = g_value_array_get_nth (valarray, i);
        props_cfg->plane_offsets[i] = g_value_get_uint (val);
        GST_DEBUG_OBJECT (raw_video_parse, "plane #%u offset: %"
            G_GSIZE_FORMAT, i, props_cfg->plane_offsets[i]);
      }

      props_cfg->custom_plane_strides = TRUE;

      gst_raw_video_parse_update_info (props_cfg);

      if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse))
        gst_base_parse_set_min_frame_size (base_parse,
            gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));

      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    case PROP_FRAME_STRIDE:
    {
      /* The frame stride does not affect the video frame size,
       * so it is just set directly without any updates */
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_LOCK (object);
      props_cfg->frame_stride = g_value_get_uint (value);
      gst_raw_video_parse_update_info (props_cfg);
      if (!gst_raw_video_parse_is_using_sink_caps (raw_video_parse))
        gst_base_parse_set_min_frame_size (base_parse,
            gst_raw_video_parse_get_config_frame_size (raw_base_parse,
                GST_RAW_BASE_PARSE_CONFIG_PROPERTIES));
      GST_RAW_BASE_PARSE_CONFIG_MUTEX_UNLOCK (object);
      break;
    }

    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec)
{
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;
  GstVideoFormat fmt;
  GstVideoInfo *info = &dec->input_state->info;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.fps_numerator, dec->info.fps_denominator,
      dec->info.aspect_numerator, dec->info.aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1. */
  par_num = GST_VIDEO_INFO_PAR_N (info);
  par_den = GST_VIDEO_INFO_PAR_D (info);

  /* If we have a default PAR, see if the decoder specified a different one */
  if (par_num == 1 && par_den == 1 &&
      (dec->info.aspect_numerator != 0 && dec->info.aspect_denominator != 0)) {
    par_num = dec->info.aspect_numerator;
    par_den = dec->info.aspect_denominator;
  }

  /* theora has:
   *
   *  width/height : dimension of the encoded frame
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);
  GST_DEBUG_OBJECT (dec, "frame dimension %dx%d, offset %d:%d",
      dec->info.pic_width, dec->info.pic_height,
      dec->info.pic_x, dec->info.pic_y);

  switch (dec->info.pixel_fmt) {
    case TH_PF_420:
      fmt = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      fmt = GST_VIDEO_FORMAT_Y42B;
      break;
    case TH_PF_444:
      fmt = GST_VIDEO_FORMAT_Y444;
      break;
    default:
      goto unsupported_format;
  }

  GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
  GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;

  /* Ensure correct offsets in chroma for formats that need it
   * by rounding the offset. libtheora will add proper pixels,
   * so no need to handle them ourselves. */
  if (dec->info.pic_x & 1 && dec->info.pixel_fmt != TH_PF_444) {
    GST_VIDEO_INFO_WIDTH (info)++;
  }
  if (dec->info.pic_y & 1 && dec->info.pixel_fmt == TH_PF_420) {
    GST_VIDEO_INFO_HEIGHT (info)++;
  }

  GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
      info->width, info->height, dec->info.pic_x, dec->info.pic_y);

  /* done */
  dec->decoder = th_decode_alloc (&dec->info, dec->setup);

  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MV,
          &dec->telemetry_mv, sizeof (dec->telemetry_mv)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MV visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MBMODE,
          &dec->telemetry_mbmode, sizeof (dec->telemetry_mbmode)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable MB mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_QI,
          &dec->telemetry_qi, sizeof (dec->telemetry_qi)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable QI mode visualisation");
  }
  if (th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_BITS,
          &dec->telemetry_bits, sizeof (dec->telemetry_bits)) != TH_EIMPL) {
    GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
  }

  /* Create the output state */
  dec->output_state = state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
      info->width, info->height, dec->input_state);

  /* FIXME : Do we still need to set fps/par now that we pass the reference
   * input stream ? */
  state->info.fps_n = dec->info.fps_numerator;
  state->info.fps_d = dec->info.fps_denominator;
  state->info.par_n = par_num;
  state->info.par_d = par_den;

  /* these values are for all versions of the colorspace specified in the
   * theora info */
  state->info.chroma_site = GST_VIDEO_CHROMA_SITE_JPEG;
  state->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;
  state->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
  state->info.colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
  switch (dec->info.colorspace) {
    case TH_CS_ITU_REC_470M:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
      break;
    case TH_CS_ITU_REC_470BG:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
      break;
    default:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
  }

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  dec->have_header = TRUE;

  return ret;

  /* ERRORS */
unsupported_format:
  {
    GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
    return GST_FLOW_ERROR;
  }
}
static gboolean
gst_imx_egl_viv_sink_gles2_renderer_update_display_ratio (
    GstImxEglVivSinkGLES2Renderer * renderer, GstVideoInfo * video_info)
{
  /* must be called with lock */

  float display_scale_w, display_scale_h;

  if (renderer->force_aspect_ratio && (renderer->window_width != 0)
      && (renderer->window_height != 0)) {
    gint video_par_n, video_par_d, window_par_n, window_par_d;
    float norm_ratio;

    video_par_n = GST_VIDEO_INFO_PAR_N (video_info);
    video_par_d = GST_VIDEO_INFO_PAR_D (video_info);
    window_par_n = 1;
    window_par_d = 1;

    if (!gst_video_calculate_display_ratio (&(renderer->display_ratio_n),
            &(renderer->display_ratio_d), video_info->width,
            video_info->height, video_par_n, video_par_d, window_par_n,
            window_par_d)) {
      GLES2_RENDERER_UNLOCK (renderer);
      GST_ERROR ("could not calculate display ratio");
      return FALSE;
    }

    norm_ratio =
        (float) (renderer->display_ratio_n) /
        (float) (renderer->display_ratio_d) *
        (float) (renderer->window_height) / (float) (renderer->window_width);

    GST_LOG ("force aspect ratio: %d video width/height: %dx%d "
        "video pixel aspect ratio: %d/%d window pixel aspect ratio: %d/%d "
        "calculated display ratio: %d/%d window width/height: %dx%d "
        "norm ratio: %f",
        renderer->force_aspect_ratio, video_info->width, video_info->height,
        video_par_n, video_par_d, window_par_n, window_par_d,
        renderer->display_ratio_n, renderer->display_ratio_d,
        renderer->window_width, renderer->window_height, norm_ratio);

    if (norm_ratio >= 1.0f) {
      display_scale_w = 1.0f;
      display_scale_h = 1.0f / norm_ratio;
    } else {
      display_scale_w = norm_ratio;
      display_scale_h = 1.0f;
    }
  } else {
    renderer->display_ratio_n = 1;
    renderer->display_ratio_d = 1;
    display_scale_w = 1.0f;
    display_scale_h = 1.0f;
  }

  if (renderer->frame_rect_uloc != -1) {
    GST_LOG ("display scale: %f/%f", display_scale_w, display_scale_h);
    glUniform2f (renderer->frame_rect_uloc, display_scale_w, display_scale_h);
  }

  return TRUE;
}
static void
gst_vp8_enc_set_stream_info (GstVPXEnc * enc, GstCaps * caps,
    GstVideoInfo * info)
{
  GstStructure *s;
  GstVideoEncoder *video_encoder;
  GstBuffer *stream_hdr, *vorbiscomment;
  const GstTagList *iface_tags;
  GValue array = { 0, };
  GValue value = { 0, };
  guint8 *data = NULL;
  GstMapInfo map;

  video_encoder = GST_VIDEO_ENCODER (enc);
  s = gst_caps_get_structure (caps, 0);

  /* put buffers in a fixed list */
  g_value_init (&array, GST_TYPE_ARRAY);
  g_value_init (&value, GST_TYPE_BUFFER);

  /* Create Ogg stream-info */
  stream_hdr = gst_buffer_new_and_alloc (26);
  gst_buffer_map (stream_hdr, &map, GST_MAP_WRITE);
  data = map.data;

  GST_WRITE_UINT8 (data, 0x4F);
  GST_WRITE_UINT32_BE (data + 1, 0x56503830);   /* "VP80" */
  GST_WRITE_UINT8 (data + 5, 0x01);     /* stream info header */
  GST_WRITE_UINT8 (data + 6, 1);        /* Major version 1 */
  GST_WRITE_UINT8 (data + 7, 0);        /* Minor version 0 */
  GST_WRITE_UINT16_BE (data + 8, GST_VIDEO_INFO_WIDTH (info));
  GST_WRITE_UINT16_BE (data + 10, GST_VIDEO_INFO_HEIGHT (info));
  GST_WRITE_UINT24_BE (data + 12, GST_VIDEO_INFO_PAR_N (info));
  GST_WRITE_UINT24_BE (data + 15, GST_VIDEO_INFO_PAR_D (info));
  GST_WRITE_UINT32_BE (data + 18, GST_VIDEO_INFO_FPS_N (info));
  GST_WRITE_UINT32_BE (data + 22, GST_VIDEO_INFO_FPS_D (info));
  gst_buffer_unmap (stream_hdr, &map);

  GST_BUFFER_FLAG_SET (stream_hdr, GST_BUFFER_FLAG_HEADER);
  gst_value_set_buffer (&value, stream_hdr);
  gst_value_array_append_value (&array, &value);
  g_value_unset (&value);
  gst_buffer_unref (stream_hdr);

  iface_tags = gst_tag_setter_get_tag_list (GST_TAG_SETTER (video_encoder));
  if (iface_tags) {
    vorbiscomment =
        gst_tag_list_to_vorbiscomment_buffer (iface_tags,
        (const guint8 *) "OVP80\2 ", 7,
        "Encoded with GStreamer vp8enc " PACKAGE_VERSION);

    GST_BUFFER_FLAG_SET (vorbiscomment, GST_BUFFER_FLAG_HEADER);

    g_value_init (&value, GST_TYPE_BUFFER);
    gst_value_set_buffer (&value, vorbiscomment);
    gst_value_array_append_value (&array, &value);
    g_value_unset (&value);
    gst_buffer_unref (vorbiscomment);
  }

  gst_structure_set_value (s, "streamheader", &array);
  g_value_unset (&array);
}
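/* Hedged sketch of the 26-byte Ogg VP8 stream-info header written above:
 * byte 0 is 'O', bytes 1-4 are "VP80", byte 5 is the header type (0x01),
 * bytes 6-7 the version, then 16-bit width/height, 24-bit PAR numerator and
 * denominator, and 32-bit framerate numerator and denominator, all
 * big-endian. write_vp8_stream_header() is a hypothetical standalone writer
 * using the same GLib/GStreamer byte macros. */
#include <gst/gst.h>

static void
write_vp8_stream_header (guint8 * data, guint16 width, guint16 height,
    guint32 par_n, guint32 par_d, guint32 fps_n, guint32 fps_d)
{
  GST_WRITE_UINT8 (data, 0x4F);         /* 'O' */
  GST_WRITE_UINT32_BE (data + 1, 0x56503830);   /* "VP80" */
  GST_WRITE_UINT8 (data + 5, 0x01);     /* stream info header */
  GST_WRITE_UINT8 (data + 6, 1);        /* major version */
  GST_WRITE_UINT8 (data + 7, 0);        /* minor version */
  GST_WRITE_UINT16_BE (data + 8, width);
  GST_WRITE_UINT16_BE (data + 10, height);
  GST_WRITE_UINT24_BE (data + 12, par_n);
  GST_WRITE_UINT24_BE (data + 15, par_d);
  GST_WRITE_UINT32_BE (data + 18, fps_n);
  GST_WRITE_UINT32_BE (data + 22, fps_d);       /* 22 + 4 = 26 bytes total */
}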
static gboolean
gst_glimage_sink_set_caps (GstBaseSink * bsink, GstCaps * caps)
{
  GstGLImageSink *glimage_sink;
  gint width;
  gint height;
  gboolean ok;
  gint par_n, par_d;
  gint display_par_n, display_par_d;
  guint display_ratio_num, display_ratio_den;
  GstVideoInfo vinfo;
  GstStructure *structure;
  GstBufferPool *newpool, *oldpool;

  GST_DEBUG ("set caps with %" GST_PTR_FORMAT, caps);

  glimage_sink = GST_GLIMAGE_SINK (bsink);

  ok = gst_video_info_from_caps (&vinfo, caps);
  if (!ok)
    return FALSE;

  width = GST_VIDEO_INFO_WIDTH (&vinfo);
  height = GST_VIDEO_INFO_HEIGHT (&vinfo);

  if (glimage_sink->tex_id)
    gst_gl_context_del_texture (glimage_sink->context, &glimage_sink->tex_id);
  //FIXME: this texture seems to be never deleted when going to STATE_NULL
  gst_gl_context_gen_texture (glimage_sink->context, &glimage_sink->tex_id,
      GST_VIDEO_INFO_FORMAT (&vinfo), width, height);

  par_n = GST_VIDEO_INFO_PAR_N (&vinfo);
  par_d = GST_VIDEO_INFO_PAR_D (&vinfo);

  if (!par_n)
    par_n = 1;

  /* get display's PAR */
  if (glimage_sink->par_n != 0 && glimage_sink->par_d != 0) {
    display_par_n = glimage_sink->par_n;
    display_par_d = glimage_sink->par_d;
  } else {
    display_par_n = 1;
    display_par_d = 1;
  }

  ok = gst_video_calculate_display_ratio (&display_ratio_num,
      &display_ratio_den, width, height, par_n, par_d, display_par_n,
      display_par_d);

  if (!ok)
    return FALSE;

  GST_TRACE ("PAR: %u/%u DAR:%u/%u", par_n, par_d, display_par_n,
      display_par_d);

  if (height % display_ratio_den == 0) {
    GST_DEBUG ("keeping video height");
    GST_VIDEO_SINK_WIDTH (glimage_sink) = (guint)
        gst_util_uint64_scale_int (height, display_ratio_num,
        display_ratio_den);
    GST_VIDEO_SINK_HEIGHT (glimage_sink) = height;
  } else if (width % display_ratio_num == 0) {
    GST_DEBUG ("keeping video width");
    GST_VIDEO_SINK_WIDTH (glimage_sink) = width;
    GST_VIDEO_SINK_HEIGHT (glimage_sink) = (guint)
        gst_util_uint64_scale_int (width, display_ratio_den,
        display_ratio_num);
  } else {
    GST_DEBUG ("approximating while keeping video height");
    GST_VIDEO_SINK_WIDTH (glimage_sink) = (guint)
        gst_util_uint64_scale_int (height, display_ratio_num,
        display_ratio_den);
    GST_VIDEO_SINK_HEIGHT (glimage_sink) = height;
  }
  GST_DEBUG ("scaling to %dx%d", GST_VIDEO_SINK_WIDTH (glimage_sink),
      GST_VIDEO_SINK_HEIGHT (glimage_sink));

  glimage_sink->info = vinfo;

  newpool = gst_gl_buffer_pool_new (glimage_sink->context);
  structure = gst_buffer_pool_get_config (newpool);
  gst_buffer_pool_config_set_params (structure, caps, vinfo.size, 2, 0);
  gst_buffer_pool_set_config (newpool, structure);

  oldpool = glimage_sink->pool;
  /* we don't activate the pool yet, this will be done by downstream after it
   * has configured the pool. If downstream does not want our pool we will
   * activate it when we render into it */
  glimage_sink->pool = newpool;

  /* unref the old sink */
  if (oldpool) {
    /* we don't deactivate, some elements might still be using it, it will
     * be deactivated when the last ref is gone */
    gst_object_unref (oldpool);
  }

  return TRUE;
}
static gboolean
gst_raw_video_parse_set_config_from_caps (GstRawBaseParse * raw_base_parse,
    GstRawBaseParseConfig config, GstCaps * caps)
{
  int i;
  GstStructure *structure;
  GstRawVideoParse *raw_video_parse = GST_RAW_VIDEO_PARSE (raw_base_parse);
  GstRawVideoParseConfig *config_ptr =
      gst_raw_video_parse_get_config_ptr (raw_video_parse, config);

  g_assert (caps != NULL);

  /* Caps might get copied, and the copy needs to be unref'd.
   * Also, the caller retains ownership over the original caps.
   * So, to make this mechanism also work with cases where the
   * caps are *not* copied, ref the original caps here first. */
  gst_caps_ref (caps);

  structure = gst_caps_get_structure (caps, 0);

  /* For unaligned raw data, the output caps stay the same,
   * except that video/x-unaligned-raw becomes video/x-raw,
   * since the parser aligns the frame data */
  if (gst_structure_has_name (structure, "video/x-unaligned-raw")) {
    /* Copy the caps to be able to modify them */
    GstCaps *new_caps = gst_caps_copy (caps);
    gst_caps_unref (caps);
    caps = new_caps;

    /* Change the media type to video/x-raw , otherwise
     * gst_video_info_from_caps() won't work */
    structure = gst_caps_get_structure (caps, 0);
    gst_structure_set_name (structure, "video/x-raw");
  }

  config_ptr->ready = gst_video_info_from_caps (&(config_ptr->info), caps);

  if (config_ptr->ready) {
    config_ptr->width = GST_VIDEO_INFO_WIDTH (&(config_ptr->info));
    config_ptr->height = GST_VIDEO_INFO_HEIGHT (&(config_ptr->info));
    config_ptr->pixel_aspect_ratio_n =
        GST_VIDEO_INFO_PAR_N (&(config_ptr->info));
    config_ptr->pixel_aspect_ratio_d =
        GST_VIDEO_INFO_PAR_D (&(config_ptr->info));
    config_ptr->framerate_n = GST_VIDEO_INFO_FPS_N (&(config_ptr->info));
    config_ptr->framerate_d = GST_VIDEO_INFO_FPS_D (&(config_ptr->info));
    config_ptr->interlaced = GST_VIDEO_INFO_IS_INTERLACED (&(config_ptr->info));
    config_ptr->top_field_first = 0;
    config_ptr->frame_stride = 0;

    for (i = 0; i < GST_VIDEO_MAX_PLANES; ++i) {
      config_ptr->plane_offsets[i] =
          GST_VIDEO_INFO_PLANE_OFFSET (&(config_ptr->info), i);
      config_ptr->plane_strides[i] =
          GST_VIDEO_INFO_PLANE_STRIDE (&(config_ptr->info), i);
    }
  }

  gst_caps_unref (caps);

  return config_ptr->ready;
}
static void
gst_raw_video_parse_update_info (GstRawVideoParseConfig * config)
{
  guint i;
  guint n_planes;
  guint last_plane;
  gsize last_plane_offset, last_plane_size;
  GstVideoInfo *info = &(config->info);

  GST_DEBUG ("updating info with width %u height %u format %s "
      " custom plane strides&offsets %d", config->width, config->height,
      gst_video_format_to_string (config->format),
      config->custom_plane_strides);

  gst_video_info_set_format (info, config->format, config->width,
      config->height);

  GST_VIDEO_INFO_PAR_N (info) = config->pixel_aspect_ratio_n;
  GST_VIDEO_INFO_PAR_D (info) = config->pixel_aspect_ratio_d;
  GST_VIDEO_INFO_FPS_N (info) = config->framerate_n;
  GST_VIDEO_INFO_FPS_D (info) = config->framerate_d;
  GST_VIDEO_INFO_INTERLACE_MODE (info) =
      config->interlaced ? GST_VIDEO_INTERLACE_MODE_INTERLEAVED :
      GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  /* Check if there are custom plane strides & offsets that need to be preserved */
  if (config->custom_plane_strides) {
    /* In case there are, overwrite the offsets&strides computed by
     * gst_video_info_set_format with the custom ones */
    for (i = 0; i < GST_VIDEO_MAX_PLANES; ++i) {
      GST_VIDEO_INFO_PLANE_OFFSET (info, i) = config->plane_offsets[i];
      GST_VIDEO_INFO_PLANE_STRIDE (info, i) = config->plane_strides[i];
    }
  } else {
    /* No custom planes&offsets; copy the computed ones into
     * the plane_offsets & plane_strides arrays to ensure they
     * are equal to the ones in the videoinfo */
    for (i = 0; i < GST_VIDEO_MAX_PLANES; ++i) {
      config->plane_offsets[i] = GST_VIDEO_INFO_PLANE_OFFSET (info, i);
      config->plane_strides[i] = GST_VIDEO_INFO_PLANE_STRIDE (info, i);
    }
  }

  n_planes = GST_VIDEO_INFO_N_PLANES (info);
  if (n_planes < 1)
    n_planes = 1;

  /* Figure out what plane is the physically last one. Typically
   * this is the last plane in the list (= at index n_planes-1).
   * However, this is not guaranteed, so we have to scan the offsets
   * to find the last plane. */
  last_plane_offset = 0;
  last_plane = 0;
  for (i = 0; i < n_planes; ++i) {
    gsize plane_offset = GST_VIDEO_INFO_PLANE_OFFSET (info, i);
    if (plane_offset >= last_plane_offset) {
      last_plane = i;
      last_plane_offset = plane_offset;
    }
  }

  last_plane_size =
      GST_VIDEO_INFO_PLANE_STRIDE (info, last_plane) *
      GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (info->finfo, last_plane,
      config->height);

  GST_VIDEO_INFO_SIZE (info) = last_plane_offset + last_plane_size;

  GST_DEBUG ("last plane #%u: offset: %" G_GSIZE_FORMAT " size: %"
      G_GSIZE_FORMAT " => frame size minus extra padding: %" G_GSIZE_FORMAT,
      last_plane, last_plane_offset, last_plane_size,
      GST_VIDEO_INFO_SIZE (info));
}
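/* Hedged sketch of the plane geometry the function above works with: for a
 * computed (non-custom) layout, gst_video_info_set_format() fills in the
 * offsets and strides, and the total size ends just past the physically last
 * plane. For I420 320x240 this should print offsets 0/76800/96000, strides
 * 320/160/160 and size 115200 (values derived from GStreamer's default I420
 * layout). */
#include <gst/video/video.h>

int
main (int argc, char **argv)
{
  GstVideoInfo info;
  guint i;

  gst_init (&argc, &argv);
  gst_video_info_set_format (&info, GST_VIDEO_FORMAT_I420, 320, 240);

  for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&info); i++) {
    g_print ("plane %u: offset %" G_GSIZE_FORMAT " stride %d\n", i,
        GST_VIDEO_INFO_PLANE_OFFSET (&info, i),
        GST_VIDEO_INFO_PLANE_STRIDE (&info, i));
  }
  g_print ("size: %" G_GSIZE_FORMAT "\n", GST_VIDEO_INFO_SIZE (&info));
  return 0;
}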
static GstFlowReturn
handle_sequence (GstMpeg2dec * mpeg2dec, const mpeg2_info_t * info)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime latency;
  const mpeg2_sequence_t *sequence;
  GstVideoCodecState *state;
  GstVideoInfo *dinfo = &mpeg2dec->decoded_info;
  GstVideoInfo *vinfo;
  GstVideoFormat format;

  sequence = info->sequence;

  if (sequence->frame_period == 0)
    goto invalid_frame_period;

  /* mpeg2 video can only be from 16x16 to 4096x4096. Everything
   * else is a corrupted file */
  if (sequence->width > 4096 || sequence->width < 16 ||
      sequence->height > 4096 || sequence->height < 16)
    goto invalid_size;

  GST_DEBUG_OBJECT (mpeg2dec,
      "widthxheight: %dx%d , decoded_widthxheight: %dx%d",
      sequence->picture_width, sequence->picture_height, sequence->width,
      sequence->height);

  if (sequence->picture_width != sequence->width ||
      sequence->picture_height != sequence->height) {
    GST_DEBUG_OBJECT (mpeg2dec, "we need to crop");
    mpeg2dec->need_cropping = TRUE;
  } else {
    GST_DEBUG_OBJECT (mpeg2dec, "no cropping needed");
    mpeg2dec->need_cropping = FALSE;
  }

  /* get subsampling */
  if (sequence->chroma_width < sequence->width) {
    /* horizontally subsampled */
    if (sequence->chroma_height < sequence->height) {
      /* and vertically subsampled */
      format = GST_VIDEO_FORMAT_I420;
    } else {
      format = GST_VIDEO_FORMAT_Y42B;
    }
  } else {
    /* not subsampled */
    format = GST_VIDEO_FORMAT_Y444;
  }

  state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (mpeg2dec),
      format, sequence->picture_width, sequence->picture_height,
      mpeg2dec->input_state);
  vinfo = &state->info;

  /* If we don't have a valid upstream PAR override it */
  if (GST_VIDEO_INFO_PAR_N (vinfo) == 1 &&
      GST_VIDEO_INFO_PAR_D (vinfo) == 1 &&
      sequence->pixel_width != 0 && sequence->pixel_height != 0) {
#if MPEG2_RELEASE >= MPEG2_VERSION(0,5,0)
    guint pixel_width, pixel_height;
    if (mpeg2_guess_aspect (sequence, &pixel_width, &pixel_height)) {
      vinfo->par_n = pixel_width;
      vinfo->par_d = pixel_height;
    }
#else
    vinfo->par_n = sequence->pixel_width;
    vinfo->par_d = sequence->pixel_height;
#endif
    GST_DEBUG_OBJECT (mpeg2dec, "Setting PAR %d x %d",
        vinfo->par_n, vinfo->par_d);
  }
  vinfo->fps_n = 27000000;
  vinfo->fps_d = sequence->frame_period;

  if (!(sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE))
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_MIXED;
  else
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  vinfo->chroma_site = GST_VIDEO_CHROMA_SITE_MPEG2;
  vinfo->colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;

  if (sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION) {
    /* do color description */
    switch (sequence->colour_primaries) {
      case 1:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
        break;
      case 4:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
        break;
      case 5:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
        break;
      case 6:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
        break;
      case 7:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
        break;
    }

    /* matrix coefficients */
    switch (sequence->matrix_coefficients) {
      case 1:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
        break;
      case 4:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_FCC;
        break;
      case 5:
      case 6:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
        break;
      case 7:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_UNKNOWN;
        break;
    }

    /* transfer characteristics */
    switch (sequence->transfer_characteristics) {
      case 1:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 4:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA22;
        break;
      case 5:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA28;
        break;
      case 6:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 7:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_SMPTE240M;
        break;
      case 8:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 9-255 reserved */
      default:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
        break;
    }
  }

  GST_DEBUG_OBJECT (mpeg2dec,
      "sequence flags: %d, frame period: %d, frame rate: %d/%d",
      sequence->flags, sequence->frame_period, vinfo->fps_n, vinfo->fps_d);
  GST_DEBUG_OBJECT (mpeg2dec, "profile: %02x, colour_primaries: %d",
      sequence->profile_level_id, sequence->colour_primaries);
  GST_DEBUG_OBJECT (mpeg2dec, "transfer chars: %d, matrix coef: %d",
      sequence->transfer_characteristics, sequence->matrix_coefficients);
  GST_DEBUG_OBJECT (mpeg2dec,
      "FLAGS: CONSTRAINED_PARAMETERS:%d, PROGRESSIVE_SEQUENCE:%d",
      sequence->flags & SEQ_FLAG_CONSTRAINED_PARAMETERS,
      sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE);
  GST_DEBUG_OBJECT (mpeg2dec, "FLAGS: LOW_DELAY:%d, COLOUR_DESCRIPTION:%d",
      sequence->flags & SEQ_FLAG_LOW_DELAY,
      sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION);

  /* we store the codec size before cropping */
  *dinfo = *vinfo;
  gst_video_info_set_format (dinfo, format, sequence->width,
      sequence->height);

  /* Mpeg2dec has 2 frame latency to produce a picture and 1 frame latency in
   * its parser */
  latency = gst_util_uint64_scale (3, vinfo->fps_d, vinfo->fps_n);
  gst_video_decoder_set_latency (GST_VIDEO_DECODER (mpeg2dec), latency,
      latency);

  if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (mpeg2dec)))
    goto negotiation_fail;

  gst_video_codec_state_unref (state);

  mpeg2_custom_fbuf (mpeg2dec->decoder, 1);

  init_dummybuf (mpeg2dec);

  /* Pump in some null buffers, because otherwise libmpeg2 doesn't
   * initialise the discard_fbuf->id */
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  gst_mpeg2dec_clear_buffers (mpeg2dec);

  return ret;

invalid_frame_period:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Frame period is 0!");
    return GST_FLOW_ERROR;
  }
invalid_size:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Invalid frame dimensions: %d x %d",
        sequence->width, sequence->height);
    return GST_FLOW_ERROR;
  }
negotiation_fail:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Failed to negotiate with downstream");
    return GST_FLOW_ERROR;
  }
}
static GstFlowReturn
daala_handle_type_packet (GstDaalaDec * dec)
{
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;
  GstVideoFormat fmt;
  GstVideoInfo *info;

  if (!dec->input_state)
    return GST_FLOW_NOT_NEGOTIATED;

  info = &dec->input_state->info;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.timebase_numerator, dec->info.timebase_denominator,
      dec->info.pixel_aspect_numerator, dec->info.pixel_aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1. */
  par_num = GST_VIDEO_INFO_PAR_N (info);
  par_den = GST_VIDEO_INFO_PAR_D (info);

  /* If we have a default PAR, see if the decoder specified a different one */
  if (par_num == 1 && par_den == 1 &&
      (dec->info.pixel_aspect_numerator != 0 &&
          dec->info.pixel_aspect_denominator != 0)) {
    par_num = dec->info.pixel_aspect_numerator;
    par_den = dec->info.pixel_aspect_denominator;
  }

  /* daala has:
   *
   *  width/height : dimension of the encoded frame
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d", dec->info.pic_width,
      dec->info.pic_height, par_num, par_den);

  if (dec->info.nplanes == 3 && dec->info.plane_info[0].xdec == 0
      && dec->info.plane_info[0].ydec == 0
      && dec->info.plane_info[1].xdec == 1
      && dec->info.plane_info[1].ydec == 1
      && dec->info.plane_info[2].xdec == 1
      && dec->info.plane_info[2].ydec == 1) {
    fmt = GST_VIDEO_FORMAT_I420;
  } else if (dec->info.nplanes == 3 && dec->info.plane_info[0].xdec == 0
      && dec->info.plane_info[0].ydec == 0
      && dec->info.plane_info[1].xdec == 0
      && dec->info.plane_info[1].ydec == 0
      && dec->info.plane_info[2].xdec == 0
      && dec->info.plane_info[2].ydec == 0) {
    fmt = GST_VIDEO_FORMAT_Y444;
  } else {
    goto unsupported_format;
  }

  GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
  GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;

  /* done */
  dec->decoder = daala_decode_alloc (&dec->info, dec->setup);

  /* Create the output state */
  dec->output_state = state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
      info->width, info->height, dec->input_state);

  /* FIXME : Do we still need to set fps/par now that we pass the reference
   * input stream ? */
  state->info.fps_n = dec->info.timebase_numerator;
  state->info.fps_d = dec->info.timebase_denominator;
  state->info.par_n = par_num;
  state->info.par_d = par_den;

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  dec->have_header = TRUE;

  return ret;

  /* ERRORS */
unsupported_format:
  {
    GST_ERROR_OBJECT (dec, "Invalid pixel format");
    return GST_FLOW_ERROR;
  }
}