static gboolean gst_base_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *base_video_decoder_class; GstStructure *structure; const GValue *codec_data; gboolean res = TRUE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); GST_DEBUG ("setcaps %" GST_PTR_FORMAT, caps); if (base_video_decoder->codec_data) { gst_buffer_unref (base_video_decoder->codec_data); base_video_decoder->codec_data = NULL; } structure = gst_caps_get_structure (caps, 0); codec_data = gst_structure_get_value (structure, "codec_data"); if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER) { base_video_decoder->codec_data = gst_value_get_buffer (codec_data); } if (base_video_decoder_class->set_sink_caps) res = base_video_decoder_class->set_sink_caps (base_video_decoder, caps); g_object_unref (base_video_decoder); return res; }
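/* Illustrative sketch (not from the original source): how a subclass might
 * implement the set_sink_caps vfunc invoked above. The name
 * gst_my_dec_set_sink_caps is hypothetical; only the GstStructure getters
 * and GST_DEBUG_OBJECT are standard GStreamer API. */
static gboolean
gst_my_dec_set_sink_caps (GstBaseVideoDecoder * decoder, GstCaps * caps)
{
  GstStructure *structure = gst_caps_get_structure (caps, 0);
  gint width = 0, height = 0;
  gint fps_n = 0, fps_d = 1;

  /* these fields are often absent from encoded caps, so a failed lookup
   * simply leaves the defaults in place */
  gst_structure_get_int (structure, "width", &width);
  gst_structure_get_int (structure, "height", &height);
  gst_structure_get_fraction (structure, "framerate", &fps_n, &fps_d);

  GST_DEBUG_OBJECT (decoder, "sink caps: %dx%d at %d/%d fps",
      width, height, fps_n, fps_d);

  /* a real subclass would (re)configure its parsing/decoding state here */
  return TRUE;
}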
static gboolean gst_base_video_decoder_src_query (GstPad * pad, GstQuery * query) { GstBaseVideoDecoder *enc; gboolean res; enc = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_CONVERT: { GstFormat src_fmt, dest_fmt; gint64 src_val, dest_val; gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); res = gst_base_video_decoder_src_convert (pad, src_fmt, src_val, &dest_fmt, &dest_val); if (!res) goto error; gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); break; } default: res = gst_pad_query_default (pad, query); } gst_object_unref (enc); return res; error: GST_DEBUG_OBJECT (enc, "query failed"); gst_object_unref (enc); return res; }
static void gst_base_video_decoder_finalize (GObject * object) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *base_video_decoder_class; g_return_if_fail (GST_IS_BASE_VIDEO_DECODER (object)); base_video_decoder = GST_BASE_VIDEO_DECODER (object); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (object); gst_base_video_decoder_reset (base_video_decoder); if (base_video_decoder->input_adapter) { g_object_unref (base_video_decoder->input_adapter); base_video_decoder->input_adapter = NULL; } if (base_video_decoder->output_adapter) { g_object_unref (base_video_decoder->output_adapter); base_video_decoder->output_adapter = NULL; } GST_DEBUG_OBJECT (object, "finalize"); G_OBJECT_CLASS (parent_class)->finalize (object); }
static GstStateChangeReturn gst_base_video_decoder_change_state (GstElement * element, GstStateChange transition) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *base_video_decoder_class; GstStateChangeReturn ret; base_video_decoder = GST_BASE_VIDEO_DECODER (element); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (element); switch (transition) { default: break; } ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition); switch (transition) { case GST_STATE_CHANGE_PAUSED_TO_READY: break; default: break; } return ret; }
static gboolean gst_base_video_decoder_sink_convert (GstPad * pad, GstFormat src_format, gint64 src_value, GstFormat * dest_format, gint64 * dest_value) { gboolean res = TRUE; GstBaseVideoDecoder *enc; if (src_format == *dest_format) { *dest_value = src_value; return TRUE; } enc = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); /* FIXME: check if we are in a decoding state */ switch (src_format) { case GST_FORMAT_BYTES: switch (*dest_format) { #if 0 case GST_FORMAT_DEFAULT: *dest_value = gst_util_uint64_scale_int (src_value, 1, enc->bytes_per_picture); break; #endif case GST_FORMAT_TIME: /* seems like a rather silly conversion, implement me if you like */ default: res = FALSE; } break; case GST_FORMAT_DEFAULT: switch (*dest_format) { case GST_FORMAT_TIME: *dest_value = gst_util_uint64_scale (src_value, GST_SECOND * enc->fps_d, enc->fps_n); break; #if 0 case GST_FORMAT_BYTES: *dest_value = gst_util_uint64_scale_int (src_value, enc->bytes_per_picture, 1); break; #endif default: res = FALSE; } break; default: res = FALSE; break; } gst_object_unref (enc); return res; }
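/* Worked example (illustrative only) of the GST_FORMAT_DEFAULT ->
 * GST_FORMAT_TIME branch above: a frame count scales to nanoseconds as
 * frames * GST_SECOND * fps_d / fps_n. The numbers below are made up. */
static GstClockTime
frames_to_time_example (void)
{
  guint64 frames = 300;         /* about ten seconds at 29.97 fps */
  gint fps_n = 30000, fps_d = 1001;

  /* same arithmetic as the sink_convert DEFAULT -> TIME case */
  return gst_util_uint64_scale (frames, GST_SECOND * fps_d, fps_n);
}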
static gboolean gst_base_video_decoder_src_convert (GstPad * pad, GstFormat src_format, gint64 src_value, GstFormat * dest_format, gint64 * dest_value) { gboolean res = TRUE; GstBaseVideoDecoder *enc; if (src_format == *dest_format) { *dest_value = src_value; return TRUE; } enc = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); /* FIXME: check if we are in a decoding state */ GST_DEBUG ("src convert"); switch (src_format) { #if 0 case GST_FORMAT_DEFAULT: switch (*dest_format) { case GST_FORMAT_TIME: *dest_value = gst_util_uint64_scale (granulepos_to_frame (src_value), enc->fps_d * GST_SECOND, enc->fps_n); break; default: res = FALSE; } break; case GST_FORMAT_TIME: switch (*dest_format) { case GST_FORMAT_DEFAULT: { *dest_value = gst_util_uint64_scale (src_value, enc->fps_n, enc->fps_d * GST_SECOND); break; } default: res = FALSE; break; } break; #endif default: res = FALSE; break; } gst_object_unref (enc); return res; }
static gboolean gst_base_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *base_video_decoder_class; GstStructure *structure; const GValue *codec_data; GstVideoState *state; gboolean ret = TRUE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); GST_DEBUG ("setcaps %" GST_PTR_FORMAT, caps); state = &base_video_decoder->state; if (state->codec_data) { gst_buffer_unref (state->codec_data); } memset (state, 0, sizeof (GstVideoState)); structure = gst_caps_get_structure (caps, 0); gst_video_format_parse_caps (caps, NULL, &state->width, &state->height); gst_video_parse_caps_framerate (caps, &state->fps_n, &state->fps_d); gst_video_parse_caps_pixel_aspect_ratio (caps, &state->par_n, &state->par_d); #if 0 /* requires 0.10.23 */ state->have_interlaced = gst_video_format_parse_caps_interlaced (caps, &state->interlaced); #else state->have_interlaced = gst_structure_get_boolean (structure, "interlaced", &state->interlaced); #endif codec_data = gst_structure_get_value (structure, "codec_data"); if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER) { state->codec_data = gst_value_get_buffer (codec_data); } if (base_video_decoder_class->start) { ret = base_video_decoder_class->start (base_video_decoder); } g_object_unref (base_video_decoder); return ret; }
static gboolean gst_base_video_decoder_sink_activate_push (GstPad * pad, gboolean active) { gboolean result = TRUE; GstBaseVideoDecoder *base_video_decoder; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); result = gst_base_video_decoder_sink_activate (base_video_decoder, active); gst_object_unref (base_video_decoder); return result; }
static gboolean gst_base_video_decoder_src_query (GstPad * pad, GstQuery * query) { GstBaseVideoDecoder *dec; gboolean res = TRUE; dec = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_POSITION: { GstFormat format; gint64 time; gst_query_parse_position (query, &format, NULL); GST_DEBUG ("query in format %d", format); if (format != GST_FORMAT_TIME) { goto error; } time = dec->last_timestamp; time = gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, time); gst_query_set_position (query, format, time); res = TRUE; break; } case GST_QUERY_DURATION: /* FIXME: approximate using bitrate if upstream doesn't answer */ res = gst_pad_peer_query (dec->sinkpad, query); break; default: res = gst_pad_query_default (pad, query); } gst_object_unref (dec); return res; error: GST_ERROR_OBJECT (dec, "query failed"); gst_object_unref (dec); return res; }
static void gst_base_video_decoder_set_property (GObject * object, guint property_id, const GValue * value, GParamSpec * pspec) { GstBaseVideoDecoder *base_video_decoder = GST_BASE_VIDEO_DECODER (object); switch (property_id) { case PROP_PACKETIZED: base_video_decoder->packetized = g_value_get_boolean (value); break; case PROP_SINK_CLIPPING: base_video_decoder->sink_clipping = g_value_get_boolean (value); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); break; } }
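/* Sketch of how a caller might flip these switches; it assumes the two
 * properties are registered under the names "packetized" and
 * "sink-clipping", which is not shown in this file. */
static void
configure_decoder_example (GstElement * decoder)
{
  /* packetized: upstream already delivers one complete frame per buffer,
   * sink-clipping: drop decoded frames that fall outside the segment */
  g_object_set (G_OBJECT (decoder),
      "packetized", TRUE, "sink-clipping", TRUE, NULL);
}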
static gboolean gst_base_video_decoder_sink_query (GstPad * pad, GstQuery * query) { GstBaseVideoDecoder *base_video_decoder; gboolean res = FALSE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); GST_DEBUG_OBJECT (base_video_decoder, "sink query fps=%d/%d", base_video_decoder->state.fps_n, base_video_decoder->state.fps_d); switch (GST_QUERY_TYPE (query)) { default: res = gst_pad_query_default (pad, query); break; } gst_object_unref (base_video_decoder); return res; }
static gboolean gst_base_video_decoder_sink_query (GstPad * pad, GstQuery * query) { GstBaseVideoDecoder *base_video_decoder; gboolean res = FALSE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); GST_DEBUG_OBJECT (base_video_decoder, "sink query fps=%d/%d", base_video_decoder->state.fps_n, base_video_decoder->state.fps_d); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_CONVERT: { GstFormat src_fmt, dest_fmt; gint64 src_val, dest_val; gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); res = gst_base_video_rawvideo_convert (&base_video_decoder->state, src_fmt, src_val, &dest_fmt, &dest_val); if (!res) goto error; gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); break; } default: res = gst_pad_query_default (pad, query); break; } done: gst_object_unref (base_video_decoder); return res; error: GST_DEBUG_OBJECT (base_video_decoder, "query failed"); goto done; }
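/* Sketch of the downstream/application side of the CONVERT handling above:
 * ask the decoder's sink pad to translate a frame count (GST_FORMAT_DEFAULT)
 * into a timestamp. The helper name is hypothetical; the query API is
 * standard GStreamer. */
static gboolean
query_frame_to_time_example (GstPad * sinkpad, gint64 frame,
    GstClockTime * time)
{
  GstQuery *query;
  GstFormat src_fmt, dest_fmt;
  gint64 src_val, dest_val;
  gboolean res;

  query = gst_query_new_convert (GST_FORMAT_DEFAULT, frame, GST_FORMAT_TIME);
  res = gst_pad_query (sinkpad, query);
  if (res) {
    gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
    *time = (GstClockTime) dest_val;
  }
  gst_query_unref (query);

  return res;
}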
static GstFlowReturn gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *base_video_decoder_class; GstFlowReturn ret; GST_DEBUG ("chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); #if 0 /* requiring the pad to be negotiated makes it impossible to use * oggdemux or filesrc ! decoder */ if (!gst_pad_is_negotiated (pad)) { GST_DEBUG ("not negotiated"); return GST_FLOW_NOT_NEGOTIATED; } #endif base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); GST_DEBUG_OBJECT (base_video_decoder, "chain"); if (!base_video_decoder->have_segment) { GstEvent *event; GstFlowReturn ret; GST_WARNING ("Received buffer without a new-segment. Assuming timestamps start from 0."); gst_segment_set_newsegment_full (&base_video_decoder->segment, FALSE, 1.0, 1.0, GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0); base_video_decoder->have_segment = TRUE; event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0); ret = gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD (base_video_decoder), event); if (!ret) { GST_ERROR ("new segment event ret=%d", ret); return GST_FLOW_ERROR; } } if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { GST_DEBUG_OBJECT (base_video_decoder, "received DISCONT buffer"); gst_base_video_decoder_flush (base_video_decoder); } if (base_video_decoder->current_frame == NULL) { base_video_decoder->current_frame = gst_base_video_decoder_new_frame (base_video_decoder); } base_video_decoder->input_offset += GST_BUFFER_SIZE (buf); if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) { gst_base_video_decoder_add_timestamp (base_video_decoder, buf); } if (base_video_decoder->packetized) { base_video_decoder->current_frame->sink_buffer = buf; ret = gst_base_video_decoder_have_frame (base_video_decoder, TRUE, NULL); } else { gst_adapter_push (base_video_decoder->input_adapter, buf); ret = gst_base_video_decoder_drain (base_video_decoder, FALSE); } gst_object_unref (base_video_decoder); return ret; }
static GstFlowReturn gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *klass; GstBuffer *buffer; GstFlowReturn ret; GST_DEBUG ("chain %" G_GINT64_FORMAT, GST_BUFFER_TIMESTAMP (buf)); #if 0 /* requiring the pad to be negotiated makes it impossible to use * oggdemux or filesrc ! decoder */ if (!gst_pad_is_negotiated (pad)) { GST_DEBUG ("not negotiated"); return GST_FLOW_NOT_NEGOTIATED; } #endif base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); klass = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); GST_DEBUG_OBJECT (base_video_decoder, "chain"); if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { GST_DEBUG_OBJECT (base_video_decoder, "received DISCONT buffer"); if (base_video_decoder->started) { gst_base_video_decoder_reset (base_video_decoder); } } if (!base_video_decoder->started) { klass->start (base_video_decoder); base_video_decoder->started = TRUE; } if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) { GST_DEBUG ("timestamp %" G_GINT64_FORMAT " offset %" G_GINT64_FORMAT, GST_BUFFER_TIMESTAMP (buf), base_video_decoder->offset); base_video_decoder->last_sink_timestamp = GST_BUFFER_TIMESTAMP (buf); } if (GST_BUFFER_OFFSET_END (buf) != -1) { GST_DEBUG ("gp %" G_GINT64_FORMAT, GST_BUFFER_OFFSET_END (buf)); base_video_decoder->last_sink_offset_end = GST_BUFFER_OFFSET_END (buf); } base_video_decoder->offset += GST_BUFFER_SIZE (buf); #if 0 if (base_video_decoder->timestamp_offset == GST_CLOCK_TIME_NONE && GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) { GST_DEBUG ("got new offset %lld", GST_BUFFER_TIMESTAMP (buf)); base_video_decoder->timestamp_offset = GST_BUFFER_TIMESTAMP (buf); } #endif if (base_video_decoder->current_frame == NULL) { base_video_decoder->current_frame = gst_base_video_decoder_new_frame (base_video_decoder); } gst_adapter_push (base_video_decoder->input_adapter, buf); if (!base_video_decoder->have_sync) { int n, m; GST_DEBUG ("no sync, scanning"); n = gst_adapter_available (base_video_decoder->input_adapter); m = klass->scan_for_sync (base_video_decoder, FALSE, 0, n); if (m < 0) { g_warning ("subclass returned negative scan %d", m); } if (m >= n) { g_warning ("subclass scanned past end %d >= %d", m, n); } gst_adapter_flush (base_video_decoder->input_adapter, m); if (m < n) { GST_DEBUG ("found possible sync after %d bytes (of %d)", m, n); /* this is only "maybe" sync */ base_video_decoder->have_sync = TRUE; } if (!base_video_decoder->have_sync) { gst_object_unref (base_video_decoder); return GST_FLOW_OK; } } /* FIXME: use gst_adapter_prev_timestamp() here instead? */ buffer = gst_adapter_get_buffer (base_video_decoder->input_adapter); base_video_decoder->buffer_timestamp = GST_BUFFER_TIMESTAMP (buffer); gst_buffer_unref (buffer); do { ret = klass->parse_data (base_video_decoder, FALSE); } while (ret == GST_FLOW_OK); if (ret == GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA) { gst_object_unref (base_video_decoder); return GST_FLOW_OK; } gst_object_unref (base_video_decoder); return ret; }
static GstFlowReturn gst_base_video_decoder_chain (GstPad * pad, GstBuffer * buf) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *klass; GstFlowReturn ret; GST_DEBUG ("chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); #if 0 /* requiring the pad to be negotiated makes it impossible to use * oggdemux or filesrc ! decoder */ if (!gst_pad_is_negotiated (pad)) { GST_DEBUG ("not negotiated"); return GST_FLOW_NOT_NEGOTIATED; } #endif base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); klass = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); GST_DEBUG_OBJECT (base_video_decoder, "chain"); if (!base_video_decoder->have_segment) { GstEvent *event; GstFlowReturn ret; GST_WARNING ("Received buffer without a new-segment. Assuming timestamps start from 0."); gst_segment_set_newsegment_full (&base_video_decoder->segment, FALSE, 1.0, 1.0, GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0); base_video_decoder->have_segment = TRUE; event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0); ret = gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), event); if (!ret) { GST_ERROR ("new segment event ret=%d", ret); return GST_FLOW_ERROR; } } if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { GST_DEBUG_OBJECT (base_video_decoder, "received DISCONT buffer"); gst_base_video_decoder_reset (base_video_decoder); } if (!base_video_decoder->started) { klass->start (base_video_decoder); base_video_decoder->started = TRUE; } if (base_video_decoder->current_frame == NULL) { base_video_decoder->current_frame = gst_base_video_decoder_new_frame (base_video_decoder); } if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) { gst_base_video_decoder_add_timestamp (base_video_decoder, buf); } base_video_decoder->input_offset += GST_BUFFER_SIZE (buf); #if 0 if (base_video_decoder->timestamp_offset == GST_CLOCK_TIME_NONE && GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) { GST_DEBUG ("got new offset %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf))); base_video_decoder->timestamp_offset = GST_BUFFER_TIMESTAMP (buf); } #endif if (base_video_decoder->packetized) { base_video_decoder->current_frame->sink_buffer = buf; ret = gst_base_video_decoder_have_frame_2 (base_video_decoder); } else { gst_adapter_push (base_video_decoder->input_adapter, buf); if (!base_video_decoder->have_sync) { int n, m; GST_DEBUG ("no sync, scanning"); n = gst_adapter_available (base_video_decoder->input_adapter); m = klass->scan_for_sync (base_video_decoder, FALSE, 0, n); if (m == -1) { gst_object_unref (base_video_decoder); return GST_FLOW_OK; } if (m < 0) { g_warning ("subclass returned negative scan %d", m); } if (m >= n) { GST_ERROR ("subclass scanned past end %d >= %d", m, n); } gst_adapter_flush (base_video_decoder->input_adapter, m); if (m < n) { GST_DEBUG ("found possible sync after %d bytes (of %d)", m, n); /* this is only "maybe" sync */ base_video_decoder->have_sync = TRUE; } if (!base_video_decoder->have_sync) { gst_object_unref (base_video_decoder); return GST_FLOW_OK; } } do { ret = klass->parse_data (base_video_decoder, FALSE); } while (ret == GST_FLOW_OK); if (ret == GST_BASE_VIDEO_DECODER_FLOW_NEED_DATA) { gst_object_unref (base_video_decoder); return GST_FLOW_OK; } } gst_object_unref (base_video_decoder); return ret; }
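/* Illustrative sketch of the scan_for_sync vfunc that the non-packetized
 * path above relies on: look for a 00 00 01 start-code prefix in the input
 * adapter and report how many bytes may be discarded before it, or -1 when
 * more data is needed (matching the m == -1 branch in chain). The subclass
 * name is hypothetical and the start code is just an example pattern;
 * gst_adapter_peek() is the GStreamer 0.10 adapter API. */
static int
gst_my_dec_scan_for_sync (GstBaseVideoDecoder * decoder, gboolean at_eos,
    int offset, int n)
{
  const guint8 *data;
  int i;

  if (n < 3)
    return -1;                  /* not enough bytes to contain a start code */

  data = gst_adapter_peek (decoder->input_adapter, offset + n);
  for (i = offset; i < offset + n - 2; i++) {
    if (data[i] == 0x00 && data[i + 1] == 0x00 && data[i + 2] == 0x01)
      return i - offset;        /* bytes that can safely be flushed */
  }

  return -1;                    /* no start code found yet, wait for more data */
}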
static GstFlowReturn gst_vdp_mpeg4_dec_handle_configuration (GstVdpMpeg4Dec * mpeg4_dec, GstMpeg4Frame * mpeg4_frame) { Mpeg4VisualObjectSequence vos; Mpeg4VisualObject vo; Mpeg4VideoObjectLayer vol; GstVideoState state; guint8 profile_indication; VdpDecoderProfile profile; GstFlowReturn ret; if (mpeg4_dec->is_configured) return GST_FLOW_OK; if (!mpeg4_frame->vos_buf || !mpeg4_frame->vo_buf || !mpeg4_frame->vol_buf) goto skip_frame; if (!mpeg4_util_parse_VOS (mpeg4_frame->vos_buf, &vos)) goto skip_frame; if (!mpeg4_util_parse_VO (mpeg4_frame->vo_buf, &vo)) goto skip_frame; if (!mpeg4_util_parse_VOL (mpeg4_frame->vol_buf, &vo, &vol)) goto skip_frame; state = gst_base_video_decoder_get_state (GST_BASE_VIDEO_DECODER (mpeg4_dec)); state.width = vol.width; state.height = vol.height; if (vol.fixed_vop_rate) { state.fps_n = vol.vop_time_increment_resolution; state.fps_d = vol.fixed_vop_time_increment; } state.par_n = vol.par_n; state.par_d = vol.par_d; gst_base_video_decoder_set_state (GST_BASE_VIDEO_DECODER (mpeg4_dec), state); profile_indication = vos.profile_and_level_indication >> 4; switch (profile_indication) { case 0x0: profile = VDP_DECODER_PROFILE_MPEG4_PART2_SP; break; case 0xf: profile = VDP_DECODER_PROFILE_MPEG4_PART2_ASP; break; default: goto unsupported_profile; } ret = gst_vdp_decoder_init_decoder (GST_VDP_DECODER (mpeg4_dec), profile, 2); if (ret != GST_FLOW_OK) return ret; mpeg4_dec->vol = vol; mpeg4_dec->is_configured = TRUE; return GST_FLOW_OK; skip_frame: GST_WARNING ("Skipping frame since we're not configured yet"); gst_base_video_decoder_skip_frame (GST_BASE_VIDEO_DECODER (mpeg4_dec), GST_VIDEO_FRAME (mpeg4_frame)); return GST_FLOW_CUSTOM_ERROR; unsupported_profile: GST_ELEMENT_ERROR (mpeg4_dec, STREAM, WRONG_TYPE, ("vdpaumpeg4dec doesn't support this stream's profile"), ("profile_and_level_indication: %d", vos.profile_and_level_indication)); return GST_FLOW_ERROR; }
static GstFlowReturn gst_vdp_mpeg_dec_handle_sequence (GstVdpMpegDec * mpeg_dec, GstBuffer * seq, GstBuffer * seq_ext) { GstBaseVideoDecoder *base_video_decoder = GST_BASE_VIDEO_DECODER (mpeg_dec); MPEGSeqHdr hdr; GstVdpMpegStreamInfo stream_info; if (!mpeg_util_parse_sequence_hdr (&hdr, seq)) return GST_FLOW_CUSTOM_ERROR; memcpy (&mpeg_dec->vdp_info.intra_quantizer_matrix, &hdr.intra_quantizer_matrix, 64); memcpy (&mpeg_dec->vdp_info.non_intra_quantizer_matrix, &hdr.non_intra_quantizer_matrix, 64); stream_info.width = hdr.width; stream_info.height = hdr.height; stream_info.fps_n = hdr.fps_n; stream_info.fps_d = hdr.fps_d; stream_info.par_n = hdr.par_w; stream_info.par_d = hdr.par_h; stream_info.interlaced = FALSE; stream_info.version = 1; stream_info.profile = VDP_DECODER_PROFILE_MPEG1; if (seq_ext) { MPEGSeqExtHdr ext; if (!mpeg_util_parse_sequence_extension (&ext, seq_ext)) return GST_FLOW_CUSTOM_ERROR; stream_info.fps_n *= (ext.fps_n_ext + 1); stream_info.fps_d *= (ext.fps_d_ext + 1); stream_info.width += (ext.horiz_size_ext << 12); stream_info.height += (ext.vert_size_ext << 12); stream_info.interlaced = !ext.progressive; stream_info.version = 2; stream_info.profile = gst_vdp_mpeg_dec_get_profile (&ext); } if (memcmp (&mpeg_dec->stream_info, &stream_info, sizeof (GstVdpMpegStreamInfo)) != 0) { GstVideoState state; GstFlowReturn ret; state = gst_base_video_decoder_get_state (base_video_decoder); state.width = stream_info.width; state.height = stream_info.height; state.fps_n = stream_info.fps_n; state.fps_d = stream_info.fps_d; state.par_n = stream_info.par_n; state.par_d = stream_info.par_d; state.interlaced = stream_info.interlaced; gst_base_video_decoder_set_state (base_video_decoder, state); ret = gst_vdp_decoder_init_decoder (GST_VDP_DECODER (mpeg_dec), stream_info.profile, 2); if (ret != GST_FLOW_OK) return ret; memcpy (&mpeg_dec->stream_info, &stream_info, sizeof (GstVdpMpegStreamInfo)); } mpeg_dec->state = GST_VDP_MPEG_DEC_STATE_NEED_DATA; return GST_FLOW_OK; }
static gboolean gst_base_video_decoder_src_event (GstPad * pad, GstEvent * event) { GstBaseVideoDecoder *base_video_decoder; gboolean res = FALSE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEEK: { GstFormat format, tformat; gdouble rate; GstEvent *real_seek; GstSeekFlags flags; GstSeekType cur_type, stop_type; gint64 cur, stop; gint64 tcur, tstop; gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur, &stop_type, &stop); gst_event_unref (event); tformat = GST_FORMAT_TIME; res = gst_base_video_decoder_src_convert (pad, format, cur, &tformat, &tcur); if (!res) goto convert_error; res = gst_base_video_decoder_src_convert (pad, format, stop, &tformat, &tstop); if (!res) goto convert_error; real_seek = gst_event_new_seek (rate, GST_FORMAT_TIME, flags, cur_type, tcur, stop_type, tstop); res = gst_pad_push_event (GST_BASE_VIDEO_CODEC_SINK_PAD (base_video_decoder), real_seek); break; } case GST_EVENT_QOS: { gdouble proportion; GstClockTimeDiff diff; GstClockTime timestamp; gst_event_parse_qos (event, &proportion, &diff, &timestamp); GST_OBJECT_LOCK (base_video_decoder); base_video_decoder->proportion = proportion; base_video_decoder->earliest_time = timestamp + diff; GST_OBJECT_UNLOCK (base_video_decoder); GST_DEBUG_OBJECT (base_video_decoder, "got QoS %" GST_TIME_FORMAT ", %" G_GINT64_FORMAT ", %g", GST_TIME_ARGS (timestamp), diff, proportion); res = gst_pad_push_event (GST_BASE_VIDEO_CODEC_SINK_PAD (base_video_decoder), event); break; } default: res = gst_pad_push_event (GST_BASE_VIDEO_CODEC_SINK_PAD (base_video_decoder), event); break; } done: gst_object_unref (base_video_decoder); return res; convert_error: GST_DEBUG_OBJECT (base_video_decoder, "could not convert format"); goto done; }
static gboolean gst_base_video_decoder_sink_event (GstPad * pad, GstEvent * event) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *base_video_decoder_class; gboolean ret = FALSE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_EOS: { GstVideoFrame *frame; frame = g_malloc0 (sizeof (GstVideoFrame)); frame->presentation_frame_number = base_video_decoder->presentation_frame_number; frame->presentation_duration = 0; base_video_decoder->presentation_frame_number++; base_video_decoder->frames = g_list_append (base_video_decoder->frames, frame); if (base_video_decoder_class->finish) { base_video_decoder_class->finish (base_video_decoder, frame); } ret = gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), event); } break; case GST_EVENT_NEWSEGMENT: { gboolean update; double rate; double applied_rate; GstFormat format; gint64 start; gint64 stop; gint64 position; gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate, &format, &start, &stop, &position); if (format != GST_FORMAT_TIME) goto newseg_wrong_format; gst_segment_set_newsegment_full (&base_video_decoder->state.segment, update, rate, applied_rate, format, start, stop, position); GST_DEBUG ("new segment %" GST_SEGMENT_FORMAT, &base_video_decoder->state.segment); ret = gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), event); } break; default: /* FIXME this changes the order of events */ ret = gst_pad_push_event (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), event); break; } done: gst_object_unref (base_video_decoder); return ret; newseg_wrong_format: { GST_DEBUG_OBJECT (base_video_decoder, "received non TIME newsegment"); gst_event_unref (event); goto done; } }
static gboolean gst_base_video_decoder_src_event (GstPad * pad, GstEvent * event) { GstBaseVideoDecoder *base_video_decoder; gboolean res = FALSE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEEK: /* FIXME: do seek using bitrate in case upstream doesn't handle it */ res = gst_pad_push_event (GST_BASE_VIDEO_DECODER_SINK_PAD (base_video_decoder), event); break; case GST_EVENT_QOS: { gdouble proportion; GstClockTimeDiff diff; GstClockTime timestamp; GstClockTime duration; gst_event_parse_qos (event, &proportion, &diff, &timestamp); GST_OBJECT_LOCK (base_video_decoder); base_video_decoder->proportion = proportion; if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) { if (G_UNLIKELY (diff > 0)) { if (base_video_decoder->state.fps_n > 0) duration = gst_util_uint64_scale (GST_SECOND, base_video_decoder->state.fps_d, base_video_decoder->state.fps_n); else duration = 0; base_video_decoder->earliest_time = timestamp + 2 * diff + duration; } else { base_video_decoder->earliest_time = timestamp + diff; } } else { base_video_decoder->earliest_time = GST_CLOCK_TIME_NONE; } GST_OBJECT_UNLOCK (base_video_decoder); GST_DEBUG_OBJECT (base_video_decoder, "got QoS %" GST_TIME_FORMAT ", %" G_GINT64_FORMAT ", %g", GST_TIME_ARGS (timestamp), diff, proportion); res = gst_pad_push_event (GST_BASE_VIDEO_DECODER_SINK_PAD (base_video_decoder), event); break; } default: res = gst_pad_push_event (GST_BASE_VIDEO_DECODER_SINK_PAD (base_video_decoder), event); break; } gst_object_unref (base_video_decoder); return res; }
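/* Sketch showing how the proportion/earliest_time values stored above are
 * typically consumed: before decoding or pushing a frame, compare its
 * projected running time against the QoS deadline and skip it when it is
 * already late. The helper is illustrative, not part of the base class. */
static gboolean
frame_is_late_example (GstBaseVideoDecoder * decoder, GstClockTime running_time)
{
  GstClockTime earliest;

  GST_OBJECT_LOCK (decoder);
  earliest = decoder->earliest_time;
  GST_OBJECT_UNLOCK (decoder);

  return GST_CLOCK_TIME_IS_VALID (earliest)
      && GST_CLOCK_TIME_IS_VALID (running_time) && running_time < earliest;
}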
static GstFlowReturn gst_vdp_h264_dec_idr (GstVdpH264Dec * h264_dec, GstH264Frame * h264_frame) { GstH264Slice *slice; GstH264Sequence *seq; h264_dec->poc_msb = 0; h264_dec->prev_poc_lsb = 0; slice = &h264_frame->slice_hdr; if (slice->dec_ref_pic_marking.no_output_of_prior_pics_flag) gst_h264_dpb_flush (h264_dec->dpb, FALSE); else gst_h264_dpb_flush (h264_dec->dpb, TRUE); if (slice->dec_ref_pic_marking.long_term_reference_flag) g_object_set (h264_dec->dpb, "max-longterm-frame-idx", 0, NULL); else g_object_set (h264_dec->dpb, "max-longterm-frame-idx", -1, NULL); seq = slice->picture->sequence; if (seq != h264_dec->sequence) { GstVideoState state; VdpDecoderProfile profile; GstFlowReturn ret; state = gst_base_video_decoder_get_state (GST_BASE_VIDEO_DECODER (h264_dec)); state.width = (seq->pic_width_in_mbs_minus1 + 1) * 16 - 2 * seq->frame_crop_right_offset; state.height = (2 - seq->frame_mbs_only_flag) * (seq->pic_height_in_map_units_minus1 + 1) * 16; if (seq->frame_mbs_only_flag) state.height -= 2 * seq->frame_crop_bottom_offset; else state.height -= 4 * seq->frame_crop_bottom_offset; /* calculate framerate if we haven't got one */ if (state.fps_n == 0 && seq->vui_parameters_present_flag) { GstH264VUIParameters *vui; guint16 par_n, par_d; vui = &seq->vui_parameters; if (gst_vdp_h264_dec_calculate_par (vui, &par_n, &par_d)) { state.par_n = par_n; state.par_d = par_d; } if (vui->timing_info_present_flag && vui->fixed_frame_rate_flag) { state.fps_n = vui->time_scale; state.fps_d = vui->num_units_in_tick; if (seq->frame_mbs_only_flag) state.fps_d *= 2; } } gst_base_video_decoder_set_state (GST_BASE_VIDEO_DECODER (h264_dec), state); switch (seq->profile_idc) { case 66: profile = VDP_DECODER_PROFILE_H264_BASELINE; break; case 77: profile = VDP_DECODER_PROFILE_H264_MAIN; break; case 100: profile = VDP_DECODER_PROFILE_H264_HIGH; break; default: GST_ELEMENT_ERROR (h264_dec, STREAM, WRONG_TYPE, ("vdpauh264dec doesn't support this streams profile"), ("profile_idc: %d", seq->profile_idc)); return GST_FLOW_ERROR; } ret = gst_vdp_decoder_init_decoder (GST_VDP_DECODER (h264_dec), profile, seq->num_ref_frames); if (ret != GST_FLOW_OK) return ret; g_object_set (h264_dec->dpb, "num-ref-frames", seq->num_ref_frames, NULL); h264_dec->sequence = seq; } return GST_FLOW_OK; }
static gboolean gst_base_video_decoder_src_query (GstPad * pad, GstQuery * query) { GstBaseVideoDecoder *dec; gboolean res = TRUE; dec = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_POSITION: { GstFormat format; gint64 time; gst_query_parse_position (query, &format, NULL); GST_DEBUG ("query in format %d", format); if (format != GST_FORMAT_TIME) { goto error; } time = dec->last_timestamp; time = gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, time); gst_query_set_position (query, format, time); res = TRUE; break; } case GST_QUERY_DURATION: { res = gst_pad_peer_query (dec->sinkpad, query); break; } case GST_QUERY_CONVERT: { GstFormat src_fmt, dest_fmt; gint64 src_val, dest_val; GST_DEBUG ("convert query"); gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val); res = gst_base_video_decoder_src_convert (pad, src_fmt, src_val, &dest_fmt, &dest_val); if (!res) goto error; gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val); break; } default: res = gst_pad_query_default (pad, query); } gst_object_unref (dec); return res; error: GST_ERROR_OBJECT (dec, "query failed"); gst_object_unref (dec); return res; }
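/* Application-side sketch exercising the GST_QUERY_POSITION handling above;
 * "element" can be the decoder itself or a pipeline containing it. The
 * helper name is hypothetical. */
static gboolean
query_position_example (GstElement * element, GstClockTime * position)
{
  GstQuery *query;
  GstFormat fmt;
  gint64 pos;
  gboolean res;

  query = gst_query_new_position (GST_FORMAT_TIME);
  res = gst_element_query (element, query);
  if (res) {
    gst_query_parse_position (query, &fmt, &pos);
    *position = (GstClockTime) pos;
  }
  gst_query_unref (query);

  return res;
}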
static gboolean gst_base_video_decoder_sink_event (GstPad * pad, GstEvent * event) { GstBaseVideoDecoder *base_video_decoder; GstBaseVideoDecoderClass *base_video_decoder_class; gboolean ret = FALSE; base_video_decoder = GST_BASE_VIDEO_DECODER (gst_pad_get_parent (pad)); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_EOS: { if (!base_video_decoder->packetized) gst_base_video_decoder_drain (base_video_decoder, TRUE); ret = gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD (base_video_decoder), event); } break; case GST_EVENT_NEWSEGMENT: { gboolean update; double rate; double applied_rate; GstFormat format; gint64 start; gint64 stop; gint64 position; GstSegment *segment = &base_video_decoder->segment; gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate, &format, &start, &stop, &position); if (format != GST_FORMAT_TIME) goto newseg_wrong_format; if (!update) { gst_base_video_decoder_flush (base_video_decoder); } base_video_decoder->timestamp_offset = start; gst_segment_set_newsegment_full (segment, update, rate, applied_rate, format, start, stop, position); base_video_decoder->have_segment = TRUE; GST_WARNING ("new segment: format %d rate %g start %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT " position %" GST_TIME_FORMAT " update %d", format, rate, GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time), update); ret = gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD (base_video_decoder), event); } break; case GST_EVENT_FLUSH_STOP: gst_base_video_decoder_flush (base_video_decoder); gst_segment_init (&base_video_decoder->segment, GST_FORMAT_TIME); ret = gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD (base_video_decoder), event); break; default: /* FIXME this changes the order of events */ ret = gst_pad_push_event (GST_BASE_VIDEO_DECODER_SRC_PAD (base_video_decoder), event); break; } done: gst_object_unref (base_video_decoder); return ret; newseg_wrong_format: { GST_DEBUG_OBJECT (base_video_decoder, "received non TIME newsegment"); gst_event_unref (event); goto done; } }
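/* Sketch of the upstream side of the NEWSEGMENT handling above: a demuxer or
 * parser announcing a TIME segment starting at 0 with unknown stop, which is
 * also what gst_base_video_decoder_chain() synthesizes when no segment was
 * received. Illustrative only. */
static gboolean
push_time_newsegment_example (GstPad * srcpad)
{
  GstEvent *event;

  event = gst_event_new_new_segment_full (FALSE, 1.0, 1.0, GST_FORMAT_TIME,
      0, GST_CLOCK_TIME_NONE, 0);

  return gst_pad_push_event (srcpad, event);
}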