/* Class initialisation for the RSVG decoder: registers the debug
 * category, element metadata, pad templates and the GstVideoDecoder
 * virtual method table. */
static void
gst_rsvg_dec_class_init (GstRsvgDecClass * klass)
{
  GObjectClass *gobject_class = (GObjectClass *) klass;
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
  GstVideoDecoderClass *vdec_class = GST_VIDEO_DECODER_CLASS (klass);

  GST_DEBUG_CATEGORY_INIT (rsvgdec_debug, "rsvgdec", 0, "RSVG decoder");

  gst_element_class_set_static_metadata (element_class, "SVG image decoder",
      "Codec/Decoder/Image", "Uses librsvg to decode SVG images",
      "Sebastian Dröge <*****@*****.**>");

  /* Pad templates: sink first, then src. */
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&sink_factory));
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&src_factory));

  gobject_class->finalize = gst_rsvg_dec_finalize;

  vdec_class->stop = GST_DEBUG_FUNCPTR (gst_rsvg_dec_stop);
  vdec_class->set_format = GST_DEBUG_FUNCPTR (gst_rsvg_dec_set_format);
  vdec_class->parse = GST_DEBUG_FUNCPTR (gst_rsvg_dec_parse);
  vdec_class->handle_frame = GST_DEBUG_FUNCPTR (gst_rsvg_dec_handle_frame);
}
/* Class initialisation for the VDPAU H.264 decoder: element metadata,
 * sink pad template, and the GstVideoDecoder virtual method table.
 *
 * Virtual methods are wrapped in GST_DEBUG_FUNCPTR so their symbol
 * names appear in GStreamer debug logs, matching the convention used
 * by every other decoder class_init in this file. */
static void
gst_vdp_h264_dec_class_init (GstVdpH264DecClass * klass)
{
  GstElementClass *element_class;
  GstVideoDecoderClass *video_decoder_class;

  element_class = GST_ELEMENT_CLASS (klass);
  video_decoder_class = GST_VIDEO_DECODER_CLASS (klass);

  gst_element_class_set_static_metadata (element_class, "VDPAU H264 Decoder",
      "Decoder", "Decode h264 stream with vdpau",
      "Carl-Anton Ingmarsson <*****@*****.**>");

  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&sink_template));

  video_decoder_class->start = GST_DEBUG_FUNCPTR (gst_vdp_h264_dec_start);
  video_decoder_class->stop = GST_DEBUG_FUNCPTR (gst_vdp_h264_dec_stop);
  video_decoder_class->flush = GST_DEBUG_FUNCPTR (gst_vdp_h264_dec_flush);
  video_decoder_class->set_format =
      GST_DEBUG_FUNCPTR (gst_vdp_h264_dec_set_format);
  video_decoder_class->handle_frame =
      GST_DEBUG_FUNCPTR (gst_vdp_h264_dec_handle_frame);
}
/* Class initialisation for the libmpeg2-based MPEG-1/MPEG-2 decoder.
 * Registers finalize, pad templates (src then sink), element metadata,
 * the full GstVideoDecoder vfunc table (open/close/start/stop/flush/
 * set_format/handle_frame/finish/decide_allocation) and finally the
 * mpeg2dec debug category. */
static void gst_mpeg2dec_class_init (GstMpeg2decClass * klass) { GObjectClass *gobject_class = G_OBJECT_CLASS (klass); GstElementClass *element_class = GST_ELEMENT_CLASS (klass); GstVideoDecoderClass *video_decoder_class = GST_VIDEO_DECODER_CLASS (klass); gobject_class->finalize = gst_mpeg2dec_finalize; gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&src_template_factory)); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&sink_template_factory)); gst_element_class_set_static_metadata (element_class, "mpeg1 and mpeg2 video decoder", "Codec/Decoder/Video", "Uses libmpeg2 to decode MPEG video streams", "Wim Taymans <*****@*****.**>"); video_decoder_class->open = GST_DEBUG_FUNCPTR (gst_mpeg2dec_open); video_decoder_class->close = GST_DEBUG_FUNCPTR (gst_mpeg2dec_close); video_decoder_class->start = GST_DEBUG_FUNCPTR (gst_mpeg2dec_start); video_decoder_class->stop = GST_DEBUG_FUNCPTR (gst_mpeg2dec_stop); video_decoder_class->flush = GST_DEBUG_FUNCPTR (gst_mpeg2dec_flush); video_decoder_class->set_format = GST_DEBUG_FUNCPTR (gst_mpeg2dec_set_format); video_decoder_class->handle_frame = GST_DEBUG_FUNCPTR (gst_mpeg2dec_handle_frame); video_decoder_class->finish = GST_DEBUG_FUNCPTR (gst_mpeg2dec_finish); video_decoder_class->decide_allocation = GST_DEBUG_FUNCPTR (gst_mpeg2dec_decide_allocation); GST_DEBUG_CATEGORY_INIT (mpeg2dec_debug, "mpeg2dec", 0, "MPEG-2 Video Decoder"); }
/* Sink-event handler for the V4L2 video decoder.
 *
 * On FLUSH_START, both v4l2 objects are unlocked *before* the event is
 * forwarded so any thread blocked in an ioctl returns, and the source
 * pad streaming task is stopped *after* the base class has processed
 * the event.
 *
 * Fix: the event type is cached up front.  The base class sink_event
 * takes ownership of @event, so the original code's second
 * GST_EVENT_TYPE (event) dereferenced an event that may already have
 * been unreffed (use-after-free). */
static gboolean
gst_v4l2_video_dec_sink_event (GstVideoDecoder * decoder, GstEvent * event)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstEventType type = GST_EVENT_TYPE (event);
  gboolean ret;

  switch (type) {
    case GST_EVENT_FLUSH_START:
      GST_DEBUG_OBJECT (self, "flush start");
      gst_v4l2_object_unlock (self->v4l2output);
      gst_v4l2_object_unlock (self->v4l2capture);
      break;
    default:
      break;
  }

  ret = GST_VIDEO_DECODER_CLASS (parent_class)->sink_event (decoder, event);

  switch (type) {
    case GST_EVENT_FLUSH_START:
      /* The processing thread should stop now, wait for it */
      gst_pad_stop_task (decoder->srcpad);
      GST_DEBUG_OBJECT (self, "flush start done");
      break;
    default:
      break;
  }

  return ret;
}
/* Negotiates the output format for the MFC decoder.  Picks the video
 * format preferred by downstream (fixated first structure of the
 * allowed caps), defaulting to NV12, then sets the output state using
 * the cropped dimensions and chains up to the base class.
 *
 * Fix: gst_pad_get_allowed_caps() returns NULL when the src pad has no
 * peer; the original code passed that NULL straight into
 * gst_caps_truncate()/gst_caps_fixate(), which would crash.  We now
 * keep the NV12 default in that case. */
static gboolean
gst_mfc_dec_negotiate (GstVideoDecoder * decoder)
{
  GstMFCDec *self = GST_MFC_DEC (decoder);
  GstVideoCodecState *state;
  GstCaps *allowed_caps;
  GstVideoFormat format = GST_VIDEO_FORMAT_NV12;

  allowed_caps = gst_pad_get_allowed_caps (GST_VIDEO_DECODER_SRC_PAD (self));
  if (allowed_caps) {
    allowed_caps = gst_caps_truncate (allowed_caps);
    allowed_caps = gst_caps_fixate (allowed_caps);

    if (!gst_caps_is_empty (allowed_caps)) {
      const gchar *format_str;
      GstStructure *s = gst_caps_get_structure (allowed_caps, 0);

      format_str = gst_structure_get_string (s, "format");
      if (format_str)
        format = gst_video_format_from_string (format_str);
    }
    gst_caps_unref (allowed_caps);
  }

  self->format = format;

  /* Output state uses the *cropped* dimensions. */
  state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (self),
      self->format, self->crop_width, self->crop_height, self->input_state);
  gst_video_codec_state_unref (state);

  return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder);
}
/* Class initialisation for the Apple VideoToolbox decoder.  Registers
 * the static sink template and a dynamically built src template (its
 * caps come from VIDEO_SRC_CAPS at runtime), the element metadata,
 * finalize/set_context, and the GstVideoDecoder vfunc table. */
static void gst_vtdec_class_init (GstVtdecClass * klass) { GObjectClass *gobject_class = G_OBJECT_CLASS (klass); GstElementClass *element_class = GST_ELEMENT_CLASS (klass); GstVideoDecoderClass *video_decoder_class = GST_VIDEO_DECODER_CLASS (klass); /* Setting up pads and setting metadata should be moved to base_class_init if you intend to subclass this class. */ gst_element_class_add_static_pad_template (element_class, &gst_vtdec_sink_template); gst_element_class_add_pad_template (element_class, gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, gst_caps_from_string (VIDEO_SRC_CAPS))); gst_element_class_set_static_metadata (element_class, "Apple VideoToolbox decoder", "Codec/Decoder/Video", "Apple VideoToolbox Decoder", "Ole André Vadla Ravnås <*****@*****.**>; " "Alessandro Decina <*****@*****.**>"); gobject_class->finalize = gst_vtdec_finalize; element_class->set_context = gst_vtdec_set_context; video_decoder_class->start = GST_DEBUG_FUNCPTR (gst_vtdec_start); video_decoder_class->stop = GST_DEBUG_FUNCPTR (gst_vtdec_stop); video_decoder_class->negotiate = GST_DEBUG_FUNCPTR (gst_vtdec_negotiate); video_decoder_class->set_format = GST_DEBUG_FUNCPTR (gst_vtdec_set_format); video_decoder_class->flush = GST_DEBUG_FUNCPTR (gst_vtdec_flush); video_decoder_class->finish = GST_DEBUG_FUNCPTR (gst_vtdec_finish); video_decoder_class->handle_frame = GST_DEBUG_FUNCPTR (gst_vtdec_handle_frame); }
/* Class initialisation for the test decoder element used by the
 * GstVideoDecoder unit tests: static pad templates, dummy metadata and
 * the vfunc table.  (Local renamed from the copy-pasted
 * "audiosink_class" to a name that matches what it actually is.) */
static void
gst_video_decoder_tester_class_init (GstVideoDecoderTesterClass * klass)
{
  static GstStaticPadTemplate sink_templ = GST_STATIC_PAD_TEMPLATE ("sink",
      GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS ("video/x-test-custom"));
  static GstStaticPadTemplate src_templ = GST_STATIC_PAD_TEMPLATE ("src",
      GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS ("video/x-raw"));
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (klass);

  gst_element_class_add_static_pad_template (element_class, &sink_templ);
  gst_element_class_add_static_pad_template (element_class, &src_templ);

  gst_element_class_set_metadata (element_class,
      "VideoDecoderTester", "Decoder/Video", "yep", "me");

  decoder_class->start = gst_video_decoder_tester_start;
  decoder_class->stop = gst_video_decoder_tester_stop;
  decoder_class->flush = gst_video_decoder_tester_flush;
  decoder_class->handle_frame = gst_video_decoder_tester_handle_frame;
  decoder_class->set_format = gst_video_decoder_tester_set_format;
}
/* Class initialisation for the libde265 HEVC/H.265 decoder.  Wires up
 * GObject property handling (including the "max-threads" property,
 * 0 = auto), the GstVideoDecoder vfunc table, the static pad templates
 * and the element metadata. */
static void gst_libde265_dec_class_init (GstLibde265DecClass * klass) { GObjectClass *gobject_class = (GObjectClass *) klass; GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (klass); GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass); gobject_class->finalize = gst_libde265_dec_finalize; gobject_class->set_property = gst_libde265_dec_set_property; gobject_class->get_property = gst_libde265_dec_get_property; g_object_class_install_property (gobject_class, PROP_MAX_THREADS, g_param_spec_int ("max-threads", "Maximum decode threads", "Maximum number of worker threads to spawn. (0 = auto)", 0, G_MAXINT, DEFAULT_MAX_THREADS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); decoder_class->start = GST_DEBUG_FUNCPTR (gst_libde265_dec_start); decoder_class->stop = GST_DEBUG_FUNCPTR (gst_libde265_dec_stop); decoder_class->set_format = GST_DEBUG_FUNCPTR (gst_libde265_dec_set_format); decoder_class->flush = GST_DEBUG_FUNCPTR (gst_libde265_dec_flush); decoder_class->finish = GST_DEBUG_FUNCPTR (gst_libde265_dec_finish); decoder_class->handle_frame = GST_DEBUG_FUNCPTR (gst_libde265_dec_handle_frame); gst_element_class_add_static_pad_template (gstelement_class, &sink_template); gst_element_class_add_static_pad_template (gstelement_class, &src_template); gst_element_class_set_static_metadata (gstelement_class, "HEVC/H.265 decoder", "Codec/Decoder/Video", "Decodes HEVC/H.265 video streams using libde265", "struktur AG <*****@*****.**>"); }
/* Class initialisation for the Android MediaCodec (AMC) video decoder.
 * Caches the parent class pointer (this element appears to register
 * its GType dynamically, hence the explicit g_type_class_peek_parent),
 * then sets finalize, change_state and the full GstVideoDecoder vfunc
 * table.  No pad templates are added here — presumably they are
 * registered per-codec elsewhere; verify against the element's
 * registration code. */
static void gst_amc_video_dec_class_init (GstAmcVideoDecClass * klass) { GObjectClass *gobject_class = G_OBJECT_CLASS (klass); GstElementClass *element_class = GST_ELEMENT_CLASS (klass); GstVideoDecoderClass *videodec_class = GST_VIDEO_DECODER_CLASS (klass); parent_class = g_type_class_peek_parent (klass); gobject_class->finalize = gst_amc_video_dec_finalize; element_class->change_state = GST_DEBUG_FUNCPTR (gst_amc_video_dec_change_state); videodec_class->start = GST_DEBUG_FUNCPTR (gst_amc_video_dec_start); videodec_class->stop = GST_DEBUG_FUNCPTR (gst_amc_video_dec_stop); videodec_class->open = GST_DEBUG_FUNCPTR (gst_amc_video_dec_open); videodec_class->close = GST_DEBUG_FUNCPTR (gst_amc_video_dec_close); videodec_class->flush = GST_DEBUG_FUNCPTR (gst_amc_video_dec_flush); videodec_class->set_format = GST_DEBUG_FUNCPTR (gst_amc_video_dec_set_format); videodec_class->handle_frame = GST_DEBUG_FUNCPTR (gst_amc_video_dec_handle_frame); videodec_class->finish = GST_DEBUG_FUNCPTR (gst_amc_video_dec_finish); videodec_class->decide_allocation = GST_DEBUG_FUNCPTR (gst_amc_video_dec_decide_allocation); }
/* Allocation-query handler for the PNG decoder: lets the base class do
 * its negotiation, then enables GstVideoMeta on the negotiated pool
 * when downstream advertises support for it. */
static gboolean
gst_pngdec_decide_allocation (GstVideoDecoder * bdec, GstQuery * query)
{
  GstBufferPool *pool = NULL;
  GstStructure *config;

  /* Chain up first; the base class negotiates the pool. */
  if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (bdec, query))
    return FALSE;

  if (gst_query_get_n_allocation_pools (query) > 0)
    gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);

  if (!pool)
    return FALSE;

  config = gst_buffer_pool_get_config (pool);
  if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL))
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_META);
  gst_buffer_pool_set_config (pool, config);

  gst_object_unref (pool);

  return TRUE;
}
/* Class initialisation for the OpenH264 decoder: registers the private
 * struct, pad templates, element metadata, property accessors and the
 * GstVideoDecoder vfunc table.  Note this uses the (older) ->reset
 * vfunc rather than ->flush. */
static void gst_openh264dec_class_init(GstOpenh264DecClass *klass) { GObjectClass *gobject_class = G_OBJECT_CLASS(klass); GstVideoDecoderClass *video_decoder_class = GST_VIDEO_DECODER_CLASS(klass); g_type_class_add_private(klass, sizeof(GstOpenh264DecPrivate)); /* Setting up pads and setting metadata should be moved to base_class_init if you intend to subclass this class. */ gst_element_class_add_pad_template(GST_ELEMENT_CLASS(klass), gst_static_pad_template_get(&gst_openh264dec_sink_template)); gst_element_class_add_pad_template(GST_ELEMENT_CLASS(klass), gst_static_pad_template_get(&gst_openh264dec_src_template)); gst_element_class_set_static_metadata(GST_ELEMENT_CLASS(klass), "OpenH264 video decoder", "Decoder/Video", "OpenH264 video decoder", "Ericsson AB, http://www.ericsson.com"); gobject_class->set_property = gst_openh264dec_set_property; gobject_class->get_property = gst_openh264dec_get_property; video_decoder_class->start = GST_DEBUG_FUNCPTR(gst_openh264dec_start); video_decoder_class->stop = GST_DEBUG_FUNCPTR(gst_openh264dec_stop); video_decoder_class->set_format = GST_DEBUG_FUNCPTR(gst_openh264dec_set_format); video_decoder_class->reset = GST_DEBUG_FUNCPTR(gst_openh264dec_reset); video_decoder_class->finish = GST_DEBUG_FUNCPTR(gst_openh264dec_finish); video_decoder_class->handle_frame = GST_DEBUG_FUNCPTR(gst_openh264dec_handle_frame); video_decoder_class->decide_allocation = GST_DEBUG_FUNCPTR(gst_openh264dec_decide_allocation); }
/* Allocation-query handler for the OpenH264 decoder: chains up, then
 * enables GstVideoMeta on the negotiated pool when downstream supports
 * it, and writes the (possibly updated) pool config back into the
 * query.
 *
 * Fixes: the original fetched the output state only to unref it again
 * (dead ref/unref pair, removed), and parsed allocation pool 0 without
 * checking that one exists — now guarded like the other
 * decide_allocation implementations in this file. */
static gboolean
gst_openh264dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  GstBufferPool *pool = NULL;
  guint size, min, max;
  GstStructure *config;

  if (!GST_VIDEO_DECODER_CLASS
      (gst_openh264dec_parent_class)->decide_allocation (decoder, query))
    return FALSE;

  /* Guard against a query without any negotiated pool instead of
   * dereferencing a NULL pool below. */
  if (gst_query_get_n_allocation_pools (query) == 0)
    return FALSE;

  gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
  if (pool == NULL)
    return FALSE;

  config = gst_buffer_pool_get_config (pool);
  if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_META);
  }
  gst_buffer_pool_set_config (pool, config);
  gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);

  gst_object_unref (pool);

  return TRUE;
}
/* Class initialisation for the Daala decoder: pad templates, element
 * metadata, the GstVideoDecoder vfunc table and the debug category. */
static void
gst_daala_dec_class_init (GstDaalaDecClass * klass)
{
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
  GstVideoDecoderClass *vdec_class = GST_VIDEO_DECODER_CLASS (klass);

  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&daala_dec_src_factory));
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&daala_dec_sink_factory));

  gst_element_class_set_static_metadata (element_class, "Daala video decoder",
      "Codec/Decoder/Video", "Decode raw Daala streams to raw YUV video",
      "Sebastian Dröge <*****@*****.**>");

  vdec_class->start = GST_DEBUG_FUNCPTR (daala_dec_start);
  vdec_class->stop = GST_DEBUG_FUNCPTR (daala_dec_stop);
  vdec_class->set_format = GST_DEBUG_FUNCPTR (daala_dec_set_format);
  vdec_class->parse = GST_DEBUG_FUNCPTR (daala_dec_parse);
  vdec_class->handle_frame = GST_DEBUG_FUNCPTR (daala_dec_handle_frame);
  vdec_class->decide_allocation =
      GST_DEBUG_FUNCPTR (daala_dec_decide_allocation);

  GST_DEBUG_CATEGORY_INIT (daaladec_debug, "daaladec", 0, "Daala decoder");
}
/* Stops the VDPAU H.264 decoder: releases the decoded-picture buffer
 * created in start() and chains up to the base class.
 *
 * Uses g_clear_object() so the dpb pointer is NULLed after the unref —
 * a repeated stop (or any later access) cannot hit a dangling pointer
 * or double-unref. */
static gboolean
gst_vdp_h264_dec_stop (GstVideoDecoder * video_decoder)
{
  GstVdpH264Dec *h264_dec = GST_VDP_H264_DEC (video_decoder);

  g_clear_object (&h264_dec->dpb);

  return GST_VIDEO_DECODER_CLASS (parent_class)->stop (video_decoder);
}
/* Negotiate for the V4L2 video decoder.  If the capture pool is
 * already active, renegotiation is refused (returns TRUE without doing
 * anything) because the pool's buffers are in flight; otherwise chains
 * up to the base class. */
static gboolean gst_v4l2_video_dec_negotiate (GstVideoDecoder * decoder) { GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder); /* We don't allow renegotiation without carefully disabling the pool */ if (self->v4l2capture->pool && gst_buffer_pool_is_active (GST_BUFFER_POOL (self->v4l2capture->pool))) return TRUE; return GST_VIDEO_DECODER_CLASS (parent_class)->negotiate (decoder); }
/* Allocation-query handler for the Theora decoder.  Chains up, then:
 * - enables GstVideoMeta on the pool when downstream supports it;
 * - records in dec->can_crop whether downstream also supports
 *   GstVideoCropMeta (only meaningful together with video meta);
 * - when cropping is available, resizes the pool buffers to the full
 *   (uncropped) frame_width x frame_height so the decoder can output
 *   uncropped frames and let downstream apply the crop meta.
 * Finally writes the updated pool and sizes back into the query. */
static gboolean theora_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query) { GstTheoraDec *dec = GST_THEORA_DEC (decoder); GstVideoCodecState *state; GstBufferPool *pool; guint size, min, max; GstStructure *config; if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder, query)) return FALSE; state = gst_video_decoder_get_output_state (decoder); gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max); dec->can_crop = FALSE; config = gst_buffer_pool_get_config (pool); if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) { gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META); dec->can_crop = gst_query_find_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE, NULL); } if (dec->can_crop) { GstVideoInfo info = state->info; GstCaps *caps; /* Calculate uncropped size */ gst_video_info_set_format (&info, info.finfo->format, dec->info.frame_width, dec->info.frame_height); size = MAX (size, info.size); caps = gst_video_info_to_caps (&info); gst_buffer_pool_config_set_params (config, caps, size, min, max); gst_caps_unref (caps); } gst_buffer_pool_set_config (pool, config); gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max); gst_object_unref (pool); gst_video_codec_state_unref (state); return TRUE; }
/* Class initialisation for the VA-API decoder.  Initialises the debug
 * category and the vaapi plugin base class, then wires up finalize and
 * the GstVideoDecoder vfunc table.  drain and the src/sink query
 * vfuncs are only registered on sufficiently new GStreamer versions
 * (compile-time GST_CHECK_VERSION gates).  Finally sets element
 * metadata and adds the sink and src pad templates. */
static void gst_vaapidecode_class_init (GstVaapiDecodeClass * klass) { GObjectClass *const object_class = G_OBJECT_CLASS (klass); GstElementClass *const element_class = GST_ELEMENT_CLASS (klass); GstVideoDecoderClass *const vdec_class = GST_VIDEO_DECODER_CLASS (klass); GstPadTemplate *pad_template; GST_DEBUG_CATEGORY_INIT (gst_debug_vaapidecode, GST_PLUGIN_NAME, 0, GST_PLUGIN_DESC); gst_vaapi_plugin_base_class_init (GST_VAAPI_PLUGIN_BASE_CLASS (klass)); object_class->finalize = gst_vaapidecode_finalize; vdec_class->open = GST_DEBUG_FUNCPTR (gst_vaapidecode_open); vdec_class->close = GST_DEBUG_FUNCPTR (gst_vaapidecode_close); vdec_class->set_format = GST_DEBUG_FUNCPTR (gst_vaapidecode_set_format); vdec_class->flush = GST_DEBUG_FUNCPTR (gst_vaapidecode_flush); vdec_class->parse = GST_DEBUG_FUNCPTR (gst_vaapidecode_parse); vdec_class->handle_frame = GST_DEBUG_FUNCPTR (gst_vaapidecode_handle_frame); vdec_class->finish = GST_DEBUG_FUNCPTR (gst_vaapidecode_finish); #if GST_CHECK_VERSION(1,5,0) vdec_class->drain = GST_DEBUG_FUNCPTR (gst_vaapidecode_drain); #endif vdec_class->decide_allocation = GST_DEBUG_FUNCPTR (gst_vaapidecode_decide_allocation); #if GST_CHECK_VERSION(1,4,0) vdec_class->src_query = GST_DEBUG_FUNCPTR (gst_vaapidecode_src_query); vdec_class->sink_query = GST_DEBUG_FUNCPTR (gst_vaapidecode_sink_query); #endif gst_element_class_set_static_metadata (element_class, "VA-API decoder", "Codec/Decoder/Video", GST_PLUGIN_DESC, "Gwenole Beauchesne <*****@*****.**>, " "Halley Zhao <*****@*****.**>, " "Sreerenj Balachandran <*****@*****.**>, " "Wind Yuan <*****@*****.**>"); /* sink pad */ pad_template = gst_static_pad_template_get (&gst_vaapidecode_sink_factory); gst_element_class_add_pad_template (element_class, pad_template); /* src pad */ pad_template = gst_static_pad_template_get (&gst_vaapidecode_src_factory); gst_element_class_add_pad_template (element_class, pad_template); }
/* Starts the VDPAU H.264 decoder: resets per-stream state (no IDR
 * seen, no SPS selected), creates the decoded-picture buffer with its
 * output callback, and chains up to the base class.
 *
 * Fix: the original assigned got_idr = FALSE twice; the duplicate
 * statement is removed. */
static gboolean
gst_vdp_h264_dec_start (GstVideoDecoder * video_decoder)
{
  GstVdpH264Dec *h264_dec = GST_VDP_H264_DEC (video_decoder);

  h264_dec->got_idr = FALSE;
  h264_dec->current_sps = -1;

  h264_dec->dpb = g_object_new (GST_TYPE_H264_DPB, NULL);
  gst_h264_dpb_set_output_func (h264_dec->dpb, gst_vdp_h264_dec_output,
      h264_dec);

  return GST_VIDEO_DECODER_CLASS (parent_class)->start (video_decoder);
}
/* Allocation-query handler for the MFC decoder.  Chains up (asserting
 * the base class negotiated a pool), enables GstVideoMeta when
 * downstream supports it, and records in self->has_cropping whether
 * GstVideoCropMeta is also supported.  With cropping available, the
 * pool buffers are resized to the full self->width x self->height
 * frame (uncropped) so downstream applies the crop meta.  The updated
 * pool/size are written back into the query. */
static gboolean gst_mfc_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query) { GstMFCDec *self = GST_MFC_DEC (decoder); GstBufferPool *pool; GstStructure *config; guint size, min, max; if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder, query)) return FALSE; g_assert (gst_query_get_n_allocation_pools (query) > 0); gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max); g_assert (pool != NULL); self->has_cropping = FALSE; config = gst_buffer_pool_get_config (pool); if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) { gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META); self->has_cropping = gst_query_find_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE, NULL); } if (self->has_cropping) { GstVideoInfo info; GstCaps *caps; /* Calculate uncropped size */ gst_buffer_pool_config_get_params (config, &caps, &size, &min, &max); gst_video_info_from_caps (&info, caps); gst_video_info_set_format (&info, self->format, self->width, self->height); size = MAX (size, info.size); caps = gst_video_info_to_caps (&info); gst_buffer_pool_config_set_params (config, caps, size, min, max); gst_caps_unref (caps); } gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max); gst_buffer_pool_set_config (pool, config); gst_object_unref (pool); return TRUE; }
/* Allocation-query handler for the V4L2 video decoder: lets the v4l2
 * capture object handle the allocation, chains up on success, and
 * reports a latency of min_buffers * frame duration.
 *
 * Fix: only compute and set the latency when the capture duration is
 * valid — multiplying GST_CLOCK_TIME_NONE would produce a garbage
 * latency value (this matches the guarded variant of this function
 * elsewhere in this file). */
static gboolean
gst_v4l2_video_dec_decide_allocation (GstVideoDecoder * decoder,
    GstQuery * query)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstClockTime latency;
  gboolean ret = FALSE;

  if (gst_v4l2_object_decide_allocation (self->v4l2capture, query))
    ret = GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
        query);

  if (GST_CLOCK_TIME_IS_VALID (self->v4l2capture->duration)) {
    latency = self->v4l2capture->min_buffers * self->v4l2capture->duration;
    gst_video_decoder_set_latency (decoder, latency, latency);
  }

  return ret;
}
/* Src-pad query handler for the VA-API decoder.  CAPS queries are
 * answered from the pad template caps (intersected with the filter if
 * one is given); CONTEXT queries go through the vaapi context helper
 * with the plugin's display.  Everything else chains up — via the base
 * class src_query on GStreamer >= 1.4, or manually through the
 * plugin's saved srcpad query function on older versions (where the
 * vfunc does not exist). */
static gboolean gst_vaapidecode_src_query (GstVideoDecoder * vdec, GstQuery * query) { gboolean ret = TRUE; GstVaapiDecode *const decode = GST_VAAPIDECODE (vdec); GstVaapiPluginBase *const plugin = GST_VAAPI_PLUGIN_BASE (decode); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_CAPS:{ GstCaps *caps, *filter = NULL; GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (vdec); gst_query_parse_caps (query, &filter); caps = gst_pad_get_pad_template_caps (pad); if (filter) { GstCaps *tmp = caps; caps = gst_caps_intersect_full (filter, tmp, GST_CAPS_INTERSECT_FIRST); gst_caps_unref (tmp); } gst_query_set_caps_result (query, caps); gst_caps_unref (caps); break; } case GST_QUERY_CONTEXT:{ ret = gst_vaapi_handle_context_query (query, plugin->display); break; } default:{ #if GST_CHECK_VERSION(1,4,0) ret = GST_VIDEO_DECODER_CLASS (gst_vaapidecode_parent_class)->src_query (vdec, query); #else GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (vdec); GstObject *parent = gst_pad_get_parent (pad); ret = plugin->srcpad_query (pad, parent, query); if (parent) gst_object_unref (parent); #endif break; } } return ret; }
/* Sink-event handler for the PNG decoder.  A SEGMENT event in TIME
 * format means upstream delivers whole frames, so the decoder is put
 * in packetized mode; any other segment format disables it.  The event
 * is always forwarded to the base class. */
static gboolean
gst_pngdec_sink_event (GstVideoDecoder * bdec, GstEvent * event)
{
  if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
    const GstSegment *segment;

    gst_event_parse_segment (event, &segment);
    gst_video_decoder_set_packetized (bdec,
        segment->format == GST_FORMAT_TIME);
  }

  return GST_VIDEO_DECODER_CLASS (parent_class)->sink_event (bdec, event);
}
/* Sink-pad query handler for the VA-API decoder.  CONTEXT queries are
 * answered by the vaapi context helper; everything else chains up to
 * the base class. */
static gboolean
gst_vaapidecode_sink_query (GstVideoDecoder * vdec, GstQuery * query)
{
  GstElement *const element = GST_ELEMENT (vdec);

  if (GST_QUERY_TYPE (query) == GST_QUERY_CONTEXT)
    return gst_vaapi_handle_context_query (element, query);

  return GST_VIDEO_DECODER_CLASS (parent_class)->sink_query (vdec, query);
}
/* Sink-event handler for the VA-API decoder. */
static gboolean
gst_vaapidecode_sink_event (GstVideoDecoder * vdec, GstEvent * event)
{
  GstVaapiDecode *const decode = GST_VAAPIDECODE (vdec);

  /* Keep segment event to refer to rate so that vaapidecode can handle
   * reverse playback */
  if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT)
    gst_event_copy_segment (event, &decode->in_segment);

  return GST_VIDEO_DECODER_CLASS (parent_class)->sink_event (vdec, event);
}
/* Class initialisation for the Intel MediaSDK (MSDK) decoder base
 * class.  Wires up GObject property handling, set_context, the full
 * GstVideoDecoder vfunc table, the "hardware" (enable HW decoding) and
 * "async-depth" (1-20, pipeline depth) properties, and the src pad
 * template.  No sink template is added here — presumably subclasses
 * add their own codec-specific sink template; verify against the
 * subclass registration code. */
static void gst_msdkdec_class_init (GstMsdkDecClass * klass) { GObjectClass *gobject_class; GstElementClass *element_class; GstVideoDecoderClass *decoder_class; gobject_class = G_OBJECT_CLASS (klass); element_class = GST_ELEMENT_CLASS (klass); decoder_class = GST_VIDEO_DECODER_CLASS (klass); gobject_class->set_property = gst_msdkdec_set_property; gobject_class->get_property = gst_msdkdec_get_property; gobject_class->finalize = gst_msdkdec_finalize; element_class->set_context = gst_msdkdec_set_context; decoder_class->close = GST_DEBUG_FUNCPTR (gst_msdkdec_close); decoder_class->start = GST_DEBUG_FUNCPTR (gst_msdkdec_start); decoder_class->stop = GST_DEBUG_FUNCPTR (gst_msdkdec_stop); decoder_class->set_format = GST_DEBUG_FUNCPTR (gst_msdkdec_set_format); decoder_class->finish = GST_DEBUG_FUNCPTR (gst_msdkdec_finish); decoder_class->handle_frame = GST_DEBUG_FUNCPTR (gst_msdkdec_handle_frame); decoder_class->decide_allocation = GST_DEBUG_FUNCPTR (gst_msdkdec_decide_allocation); decoder_class->flush = GST_DEBUG_FUNCPTR (gst_msdkdec_flush); decoder_class->drain = GST_DEBUG_FUNCPTR (gst_msdkdec_drain); g_object_class_install_property (gobject_class, GST_MSDKDEC_PROP_HARDWARE, g_param_spec_boolean ("hardware", "Hardware", "Enable hardware decoders", PROP_HARDWARE_DEFAULT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); g_object_class_install_property (gobject_class, GST_MSDKDEC_PROP_ASYNC_DEPTH, g_param_spec_uint ("async-depth", "Async Depth", "Depth of asynchronous pipeline", 1, 20, PROP_ASYNC_DEPTH_DEFAULT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); gst_element_class_add_static_pad_template (element_class, &src_factory); }
/* Src-pad query handler for the V4L2 video decoder.  CAPS queries are
 * answered from the caps probed from the device (falling back to the
 * pad template caps), intersected with the filter when one is given;
 * all other queries chain up to the base class. */
static gboolean
gst_v4l2_video_dec_src_query (GstVideoDecoder * decoder, GstQuery * query)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstCaps *filter, *result;
  GstPad *pad;

  if (GST_QUERY_TYPE (query) != GST_QUERY_CAPS)
    return GST_VIDEO_DECODER_CLASS (parent_class)->src_query (decoder, query);

  pad = GST_VIDEO_DECODER_SRC_PAD (decoder);
  gst_query_parse_caps (query, &filter);

  /* Prefer the device-probed caps over the template caps. */
  if (self->probed_srccaps)
    result = gst_caps_ref (self->probed_srccaps);
  else
    result = gst_pad_get_pad_template_caps (pad);

  if (filter) {
    GstCaps *unfiltered = result;

    result = gst_caps_intersect_full (filter, unfiltered,
        GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (unfiltered);
  }

  GST_DEBUG_OBJECT (self, "Returning src caps %" GST_PTR_FORMAT, result);

  gst_query_set_caps_result (query, result);
  gst_caps_unref (result);

  return TRUE;
}
/* Allocation-query handler for the V4L2 video decoder (guarded
 * variant): lets the v4l2 capture object handle the allocation, chains
 * up on success, and reports latency = min_buffers * frame duration,
 * but only when the duration is a valid clock time.
 * NOTE(review): the debug format string opens a "(" that is never
 * closed — cosmetic only, but worth fixing upstream. */
static gboolean gst_v4l2_video_dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query) { GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder); GstClockTime latency; gboolean ret = FALSE; if (gst_v4l2_object_decide_allocation (self->v4l2capture, query)) ret = GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder, query); if (GST_CLOCK_TIME_IS_VALID (self->v4l2capture->duration)) { latency = self->v4l2capture->min_buffers * self->v4l2capture->duration; GST_DEBUG_OBJECT (self, "Setting latency: %" GST_TIME_FORMAT " (%" G_GUINT32_FORMAT " * %" G_GUINT64_FORMAT, GST_TIME_ARGS (latency), self->v4l2capture->min_buffers, self->v4l2capture->duration); gst_video_decoder_set_latency (decoder, latency, latency); } else { GST_WARNING_OBJECT (self, "Duration invalid, not setting latency"); } return ret; }
/* Src-pad query handler for the VA-API decoder (element-based
 * variant).  CAPS queries are answered from the pad template caps
 * (intersected with the filter if given); CONTEXT queries go through
 * the vaapi context helper; everything else chains up to the base
 * class. */
static gboolean gst_vaapidecode_src_query (GstVideoDecoder * vdec, GstQuery * query) { gboolean ret = TRUE; GstElement *const element = GST_ELEMENT (vdec); switch (GST_QUERY_TYPE (query)) { case GST_QUERY_CAPS:{ GstCaps *caps, *filter = NULL; GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (vdec); gst_query_parse_caps (query, &filter); caps = gst_pad_get_pad_template_caps (pad); if (filter) { GstCaps *tmp = caps; caps = gst_caps_intersect_full (filter, tmp, GST_CAPS_INTERSECT_FIRST); gst_caps_unref (tmp); } gst_query_set_caps_result (query, caps); gst_caps_unref (caps); break; } case GST_QUERY_CONTEXT:{ ret = gst_vaapi_handle_context_query (element, query); break; } default:{ ret = GST_VIDEO_DECODER_CLASS (parent_class)->src_query (vdec, query); break; } } return ret; }
/* Class initialisation for the Theora decoder.  Installs the four
 * telemetry visualisation properties (motion vectors, macroblock
 * modes, adaptive quantization, bit usage) only when the underlying
 * libtheora build supports the corresponding TH_DECCTL_* control, then
 * registers pad templates, element metadata, the GstVideoDecoder vfunc
 * table (note: uses the older ->reset vfunc) and the debug category. */
static void gst_theora_dec_class_init (GstTheoraDecClass * klass) { GObjectClass *gobject_class = G_OBJECT_CLASS (klass); GstElementClass *element_class = GST_ELEMENT_CLASS (klass); GstVideoDecoderClass *video_decoder_class = GST_VIDEO_DECODER_CLASS (klass); gobject_class->set_property = theora_dec_set_property; gobject_class->get_property = theora_dec_get_property; if (gst_theora_dec_ctl_is_supported (TH_DECCTL_SET_TELEMETRY_MV)) { g_object_class_install_property (gobject_class, PROP_TELEMETRY_MV, g_param_spec_int ("visualize-motion-vectors", "Visualize motion vectors", "Show motion vector selection overlaid on image. " "Value gives a mask for motion vector (MV) modes to show", 0, 0xffff, THEORA_DEF_TELEMETRY_MV, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); } if (gst_theora_dec_ctl_is_supported (TH_DECCTL_SET_TELEMETRY_MBMODE)) { g_object_class_install_property (gobject_class, PROP_TELEMETRY_MBMODE, g_param_spec_int ("visualize-macroblock-modes", "Visualize macroblock modes", "Show macroblock mode selection overlaid on image. " "Value gives a mask for macroblock (MB) modes to show", 0, 0xffff, THEORA_DEF_TELEMETRY_MBMODE, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); } if (gst_theora_dec_ctl_is_supported (TH_DECCTL_SET_TELEMETRY_QI)) { g_object_class_install_property (gobject_class, PROP_TELEMETRY_QI, g_param_spec_int ("visualize-quantization-modes", "Visualize adaptive quantization modes", "Show adaptive quantization mode selection overlaid on image. " "Value gives a mask for quantization (QI) modes to show", 0, 0xffff, THEORA_DEF_TELEMETRY_QI, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); } if (gst_theora_dec_ctl_is_supported (TH_DECCTL_SET_TELEMETRY_BITS)) { /* FIXME: make this a boolean instead? The value scales the bars so * they're less wide. Default is to use full width, and anything else * doesn't seem particularly useful, since the smaller bars just disappear * then (they almost disappear for a value of 2 already). 
*/ g_object_class_install_property (gobject_class, PROP_TELEMETRY_BITS, g_param_spec_int ("visualize-bit-usage", "Visualize bitstream usage breakdown", "Sets the bitstream breakdown visualization mode. " "Values influence the width of the bit usage bars to show", 0, 0xff, THEORA_DEF_TELEMETRY_BITS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); } gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&theora_dec_src_factory)); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&theora_dec_sink_factory)); gst_element_class_set_static_metadata (element_class, "Theora video decoder", "Codec/Decoder/Video", "decode raw theora streams to raw YUV video", "Benjamin Otte <*****@*****.**>, Wim Taymans <*****@*****.**>"); video_decoder_class->start = GST_DEBUG_FUNCPTR (theora_dec_start); video_decoder_class->stop = GST_DEBUG_FUNCPTR (theora_dec_stop); video_decoder_class->reset = GST_DEBUG_FUNCPTR (theora_dec_reset); video_decoder_class->set_format = GST_DEBUG_FUNCPTR (theora_dec_set_format); video_decoder_class->parse = GST_DEBUG_FUNCPTR (theora_dec_parse); video_decoder_class->handle_frame = GST_DEBUG_FUNCPTR (theora_dec_handle_frame); video_decoder_class->decide_allocation = GST_DEBUG_FUNCPTR (theora_dec_decide_allocation); GST_DEBUG_CATEGORY_INIT (theoradec_debug, "theoradec", 0, "Theora decoder"); }
/* Negotiates the output format for the VideoToolbox decoder.
 *
 * Flow: (1) drain pending frames if a session exists; (2) keep the
 * previously negotiated caps when downstream still accepts them, to
 * avoid tearing down a working VTDecompressionSession; otherwise
 * intersect peer caps with the src template; (3) fixate and set the
 * output state, propagating caps features — when GL memory is
 * negotiated, set the platform-appropriate texture-target (rectangle
 * on macOS, 2D on iOS); (4) if the caps changed, (re)create the
 * decompression session, retrying in software when the hardware
 * decoder became unavailable during renegotiation; (5) tear down or
 * (re)create the GL texture cache to match the negotiated GL context.
 *
 * NOTE(review): the error-message string literal below appears split
 * across a line break ("VTDecompressionSessionCreate returned \n %d")
 * — this looks like a text-extraction artifact of this copy, not
 * valid C; confirm against the upstream source.
 * NOTE(review): `output_textures` is only assigned inside the
 * `if (features)` branch but is read unconditionally afterwards —
 * presumably features is always non-NULL in practice; verify. */
static gboolean gst_vtdec_negotiate (GstVideoDecoder * decoder) { GstVideoCodecState *output_state = NULL; GstCaps *peercaps = NULL, *caps = NULL, *templcaps = NULL, *prevcaps = NULL; GstVideoFormat format; GstStructure *structure; const gchar *s; GstVtdec *vtdec; OSStatus err = noErr; GstCapsFeatures *features = NULL; gboolean output_textures; vtdec = GST_VTDEC (decoder); if (vtdec->session) gst_vtdec_push_frames_if_needed (vtdec, TRUE, FALSE); output_state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (vtdec)); if (output_state) { prevcaps = gst_caps_ref (output_state->caps); gst_video_codec_state_unref (output_state); } peercaps = gst_pad_peer_query_caps (GST_VIDEO_DECODER_SRC_PAD (vtdec), NULL); if (prevcaps && gst_caps_can_intersect (prevcaps, peercaps)) { /* The hardware decoder can become (temporarily) unavailable across * VTDecompressionSessionCreate/Destroy calls. So if the currently configured * caps are still accepted by downstream we keep them so we don't have to * destroy and recreate the session. 
*/ GST_INFO_OBJECT (vtdec, "current and peer caps are compatible, keeping current caps"); caps = gst_caps_ref (prevcaps); } else { templcaps = gst_pad_get_pad_template_caps (GST_VIDEO_DECODER_SRC_PAD (decoder)); caps = gst_caps_intersect_full (peercaps, templcaps, GST_CAPS_INTERSECT_FIRST); gst_caps_unref (templcaps); } gst_caps_unref (peercaps); caps = gst_caps_truncate (gst_caps_make_writable (caps)); structure = gst_caps_get_structure (caps, 0); s = gst_structure_get_string (structure, "format"); format = gst_video_format_from_string (s); features = gst_caps_get_features (caps, 0); if (features) features = gst_caps_features_copy (features); output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (vtdec), format, vtdec->video_info.width, vtdec->video_info.height, vtdec->input_state); output_state->caps = gst_video_info_to_caps (&output_state->info); if (features) { gst_caps_set_features (output_state->caps, 0, features); output_textures = gst_caps_features_contains (features, GST_CAPS_FEATURE_MEMORY_GL_MEMORY); if (output_textures) gst_caps_set_simple (output_state->caps, "texture-target", G_TYPE_STRING, #if !HAVE_IOS GST_GL_TEXTURE_TARGET_RECTANGLE_STR, #else GST_GL_TEXTURE_TARGET_2D_STR, #endif NULL); } gst_caps_unref (caps); if (!prevcaps || !gst_caps_is_equal (prevcaps, output_state->caps)) { gboolean renegotiating = vtdec->session != NULL; GST_INFO_OBJECT (vtdec, "negotiated output format %" GST_PTR_FORMAT " previous %" GST_PTR_FORMAT, output_state->caps, prevcaps); if (vtdec->session) gst_vtdec_invalidate_session (vtdec); err = gst_vtdec_create_session (vtdec, format, TRUE); if (err == noErr) { GST_INFO_OBJECT (vtdec, "using hardware decoder"); } else if (err == kVTVideoDecoderNotAvailableNowErr && renegotiating) { GST_WARNING_OBJECT (vtdec, "hw decoder not available anymore"); err = gst_vtdec_create_session (vtdec, format, FALSE); } if (err != noErr) { GST_ELEMENT_ERROR (vtdec, RESOURCE, FAILED, (NULL), ("VTDecompressionSessionCreate returned 
%d", (int) err)); } } if (vtdec->texture_cache != NULL && !output_textures) { gst_video_texture_cache_free (vtdec->texture_cache); vtdec->texture_cache = NULL; } if (err == noErr && output_textures) { /* call this regardless of whether caps have changed or not since a new * local context could have become available */ gst_gl_context_helper_ensure_context (vtdec->ctxh); GST_INFO_OBJECT (vtdec, "pushing textures, context %p old context %p", vtdec->ctxh->context, vtdec->texture_cache ? vtdec->texture_cache->ctx : NULL); if (vtdec->texture_cache && vtdec->texture_cache->ctx != vtdec->ctxh->context) { gst_video_texture_cache_free (vtdec->texture_cache); vtdec->texture_cache = NULL; } if (!vtdec->texture_cache) setup_texture_cache (vtdec, vtdec->ctxh->context); } if (prevcaps) gst_caps_unref (prevcaps); if (err != noErr) return FALSE; return GST_VIDEO_DECODER_CLASS (gst_vtdec_parent_class)->negotiate (decoder); }