static GstFlowReturn
_gst_libde265_image_available (VIDEO_DECODER_BASE * parse,
    const struct de265_image *image)
{
  GstLibde265Dec *dec = GST_LIBDE265_DEC (parse);
  int width = de265_get_image_width (image, 0);
  int height = de265_get_image_height (image, 0);

  if (G_UNLIKELY (width != dec->width || height != dec->height)) {
#if GST_CHECK_VERSION(1,0,0)
    GstVideoCodecState *state =
        gst_video_decoder_set_output_state (parse, GST_VIDEO_FORMAT_I420,
        width, height, dec->input_state);
    g_assert (state != NULL);
    if (dec->fps_n > 0) {
      state->info.fps_n = dec->fps_n;
      state->info.fps_d = dec->fps_d;
    }
    gst_video_decoder_negotiate (parse);
#else
    GstVideoState *state = gst_base_video_decoder_get_state (parse);
    g_assert (state != NULL);
    state->format = GST_VIDEO_FORMAT_I420;
    state->width = width;
    state->height = height;
    if (dec->fps_n > 0) {
      state->fps_n = dec->fps_n;
      state->fps_d = dec->fps_d;
    }
    gst_base_video_decoder_set_src_caps (parse);
#endif
    GST_DEBUG ("Frame dimensions are %d x %d", width, height);
    dec->width = width;
    dec->height = height;
  }

  return HAVE_FRAME (parse);
}
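/* Reworked version of the same libde265 helper: the negotiated output state
 * is cached in dec->output_state, and failures from set_output_state() or
 * negotiate() are reported as GST_FLOW_ERROR instead of asserting. */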
static GstFlowReturn
_gst_libde265_image_available (GstVideoDecoder * decoder, int width,
    int height)
{
  GstLibde265Dec *dec = GST_LIBDE265_DEC (decoder);

  if (G_UNLIKELY (dec->output_state == NULL
          || width != dec->output_state->info.width
          || height != dec->output_state->info.height)) {
    GstVideoCodecState *state =
        gst_video_decoder_set_output_state (decoder, GST_VIDEO_FORMAT_I420,
        width, height, dec->input_state);
    if (state == NULL) {
      GST_ERROR_OBJECT (dec, "Failed to set output state");
      return GST_FLOW_ERROR;
    }
    if (!gst_video_decoder_negotiate (decoder)) {
      GST_ERROR_OBJECT (dec, "Failed to negotiate format");
      gst_video_codec_state_unref (state);
      return GST_FLOW_ERROR;
    }
    if (dec->output_state != NULL) {
      gst_video_codec_state_unref (dec->output_state);
    }
    dec->output_state = state;
    GST_DEBUG_OBJECT (dec, "Frame dimensions are %d x %d", width, height);
  }

  return GST_FLOW_OK;
}
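/* jpegdec: choose the output format from the libjpeg colorspace, keep the
 * current output state when nothing changed, and default the YUV path to
 * BT.601 full-range colorimetry before negotiating. */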
static void
gst_jpeg_dec_negotiate (GstJpegDec * dec, gint width, gint height, gint clrspc)
{
  GstVideoCodecState *outstate;
  GstVideoInfo *info;
  GstVideoFormat format;

  switch (clrspc) {
    case JCS_RGB:
      format = GST_VIDEO_FORMAT_RGB;
      break;
    case JCS_GRAYSCALE:
      format = GST_VIDEO_FORMAT_GRAY8;
      break;
    default:
      format = GST_VIDEO_FORMAT_I420;
      break;
  }

  /* Compare to currently configured output state */
  outstate = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec));
  if (outstate) {
    info = &outstate->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        format == GST_VIDEO_INFO_FORMAT (info)) {
      gst_video_codec_state_unref (outstate);
      return;
    }
    gst_video_codec_state_unref (outstate);
  }

  outstate =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), format,
      width, height, dec->input_state);

  switch (clrspc) {
    case JCS_RGB:
    case JCS_GRAYSCALE:
      break;
    default:
      outstate->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_0_255;
      outstate->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
      outstate->info.colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
      outstate->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
  }

  gst_video_codec_state_unref (outstate);

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  GST_DEBUG_OBJECT (dec, "max_v_samp_factor=%d", dec->cinfo.max_v_samp_factor);
  GST_DEBUG_OBJECT (dec, "max_h_samp_factor=%d", dec->cinfo.max_h_samp_factor);
}
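/* vtdec: map the input caps to a CoreMedia codec type and (re)create the
 * CMFormatDescription before negotiating; H.264 without codec_data is
 * deferred until codec data arrives. */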
static gboolean
gst_vtdec_set_format (GstVideoDecoder * decoder, GstVideoCodecState * state)
{
  GstStructure *structure;
  CMVideoCodecType cm_format = 0;
  CMFormatDescriptionRef format_description = NULL;
  const char *caps_name;
  GstVtdec *vtdec = GST_VTDEC (decoder);

  GST_DEBUG_OBJECT (vtdec, "set_format");

  structure = gst_caps_get_structure (state->caps, 0);
  caps_name = gst_structure_get_name (structure);
  if (!strcmp (caps_name, "video/x-h264")) {
    cm_format = kCMVideoCodecType_H264;
  } else if (!strcmp (caps_name, "video/mpeg")) {
    cm_format = kCMVideoCodecType_MPEG2Video;
  } else if (!strcmp (caps_name, "image/jpeg")) {
    cm_format = kCMVideoCodecType_JPEG;
  }

  if (cm_format == kCMVideoCodecType_H264 && state->codec_data == NULL) {
    GST_INFO_OBJECT (vtdec, "no codec data, wait for one");
    return TRUE;
  }

  gst_video_info_from_caps (&vtdec->video_info, state->caps);

  if (!gst_vtdec_compute_reorder_queue_length (vtdec, cm_format,
          state->codec_data))
    return FALSE;
  gst_vtdec_set_latency (vtdec);

  if (state->codec_data) {
    format_description = create_format_description_from_codec_data (vtdec,
        cm_format, state->codec_data);
  } else {
    format_description = create_format_description (vtdec, cm_format);
  }

  if (vtdec->format_description)
    CFRelease (vtdec->format_description);
  vtdec->format_description = format_description;

  if (vtdec->input_state)
    gst_video_codec_state_unref (vtdec->input_state);
  vtdec->input_state = gst_video_codec_state_ref (state);

  return gst_video_decoder_negotiate (decoder);
}
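/* vtdec: push decoded frames out of the PTS reorder queue; a failed
 * renegotiation on a reconfigured source pad is mapped to FLUSHING or
 * NOT_NEGOTIATED. */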
static GstFlowReturn
gst_vtdec_push_frames_if_needed (GstVtdec * vtdec, gboolean drain,
    gboolean flush)
{
  GstVideoCodecFrame *frame;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (vtdec);

  /* negotiate now so that we know whether we need to use the GL upload meta
   * or not */
  if (gst_pad_check_reconfigure (decoder->srcpad)) {
    if (!gst_video_decoder_negotiate (decoder)) {
      gst_pad_mark_reconfigure (decoder->srcpad);
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        ret = GST_FLOW_FLUSHING;
      else
        ret = GST_FLOW_NOT_NEGOTIATED;
      return ret;
    }
  }

  if (drain)
    VTDecompressionSessionWaitForAsynchronousFrames (vtdec->session);

  /* push a buffer if there are enough frames to guarantee that we push in PTS
   * order */
  while ((g_async_queue_length (vtdec->reorder_queue) >=
          vtdec->reorder_queue_length) || drain || flush) {
    frame = (GstVideoCodecFrame *) g_async_queue_try_pop (vtdec->reorder_queue);

    /* we need to check this in case reorder_queue_length=0 (jpeg for
     * example) or we're draining/flushing */
    if (frame) {
      if (flush || frame->flags & VTDEC_FRAME_FLAG_SKIP)
        gst_video_decoder_release_frame (decoder, frame);
      else if (frame->flags & VTDEC_FRAME_FLAG_DROP)
        gst_video_decoder_drop_frame (decoder, frame);
      else
        ret = gst_video_decoder_finish_frame (decoder, frame);
    }

    if (!frame || ret != GST_FLOW_OK)
      break;
  }

  return ret;
}
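/* Variant of the same vtdec function (note the FIXME): negotiation failures
 * are ignored here, and output buffers may be wrapped into GL textures via
 * the texture cache before being finished. */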
static GstFlowReturn
gst_vtdec_push_frames_if_needed (GstVtdec * vtdec, gboolean drain,
    gboolean flush)
{
  GstVideoCodecFrame *frame;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (vtdec);

  /* FIXME: Instead of this, implement GstVideoDecoder::negotiate() and
   * just call gst_video_decoder_negotiate() */
  /* negotiate now so that we know whether we need to use the GL upload meta
   * or not */
  if (gst_pad_check_reconfigure (decoder->srcpad))
    gst_video_decoder_negotiate (decoder);

  if (drain)
    VTDecompressionSessionWaitForAsynchronousFrames (vtdec->session);

  /* push a buffer if there are enough frames to guarantee that we push in PTS
   * order */
  while ((g_async_queue_length (vtdec->reorder_queue) >=
          vtdec->reorder_queue_length) || drain || flush) {
    frame = (GstVideoCodecFrame *) g_async_queue_try_pop (vtdec->reorder_queue);

    if (frame && vtdec->texture_cache != NULL) {
      frame->output_buffer =
          gst_core_video_texture_cache_get_gl_buffer (vtdec->texture_cache,
          frame->output_buffer);
      if (!frame->output_buffer)
        GST_ERROR_OBJECT (vtdec, "couldn't get textures from buffer");
    }

    /* we need to check this in case reorder_queue_length=0 (jpeg for
     * example) or we're draining/flushing */
    if (frame) {
      if (flush)
        gst_video_decoder_drop_frame (decoder, frame);
      else
        ret = gst_video_decoder_finish_frame (decoder, frame);
    }

    if (!frame || ret != GST_FLOW_OK)
      break;
  }

  return ret;
}
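/* vaapidecode: renegotiate sink and source caps with the VA-API plugin base
 * under the stream lock, then negotiate downstream. */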
static gboolean
gst_vaapidecode_negotiate (GstVaapiDecode * decode)
{
  GstVideoDecoder *const vdec = GST_VIDEO_DECODER (decode);
  GstVaapiPluginBase *const plugin = GST_VAAPI_PLUGIN_BASE (vdec);
  gboolean success = TRUE;

  GST_DEBUG_OBJECT (decode, "input codec state changed: renegotiating");

  GST_VIDEO_DECODER_STREAM_LOCK (vdec);
  /* don't return while the stream lock is held */
  if (!gst_vaapi_plugin_base_set_caps (plugin, decode->sinkpad_caps, NULL))
    success = FALSE;
  else if (!gst_vaapidecode_update_src_caps (decode))
    success = FALSE;
  else if (!gst_vaapi_plugin_base_set_caps (plugin, NULL, decode->srcpad_caps))
    success = FALSE;
  GST_VIDEO_DECODER_STREAM_UNLOCK (vdec);

  if (!success)
    return FALSE;

  if (!gst_video_decoder_negotiate (vdec))
    return FALSE;

  return TRUE;
}
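/* Amlogic vdec: store the new input state, configure the vstream and, on
 * first use, publish a fixed xRGB output state sized from the codec's
 * system info. */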
static gboolean
gst_aml_vdec_set_format (GstVideoDecoder * dec, GstVideoCodecState * state)
{
  gboolean ret = FALSE;
  GstStructure *structure;
  const char *name;
  GstVideoInfo *info;
  GstVideoFormat fmt;
  GstAmlVdec *amlvdec = GST_AMLVDEC (dec);

  g_return_val_if_fail (state != NULL, FALSE);

  if (amlvdec->input_state)
    gst_video_codec_state_unref (amlvdec->input_state);
  amlvdec->input_state = gst_video_codec_state_ref (state);

  structure = gst_caps_get_structure (state->caps, 0);
  name = gst_structure_get_name (structure);
  GST_INFO_OBJECT (amlvdec, "%s = %s", __FUNCTION__, name);

  if (name) {
    ret = gst_set_vstream_info (amlvdec, state->caps);
    if (!amlvdec->output_state) {
      info = &amlvdec->input_state->info;
      fmt = GST_VIDEO_FORMAT_xRGB;
      GST_VIDEO_INFO_WIDTH (info) = amlvdec->pcodec->am_sysinfo.width;
      GST_VIDEO_INFO_HEIGHT (info) = amlvdec->pcodec->am_sysinfo.height;
      amlvdec->output_state =
          gst_video_decoder_set_output_state (GST_VIDEO_DECODER (amlvdec),
          fmt, info->width, info->height, amlvdec->input_state);
      gst_video_decoder_negotiate (GST_VIDEO_DECODER (amlvdec));
    }
  }
  return ret;
}
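/* v4l2videodec: full handle_frame path. The first buffer triggers header
 * submission, capture format acquisition, caps probing/fixation and
 * negotiation before the decoding task is started. */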
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2Error error = GST_V4L2_ERROR_INIT;
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;
  gboolean processed = FALSE;
  GstBuffer *tmp;

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2output))) {
    if (!self->input_state)
      goto not_negotiated;
    if (!gst_v4l2_object_set_format (self->v4l2output, self->input_state->caps,
            &error))
      goto not_negotiated;
  }

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2capture))) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
    GstVideoInfo info;
    GstVideoCodecState *output_state;
    GstBuffer *codec_data;
    GstCaps *acquired_caps, *available_caps, *caps, *filter;
    GstStructure *st;

    GST_DEBUG_OBJECT (self, "Sending header");

    codec_data = self->input_state->codec_data;

    /* We are running in byte-stream mode, so we don't know the headers, but
     * we need to send something, otherwise the decoder will refuse to
     * initialize. */
    if (codec_data) {
      gst_buffer_ref (codec_data);
    } else {
      codec_data = gst_buffer_ref (frame->input_buffer);
      processed = TRUE;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, 2, 2);

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &codec_data);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    gst_buffer_unref (codec_data);

    /* For decoders G_FMT returns coded size, G_SELECTION returns visible size
     * in the compose rectangle. gst_v4l2_object_acquire_format() checks both
     * and returns the visible size as width/height and the coded size as
     * padding. */
    if (!gst_v4l2_object_acquire_format (self->v4l2capture, &info))
      goto not_negotiated;

    /* Create caps from the acquired format, remove the format field */
    acquired_caps = gst_video_info_to_caps (&info);
    st = gst_caps_get_structure (acquired_caps, 0);
    gst_structure_remove_field (st, "format");

    /* Probe currently available pixel formats */
    available_caps = gst_v4l2_object_probe_caps (self->v4l2capture, NULL);
    available_caps = gst_caps_make_writable (available_caps);

    /* Replace coded size with visible size, we want to negotiate visible size
     * with downstream, not coded size. */
    gst_caps_map_in_place (available_caps, gst_v4l2_video_remove_padding, self);

    filter = gst_caps_intersect_full (available_caps, acquired_caps,
        GST_CAPS_INTERSECT_FIRST);
    gst_caps_unref (acquired_caps);
    gst_caps_unref (available_caps);
    caps = gst_pad_peer_query_caps (decoder->srcpad, filter);
    gst_caps_unref (filter);

    GST_DEBUG_OBJECT (self, "Possible decoded caps: %" GST_PTR_FORMAT, caps);
    if (gst_caps_is_empty (caps)) {
      gst_caps_unref (caps);
      goto not_negotiated;
    }

    /* Fixate pixel format */
    caps = gst_caps_fixate (caps);

    GST_DEBUG_OBJECT (self, "Chosen decoded caps: %" GST_PTR_FORMAT, caps);

    /* Try to set negotiated format, on success replace acquired format */
    if (gst_v4l2_object_set_format (self->v4l2capture, caps, &error))
      gst_video_info_from_caps (&info, caps);
    else
      gst_v4l2_clear_error (&error);
    gst_caps_unref (caps);

    output_state = gst_video_decoder_set_output_state (decoder,
        info.finfo->format, info.width, info.height, self->input_state);

    /* Copy the rest of the information, there might be more in the future */
    output_state->info.interlace_mode = info.interlace_mode;
    gst_video_codec_state_unref (output_state);

    if (!gst_video_decoder_negotiate (decoder)) {
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        goto flushing;
      else
        goto not_negotiated;
    }

    /* Ensure our internal pool is activated */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
            TRUE))
      goto activate_failed;
  }

  if (g_atomic_int_get (&self->processing) == FALSE) {
    /* It's possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

    GST_DEBUG_OBJECT (self, "Starting decoding thread");

    /* Start the processing task, when it quits, the task will disable input
     * processing to unlock input if draining, or prevent potential block */
    g_atomic_int_set (&self->processing, TRUE);
    if (!gst_pad_start_task (decoder->srcpad,
            (GstTaskFunction) gst_v4l2_video_dec_loop, self,
            (GDestroyNotify) gst_v4l2_video_dec_loop_stopped))
      goto start_task_failed;
  }

  if (!processed) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &frame->input_buffer);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    if (ret == GST_FLOW_FLUSHING) {
      if (g_atomic_int_get (&self->processing) == FALSE)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      goto process_failed;
    }
  }

  /* No need to keep input around */
  tmp = frame->input_buffer;
  frame->input_buffer = gst_buffer_new ();
  gst_buffer_copy_into (frame->input_buffer, tmp,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
      GST_BUFFER_COPY_META, 0, 0);
  gst_buffer_unref (tmp);

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    gst_v4l2_error (self, &error);
    goto drop;
  }
activate_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }
start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start decoding thread.")), (NULL));
    g_atomic_int_set (&self->processing, FALSE);
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("May be due to not enough memory or failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }
}
static GstFlowReturn
gst_vaapidecode_handle_frame (GstVideoDecoder * vdec,
    GstVideoCodecFrame * frame)
{
  GstVaapiDecode *const decode = GST_VAAPIDECODE (vdec);
  GstVaapiDecoderStatus status;
  GstFlowReturn ret;

  if (!decode->input_state)
    goto not_negotiated;

  if (G_UNLIKELY (!decode->active) ||
      gst_pad_needs_reconfigure (GST_VIDEO_DECODER_SRC_PAD (vdec))) {
    GstVaapiPluginBase *const plugin = GST_VAAPI_PLUGIN_BASE (vdec);

    GST_DEBUG_OBJECT (decode, "activating the decoder");
    if (!gst_vaapidecode_update_src_caps (decode))
      goto not_negotiated;

    if (!gst_video_decoder_negotiate (vdec))
      goto not_negotiated;

    if (!gst_vaapi_plugin_base_set_caps (plugin, NULL, decode->srcpad_caps))
      goto not_negotiated;

    decode->active = TRUE;
  }

  /* Decode current frame */
  for (;;) {
    status = gst_vaapi_decoder_decode (decode->decoder, frame);
    if (status == GST_VAAPI_DECODER_STATUS_ERROR_NO_SURFACE) {
      /* Make sure that there are no decoded frames waiting in the
         output queue. */
      ret = gst_vaapidecode_push_all_decoded_frames (decode);
      if (ret != GST_FLOW_OK)
        goto error_push_all_decoded_frames;

      g_mutex_lock (&decode->surface_ready_mutex);
      if (gst_vaapi_decoder_check_status (decode->decoder) ==
          GST_VAAPI_DECODER_STATUS_ERROR_NO_SURFACE)
        g_cond_wait (&decode->surface_ready, &decode->surface_ready_mutex);
      g_mutex_unlock (&decode->surface_ready_mutex);
      continue;
    }
    if (status != GST_VAAPI_DECODER_STATUS_SUCCESS)
      goto error_decode;
    break;
  }

  /* Note that gst_vaapi_decoder_decode cannot return success without
     completing the decode and pushing all decoded frames into the output
     queue */
  return gst_vaapidecode_push_all_decoded_frames (decode);

  /* ERRORS */
error_push_all_decoded_frames:
  {
    GST_ERROR ("push loop error while decoding %d", ret);
    gst_video_decoder_drop_frame (vdec, frame);
    return ret;
  }
error_decode:
  {
    GST_ERROR ("decode error %d", status);
    switch (status) {
      case GST_VAAPI_DECODER_STATUS_ERROR_UNSUPPORTED_CODEC:
      case GST_VAAPI_DECODER_STATUS_ERROR_UNSUPPORTED_PROFILE:
      case GST_VAAPI_DECODER_STATUS_ERROR_UNSUPPORTED_CHROMA_FORMAT:
        ret = GST_FLOW_NOT_SUPPORTED;
        break;
      default:
        GST_VIDEO_DECODER_ERROR (vdec, 1, STREAM, DECODE, ("Decoding error"),
            ("Decode error %d", status), ret);
        break;
    }
    gst_video_decoder_drop_frame (vdec, frame);
    return ret;
  }
not_negotiated:
  {
    GST_ERROR_OBJECT (decode, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    gst_video_decoder_drop_frame (vdec, frame);
    return ret;
  }
}
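/* openh264dec: decode one NAL, then pair the decoder output with the oldest
 * pending frame (openh264 provides no input/output association) and copy the
 * three planes into the output buffer. */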
static GstFlowReturn
gst_openh264dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstOpenh264Dec *openh264dec = GST_OPENH264DEC (decoder);
  GstMapInfo map_info;
  GstVideoCodecState *state;
  SBufferInfo dst_buf_info;
  DECODING_STATE ret;
  guint8 *yuvdata[3];
  GstFlowReturn flow_status;
  GstVideoFrame video_frame;
  guint actual_width, actual_height;
  guint i;
  guint8 *p;
  guint row_stride, component_width, component_height, src_width, row;

  if (frame) {
    if (!gst_buffer_map (frame->input_buffer, &map_info, GST_MAP_READ)) {
      GST_ERROR_OBJECT (openh264dec, "Cannot map input buffer!");
      return GST_FLOW_ERROR;
    }

    GST_LOG_OBJECT (openh264dec, "handle frame, %d",
        map_info.size > 4 ? map_info.data[4] & 0x1f : -1);

    memset (&dst_buf_info, 0, sizeof (SBufferInfo));
    ret = openh264dec->priv->decoder->DecodeFrame2 (map_info.data,
        map_info.size, yuvdata, &dst_buf_info);

    if (ret == dsNoParamSets) {
      GST_DEBUG_OBJECT (openh264dec, "Requesting a key unit");
      gst_pad_push_event (GST_VIDEO_DECODER_SINK_PAD (decoder),
          gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE,
              FALSE, 0));
    }

    if (ret != dsErrorFree && ret != dsNoParamSets) {
      GST_DEBUG_OBJECT (openh264dec, "Requesting a key unit");
      gst_pad_push_event (GST_VIDEO_DECODER_SINK_PAD (decoder),
          gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE,
              FALSE, 0));
      GST_LOG_OBJECT (openh264dec, "error decoding nal, return code: %d", ret);
    }

    gst_buffer_unmap (frame->input_buffer, &map_info);
    gst_video_codec_frame_unref (frame);
    frame = NULL;
  } else {
    memset (&dst_buf_info, 0, sizeof (SBufferInfo));
    ret = openh264dec->priv->decoder->DecodeFrame2 (NULL, 0, yuvdata,
        &dst_buf_info);
    if (ret != dsErrorFree)
      return GST_FLOW_EOS;
  }

  /* FIXME: openh264 has no way for us to get a connection
   * between the input and output frames, we just have to
   * guess based on the input. Fortunately openh264 can
   * only do baseline profile. */
  frame = gst_video_decoder_get_oldest_frame (decoder);
  if (!frame) {
    /* Can only happen in finish() */
    return GST_FLOW_EOS;
  }

  /* No output available yet */
  if (dst_buf_info.iBufferStatus != 1) {
    return (frame ? GST_FLOW_OK : GST_FLOW_EOS);
  }

  actual_width = dst_buf_info.UsrData.sSystemBuffer.iWidth;
  actual_height = dst_buf_info.UsrData.sSystemBuffer.iHeight;

  if (!gst_pad_has_current_caps (GST_VIDEO_DECODER_SRC_PAD (openh264dec)) ||
      actual_width != openh264dec->priv->width ||
      actual_height != openh264dec->priv->height) {
    state = gst_video_decoder_set_output_state (decoder, GST_VIDEO_FORMAT_I420,
        actual_width, actual_height, openh264dec->priv->input_state);
    openh264dec->priv->width = actual_width;
    openh264dec->priv->height = actual_height;

    if (!gst_video_decoder_negotiate (decoder)) {
      GST_ERROR_OBJECT (openh264dec,
          "Failed to negotiate with downstream elements");
      return GST_FLOW_NOT_NEGOTIATED;
    }
  } else {
    state = gst_video_decoder_get_output_state (decoder);
  }

  flow_status = gst_video_decoder_allocate_output_frame (decoder, frame);
  if (flow_status != GST_FLOW_OK) {
    gst_video_codec_state_unref (state);
    return flow_status;
  }

  if (!gst_video_frame_map (&video_frame, &state->info, frame->output_buffer,
          GST_MAP_WRITE)) {
    GST_ERROR_OBJECT (openh264dec, "Cannot map output buffer!");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }

  for (i = 0; i < 3; i++) {
    p = GST_VIDEO_FRAME_COMP_DATA (&video_frame, i);
    row_stride = GST_VIDEO_FRAME_COMP_STRIDE (&video_frame, i);
    component_width = GST_VIDEO_FRAME_COMP_WIDTH (&video_frame, i);
    component_height = GST_VIDEO_FRAME_COMP_HEIGHT (&video_frame, i);
    src_width =
        i < 1 ? dst_buf_info.UsrData.sSystemBuffer.iStride[0] :
        dst_buf_info.UsrData.sSystemBuffer.iStride[1];
    for (row = 0; row < component_height; row++) {
      memcpy (p, yuvdata[i], component_width);
      p += row_stride;
      yuvdata[i] += src_width;
    }
  }

  gst_video_codec_state_unref (state);
  gst_video_frame_unmap (&video_frame);

  return gst_video_decoder_finish_frame (decoder, frame);
}
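/* mpeg2dec: handle a new sequence header: pick the output format from the
 * chroma subsampling, fill PAR/FPS/interlacing/colorimetry, set the latency
 * and negotiate. */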
static GstFlowReturn
handle_sequence (GstMpeg2dec * mpeg2dec, const mpeg2_info_t * info)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime latency;
  const mpeg2_sequence_t *sequence;
  GstVideoCodecState *state;
  GstVideoInfo *dinfo = &mpeg2dec->decoded_info;
  GstVideoInfo *vinfo;
  GstVideoFormat format;

  sequence = info->sequence;

  if (sequence->frame_period == 0)
    goto invalid_frame_period;

  /* mpeg2 video can only be from 16x16 to 4096x4096. Everything
   * else is a corrupted file */
  if (sequence->width > 4096 || sequence->width < 16 ||
      sequence->height > 4096 || sequence->height < 16)
    goto invalid_size;

  GST_DEBUG_OBJECT (mpeg2dec,
      "widthxheight: %dx%d , decoded_widthxheight: %dx%d",
      sequence->picture_width, sequence->picture_height, sequence->width,
      sequence->height);

  if (sequence->picture_width != sequence->width ||
      sequence->picture_height != sequence->height) {
    GST_DEBUG_OBJECT (mpeg2dec, "we need to crop");
    mpeg2dec->need_cropping = TRUE;
  } else {
    GST_DEBUG_OBJECT (mpeg2dec, "no cropping needed");
    mpeg2dec->need_cropping = FALSE;
  }

  /* get subsampling */
  if (sequence->chroma_width < sequence->width) {
    /* horizontally subsampled */
    if (sequence->chroma_height < sequence->height) {
      /* and vertically subsampled */
      format = GST_VIDEO_FORMAT_I420;
    } else {
      format = GST_VIDEO_FORMAT_Y42B;
    }
  } else {
    /* not subsampled */
    format = GST_VIDEO_FORMAT_Y444;
  }

  state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (mpeg2dec),
      format, sequence->picture_width, sequence->picture_height,
      mpeg2dec->input_state);
  vinfo = &state->info;

  /* If we don't have a valid upstream PAR override it */
  if (GST_VIDEO_INFO_PAR_N (vinfo) == 1 &&
      GST_VIDEO_INFO_PAR_D (vinfo) == 1 &&
      sequence->pixel_width != 0 && sequence->pixel_height != 0) {
#if MPEG2_RELEASE >= MPEG2_VERSION(0,5,0)
    guint pixel_width, pixel_height;
    if (mpeg2_guess_aspect (sequence, &pixel_width, &pixel_height)) {
      vinfo->par_n = pixel_width;
      vinfo->par_d = pixel_height;
    }
#else
    vinfo->par_n = sequence->pixel_width;
    vinfo->par_d = sequence->pixel_height;
#endif
    GST_DEBUG_OBJECT (mpeg2dec, "Setting PAR %d x %d",
        vinfo->par_n, vinfo->par_d);
  }
  vinfo->fps_n = 27000000;
  vinfo->fps_d = sequence->frame_period;

  if (!(sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE))
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_MIXED;
  else
    vinfo->interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

  vinfo->chroma_site = GST_VIDEO_CHROMA_SITE_MPEG2;
  vinfo->colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;

  if (sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION) {
    /* do color description */
    switch (sequence->colour_primaries) {
      case 1:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT709;
        break;
      case 4:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
        break;
      case 5:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
        break;
      case 6:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE170M;
        break;
      case 7:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
        break;
    }

    /* matrix coefficients */
    switch (sequence->matrix_coefficients) {
      case 1:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT709;
        break;
      case 4:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_FCC;
        break;
      case 5:
      case 6:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
        break;
      case 7:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_SMPTE240M;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 8-255 reserved */
      default:
        vinfo->colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_UNKNOWN;
        break;
    }

    /* transfer characteristics */
    switch (sequence->transfer_characteristics) {
      case 1:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 4:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA22;
        break;
      case 5:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA28;
        break;
      case 6:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;
        break;
      case 7:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_SMPTE240M;
        break;
      case 8:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_GAMMA10;
        break;
        /* 0 forbidden */
        /* 2 unspecified */
        /* 3 reserved */
        /* 9-255 reserved */
      default:
        vinfo->colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN;
        break;
    }
  }

  GST_DEBUG_OBJECT (mpeg2dec,
      "sequence flags: %d, frame period: %d, frame rate: %d/%d",
      sequence->flags, sequence->frame_period, vinfo->fps_n, vinfo->fps_d);
  GST_DEBUG_OBJECT (mpeg2dec, "profile: %02x, colour_primaries: %d",
      sequence->profile_level_id, sequence->colour_primaries);
  GST_DEBUG_OBJECT (mpeg2dec, "transfer chars: %d, matrix coef: %d",
      sequence->transfer_characteristics, sequence->matrix_coefficients);
  GST_DEBUG_OBJECT (mpeg2dec,
      "FLAGS: CONSTRAINED_PARAMETERS:%d, PROGRESSIVE_SEQUENCE:%d",
      sequence->flags & SEQ_FLAG_CONSTRAINED_PARAMETERS,
      sequence->flags & SEQ_FLAG_PROGRESSIVE_SEQUENCE);
  GST_DEBUG_OBJECT (mpeg2dec, "FLAGS: LOW_DELAY:%d, COLOUR_DESCRIPTION:%d",
      sequence->flags & SEQ_FLAG_LOW_DELAY,
      sequence->flags & SEQ_FLAG_COLOUR_DESCRIPTION);

  /* we store the codec size before cropping */
  *dinfo = *vinfo;
  gst_video_info_set_format (dinfo, format, sequence->width,
      sequence->height);

  /* Mpeg2dec has 2 frame latency to produce a picture and 1 frame latency in
   * its parser */
  latency = gst_util_uint64_scale (3, vinfo->fps_d, vinfo->fps_n);
  gst_video_decoder_set_latency (GST_VIDEO_DECODER (mpeg2dec), latency,
      latency);

  if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (mpeg2dec)))
    goto negotiation_fail;

  gst_video_codec_state_unref (state);

  mpeg2_custom_fbuf (mpeg2dec->decoder, 1);

  init_dummybuf (mpeg2dec);

  /* Pump in some null buffers, because otherwise libmpeg2 doesn't
   * initialise the discard_fbuf->id */
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  mpeg2_set_buf (mpeg2dec->decoder, mpeg2dec->dummybuf, NULL);
  gst_mpeg2dec_clear_buffers (mpeg2dec);

  return ret;

invalid_frame_period:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Frame period is 0!");
    return GST_FLOW_ERROR;
  }
invalid_size:
  {
    GST_ERROR_OBJECT (mpeg2dec, "Invalid frame dimensions: %d x %d",
        sequence->width, sequence->height);
    return GST_FLOW_ERROR;
  }
negotiation_fail:
  {
    GST_WARNING_OBJECT (mpeg2dec, "Failed to negotiate with downstream");
    gst_video_codec_state_unref (state);
    return GST_FLOW_ERROR;
  }
}
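/* daaladec: handle the type packet: derive PAR and pixel format from the
 * stream info, allocate the decoder and publish the output state. */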
static GstFlowReturn
daala_handle_type_packet (GstDaalaDec * dec)
{
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;
  GstVideoFormat fmt;
  GstVideoInfo *info;

  if (!dec->input_state)
    return GST_FLOW_NOT_NEGOTIATED;

  info = &dec->input_state->info;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.timebase_numerator, dec->info.timebase_denominator,
      dec->info.pixel_aspect_numerator, dec->info.pixel_aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1. */
  par_num = GST_VIDEO_INFO_PAR_N (info);
  par_den = GST_VIDEO_INFO_PAR_D (info);

  /* If we have a default PAR, see if the decoder specified a different one */
  if (par_num == 1 && par_den == 1 &&
      (dec->info.pixel_aspect_numerator != 0 &&
          dec->info.pixel_aspect_denominator != 0)) {
    par_num = dec->info.pixel_aspect_numerator;
    par_den = dec->info.pixel_aspect_denominator;
  }

  /* daala has:
   *
   *  width/height : dimension of the encoded frame
   *  pic_width/pic_height : dimension of the visible part
   *  pic_x/pic_y : offset in encoded frame where visible part starts */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d",
      dec->info.pic_width, dec->info.pic_height, par_num, par_den);

  if (dec->info.nplanes == 3 && dec->info.plane_info[0].xdec == 0 &&
      dec->info.plane_info[0].ydec == 0 &&
      dec->info.plane_info[1].xdec == 1 &&
      dec->info.plane_info[1].ydec == 1 &&
      dec->info.plane_info[2].xdec == 1 && dec->info.plane_info[2].ydec == 1) {
    fmt = GST_VIDEO_FORMAT_I420;
  } else if (dec->info.nplanes == 3 && dec->info.plane_info[0].xdec == 0 &&
      dec->info.plane_info[0].ydec == 0 &&
      dec->info.plane_info[1].xdec == 0 &&
      dec->info.plane_info[1].ydec == 0 &&
      dec->info.plane_info[2].xdec == 0 && dec->info.plane_info[2].ydec == 0) {
    fmt = GST_VIDEO_FORMAT_Y444;
  } else {
    goto unsupported_format;
  }

  GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
  GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;

  /* done */
  dec->decoder = daala_decode_alloc (&dec->info, dec->setup);

  /* Create the output state */
  dec->output_state = state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
      info->width, info->height, dec->input_state);

  /* FIXME : Do we still need to set fps/par now that we pass the reference
   * input stream ? */
  state->info.fps_n = dec->info.timebase_numerator;
  state->info.fps_d = dec->info.timebase_denominator;
  state->info.par_n = par_num;
  state->info.par_d = par_den;

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  dec->have_header = TRUE;

  return ret;

  /* ERRORS */
unsupported_format:
  {
    GST_ERROR_OBJECT (dec, "Invalid pixel format");
    return GST_FLOW_ERROR;
  }
}
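/* vdpauh264dec: on an IDR slice, flush the DPB; on an SPS change, publish a
 * new output state and (re)initialize the VDPAU decoder with the mapped
 * profile. */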
static GstFlowReturn
gst_vdp_h264_dec_idr (GstVdpH264Dec * h264_dec, GstVideoCodecFrame * frame,
    GstH264SliceHdr * slice)
{
  GstH264SPS *seq;

  GST_DEBUG_OBJECT (h264_dec, "Handling IDR slice");

  h264_dec->poc_msb = 0;
  h264_dec->prev_poc_lsb = 0;

  if (slice->dec_ref_pic_marking.no_output_of_prior_pics_flag)
    gst_h264_dpb_flush (h264_dec->dpb, FALSE);
  else
    gst_h264_dpb_flush (h264_dec->dpb, TRUE);

  if (slice->dec_ref_pic_marking.long_term_reference_flag)
    g_object_set (h264_dec->dpb, "max-longterm-frame-idx", 0, NULL);
  else
    g_object_set (h264_dec->dpb, "max-longterm-frame-idx", -1, NULL);

  seq = slice->pps->sequence;
  if (seq->id != h264_dec->current_sps) {
    GstVideoCodecState *state;
    VdpDecoderProfile profile;
    GstFlowReturn ret;

    GST_DEBUG_OBJECT (h264_dec, "Sequence changed !");

    state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (h264_dec),
        GST_VIDEO_FORMAT_YV12, seq->width, seq->height, h264_dec->input_state);

    /* calculate framerate if we haven't got one */
    if (state->info.fps_n == 0) {
      state->info.fps_n = seq->fps_num;
      state->info.fps_d = seq->fps_den;
    }
    if (state->info.par_n == 0 && seq->vui_parameters_present_flag) {
      state->info.par_n = seq->vui_parameters.par_n;
      state->info.par_d = seq->vui_parameters.par_d;
    }

    if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (h264_dec)))
      goto nego_fail;

    switch (seq->profile_idc) {
      case 66:
        profile = VDP_DECODER_PROFILE_H264_BASELINE;
        break;
      case 77:
        profile = VDP_DECODER_PROFILE_H264_MAIN;
        break;
      case 100:
        profile = VDP_DECODER_PROFILE_H264_HIGH;
        break;
      default:
        goto profile_not_supported;
    }

    ret = gst_vdp_decoder_init_decoder (GST_VDP_DECODER (h264_dec), profile,
        seq->num_ref_frames, h264_dec->input_state);
    if (ret != GST_FLOW_OK)
      return ret;

    g_object_set (h264_dec->dpb, "num-ref-frames", seq->num_ref_frames, NULL);

    h264_dec->current_sps = seq->id;
  }

  return GST_FLOW_OK;

profile_not_supported:
  {
    GST_ELEMENT_ERROR (h264_dec, STREAM, WRONG_TYPE,
        ("vdpauh264dec doesn't support this stream's profile"),
        ("profile_idc: %d", seq->profile_idc));
    return GST_FLOW_ERROR;
  }
nego_fail:
  {
    GST_ERROR_OBJECT (h264_dec, "Negotiation failed");
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
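/* mfcdec: dequeue decoded buffers, renegotiating whenever the reported
 * geometry, strides or crop rectangle change, and push them downstream. */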
static GstFlowReturn
gst_mfc_dec_dequeue_output (GstMFCDec * self)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint mfc_ret;
  GstVideoCodecFrame *frame = NULL;
  GstBuffer *outbuf = NULL;
  struct mfc_buffer *mfc_outbuf = NULL;
  gint width, height;
  gint crop_left, crop_top, crop_width, crop_height;
  gint src_ystride, src_uvstride;
  GstVideoCodecState *state = NULL;
  gint64 deadline;
  struct timeval timestamp;

  if (!self->initialized) {
    GST_DEBUG_OBJECT (self, "Initializing decoder");
    if ((mfc_ret = mfc_dec_init_output (self->context, 1)) < 0)
      goto initialize_error;
    self->initialized = TRUE;
  }

  while ((mfc_ret = mfc_dec_output_available (self->context)) > 0) {
    GST_DEBUG_OBJECT (self, "Dequeueing output");

    mfc_dec_get_output_size (self->context, &width, &height);
    mfc_dec_get_output_stride (self->context, &src_ystride, &src_uvstride);
    mfc_dec_get_crop_size (self->context, &crop_left, &crop_top, &crop_width,
        &crop_height);

    GST_DEBUG_OBJECT (self, "Have output: width %d, height %d, "
        "Y stride %d, UV stride %d, "
        "crop_left %d, crop_top %d, "
        "crop_width %d, crop_height %d", width, height, src_ystride,
        src_uvstride, crop_left, crop_top, crop_width, crop_height);

    state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (self));

    if (!state || self->width != width || self->height != height ||
        self->src_stride[0] != src_ystride ||
        self->src_stride[1] != src_uvstride ||
        self->crop_left != crop_left || self->crop_top != crop_top ||
        self->crop_width != crop_width || self->crop_height != crop_height) {
      self->width = width;
      self->height = height;
      self->crop_left = crop_left;
      self->crop_top = crop_top;
      self->crop_width = crop_width;
      self->crop_height = crop_height;
      self->src_stride[0] = src_ystride;
      self->src_stride[1] = src_uvstride;
      self->src_stride[2] = 0;

      if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self)))
        goto negotiate_error;

      if (state)
        gst_video_codec_state_unref (state);
      state = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (self));
    }

    if ((mfc_ret = mfc_dec_dequeue_output (self->context, &mfc_outbuf,
                &timestamp)) < 0) {
      if (mfc_ret == -2) {
        GST_DEBUG_OBJECT (self, "Timeout dequeueing output, trying again");
        mfc_ret = mfc_dec_dequeue_output (self->context, &mfc_outbuf,
            &timestamp);
      }

      if (mfc_ret < 0)
        goto dequeue_error;
    }

    g_assert (mfc_outbuf != NULL);

    GST_DEBUG_OBJECT (self, "Got output buffer with ID %ld", timestamp.tv_sec);

    frame = NULL;
    if (timestamp.tv_sec != -1)
      frame = gst_video_decoder_get_frame (GST_VIDEO_DECODER (self),
          timestamp.tv_sec);

    if (frame) {
      deadline =
          gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (self),
          frame);
      if (deadline < 0) {
        GST_LOG_OBJECT (self,
            "Dropping too late frame: deadline %" G_GINT64_FORMAT, deadline);
        ret = gst_video_decoder_drop_frame (GST_VIDEO_DECODER (self), frame);
        frame = NULL;
        outbuf = NULL;
        goto done;
      }

      ret = gst_video_decoder_allocate_output_frame (GST_VIDEO_DECODER (self),
          frame);
      if (ret != GST_FLOW_OK)
        goto alloc_error;

      outbuf = frame->output_buffer;
    } else {
      GST_WARNING_OBJECT (self, "Didn't find a frame for ID %ld",
          timestamp.tv_sec);

      outbuf =
          gst_video_decoder_allocate_output_buffer (GST_VIDEO_DECODER (self));
      if (!outbuf) {
        ret = GST_FLOW_ERROR;
        goto alloc_error;
      }
    }

    ret = gst_mfc_dec_fill_outbuf (self, outbuf, mfc_outbuf, state);
    if (ret != GST_FLOW_OK)
      goto fill_error;

    if (frame) {
      ret = gst_video_decoder_finish_frame (GST_VIDEO_DECODER (self), frame);
      frame = NULL;
      outbuf = NULL;
    } else {
      ret = gst_pad_push (GST_VIDEO_DECODER_SRC_PAD (self), outbuf);
      outbuf = NULL;
    }

    if (ret != GST_FLOW_OK)
      GST_INFO_OBJECT (self, "Pushing frame returned: %s",
          gst_flow_get_name (ret));

  done:
    if (mfc_outbuf) {
      if ((mfc_ret = mfc_dec_enqueue_output (self->context, mfc_outbuf)) < 0)
        goto enqueue_error;
    }

    if (!frame && outbuf)
      gst_buffer_unref (outbuf);
    if (frame)
      gst_video_codec_frame_unref (frame);
    if (state)
      gst_video_codec_state_unref (state);
    frame = NULL;
    outbuf = NULL;

    if (ret != GST_FLOW_OK)
      break;
  }

  return ret;

initialize_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, INIT, ("Failed to initialize output"),
        ("mfc_dec_init: %d", mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }
negotiate_error:
  {
    GST_ELEMENT_ERROR (self, CORE, NEGOTIATION, ("Failed to negotiate"),
        (NULL));
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
dequeue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to dequeue output buffer"), ("mfc_dec_dequeue_output: %d",
            mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }
alloc_error:
  {
    GST_ELEMENT_ERROR (self, CORE, FAILED, ("Failed to allocate output buffer"),
        (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }
fill_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED, ("Failed to fill output buffer"),
        (NULL));
    ret = GST_FLOW_ERROR;
    goto done;
  }
enqueue_error:
  {
    GST_ELEMENT_ERROR (self, LIBRARY, FAILED,
        ("Failed to enqueue output buffer"), ("mfc_dec_enqueue_output: %d",
            mfc_ret));
    ret = GST_FLOW_ERROR;
    goto done;
  }
}
/* This will get invoked in the following situations:
 * 1: beginning of the stream, which requires initialization (== complete
 *    reset)
 * 2: upstream notified a resolution change and set do_renego to TRUE.
 *    The new resolution may or may not require a full reset
 * 3: upstream failed to notify the resolution change but
 *    msdk detected the change (eg: vp9 stream in ivf elementary form
 *    with varying resolution frames).
 *
 * For any input configuration change, we deal with notification
 * from upstream and also use msdk APIs to handle the parameter
 * initialization efficiently */
static gboolean
gst_msdkdec_negotiate (GstMsdkDec * thiz, gboolean hard_reset)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (thiz);
  GST_DEBUG_OBJECT (thiz,
      "Start negotiating caps, pool and init the msdk decoder subsystem");

  if (hard_reset) {
    /* Retrieve any pending frames and push them downstream */
    if (gst_msdkdec_drain (GST_VIDEO_DECODER (thiz)) != GST_FLOW_OK)
      goto error_drain;

    /* This will initiate the allocation query which will help to flush
     * all the pending buffers in the pipeline so that we can stop
     * the active bufferpool and safely invoke gst_msdk_frame_free() */
    if (thiz->initialized) {
      GstCaps *caps = gst_pad_get_current_caps (decoder->srcpad);
      GstQuery *query = NULL;
      if (caps) {
        query = gst_query_new_allocation (caps, FALSE);
        gst_pad_peer_query (decoder->srcpad, query);
        gst_query_unref (query);
        gst_caps_unref (caps);
      }
    }

    /* De-initialize the decoder if it is already active */
    /* Not resetting the mfxVideoParam since it already
     * possesses the required parameters for the new session decode */
    gst_msdkdec_close_decoder (thiz, FALSE);

    /* request pool renegotiation by setting do_realloc */
    thiz->do_realloc = TRUE;
  }

  /* At this point all pending frames (if there are any) are pushed
   * downstream and we are ready to negotiate the output caps */
  if (!gst_msdkdec_set_src_caps (thiz, hard_reset))
    return FALSE;

  /* This will initiate the allocation query; we create the
   * bufferpool in decide_allocation in order to account for the
   * downstream min_buffer requirement.
   * Required initializations for MediaSDK operations
   * will all be done from decide_allocation after considering
   * some of the downstream requirements */
  if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (thiz)))
    goto error_negotiate;

  thiz->do_renego = FALSE;
  thiz->do_realloc = FALSE;

  return TRUE;

error_drain:
  GST_ERROR_OBJECT (thiz, "Failed to drain the queued decoded frames");
  return FALSE;

error_negotiate:
  GST_ERROR_OBJECT (thiz, "Failed to renegotiate");
  return FALSE;
}
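/* pngdec: configure the libpng transformations, derive the output format
 * from the updated IHDR data and negotiate only when the output state
 * actually changed. */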
static GstFlowReturn
gst_pngdec_caps_create_and_set (GstPngDec * pngdec)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint bpc = 0, color_type;
  png_uint_32 width, height;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;

  g_return_val_if_fail (GST_IS_PNGDEC (pngdec), GST_FLOW_ERROR);

  /* Get bits per channel */
  bpc = png_get_bit_depth (pngdec->png, pngdec->info);

  /* Get Color type */
  color_type = png_get_color_type (pngdec->png, pngdec->info);

  /* Add alpha channel if 16-bit depth, but not for GRAY images */
  if ((bpc > 8) && (color_type != PNG_COLOR_TYPE_GRAY)) {
    png_set_add_alpha (pngdec->png, 0xffff, PNG_FILLER_BEFORE);
    png_set_swap (pngdec->png);
  }
#if 0
  /* We used to have this HACK to reverse the outgoing bytes, but the problem
   * that originally required the hack seems to have been in videoconvert's
   * RGBA descriptions. It doesn't seem needed now that's fixed, but might
   * still be needed on big-endian systems, I'm not sure. J.S. 6/7/2007 */
  if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
    png_set_bgr (pngdec->png);
#endif

  /* Gray scale with alpha channel converted to RGB */
  if (color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
    GST_LOG_OBJECT (pngdec,
        "converting grayscale png with alpha channel to RGB");
    png_set_gray_to_rgb (pngdec->png);
  }

  /* Gray scale upscaled to 8 bits */
  if ((color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
      (color_type == PNG_COLOR_TYPE_GRAY)) {
    if (bpc < 8) {
      /* Convert to 8 bits */
      GST_LOG_OBJECT (pngdec, "converting grayscale image to 8 bits");
#if PNG_LIBPNG_VER < 10400
      png_set_gray_1_2_4_to_8 (pngdec->png);
#else
      png_set_expand_gray_1_2_4_to_8 (pngdec->png);
#endif
    }
  }

  /* Palette converted to RGB */
  if (color_type == PNG_COLOR_TYPE_PALETTE) {
    GST_LOG_OBJECT (pngdec, "converting palette png to RGB");
    png_set_palette_to_rgb (pngdec->png);
  }

  png_set_interlace_handling (pngdec->png);

  /* Update the info structure */
  png_read_update_info (pngdec->png, pngdec->info);

  /* Get IHDR header again after transformation settings */
  png_get_IHDR (pngdec->png, pngdec->info, &width, &height, &bpc,
      &pngdec->color_type, NULL, NULL, NULL);

  GST_LOG_OBJECT (pngdec, "this is a %dx%d PNG image", (gint) width,
      (gint) height);

  switch (pngdec->color_type) {
    case PNG_COLOR_TYPE_RGB:
      GST_LOG_OBJECT (pngdec, "we have no alpha channel, depth is 24 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGB;
      break;
    case PNG_COLOR_TYPE_RGB_ALPHA:
      GST_LOG_OBJECT (pngdec,
          "we have an alpha channel, depth is 32 or 64 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGBA;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_ARGB64;
      break;
    case PNG_COLOR_TYPE_GRAY:
      GST_LOG_OBJECT (pngdec,
          "we have a gray image, depth is 8 or 16 (be) bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_GRAY8;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_GRAY16_BE;
      break;
    default:
      break;
  }

  if (format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_ELEMENT_ERROR (pngdec, STREAM, NOT_IMPLEMENTED, (NULL),
        ("pngdec does not support this color type"));
    ret = GST_FLOW_NOT_SUPPORTED;
    goto beach;
  }

  /* Check if output state changed */
  if (pngdec->output_state) {
    GstVideoInfo *info = &pngdec->output_state->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        GST_VIDEO_INFO_FORMAT (info) == format) {
      goto beach;
    }
    gst_video_codec_state_unref (pngdec->output_state);
  }

  pngdec->output_state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (pngdec), format,
      width, height, pngdec->input_state);
  gst_video_decoder_negotiate (GST_VIDEO_DECODER (pngdec));
  GST_DEBUG ("Final %d %d",
      GST_VIDEO_INFO_WIDTH (&pngdec->output_state->info),
      GST_VIDEO_INFO_HEIGHT (&pngdec->output_state->info));

beach:
  return ret;
}
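/* Variant of the same pngdec function that additionally extracts an iCCP
 * chunk (libpng >= 1.5) and posts it as an attachment tag before setting
 * the output state. */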
static GstFlowReturn
gst_pngdec_caps_create_and_set (GstPngDec * pngdec)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint bpc = 0, color_type;
  png_uint_32 width, height;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;

  g_return_val_if_fail (GST_IS_PNGDEC (pngdec), GST_FLOW_ERROR);

  /* Get bits per channel */
  bpc = png_get_bit_depth (pngdec->png, pngdec->info);

  /* Get Color type */
  color_type = png_get_color_type (pngdec->png, pngdec->info);

  /* Add alpha channel if 16-bit depth, but not for GRAY images */
  if ((bpc > 8) && (color_type != PNG_COLOR_TYPE_GRAY)) {
    png_set_add_alpha (pngdec->png, 0xffff, PNG_FILLER_BEFORE);
    png_set_swap (pngdec->png);
  }
#if 0
  /* We used to have this HACK to reverse the outgoing bytes, but the problem
   * that originally required the hack seems to have been in videoconvert's
   * RGBA descriptions. It doesn't seem needed now that's fixed, but might
   * still be needed on big-endian systems, I'm not sure. J.S. 6/7/2007 */
  if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
    png_set_bgr (pngdec->png);
#endif

  /* Gray scale with alpha channel converted to RGB */
  if (color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
    GST_LOG_OBJECT (pngdec,
        "converting grayscale png with alpha channel to RGB");
    png_set_gray_to_rgb (pngdec->png);
  }

  /* Gray scale upscaled to 8 bits */
  if ((color_type == PNG_COLOR_TYPE_GRAY_ALPHA) ||
      (color_type == PNG_COLOR_TYPE_GRAY)) {
    if (bpc < 8) {
      /* Convert to 8 bits */
      GST_LOG_OBJECT (pngdec, "converting grayscale image to 8 bits");
#if PNG_LIBPNG_VER < 10400
      png_set_gray_1_2_4_to_8 (pngdec->png);
#else
      png_set_expand_gray_1_2_4_to_8 (pngdec->png);
#endif
    }
  }

  /* Palette converted to RGB */
  if (color_type == PNG_COLOR_TYPE_PALETTE) {
    GST_LOG_OBJECT (pngdec, "converting palette png to RGB");
    png_set_palette_to_rgb (pngdec->png);
  }

  png_set_interlace_handling (pngdec->png);

  /* Update the info structure */
  png_read_update_info (pngdec->png, pngdec->info);

  /* Get IHDR header again after transformation settings */
  png_get_IHDR (pngdec->png, pngdec->info, &width, &height, &bpc,
      &pngdec->color_type, NULL, NULL, NULL);

  GST_LOG_OBJECT (pngdec, "this is a %dx%d PNG image", (gint) width,
      (gint) height);

  switch (pngdec->color_type) {
    case PNG_COLOR_TYPE_RGB:
      GST_LOG_OBJECT (pngdec, "we have no alpha channel, depth is 24 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGB;
      break;
    case PNG_COLOR_TYPE_RGB_ALPHA:
      GST_LOG_OBJECT (pngdec,
          "we have an alpha channel, depth is 32 or 64 bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_RGBA;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_ARGB64;
      break;
    case PNG_COLOR_TYPE_GRAY:
      GST_LOG_OBJECT (pngdec,
          "we have a gray image, depth is 8 or 16 (be) bits");
      if (bpc == 8)
        format = GST_VIDEO_FORMAT_GRAY8;
      else if (bpc == 16)
        format = GST_VIDEO_FORMAT_GRAY16_BE;
      break;
    default:
      break;
  }

  if (format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_ELEMENT_ERROR (pngdec, STREAM, NOT_IMPLEMENTED, (NULL),
        ("pngdec does not support this color type"));
    ret = GST_FLOW_NOT_SUPPORTED;
    goto beach;
  }

  /* Check if output state changed */
  if (pngdec->output_state) {
    GstVideoInfo *info = &pngdec->output_state->info;

    if (width == GST_VIDEO_INFO_WIDTH (info) &&
        height == GST_VIDEO_INFO_HEIGHT (info) &&
        GST_VIDEO_INFO_FORMAT (info) == format) {
      goto beach;
    }
    gst_video_codec_state_unref (pngdec->output_state);
  }
#ifdef HAVE_LIBPNG_1_5
  if ((pngdec->color_type & PNG_COLOR_MASK_COLOR) &&
      !(pngdec->color_type & PNG_COLOR_MASK_PALETTE) &&
      png_get_valid (pngdec->png, pngdec->info, PNG_INFO_iCCP)) {
    png_charp icc_name;
    png_bytep icc_profile;
    int icc_compression_type;
    png_uint_32 icc_proflen = 0;
    png_uint_32 ret = png_get_iCCP (pngdec->png, pngdec->info, &icc_name,
        &icc_compression_type, &icc_profile, &icc_proflen);

    if ((ret & PNG_INFO_iCCP)) {
      gpointer gst_icc_prof = g_memdup (icc_profile, icc_proflen);
      GstBuffer *tagbuffer = NULL;
      GstSample *tagsample = NULL;
      GstTagList *taglist = NULL;
      GstStructure *info = NULL;
      GstCaps *caps;

      GST_DEBUG_OBJECT (pngdec, "extracted ICC profile '%s' length=%i",
          icc_name, (guint32) icc_proflen);

      tagbuffer = gst_buffer_new_wrapped (gst_icc_prof, icc_proflen);

      caps = gst_caps_new_empty_simple ("application/vnd.iccprofile");
      info = gst_structure_new_empty ("application/vnd.iccprofile");

      if (icc_name)
        gst_structure_set (info, "icc-name", G_TYPE_STRING, icc_name, NULL);

      tagsample = gst_sample_new (tagbuffer, caps, NULL, info);

      gst_buffer_unref (tagbuffer);
      gst_caps_unref (caps);

      taglist = gst_tag_list_new_empty ();
      gst_tag_list_add (taglist, GST_TAG_MERGE_APPEND, GST_TAG_ATTACHMENT,
          tagsample, NULL);
      gst_sample_unref (tagsample);

      gst_video_decoder_merge_tags (GST_VIDEO_DECODER (pngdec), taglist,
          GST_TAG_MERGE_APPEND);
      gst_tag_list_unref (taglist);
    }
  }
#endif

  pngdec->output_state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (pngdec), format,
      width, height, pngdec->input_state);
  gst_video_decoder_negotiate (GST_VIDEO_DECODER (pngdec));
  GST_DEBUG ("Final %d %d",
      GST_VIDEO_INFO_WIDTH (&pngdec->output_state->info),
      GST_VIDEO_INFO_HEIGHT (&pngdec->output_state->info));

beach:
  return ret;
}
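/* pnmdec: incremental parser: scan the PNM header, negotiate once it is
 * complete, then accumulate raw or ASCII pixel data until a full frame is
 * available. */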
static GstFlowReturn
gst_pnmdec_parse (GstVideoDecoder * decoder, GstVideoCodecFrame * frame,
    GstAdapter * adapter, gboolean at_eos)
{
  gsize size;
  GstPnmdec *s = GST_PNMDEC (decoder);
  GstFlowReturn r = GST_FLOW_OK;
  guint offset = 0;
  GstVideoFormat format = GST_VIDEO_FORMAT_UNKNOWN;
  const guint8 *raw_data;
  GstVideoCodecState *output_state;

  GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);

  size = gst_adapter_available (adapter);
  if (size < 8) {
    goto need_more_data;
  }
  raw_data = gst_adapter_map (adapter, size);

  if (s->mngr.info.fields != GST_PNM_INFO_FIELDS_ALL) {
    GstPnmInfoMngrResult res;

    res = gst_pnm_info_mngr_scan (&s->mngr, raw_data, size);

    switch (res) {
      case GST_PNM_INFO_MNGR_RESULT_FAILED:
        r = GST_FLOW_ERROR;
        goto out;
      case GST_PNM_INFO_MNGR_RESULT_READING:
        r = GST_FLOW_OK;
        goto out;
      case GST_PNM_INFO_MNGR_RESULT_FINISHED:
        switch (s->mngr.info.type) {
          case GST_PNM_TYPE_BITMAP:
            if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
              r = GST_FLOW_ERROR;
              goto out;
            }
            s->size = s->mngr.info.width * s->mngr.info.height * 1;
            format = GST_VIDEO_FORMAT_GRAY8;
            break;
          case GST_PNM_TYPE_GRAYMAP:
            s->size = s->mngr.info.width * s->mngr.info.height * 1;
            format = GST_VIDEO_FORMAT_GRAY8;
            break;
          case GST_PNM_TYPE_PIXMAP:
            s->size = s->mngr.info.width * s->mngr.info.height * 3;
            format = GST_VIDEO_FORMAT_RGB;
            break;
        }
        output_state =
            gst_video_decoder_set_output_state (GST_VIDEO_DECODER (s), format,
            s->mngr.info.width, s->mngr.info.height, s->input_state);
        gst_video_codec_state_unref (output_state);
        if (gst_video_decoder_negotiate (GST_VIDEO_DECODER (s)) == FALSE) {
          r = GST_FLOW_NOT_NEGOTIATED;
          goto out;
        }

        if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
          s->mngr.data_offset++;
          /* It is not possible to know the size of input ascii data to
             parse. So we have to parse and know the number of pixels parsed
             and then finally decide when we have a full frame */
          s->buf = gst_buffer_new_and_alloc (s->size);
        }
        offset = s->mngr.data_offset;
        gst_adapter_flush (adapter, offset);
        size = size - offset;
    }
  }

  if (s->mngr.info.encoding == GST_PNM_ENCODING_ASCII) {
    /* Parse ASCII data and populate s->current_size with the number of
       bytes actually parsed from the input data */
    r = gst_pnmdec_parse_ascii (s, raw_data + offset, size);
  } else {
    /* Bitmap contains 8 pixels in a byte */
    if (s->mngr.info.type == GST_PNM_TYPE_BITMAP)
      s->current_size += (size * 8);
    else
      s->current_size += size;
  }

  gst_video_decoder_add_to_frame (decoder, size);
  if (s->size <= s->current_size) {
    goto have_full_frame;
  }

need_more_data:
  return GST_VIDEO_DECODER_FLOW_NEED_DATA;

have_full_frame:
  return gst_video_decoder_have_frame (decoder);

out:
  return r;
}
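/* androidmedia: translate the MediaCodec output format (color format,
 * stride, slice height, crop rectangle) into an output state and
 * negotiate. */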
static gboolean
gst_amc_video_dec_set_src_caps (GstAmcVideoDec * self, GstAmcFormat * format)
{
  GstVideoCodecState *output_state;
  const gchar *mime;
  gint color_format, width, height;
  gint stride, slice_height;
  gint crop_left, crop_right;
  gint crop_top, crop_bottom;
  GstVideoFormat gst_format;
  GstAmcVideoDecClass *klass = GST_AMC_VIDEO_DEC_GET_CLASS (self);
  GError *err = NULL;
  gboolean ret;

  if (!gst_amc_format_get_int (format, "color-format", &color_format, &err) ||
      !gst_amc_format_get_int (format, "width", &width, &err) ||
      !gst_amc_format_get_int (format, "height", &height, &err)) {
    GST_ERROR_OBJECT (self, "Failed to get output format metadata: %s",
        err->message);
    g_clear_error (&err);
    return FALSE;
  }

  if (!gst_amc_format_get_int (format, "stride", &stride, &err) ||
      !gst_amc_format_get_int (format, "slice-height", &slice_height, &err)) {
    GST_ERROR_OBJECT (self, "Failed to get stride and slice-height: %s",
        err->message);
    g_clear_error (&err);
    return FALSE;
  }

  if (!gst_amc_format_get_int (format, "crop-left", &crop_left, &err) ||
      !gst_amc_format_get_int (format, "crop-right", &crop_right, &err) ||
      !gst_amc_format_get_int (format, "crop-top", &crop_top, &err) ||
      !gst_amc_format_get_int (format, "crop-bottom", &crop_bottom, &err)) {
    GST_ERROR_OBJECT (self, "Failed to get crop rectangle: %s", err->message);
    g_clear_error (&err);
    return FALSE;
  }

  if (width == 0 || height == 0) {
    GST_ERROR_OBJECT (self, "Height or width not set");
    return FALSE;
  }

  if (crop_bottom)
    height = height - (height - crop_bottom - 1);
  if (crop_top)
    height = height - crop_top;

  if (crop_right)
    width = width - (width - crop_right - 1);
  if (crop_left)
    width = width - crop_left;

  mime = caps_to_mime (self->input_state->caps);
  if (!mime) {
    GST_ERROR_OBJECT (self, "Failed to convert caps to mime");
    return FALSE;
  }

  gst_format = gst_amc_color_format_to_video_format (klass->codec_info, mime,
      color_format);
  if (gst_format == GST_VIDEO_FORMAT_UNKNOWN) {
    GST_ERROR_OBJECT (self, "Unknown color format 0x%08x", color_format);
    return FALSE;
  }

  output_state = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (self),
      gst_format, width, height, self->input_state);

  /* FIXME: Special handling for multiview, untested */
  if (color_format == COLOR_QCOM_FormatYVU420SemiPlanar32mMultiView) {
    gst_video_multiview_video_info_change_mode (&output_state->info,
        GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM, GST_VIDEO_MULTIVIEW_FLAGS_NONE);
  }

  self->format = gst_format;

  if (!gst_amc_color_format_info_set (&self->color_format_info,
          klass->codec_info, mime, color_format, width, height, stride,
          slice_height, crop_left, crop_right, crop_top, crop_bottom)) {
    GST_ERROR_OBJECT (self, "Failed to set up GstAmcColorFormatInfo");
    return FALSE;
  }

  GST_DEBUG_OBJECT (self,
      "Color format info: {color_format=%d, width=%d, height=%d, "
      "stride=%d, slice-height=%d, crop-left=%d, crop-top=%d, "
      "crop-right=%d, crop-bottom=%d, frame-size=%d}",
      self->color_format_info.color_format, self->color_format_info.width,
      self->color_format_info.height, self->color_format_info.stride,
      self->color_format_info.slice_height,
      self->color_format_info.crop_left, self->color_format_info.crop_top,
      self->color_format_info.crop_right, self->color_format_info.crop_bottom,
      self->color_format_info.frame_size);

  ret = gst_video_decoder_negotiate (GST_VIDEO_DECODER (self));
  gst_video_codec_state_unref (output_state);
  self->input_state_changed = FALSE;

  return ret;
}
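/* openjpegdec: map the JPEG2000 image (colorspace, component count,
 * subsampling, bit depth) to a video format and fill function,
 * renegotiating only when the output actually changed. */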
static GstFlowReturn
gst_openjpeg_dec_negotiate (GstOpenJPEGDec * self, opj_image_t * image)
{
  GstVideoFormat format;
  gint width, height;

  /* Fall back to the colorspace from the caps if the codestream
   * does not signal one */
  if (image->color_space == CLRSPC_UNKNOWN ||
      image->color_space == CLRSPC_UNSPECIFIED)
    image->color_space = self->color_space;

  switch (image->color_space) {
    case CLRSPC_SRGB:
      if (image->numcomps == 4) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1 ||
            image->comps[1].dx != 1 || image->comps[1].dy != 1 ||
            image->comps[2].dx != 1 || image->comps[2].dy != 1 ||
            image->comps[3].dx != 1 || image->comps[3].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for RGB not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_packed8_4;
          format = GST_VIDEO_FORMAT_ARGB;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_packed16_4;
          format = GST_VIDEO_FORMAT_ARGB64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else if (image->numcomps == 3) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1 ||
            image->comps[1].dx != 1 || image->comps[1].dy != 1 ||
            image->comps[2].dx != 1 || image->comps[2].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for RGB not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_packed8_3;
          format = GST_VIDEO_FORMAT_ARGB;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_packed16_3;
          format = GST_VIDEO_FORMAT_ARGB64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of RGB components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    case CLRSPC_GRAY:
      if (image->numcomps == 1) {
        if (image->comps[0].dx != 1 || image->comps[0].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling for GRAY not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_planar8_1;
          format = GST_VIDEO_FORMAT_GRAY8;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_planar16_1;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
          format = GST_VIDEO_FORMAT_GRAY16_LE;
#else
          format = GST_VIDEO_FORMAT_GRAY16_BE;
#endif
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of GRAY components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    case CLRSPC_SYCC:
      if (image->numcomps != 3 && image->numcomps != 4) {
        GST_ERROR_OBJECT (self, "Unsupported number of YUV components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }

      if (image->comps[0].dx != 1 || image->comps[0].dy != 1) {
        GST_ERROR_OBJECT (self, "Sub-sampling of luma plane not supported");
        return GST_FLOW_NOT_NEGOTIATED;
      }

      if (image->comps[1].dx != image->comps[2].dx ||
          image->comps[1].dy != image->comps[2].dy) {
        GST_ERROR_OBJECT (self,
            "Different sub-sampling of chroma planes not supported");
        return GST_FLOW_NOT_NEGOTIATED;
      }

      if (image->numcomps == 4) {
        if (image->comps[3].dx != 1 || image->comps[3].dy != 1) {
          GST_ERROR_OBJECT (self, "Sub-sampling of alpha plane not supported");
          return GST_FLOW_NOT_NEGOTIATED;
        }

        if (get_highest_prec (image) == 8) {
          self->fill_frame = fill_frame_planar8_4_generic;
          format = GST_VIDEO_FORMAT_AYUV;
        } else if (get_highest_prec (image) <= 16) {
          self->fill_frame = fill_frame_planar16_4_generic;
          format = GST_VIDEO_FORMAT_AYUV64;
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else if (image->numcomps == 3) {
        /* Map the chroma sub-sampling factors to a matching planar format,
         * falling back to packed AYUV/AYUV64 for exotic layouts */
        if (get_highest_prec (image) == 8) {
          if (image->comps[1].dx == 1 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y444;
          } else if (image->comps[1].dx == 2 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y42B;
          } else if (image->comps[1].dx == 2 && image->comps[1].dy == 2) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_I420;
          } else if (image->comps[1].dx == 4 && image->comps[1].dy == 1) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_Y41B;
          } else if (image->comps[1].dx == 4 && image->comps[1].dy == 4) {
            self->fill_frame = fill_frame_planar8_3;
            format = GST_VIDEO_FORMAT_YUV9;
          } else {
            self->fill_frame = fill_frame_planar8_3_generic;
            format = GST_VIDEO_FORMAT_AYUV;
          }
        } else if (get_highest_prec (image) <= 16) {
          if (image->comps[0].prec == 10 &&
              image->comps[1].prec == 10 && image->comps[2].prec == 10) {
            if (image->comps[1].dx == 1 && image->comps[1].dy == 1) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_Y444_10LE;
#else
              format = GST_VIDEO_FORMAT_Y444_10BE;
#endif
            } else if (image->comps[1].dx == 2 && image->comps[1].dy == 1) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_I422_10LE;
#else
              format = GST_VIDEO_FORMAT_I422_10BE;
#endif
            } else if (image->comps[1].dx == 2 && image->comps[1].dy == 2) {
              self->fill_frame = fill_frame_planar16_3;
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
              format = GST_VIDEO_FORMAT_I420_10LE;
#else
              format = GST_VIDEO_FORMAT_I420_10BE;
#endif
            } else {
              self->fill_frame = fill_frame_planar16_3_generic;
              format = GST_VIDEO_FORMAT_AYUV64;
            }
          } else {
            self->fill_frame = fill_frame_planar16_3_generic;
            format = GST_VIDEO_FORMAT_AYUV64;
          }
        } else {
          GST_ERROR_OBJECT (self, "Unsupported depth %d",
              get_highest_prec (image));
          return GST_FLOW_NOT_NEGOTIATED;
        }
      } else {
        GST_ERROR_OBJECT (self, "Unsupported number of YUV components: %d",
            image->numcomps);
        return GST_FLOW_NOT_NEGOTIATED;
      }
      break;
    default:
      GST_ERROR_OBJECT (self, "Unsupported colorspace %d", image->color_space);
      return GST_FLOW_NOT_NEGOTIATED;
  }

  width = image->x1 - image->x0;
  height = image->y1 - image->y0;

  if (!self->output_state ||
      self->output_state->info.finfo->format != format ||
      self->output_state->info.width != width ||
      self->output_state->info.height != height) {
    if (self->output_state)
      gst_video_codec_state_unref (self->output_state);
    self->output_state =
        gst_video_decoder_set_output_state (GST_VIDEO_DECODER (self), format,
        width, height, self->input_state);

    if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (self)))
      return GST_FLOW_NOT_NEGOTIATED;
  }

  return GST_FLOW_OK;
}
static GstFlowReturn
gst_v4l2_video_dec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstV4l2VideoDec *self = GST_V4L2_VIDEO_DEC (decoder);
  GstFlowReturn ret = GST_FLOW_OK;

  GST_DEBUG_OBJECT (self, "Handling frame %d", frame->system_frame_number);

  if (G_UNLIKELY (!g_atomic_int_get (&self->active)))
    goto flushing;

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2output))) {
    if (!self->input_state)
      goto not_negotiated;
    if (!gst_v4l2_object_set_format (self->v4l2output,
            self->input_state->caps))
      goto not_negotiated;
  }

  if (G_UNLIKELY (!GST_V4L2_IS_ACTIVE (self->v4l2capture))) {
    GstBufferPool *pool = GST_BUFFER_POOL (self->v4l2output->pool);
    GstVideoInfo info;
    GstVideoCodecState *output_state;
    GstBuffer *codec_data;

    GST_DEBUG_OBJECT (self, "Sending header");

    codec_data = self->input_state->codec_data;

    /* We are running in byte-stream mode, so we don't know the headers, but
     * we need to send something, otherwise the decoder will refuse to
     * initialize. */
    if (codec_data) {
      gst_buffer_ref (codec_data);
    } else {
      codec_data = frame->input_buffer;
      frame->input_buffer = NULL;
    }

    /* Ensure input internal pool is active */
    if (!gst_buffer_pool_is_active (pool)) {
      GstStructure *config = gst_buffer_pool_get_config (pool);
      gst_buffer_pool_config_set_params (config, self->input_state->caps,
          self->v4l2output->info.size, 2, 2);

      /* There is no reason to refuse this config */
      if (!gst_buffer_pool_set_config (pool, config))
        goto activate_failed;

      if (!gst_buffer_pool_set_active (pool, TRUE))
        goto activate_failed;
    }

    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &codec_data);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    gst_buffer_unref (codec_data);

    if (!gst_v4l2_object_acquire_format (self->v4l2capture, &info))
      goto not_negotiated;

    output_state = gst_video_decoder_set_output_state (decoder,
        info.finfo->format, info.width, info.height, self->input_state);

    /* Copy the rest of the information, there might be more in the future */
    output_state->info.interlace_mode = info.interlace_mode;
    gst_video_codec_state_unref (output_state);

    if (!gst_video_decoder_negotiate (decoder)) {
      if (GST_PAD_IS_FLUSHING (decoder->srcpad))
        goto flushing;
      else
        goto not_negotiated;
    }

    /* Ensure our internal pool is activated */
    if (!gst_buffer_pool_set_active (GST_BUFFER_POOL (self->v4l2capture->pool),
            TRUE))
      goto activate_failed;
  }

  if (g_atomic_int_get (&self->processing) == FALSE) {
    /* It's possible that the processing thread stopped due to an error */
    if (self->output_flow != GST_FLOW_OK &&
        self->output_flow != GST_FLOW_FLUSHING) {
      GST_DEBUG_OBJECT (self, "Processing loop stopped with error, leaving");
      ret = self->output_flow;
      goto drop;
    }

    GST_DEBUG_OBJECT (self, "Starting decoding thread");

    /* Start the processing task; when it quits, the task will disable input
     * processing to unlock input if draining, or prevent a potential block */
    g_atomic_int_set (&self->processing, TRUE);
    if (!gst_pad_start_task (decoder->srcpad,
            (GstTaskFunction) gst_v4l2_video_dec_loop, self,
            (GDestroyNotify) gst_v4l2_video_dec_loop_stopped))
      goto start_task_failed;
  }

  if (frame->input_buffer) {
    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
    ret =
        gst_v4l2_buffer_pool_process (GST_V4L2_BUFFER_POOL (self->
            v4l2output->pool), &frame->input_buffer);
    GST_VIDEO_DECODER_STREAM_LOCK (decoder);

    if (ret == GST_FLOW_FLUSHING) {
      if (g_atomic_int_get (&self->processing) == FALSE)
        ret = self->output_flow;
      goto drop;
    } else if (ret != GST_FLOW_OK) {
      goto process_failed;
    }

    /* No need to keep input around */
    gst_buffer_replace (&frame->input_buffer, NULL);
  }

  gst_video_codec_frame_unref (frame);
  return ret;

  /* ERRORS */
not_negotiated:
  {
    GST_ERROR_OBJECT (self, "not negotiated");
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto drop;
  }
activate_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, SETTINGS,
        (_("Failed to allocate required memory.")),
        ("Buffer pool activation failed"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
flushing:
  {
    ret = GST_FLOW_FLUSHING;
    goto drop;
  }
start_task_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to start decoding thread.")), (NULL));
    g_atomic_int_set (&self->processing, FALSE);
    ret = GST_FLOW_ERROR;
    goto drop;
  }
process_failed:
  {
    GST_ELEMENT_ERROR (self, RESOURCE, FAILED,
        (_("Failed to process frame.")),
        ("May be due to not enough memory or a failing driver"));
    ret = GST_FLOW_ERROR;
    goto drop;
  }
drop:
  {
    gst_video_decoder_drop_frame (decoder, frame);
    return ret;
  }
}
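/* gst_v4l2_video_dec_loop and gst_v4l2_video_dec_loop_stopped, referenced by
 * gst_pad_start_task () above, are not part of this excerpt. A minimal sketch
 * of the stop callback, assuming its job is only to record why the loop
 * ended and clear the processing flag (the real implementation may do
 * more): */
static void
gst_v4l2_video_dec_loop_stopped (GstV4l2VideoDec * self)
{
  /* If the task stopped before handle_frame noticed an error, mark the
   * element as flushing so the next handle_frame call bails out early */
  if (g_atomic_int_get (&self->processing)) {
    self->output_flow = GST_FLOW_FLUSHING;
    g_atomic_int_set (&self->processing, FALSE);
  }

  GST_DEBUG_OBJECT (self, "Decoding task destroyed: %s",
      gst_flow_get_name (self->output_flow));
}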
static GstFlowReturn
theora_handle_type_packet (GstTheoraDec * dec)
{
  gint par_num, par_den;
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoCodecState *state;
  GstVideoFormat fmt;
  GstVideoInfo *info = &dec->input_state->info;

  GST_DEBUG_OBJECT (dec, "fps %d/%d, PAR %d/%d",
      dec->info.fps_numerator, dec->info.fps_denominator,
      dec->info.aspect_numerator, dec->info.aspect_denominator);

  /* calculate par
   * the info.aspect_* values reflect PAR;
   * 0:x and x:0 are allowed and can be interpreted as 1:1. */
  par_num = GST_VIDEO_INFO_PAR_N (info);
  par_den = GST_VIDEO_INFO_PAR_D (info);

  /* If we have a default PAR, see if the decoder specified a different one */
  if (par_num == 1 && par_den == 1 &&
      (dec->info.aspect_numerator != 0 && dec->info.aspect_denominator != 0)) {
    par_num = dec->info.aspect_numerator;
    par_den = dec->info.aspect_denominator;
  }

  /* theora has:
   *
   * width/height : dimension of the encoded frame
   * pic_width/pic_height : dimension of the visible part
   * pic_x/pic_y : offset in encoded frame where visible part starts */
  GST_DEBUG_OBJECT (dec, "dimension %dx%d, PAR %d/%d",
      dec->info.pic_width, dec->info.pic_height, par_num, par_den);
  GST_DEBUG_OBJECT (dec, "frame dimension %dx%d, offset %d:%d",
      dec->info.frame_width, dec->info.frame_height,
      dec->info.pic_x, dec->info.pic_y);

  switch (dec->info.pixel_fmt) {
    case TH_PF_420:
      fmt = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      fmt = GST_VIDEO_FORMAT_Y42B;
      break;
    case TH_PF_444:
      fmt = GST_VIDEO_FORMAT_Y444;
      break;
    default:
      goto unsupported_format;
  }

  GST_VIDEO_INFO_WIDTH (info) = dec->info.pic_width;
  GST_VIDEO_INFO_HEIGHT (info) = dec->info.pic_height;

  /* Ensure correct offsets in chroma for formats that need it
   * by rounding the offset. libtheora will add proper pixels,
   * so no need to handle them ourselves. */
  if (dec->info.pic_x & 1 && dec->info.pixel_fmt != TH_PF_444)
    GST_VIDEO_INFO_WIDTH (info)++;
  if (dec->info.pic_y & 1 && dec->info.pixel_fmt == TH_PF_420)
    GST_VIDEO_INFO_HEIGHT (info)++;

  GST_DEBUG_OBJECT (dec, "after fixup frame dimension %dx%d, offset %d:%d",
      info->width, info->height, dec->info.pic_x, dec->info.pic_y);

  /* done */
  dec->decoder = th_decode_alloc (&dec->info, dec->setup);

  /* Enable the requested decoder telemetry visualisations. th_decode_ctl ()
   * returns 0 on success and TH_EIMPL when libtheora was built without
   * telemetry support, so only warn when a visualisation was actually
   * requested and could not be enabled. */
  if (dec->telemetry_mv &&
      th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MV,
          &dec->telemetry_mv, sizeof (dec->telemetry_mv)) != 0) {
    GST_WARNING_OBJECT (dec, "Could not enable MV visualisation");
  }
  if (dec->telemetry_mbmode &&
      th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_MBMODE,
          &dec->telemetry_mbmode, sizeof (dec->telemetry_mbmode)) != 0) {
    GST_WARNING_OBJECT (dec, "Could not enable MB mode visualisation");
  }
  if (dec->telemetry_qi &&
      th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_QI,
          &dec->telemetry_qi, sizeof (dec->telemetry_qi)) != 0) {
    GST_WARNING_OBJECT (dec, "Could not enable QI mode visualisation");
  }
  if (dec->telemetry_bits &&
      th_decode_ctl (dec->decoder, TH_DECCTL_SET_TELEMETRY_BITS,
          &dec->telemetry_bits, sizeof (dec->telemetry_bits)) != 0) {
    GST_WARNING_OBJECT (dec, "Could not enable BITS mode visualisation");
  }

  /* Create the output state */
  dec->output_state = state =
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
      info->width, info->height, dec->input_state);

  /* FIXME : Do we still need to set fps/par now that we pass the
   * reference input stream ? */
  state->info.fps_n = dec->info.fps_numerator;
  state->info.fps_d = dec->info.fps_denominator;
  state->info.par_n = par_num;
  state->info.par_d = par_den;

  /* these values are for all versions of the colorspace specified in the
   * theora info */
  state->info.chroma_site = GST_VIDEO_CHROMA_SITE_JPEG;
  state->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_16_235;
  state->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601;
  state->info.colorimetry.transfer = GST_VIDEO_TRANSFER_BT709;

  switch (dec->info.colorspace) {
    case TH_CS_ITU_REC_470M:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470M;
      break;
    case TH_CS_ITU_REC_470BG:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_BT470BG;
      break;
    default:
      state->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN;
      break;
  }

  gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

  dec->have_header = TRUE;

  return ret;

  /* ERRORS */
unsupported_format:
  {
    GST_ERROR_OBJECT (dec, "Invalid pixel format %d", dec->info.pixel_fmt);
    return GST_FLOW_ERROR;
  }
}
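/* The four telemetry ctls above repeat the same request/warn pattern. A
 * hypothetical helper (not part of the original file) that factors it out,
 * assuming the telemetry fields are plain ints as the sizeof () calls
 * suggest: */
static void
theora_dec_set_telemetry (GstTheoraDec * dec, int req, int *value,
    const char *name)
{
  /* Skip visualisations the user did not ask for; warn if the request
   * fails, e.g. because libtheora was built without telemetry support */
  if (*value == 0)
    return;
  if (th_decode_ctl (dec->decoder, req, value, sizeof (*value)) != 0)
    GST_WARNING_OBJECT (dec, "Could not enable %s visualisation", name);
}

/* e.g.: theora_dec_set_telemetry (dec, TH_DECCTL_SET_TELEMETRY_MV,
 *           &dec->telemetry_mv, "MV"); */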
static GstFlowReturn
gst_vp9_dec_handle_frame (GstVideoDecoder * decoder, GstVideoCodecFrame * frame)
{
  GstVP9Dec *dec;
  GstFlowReturn ret = GST_FLOW_OK;
  vpx_codec_err_t status;
  vpx_codec_iter_t iter = NULL;
  vpx_image_t *img;
  long decoder_deadline = 0;
  GstClockTimeDiff deadline;
  GstMapInfo minfo;

  GST_DEBUG_OBJECT (decoder, "handle_frame");

  dec = GST_VP9_DEC (decoder);

  if (!dec->decoder_inited) {
    ret = open_codec (dec, frame);
    if (ret == GST_FLOW_CUSTOM_SUCCESS_1)
      return GST_FLOW_OK;
    else if (ret != GST_FLOW_OK)
      return ret;
  }

  /* Map the remaining decode-time budget to the libvpx deadline convention:
   * 1 means "as fast as possible", 0 means "no deadline" */
  deadline = gst_video_decoder_get_max_decode_time (decoder, frame);
  if (deadline < 0) {
    decoder_deadline = 1;
  } else if (deadline == G_MAXINT64) {
    decoder_deadline = 0;
  } else {
    decoder_deadline = MAX (1, deadline / GST_MSECOND);
  }

  if (!gst_buffer_map (frame->input_buffer, &minfo, GST_MAP_READ)) {
    GST_ERROR_OBJECT (dec, "Failed to map input buffer");
    return GST_FLOW_ERROR;
  }

  status = vpx_codec_decode (&dec->decoder, minfo.data, minfo.size, NULL,
      decoder_deadline);

  gst_buffer_unmap (frame->input_buffer, &minfo);

  if (status) {
    GST_VIDEO_DECODER_ERROR (decoder, 1, LIBRARY, ENCODE,
        ("Failed to decode frame"), ("%s", gst_vpx_error_name (status)), ret);
    return ret;
  }

  img = vpx_codec_get_frame (&dec->decoder, &iter);
  if (img) {
    GstVideoFormat fmt;

    switch (img->fmt) {
      case VPX_IMG_FMT_I420:
        fmt = GST_VIDEO_FORMAT_I420;
        break;
      case VPX_IMG_FMT_YV12:
        fmt = GST_VIDEO_FORMAT_YV12;
        break;
      case VPX_IMG_FMT_I422:
        fmt = GST_VIDEO_FORMAT_Y42B;
        break;
      case VPX_IMG_FMT_I444:
        fmt = GST_VIDEO_FORMAT_Y444;
        break;
      default:
        /* Report the format before freeing the image that holds it */
        GST_ELEMENT_ERROR (decoder, LIBRARY, ENCODE,
            ("Failed to decode frame"),
            ("Unsupported color format %d", img->fmt));
        vpx_img_free (img);
        return GST_FLOW_ERROR;
    }

    if (!dec->output_state ||
        dec->output_state->info.finfo->format != fmt ||
        dec->output_state->info.width != img->d_w ||
        dec->output_state->info.height != img->d_h) {
      gboolean send_tags = !dec->output_state;

      if (dec->output_state)
        gst_video_codec_state_unref (dec->output_state);

      dec->output_state =
          gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), fmt,
          img->d_w, img->d_h, dec->input_state);
      gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec));

      if (send_tags)
        gst_vp9_dec_send_tags (dec);
    }

    if (deadline < 0) {
      GST_LOG_OBJECT (dec, "Skipping late frame (%f s past deadline)",
          (double) -deadline / GST_SECOND);
      gst_video_decoder_drop_frame (decoder, frame);
    } else {
      ret = gst_video_decoder_allocate_output_frame (decoder, frame);

      if (ret == GST_FLOW_OK) {
        gst_vp9_dec_image_to_buffer (dec, img, frame->output_buffer);
        ret = gst_video_decoder_finish_frame (decoder, frame);
      } else {
        gst_video_decoder_drop_frame (decoder, frame);
      }
    }

    vpx_img_free (img);

    while ((img = vpx_codec_get_frame (&dec->decoder, &iter))) {
      GST_WARNING_OBJECT (decoder, "Multiple decoded frames... dropping");
      vpx_img_free (img);
    }
  } else {
    /* Invisible frame */
    GST_VIDEO_CODEC_FRAME_SET_DECODE_ONLY (frame);
    gst_video_decoder_finish_frame (decoder, frame);
  }

  return ret;
}
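/* gst_vpx_error_name (), used above to turn libvpx status codes into
 * readable debug strings, is not shown in this excerpt. A minimal sketch,
 * assuming it only needs to cover the public vpx_codec_err_t values: */
static const char *
gst_vpx_error_name (vpx_codec_err_t status)
{
  switch (status) {
    case VPX_CODEC_OK:
      return "OK";
    case VPX_CODEC_ERROR:
      return "error";
    case VPX_CODEC_MEM_ERROR:
      return "memory error";
    case VPX_CODEC_ABI_MISMATCH:
      return "ABI mismatch";
    case VPX_CODEC_INCAPABLE:
      return "incapable";
    case VPX_CODEC_UNSUP_BITSTREAM:
      return "unsupported bitstream";
    case VPX_CODEC_UNSUP_FEATURE:
      return "unsupported feature";
    case VPX_CODEC_CORRUPT_FRAME:
      return "corrupt frame";
    case VPX_CODEC_INVALID_PARAM:
      return "invalid parameter";
    default:
      return "unknown error";
  }
}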